package shell

import (
	"context"
	"flag"
	"fmt"
	"io"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"google.golang.org/grpc"
)

func init() {
	Commands = append(Commands, &commandEcDecode{})
}
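
// commandEcDecode converts an erasure-coded volume back into a normal volume
// by gathering its data shards onto one volume server and regenerating the
// normal volume files there.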
type commandEcDecode struct {
}

func (c *commandEcDecode) Name() string {
	return "ec.decode"
}

func (c *commandEcDecode) Help() string {
	return `decode an erasure-coded volume into a normal volume

	ec.decode [-collection=""] [-volumeId=<volume_id>]

`
}
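
// Do decodes the specified ec volume, or every ec volume in the collection
// when -volumeId is not set. It requires the shell lock, and unless -force is
// given it refuses to run on clusters with fewer nodes than recommended.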
func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	decodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeId := decodeCommand.Int("volumeId", 0, "the volume id")
	collection := decodeCommand.String("collection", "", "the collection name")
	forceChanges := decodeCommand.Bool("force", false, "force the decoding even if the cluster has fewer than the recommended 4 nodes")
	if err = decodeCommand.Parse(args); err != nil {
		return nil
	}
	infoAboutSimulationMode(writer, *forceChanges, "-force")

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	vid := needle.VolumeId(*volumeId)

	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}

	if !*forceChanges {
		var nodeCount int
		eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
			nodeCount++
		})
		if nodeCount < erasure_coding.ParityShardsCount {
			glog.V(0).Infof("skip erasure decoding with %d nodes, fewer than the recommended %d nodes", nodeCount, erasure_coding.ParityShardsCount)
			return nil
		}
	}

	// volumeId is provided
	if vid != 0 {
		return doEcDecode(commandEnv, topologyInfo, *collection, vid)
	}

	// apply to all ec volumes in the collection
	volumeIds := collectEcShardIds(topologyInfo, *collection)
	fmt.Printf("ec decode volumes: %v\n", volumeIds)
	for _, vid := range volumeIds {
		if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil {
			return err
		}
	}

	return nil
}
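
// doEcDecode decodes one ec volume in three steps: gather the shards onto a
// single target server, rebuild a normal volume from them there, then mount
// the new volume and remove the now-redundant ec shards from every server.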
func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) {

	if !commandEnv.isLocked() {
		return fmt.Errorf("lock is lost")
	}

	// find volume location
	nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid)

	fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits)

	// collect ec shards to the server already holding the most shards
	targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid)
	if err != nil {
		return fmt.Errorf("collectEcShards for volume %d: %v", vid, err)
	}

	// generate a normal volume
	err = generateNormalVolume(commandEnv.option.GrpcDialOption, vid, collection, targetNodeLocation)
	if err != nil {
		return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// delete the previous ec shards
	err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid)
	if err != nil {
		return fmt.Errorf("delete ec shards for volume %d: %v", vid, err)
	}

	return nil
}
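
// mountVolumeAndDeleteEcShards mounts the freshly generated normal volume on
// the target server, then unmounts and deletes the ec shards on all servers
// that held them. It stops at the first error, leaving any remaining ec
// shards in place.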
func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection string, targetNodeLocation pb.ServerAddress, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, vid needle.VolumeId) error {

	// mount volume
	if err := operation.WithVolumeServerClient(false, targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{
			VolumeId: uint32(vid),
		})
		return mountErr
	}); err != nil {
		return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err)
	}

	// unmount ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("unmount ec volume %d on %s with shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err)
		}
	}
	// delete ec shards
	for location, ecIndexBits := range nodeToEcIndexBits {
		fmt.Printf("delete ec volume %d on %s with shards: %+v\n", vid, location, ecIndexBits.ShardIds())
		err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice())
		if err != nil {
			return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err)
		}
	}

	return nil
}
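
// generateNormalVolume asks the source volume server to rebuild a normal
// volume from the ec shards it holds, via the VolumeEcShardsToVolume RPC.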
func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer pb.ServerAddress) error {

	fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer)

	err := operation.WithVolumeServerClient(false, sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		_, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{
			VolumeId:   uint32(vid),
			Collection: collection,
		})
		return genErr
	})

	return err
}
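
// collectEcShards copies the missing data shards of the ec volume onto a
// single target server. The target is the server that already holds the most
// data shards, so the fewest shards need copying; parity shards are skipped
// since they are not needed to rebuild the volume. On success the
// nodeToEcIndexBits map is updated to reflect the shards now on the target.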
func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[pb.ServerAddress]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation pb.ServerAddress, err error) {

	maxShardCount := 0
	var existingEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount()
		if toBeCopiedShardCount > maxShardCount {
			maxShardCount = toBeCopiedShardCount
			targetNodeLocation = loc
			existingEcIndexBits = ecIndexBits
		}
	}

	fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits)

	var copiedEcIndexBits erasure_coding.ShardBits
	for loc, ecIndexBits := range nodeToEcIndexBits {
		if loc == targetNodeLocation {
			continue
		}

		needToCopyEcIndexBits := ecIndexBits.Minus(existingEcIndexBits).MinusParityShards()
		if needToCopyEcIndexBits.ShardIdCount() == 0 {
			continue
		}

		err = operation.WithVolumeServerClient(false, targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

			fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation)

			_, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{
				VolumeId:       uint32(vid),
				Collection:     collection,
				ShardIds:       needToCopyEcIndexBits.ToUint32Slice(),
				CopyEcxFile:    false,
				CopyEcjFile:    true,
				CopyVifFile:    true,
				SourceDataNode: string(loc),
			})
			if copyErr != nil {
				return fmt.Errorf("copy %d.%v %s => %s : %v", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr)
			}

			return nil
		})

		if err != nil {
			break
		}

		copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits)

	}

	nodeToEcIndexBits[targetNodeLocation] = existingEcIndexBits.Plus(copiedEcIndexBits)

	return targetNodeLocation, err

}
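
// lookupVolumeIds asks the master for the current locations of the given
// volume or file ids.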
func lookupVolumeIds(commandEnv *CommandEnv, volumeIds []string) (volumeIdLocations []*master_pb.LookupVolumeResponse_VolumeIdLocation, err error) {
	var resp *master_pb.LookupVolumeResponse
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err = client.LookupVolume(context.Background(), &master_pb.LookupVolumeRequest{VolumeOrFileIds: volumeIds})
		return err
	})
	if err != nil {
		return nil, err
	}
	return resp.VolumeIdLocations, nil
}
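
// collectTopologyInfo fetches the cluster topology and the volume size limit
// from the master, optionally sleeping first so recent changes can settle.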
func collectTopologyInfo(commandEnv *CommandEnv, delayBeforeCollecting time.Duration) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {

	if delayBeforeCollecting > 0 {
		time.Sleep(delayBeforeCollecting)
	}

	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return
	}

	return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil

}
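
// collectEcShardIds returns the distinct ids of all ec volumes in the
// selected collection, looking only at the default hard-drive disk type.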
func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) {

	vidMap := make(map[uint32]bool)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Collection == selectedCollection {
					vidMap[v.Id] = true
				}
			}
		}
	})

	for vid := range vidMap {
		vids = append(vids, needle.VolumeId(vid))
	}

	return
}
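
// collectEcNodeShardBits maps each volume server to the set of ec shards it
// holds for the given volume, looking only at the default hard-drive disk type.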
func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[pb.ServerAddress]erasure_coding.ShardBits {

	nodeToEcIndexBits := make(map[pb.ServerAddress]erasure_coding.ShardBits)
	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
			for _, v := range diskInfo.EcShardInfos {
				if v.Id == uint32(vid) {
					nodeToEcIndexBits[pb.NewServerAddressFromDataNode(dn)] = erasure_coding.ShardBits(v.EcIndexBits)
				}
			}
		}
	})

	return nodeToEcIndexBits
}