mirror of https://github.com/seaweedfs/seaweedfs.git
able to purge extra ec shard copies
This commit is contained in:
parent 11cffb3168
commit b05456fe07
@@ -126,8 +126,20 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
 	baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))
 
-	for _, shardId := range req.ShardIds {
-		os.Remove(baseFilename + erasure_coding.ToExt(int(shardId)))
-	}
+	found := false
+	for _, location := range vs.store.Locations {
+		if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) {
+			found = true
+			baseFilename = path.Join(location.Directory, baseFilename)
+			for _, shardId := range req.ShardIds {
+				os.Remove(baseFilename + erasure_coding.ToExt(int(shardId)))
+			}
+			break
+		}
+	}
+
+	if !found {
+		return nil, nil
+	}
 
 	// check whether to delete the ecx file also
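The change above makes deletion location-aware: instead of calling os.Remove with a bare base filename (which only resolves against the process working directory), the server scans its configured disk locations for the volume's .ecx index file and deletes shard files only where the volume actually lives. Returning nil, nil when no location matches keeps the purge idempotent. A minimal standalone sketch of that pattern, with hypothetical names standing in for vs.store.Locations, util.FileExists, and erasure_coding.ToExt:

package ecpurge

import (
	"fmt"
	"os"
	"path"
)

// deleteEcShardFiles scans candidate disk directories for the volume's .ecx
// index file and deletes the requested shard files only in the directory
// that actually hosts the volume. It reports whether the volume was found.
func deleteEcShardFiles(directories []string, baseFilename string, shardIds []uint32) bool {
	for _, dir := range directories {
		if _, err := os.Stat(path.Join(dir, baseFilename+".ecx")); err != nil {
			continue // this disk location does not host the EC volume
		}
		prefix := path.Join(dir, baseFilename)
		for _, shardId := range shardIds {
			// Errors are ignored, as in the original: a shard file that is
			// already gone is no problem when purging extra copies.
			os.Remove(prefix + fmt.Sprintf(".ec%02d", shardId)) // ".ec00"-style suffix, like erasure_coding.ToExt
		}
		return true // mirror the original's break after the first match
	}
	return false // not found anywhere; the RPC treats this as a no-op success
}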
@@ -129,36 +129,12 @@ func balanceEcVolumes(commandEnv *commandEnv, collection string, applyBalancing
 	for vid, locations := range vidLocations {
 
-		// collect all ec nodes with at least one free slot
-		var possibleDestinationEcNodes []*EcNode
-		for _, ecNode := range allEcNodes {
-			if ecNode.freeEcSlot > 0 {
-				possibleDestinationEcNodes = append(possibleDestinationEcNodes, ecNode)
-			}
-		}
-
-		// calculate average number of shards an ec node should have for one volume
-		averageShardsPerEcNode := int(math.Ceil(float64(erasure_coding.TotalShardsCount) / float64(len(possibleDestinationEcNodes))))
-		fmt.Printf("vid %d averageShardsPerEcNode %+v\n", vid, averageShardsPerEcNode)
-
-		// check whether this volume has ecNodes that are over average
-		isOverLimit := false
-		for _, ecNode := range locations {
-			shardBits := findEcVolumeShards(ecNode, vid)
-			if shardBits.ShardIdCount() > averageShardsPerEcNode {
-				isOverLimit = true
-				fmt.Printf("vid %d %s has %d shards, isOverLimit %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverLimit)
-				break
-			}
-		}
-
-		if isOverLimit {
-			if err := spreadShardsIntoMoreDataNodes(ctx, commandEnv, averageShardsPerEcNode, collection, vid, locations, possibleDestinationEcNodes, applyBalancing); err != nil {
-				return err
-			}
-		}
+		if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil {
+			return err
+		}
+
+		if err := doBalanceEcShards(ctx, commandEnv, collection, vid, locations, allEcNodes, applyBalancing); err != nil {
+			return err
+		}
 
 	}
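With the heavy lifting extracted into doDeduplicateEcShards and doBalanceEcShards (added in the next hunk), the loop body in balanceEcVolumes reduces to a per-volume pipeline that aborts the whole pass on the first error. A self-contained sketch of that control-flow shape, using simplified stand-in types for needle.VolumeId and *EcNode:

package ecbalance

// Simplified stand-ins for needle.VolumeId and *EcNode.
type volumeId uint32
type ecNode struct{ id string }

// step is one per-volume pass, e.g. deduplicate or rebalance.
type step func(vid volumeId, locations []*ecNode) error

// runPerVolume applies each step to every volume in turn and stops at the
// first error, mirroring how balanceEcVolumes now chains the two helpers.
func runPerVolume(vidLocations map[volumeId][]*ecNode, steps ...step) error {
	for vid, locations := range vidLocations {
		for _, s := range steps {
			if err := s(vid, locations); err != nil {
				return err
			}
		}
	}
	return nil
}

Running deduplication before balancing means the balance step counts each shard once, so over-replicated volumes do not skew the shards-per-node average.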
@@ -166,6 +142,67 @@ func balanceEcVolumes(commandEnv *commandEnv, collection string, applyBalancing
 	return nil
 }
 
+func doBalanceEcShards(ctx context.Context, commandEnv *commandEnv, collection string, vid needle.VolumeId, locations []*EcNode, allEcNodes []*EcNode, applyBalancing bool) error {
+	// collect all ec nodes with at least one free slot
+	var possibleDestinationEcNodes []*EcNode
+	for _, ecNode := range allEcNodes {
+		if ecNode.freeEcSlot > 0 {
+			possibleDestinationEcNodes = append(possibleDestinationEcNodes, ecNode)
+		}
+	}
+
+	// calculate average number of shards an ec node should have for one volume
+	averageShardsPerEcNode := int(math.Ceil(float64(erasure_coding.TotalShardsCount) / float64(len(possibleDestinationEcNodes))))
+	fmt.Printf("vid %d averageShardsPerEcNode %+v\n", vid, averageShardsPerEcNode)
+
+	// check whether this volume has ecNodes that are over average
+	isOverLimit := false
+	for _, ecNode := range locations {
+		shardBits := findEcVolumeShards(ecNode, vid)
+		if shardBits.ShardIdCount() > averageShardsPerEcNode {
+			isOverLimit = true
+			fmt.Printf("vid %d %s has %d shards, isOverLimit %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverLimit)
+			break
+		}
+	}
+
+	if isOverLimit {
+		if err := spreadShardsIntoMoreDataNodes(ctx, commandEnv, averageShardsPerEcNode, collection, vid, locations, possibleDestinationEcNodes, applyBalancing); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
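The ceiling division above guarantees every shard can be assigned a node even when erasure_coding.TotalShardsCount does not divide evenly among the candidates. A worked example, assuming the default SeaweedFS layout of 10 data + 4 parity shards (TotalShardsCount = 14):

package main

import (
	"fmt"
	"math"
)

func main() {
	const totalShards = 14 // assumed 10 data + 4 parity shards
	for _, freeNodes := range []int{14, 5, 4, 3} {
		avg := int(math.Ceil(float64(totalShards) / float64(freeNodes)))
		fmt.Printf("%2d candidate nodes -> at most %d shards per node\n", freeNodes, avg)
	}
	// Output:
	// 14 candidate nodes -> at most 1 shards per node
	//  5 candidate nodes -> at most 3 shards per node
	//  4 candidate nodes -> at most 4 shards per node
	//  3 candidate nodes -> at most 5 shards per node
}

One caveat: if no node has a free slot the divisor is zero, the float division yields +Inf, and the conversion to int is implementation-dependent, so this code presumably relies on at least one free slot existing cluster-wide.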
+func doDeduplicateEcShards(ctx context.Context, commandEnv *commandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error {
+	// group the locations that hold each shard of this volume
+	shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount)
+	for _, ecNode := range locations {
+		shardBits := findEcVolumeShards(ecNode, vid)
+		for _, shardId := range shardBits.ShardIds() {
+			shardToLocations[shardId] = append(shardToLocations[shardId], ecNode)
+		}
+	}
+
+	for shardId, ecNodes := range shardToLocations {
+		if len(ecNodes) <= 1 {
+			continue
+		}
+		sortEcNodes(ecNodes)
+		fmt.Printf("ec shard %d.%d has %d copies, removing from %+v\n", vid, shardId, len(ecNodes), ecNodes[1:])
+		if !applyBalancing {
+			continue
+		}
+		for _, ecNode := range ecNodes[1:] {
+			duplicatedShardIds := []uint32{uint32(shardId)}
+			if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+				return err
+			}
+			if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
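doDeduplicateEcShards inverts the node-to-shards view into a shard-to-holders table, then purges every copy after the first. A self-contained sketch of that pass; the sort key is an assumption (sortEcNodes is not shown in this diff), and purge stands in for the unmount-then-delete RPC pair:

package ecdedupe

import "sort"

type node struct {
	id        string
	freeSlots int
	shardIds  []int // shard ids of this volume held by the node
}

// dedupe keeps one copy of each shard and removes the rest via purge,
// mirroring doDeduplicateEcShards' unmount-then-delete sequence.
func dedupe(totalShards int, nodes []*node, purge func(n *node, shardId int) error) error {
	// Invert node -> shards into shard -> holding nodes.
	shardToNodes := make([][]*node, totalShards)
	for _, n := range nodes {
		for _, id := range n.shardIds {
			shardToNodes[id] = append(shardToNodes[id], n)
		}
	}
	for shardId, holders := range shardToNodes {
		if len(holders) <= 1 {
			continue // zero or one copy: nothing to purge
		}
		// Assumed ordering: keep the copy on the node with the most free
		// slots, so purging frees space where it is scarcest.
		sort.Slice(holders, func(i, j int) bool { return holders[i].freeSlots > holders[j].freeSlots })
		for _, n := range holders[1:] {
			if err := purge(n, shardId); err != nil {
				return err
			}
		}
	}
	return nil
}

In the real command the purge is two RPCs: unmountEcShards so the volume server stops serving the shard, then sourceServerDeleteEcShards to remove its files; with applyBalancing unset the loop only prints what would be removed.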
 
 func spreadShardsIntoMoreDataNodes(ctx context.Context, commandEnv *commandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
 
 	for _, ecNode := range existingLocations {