Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
add sortEcNodesByFreeslotsDecending and sortEcNodesByFreeslotsAscending
addressing https://github.com/chrislusf/seaweedfs/issues/1111
parent f056baa384
commit 40514c5362
@@ -207,7 +207,7 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti
 		if len(ecNodes) <= 1 {
 			continue
 		}
-		sortEcNodes(ecNodes)
+		sortEcNodesByFreeslotsAscending(ecNodes)
 		fmt.Printf("ec shard %d.%d has %d copies, keeping %v\n", vid, shardId, len(ecNodes), ecNodes[0].info.Id)
 		if !applyBalancing {
 			continue
@@ -442,7 +442,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack
 
 func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, expectedTotalEcShards int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
 
-	sortEcNodes(possibleDestinationEcNodes)
+	sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes)
 	averageShardsPerEcNode := ceilDivide(expectedTotalEcShards, len(possibleDestinationEcNodes))
 
 	for _, destEcNode := range possibleDestinationEcNodes {
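A side note on the line right after the sort (not part of the commit): ceilDivide's body is not shown in this diff; assuming it is plain ceiling integer division, as the name suggests, the per-node cap it yields looks like this:

// Assumed behaviour of ceilDivide: ceiling integer division on positive ints.
func ceilDivide(total, n int) int {
	return (total + n - 1) / n
}

// e.g. ceilDivide(14, 4) == 4: 14 expected shards spread over 4 candidate
// destination nodes allows at most 4 shards per node.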
@@ -112,12 +112,18 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId,
 	}
 }
 
-func sortEcNodes(ecNodes []*EcNode) {
+func sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) {
 	sort.Slice(ecNodes, func(i, j int) bool {
 		return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot
 	})
 }
 
+func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {
+	sort.Slice(ecNodes, func(i, j int) bool {
+		return ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot
+	})
+}
+
 type CandidateEcNode struct {
 	ecNode     *EcNode
 	shardCount int
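For reference, a minimal standalone sketch of the two orderings and where each one is used; the ecNode struct and names below are simplified stand-ins, not the real types from the shell package:

package main

import (
	"fmt"
	"sort"
)

// simplified stand-in for the shell package's EcNode
type ecNode struct {
	id         string
	freeEcSlot int
}

// descending: nodes with the most free EC slots first
// (used when picking a destination in balancing and rebuilding)
func sortByFreeSlotsDescending(nodes []*ecNode) {
	sort.Slice(nodes, func(i, j int) bool {
		return nodes[i].freeEcSlot > nodes[j].freeEcSlot
	})
}

// ascending: nodes with the fewest free EC slots first
// (used when deduplicating, so the copy kept is the one on the fullest node)
func sortByFreeSlotsAscending(nodes []*ecNode) {
	sort.Slice(nodes, func(i, j int) bool {
		return nodes[i].freeEcSlot < nodes[j].freeEcSlot
	})
}

func main() {
	nodes := []*ecNode{
		{id: "dn1", freeEcSlot: 7},
		{id: "dn2", freeEcSlot: 2},
		{id: "dn3", freeEcSlot: 12},
	}

	sortByFreeSlotsDescending(nodes)
	fmt.Println("descending, first:", nodes[0].id) // dn3 (12 free slots)

	sortByFreeSlotsAscending(nodes)
	fmt.Println("ascending, first:", nodes[0].id) // dn2 (2 free slots)
}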
@@ -202,7 +208,7 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen
 		totalFreeEcSlots += freeEcSlots
 	})
 
-	sortEcNodes(ecNodes)
+	sortEcNodesByFreeslotsDecending(ecNodes)
 
 	return
 }
@@ -111,7 +111,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
 		return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount)
 	}
 
-	sortEcNodes(allEcNodes)
+	sortEcNodesByFreeslotsDecending(allEcNodes)
 
 	if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount {
 		return fmt.Errorf("disk space is not enough")
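A hedged reading of the guard above: after the descending sort, allEcNodes[0] is the node with the most free EC slots, so the comparison against erasure_coding.TotalShardsCount (10 data + 4 parity = 14 shards in SeaweedFS's EC layout) asks whether any single node still has room for a full shard set; sketched with stand-in names:

// Stand-in sketch (not the shell package's real types): SeaweedFS erasure
// coding uses 10 data + 4 parity shards, so a full set needs 14 free slots.
const totalShardsCount = 10 + 4

// mostFreeSlots plays the role of allEcNodes[0].freeEcSlot after the
// descending sort: if even the roomiest node cannot take a full shard set,
// no node can, and the rebuild is refused.
func enoughRoomToRebuild(mostFreeSlots int) bool {
	return mostFreeSlots >= totalShardsCount
}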