mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
refactor(shell): Decending
-> Descending
(#3675)
Signed-off-by: Ryan Russell <git@ryanrussell.org>
This commit is contained in:
parent
dfbd8efd26
commit
bd2dc6d641
|
@ -453,7 +453,7 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
|
||||||
|
|
||||||
func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
|
func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error {
|
||||||
|
|
||||||
sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes)
|
sortEcNodesByFreeslotsDescending(possibleDestinationEcNodes)
|
||||||
|
|
||||||
for _, destEcNode := range possibleDestinationEcNodes {
|
for _, destEcNode := range possibleDestinationEcNodes {
|
||||||
if destEcNode.info.Id == existingLocation.info.Id {
|
if destEcNode.info.Id == existingLocation.info.Id {
|
||||||
|
|
|
@ -118,7 +118,7 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) {
|
func sortEcNodesByFreeslotsDescending(ecNodes []*EcNode) {
|
||||||
slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
|
slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
|
||||||
return a.freeEcSlot > b.freeEcSlot
|
return a.freeEcSlot > b.freeEcSlot
|
||||||
})
|
})
|
||||||
|
@ -217,7 +217,7 @@ func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes
|
||||||
// find out all volume servers with one slot left.
|
// find out all volume servers with one slot left.
|
||||||
ecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter)
|
ecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter)
|
||||||
|
|
||||||
sortEcNodesByFreeslotsDecending(ecNodes)
|
sortEcNodesByFreeslotsDescending(ecNodes)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,7 +13,7 @@ func TestEcDistribution(t *testing.T) {
|
||||||
// find out all volume servers with one slot left.
|
// find out all volume servers with one slot left.
|
||||||
ecNodes, totalFreeEcSlots := collectEcVolumeServersByDc(topologyInfo, "")
|
ecNodes, totalFreeEcSlots := collectEcVolumeServersByDc(topologyInfo, "")
|
||||||
|
|
||||||
sortEcNodesByFreeslotsDecending(ecNodes)
|
sortEcNodesByFreeslotsDescending(ecNodes)
|
||||||
|
|
||||||
if totalFreeEcSlots < erasure_coding.TotalShardsCount {
|
if totalFreeEcSlots < erasure_coding.TotalShardsCount {
|
||||||
println("not enough free ec shard slots", totalFreeEcSlots)
|
println("not enough free ec shard slots", totalFreeEcSlots)
|
||||||
|
|
|
@ -115,7 +115,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s
|
||||||
return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount)
|
return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
sortEcNodesByFreeslotsDecending(allEcNodes)
|
sortEcNodesByFreeslotsDescending(allEcNodes)
|
||||||
|
|
||||||
if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount {
|
if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount {
|
||||||
return fmt.Errorf("disk space is not enough")
|
return fmt.Errorf("disk space is not enough")
|
||||||
|
|
Loading…
Reference in a new issue