mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
ec.balance: collect dc rack info
parent 7912a812f1
commit 9d9162ca35
@@ -145,25 +145,29 @@ func balanceEcVolumes(commandEnv *CommandEnv, collection string, applyBalancing
 func doBalanceEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, allEcNodes []*EcNode, applyBalancing bool) error {
 	// collect all ec nodes with at least one free slot
 	var possibleDestinationEcNodes []*EcNode
+	possibleDataCenters := make(map[string]int)
+	possibleRacks := make(map[string]int)
 	for _, ecNode := range allEcNodes {
 		if ecNode.freeEcSlot > 0 {
 			possibleDestinationEcNodes = append(possibleDestinationEcNodes, ecNode)
+			possibleDataCenters[ecNode.dc] += ecNode.freeEcSlot
+			possibleRacks[ecNode.dc+"/"+ecNode.rack] += ecNode.freeEcSlot
 		}
 	}
 	// calculate average number of shards an ec node should have for one volume
 	averageShardsPerEcNode := int(math.Ceil(float64(erasure_coding.TotalShardsCount) / float64(len(possibleDestinationEcNodes))))
-	fmt.Printf("vid %d averageShardsPerEcNode %+v\n", vid, averageShardsPerEcNode)
+	fmt.Printf("vid %d averageShards Per EcNode:%d\n", vid, averageShardsPerEcNode)
 	// check whether this volume has ecNodes that are over average
-	isOverLimit := false
+	isOverPerNodeAverage := false
 	for _, ecNode := range locations {
 		shardBits := findEcVolumeShards(ecNode, vid)
 		if shardBits.ShardIdCount() > averageShardsPerEcNode {
-			isOverLimit = true
-			fmt.Printf("vid %d %s has %d shards, isOverLimit %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverLimit)
+			isOverPerNodeAverage = true
+			fmt.Printf("vid %d %s has %d shards, isOverPerNodeAverage %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverPerNodeAverage)
			break
 		}
 	}
-	if isOverLimit {
+	if isOverPerNodeAverage {
 		if err := spreadShardsIntoMoreDataNodes(ctx, commandEnv, averageShardsPerEcNode, collection, vid, locations, possibleDestinationEcNodes, applyBalancing); err != nil {
 			return err
 		}
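For context, the two maps added above aggregate free EC slots per data center and per dc/rack pair, and the per-node average is a ceiling division of the total shard count over the number of candidate nodes. Below is a minimal standalone sketch of that bookkeeping; the node struct and sample values are made up for illustration and are not the real EcNode type:

package main

import (
	"fmt"
	"math"
)

// node is a simplified stand-in for EcNode, used only for this illustration.
type node struct {
	dc, rack   string
	freeEcSlot int
}

func main() {
	nodes := []node{
		{dc: "dc1", rack: "rack1", freeEcSlot: 5},
		{dc: "dc1", rack: "rack2", freeEcSlot: 3},
		{dc: "dc2", rack: "rack1", freeEcSlot: 7},
	}

	// Aggregate free slots per data center and per dc/rack pair,
	// mirroring the maps introduced in this commit.
	possibleDataCenters := make(map[string]int)
	possibleRacks := make(map[string]int)
	for _, n := range nodes {
		if n.freeEcSlot > 0 {
			possibleDataCenters[n.dc] += n.freeEcSlot
			possibleRacks[n.dc+"/"+n.rack] += n.freeEcSlot
		}
	}

	// erasure_coding.TotalShardsCount is 14 (10 data + 4 parity) in SeaweedFS,
	// so with 3 candidate nodes each node should carry at most ceil(14/3) = 5 shards.
	totalShardsCount := 14
	averageShardsPerEcNode := int(math.Ceil(float64(totalShardsCount) / float64(len(nodes))))

	fmt.Println("per-DC free slots:  ", possibleDataCenters)
	fmt.Println("per-rack free slots:", possibleRacks)
	fmt.Println("averageShardsPerEcNode:", averageShardsPerEcNode)
}

As far as this hunk shows, the two maps are only populated here; any use of them for rack-aware placement would come in later changes.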
@@ -98,11 +98,11 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption
 	return
 }
 
-func eachDataNode(topo *master_pb.TopologyInfo, fn func(*master_pb.DataNodeInfo)) {
+func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc, rack string, dn *master_pb.DataNodeInfo)) {
 	for _, dc := range topo.DataCenterInfos {
 		for _, rack := range dc.RackInfos {
 			for _, dn := range rack.DataNodeInfos {
-				fn(dn)
+				fn(dc.Id, rack.Id, dn)
 			}
 		}
 	}
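The signature change above threads the enclosing data center and rack IDs into the callback instead of passing only the data node. A self-contained sketch of that traversal pattern, using simplified stand-in types rather than the real master_pb protobuf messages:

package main

import "fmt"

// Simplified stand-ins for the master_pb topology messages, for illustration only.
type DataNodeInfo struct{ Id string }

type RackInfo struct {
	Id            string
	DataNodeInfos []*DataNodeInfo
}

type DataCenterInfo struct {
	Id        string
	RackInfos []*RackInfo
}

type TopologyInfo struct {
	DataCenterInfos []*DataCenterInfo
}

// eachDataNode mirrors the new callback shape: each data node is visited together
// with the IDs of the data center and rack that contain it.
func eachDataNode(topo *TopologyInfo, fn func(dc, rack string, dn *DataNodeInfo)) {
	for _, dc := range topo.DataCenterInfos {
		for _, rack := range dc.RackInfos {
			for _, dn := range rack.DataNodeInfos {
				fn(dc.Id, rack.Id, dn)
			}
		}
	}
}

func main() {
	topo := &TopologyInfo{DataCenterInfos: []*DataCenterInfo{
		{Id: "dc1", RackInfos: []*RackInfo{
			{Id: "rack1", DataNodeInfos: []*DataNodeInfo{{Id: "10.0.0.1:8080"}}},
			{Id: "rack2", DataNodeInfos: []*DataNodeInfo{{Id: "10.0.0.2:8080"}}},
		}},
	}}
	eachDataNode(topo, func(dc, rack string, dn *DataNodeInfo) {
		fmt.Printf("%s/%s -> %s\n", dc, rack, dn.Id)
	})
}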
@@ -128,6 +128,8 @@ func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int)
 
 type EcNode struct {
 	info       *master_pb.DataNodeInfo
+	dc         string
+	rack       string
 	freeEcSlot int
 }
 
@@ -144,10 +146,12 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv) (ecNodes []*EcN
 	}
 
 	// find out all volume servers with one slot left.
-	eachDataNode(resp.TopologyInfo, func(dn *master_pb.DataNodeInfo) {
+	eachDataNode(resp.TopologyInfo, func(dc, rack string, dn *master_pb.DataNodeInfo) {
 		if freeEcSlots := countFreeShardSlots(dn); freeEcSlots > 0 {
 			ecNodes = append(ecNodes, &EcNode{
 				info:       dn,
+				dc:         dc,
+				rack:       rack,
 				freeEcSlot: int(freeEcSlots),
 			})
 			totalFreeEcSlots += freeEcSlots
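Taken together, collectEcNodes now records where each candidate volume server lives as it walks the topology. A condensed sketch of that filter-and-tag step, with the topology walk replaced by a hard-coded list of tuples and made-up free-slot counts standing in for the real countFreeShardSlots result:

package main

import "fmt"

// ecNode is a simplified stand-in for the EcNode struct extended in this commit.
type ecNode struct {
	id         string
	dc, rack   string
	freeEcSlot int
}

func main() {
	// Pretend eachDataNode yielded these (dc, rack, id, free-slot) tuples.
	walked := []struct {
		dc, rack, id string
		freeEcSlots  int
	}{
		{"dc1", "rack1", "10.0.0.1:8080", 4},
		{"dc1", "rack2", "10.0.0.2:8080", 0}, // no free slots: skipped below
		{"dc2", "rack1", "10.0.0.3:8080", 9},
	}

	var ecNodes []*ecNode
	totalFreeEcSlots := 0
	for _, w := range walked {
		// Keep only servers with at least one free EC shard slot, and remember
		// their data center and rack so later balancing can be topology-aware.
		if w.freeEcSlots > 0 {
			ecNodes = append(ecNodes, &ecNode{
				id:         w.id,
				dc:         w.dc,
				rack:       w.rack,
				freeEcSlot: w.freeEcSlots,
			})
			totalFreeEcSlots += w.freeEcSlots
		}
	}

	fmt.Println("candidate nodes:", len(ecNodes), "total free EC slots:", totalFreeEcSlots)
}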