seaweedfs/weed/mq/pub_balancer/balance_brokers.go
Chris Lu 580940bf82
Merge accumulated changes related to message queue (#5098)
* balance partitions on brokers

* prepare topic partition first and then publish, move partition

* purge unused APIs

* clean up

* adjust logs

* add BalanceTopics() grpc API

* configure topic

* configure topic command

* refactor

* repair missing partitions

* sequence of operations to ensure ordering

* proto to close publishers and consumers

* rename file

* topic partition versioned by unixTimeNs

* create local topic partition

* close publishers

* randomize the client name

* wait until no publishers

* logs

* close stop publisher channel

* send last ack

* comments

* comment

* comments

* support list of brokers

* add cli options

* Update .gitignore

* logs

* return io.eof directly

* refactor

* optionally create topic

* refactoring

* detect consumer disconnection

* sub client wait for more messages

* subscribe by time stamp

* rename

* rename to sub_balancer

* rename

* adjust comments

* rename

* fix compilation

* rename

* rename

* SubscriberToSubCoordinator

* sticky rebalance

* go fmt

* add tests

* tracking topic=>broker

* merge

* comment
2023-12-11 12:05:54 -08:00


package pub_balancer

import (
	cmap "github.com/orcaman/concurrent-map/v2"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"math/rand"
)

// BalanceTopicPartitionOnBrokers picks at most one partition to move from the
// most loaded broker to the least loaded broker, or returns nil if the brokers
// are already balanced (their partition counts differ by at most one).
func BalanceTopicPartitionOnBrokers(brokers cmap.ConcurrentMap[string, *BrokerStats]) BalanceAction {
	// 1. calculate the average number of partitions per broker
	var totalPartitionCount int32
	var totalBrokerCount int32
	for brokerStats := range brokers.IterBuffered() {
		totalBrokerCount++
		totalPartitionCount += brokerStats.Val.TopicPartitionCount
	}
	if totalBrokerCount == 0 {
		// no brokers registered yet; avoid dividing by zero
		return nil
	}
	averagePartitionCountPerBroker := totalPartitionCount / totalBrokerCount
	minPartitionCountPerBroker := averagePartitionCountPerBroker
	maxPartitionCountPerBroker := averagePartitionCountPerBroker
	var sourceBroker, targetBroker string
	var candidatePartition *topic.TopicPartition
	for brokerStats := range brokers.IterBuffered() {
		if minPartitionCountPerBroker > brokerStats.Val.TopicPartitionCount {
			minPartitionCountPerBroker = brokerStats.Val.TopicPartitionCount
			targetBroker = brokerStats.Key
		}
		if maxPartitionCountPerBroker < brokerStats.Val.TopicPartitionCount {
			maxPartitionCountPerBroker = brokerStats.Val.TopicPartitionCount
			sourceBroker = brokerStats.Key
			// select a random partition from the source broker
			randomPartitionIndex := rand.Intn(int(brokerStats.Val.TopicPartitionCount))
			index := 0
			for topicPartitionStats := range brokerStats.Val.TopicPartitionStats.IterBuffered() {
				if index == randomPartitionIndex {
					candidatePartition = &topicPartitionStats.Val.TopicPartition
					break
				}
				index++
			}
		}
	}
	if minPartitionCountPerBroker >= maxPartitionCountPerBroker-1 {
		// already balanced: the busiest and idlest brokers differ by at most one partition
		return nil
	}

	// 2. move the selected partition from the source broker to the target broker
	return &BalanceActionMove{
		TopicPartition: *candidatePartition,
		SourceBroker:   sourceBroker,
		TargetBroker:   targetBroker,
	}
}
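
A minimal test-style sketch of exercising this function follows. It is hypothetical and not part of the commit: it assumes BrokerStats and TopicPartitionStats are the package types referenced above, constructible as plain struct literals with the TopicPartitionCount and TopicPartitionStats fields; the broker addresses and map keys are made up.

// balance_brokers_test.go (hypothetical companion sketch)
package pub_balancer

import (
	"testing"

	cmap "github.com/orcaman/concurrent-map/v2"
)

func TestBalanceTopicPartitionOnBrokers(t *testing.T) {
	brokers := cmap.New[*BrokerStats]()

	// broker1 hosts two partitions, broker2 hosts none, so exactly one move is expected
	busy := &BrokerStats{
		TopicPartitionCount: 2,
		TopicPartitionStats: cmap.New[*TopicPartitionStats](),
	}
	busy.TopicPartitionStats.Set("topic1/p0", &TopicPartitionStats{})
	busy.TopicPartitionStats.Set("topic1/p1", &TopicPartitionStats{})
	brokers.Set("broker1:17777", busy)
	brokers.Set("broker2:17777", &BrokerStats{
		TopicPartitionStats: cmap.New[*TopicPartitionStats](),
	})

	action := BalanceTopicPartitionOnBrokers(brokers)
	move, ok := action.(*BalanceActionMove)
	if !ok || move == nil {
		t.Fatalf("expected a *BalanceActionMove, got %v", action)
	}
	if move.SourceBroker != "broker1:17777" || move.TargetBroker != "broker2:17777" {
		t.Fatalf("unexpected move: %+v", move)
	}
}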