package broker

import (
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)

// For a new or re-configured topic, or when one of the brokers goes offline,
// the pub clients ask one broker for the brokers hosting all the topic partitions.
// The broker will lock the topic on write.
// 1. if the topic is not found, create the topic, and allocate the topic partitions to the brokers
// 2. if the topic is found, return the brokers for the topic partitions
// For a topic to read from, the sub clients ask one broker for the brokers hosting all the topic partitions.
// The broker will lock the topic on read.
// 1. if the topic is not found, return an error
// 2. if the topic is found, return the brokers for the topic partitions
//
// If the topic needs to be re-balanced, the admin client will lock the topic, then:
// 1. collect throughput information for all the brokers
// 2. adjust the topic partitions to the brokers
// 3. notify the brokers to add/remove partitions to host
// 3.1 When locking the topic, the partitions and brokers should be remembered in the lock
//     (see the sketch below).
// 4. the brokers will stop processing incoming messages if they no longer host the right partition
// 4.1 the pub clients will need to re-partition the messages and publish to the right brokers for the partition
// 4.2 the sub clients will need to change the brokers to read from
//
// The following is from each individual component's perspective:
// For a pub client
// For the current topic/partition, ask one broker for the brokers hosting the topic partitions
// 1. connect to the brokers and keep sending, until the broker returns an error, or the broker leader is moved
//    (a rough sketch of this loop follows below)
// For a sub client
// For the current topic/partition, ask one broker for the brokers hosting the topic partitions
// 1. connect to the brokers and keep reading, until the broker returns an error, or the broker leader is moved
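// A rough sketch of the pub client loop described above; lookupTopicBrokers
// and publishUntilRedirect are hypothetical helpers, not the actual client API:
//
//	for {
//		brokers, err := lookupTopicBrokers(anyBroker, t) // ask any one broker
//		if err != nil {
//			continue
//		}
//		// keep sending until an error or a leader move, then re-lookup
//		_ = publishUntilRedirect(brokers, messages)
//	}
//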
// For a broker
// Upon a pub client lookup:
// 1. lock the topic
// 2. if the topic already has a partition assignment, check that all assigned brokers are healthy
// 3. if not, create a topic partition assignment
// 4. return the brokers for the topic partitions
// 5. unlock the topic
// Upon a sub client lookup:
// 1. lock the topic
// 2. if the topic already has a partition assignment, check that all assigned brokers are healthy
// 3. if not, return an error
// 4. return the brokers for the topic partitions
// 5. unlock the topic
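// In pseudo-Go, the broker side of a pub client lookup could look like this;
// lockTopic, lookupAssignment, and createAssignment are illustrative names:
//
//	unlock := lockTopic(t)
//	defer unlock()
//	assignment, found := lookupAssignment(t)
//	if !found {
//		assignment = createAssignment(t) // a sub client lookup returns an error here instead
//	}
//	return assignment
//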
// For an admin tool
// 0. collect stats from all the brokers, and find the topics worth moving
// 1. lock the topic
// 2. collect throughput information for all the brokers
// 3. adjust the topic partitions to the brokers
// 4. notify the brokers to add/remove partitions to host (see the sketch below)
// 5. the brokers will stop processing incoming messages if they no longer host the right partition
// 6. unlock the topic
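// Step 4 can reuse the same gRPC fan-out pattern as AssignTopicPartitions
// below; a sketch, assuming the admin has already computed the new `request`:
//
//	err := pb.WithBrokerGrpcClient(false, brokerAddress.String(), grpcDialOption,
//		func(client mq_pb.SeaweedMessagingClient) error {
//			_, err := client.AssignTopicPartitions(context.Background(), request)
//			return err
//		})
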
/*
The messages are buffered in memory, and saved to filer under

	/topics/<topic>/<date>/<hour>/<segment>/*.msg
	/topics/<topic>/<date>/<hour>/segment
	/topics/<topic>/info/segment_<id>.meta
*/
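/*
For example, with purely illustrative values, a topic "chat" could be stored as

	/topics/chat/2023-08-21/05/0001/0000000123.msg
	/topics/chat/2023-08-21/05/segment
	/topics/chat/info/segment_0001.meta
*/
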
func (broker *MessageQueueBroker) Publish(stream mq_pb.SeaweedMessaging_PublishServer) error {
	// 1. write to the volume server
	// 2. find the topic metadata owning filer
	// 3. write to the filer

	var localTopicPartition *topic.LocalPartition

	// The first message on the stream must be an init message that
	// identifies the topic and partition this publisher writes to.
	req, err := stream.Recv()
	if err != nil {
		return err
	}
	response := &mq_pb.PublishResponse{}
	// TODO check whether current broker should be the leader for the topic partition
	initMessage := req.GetInit()
	if initMessage != nil {
		t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)
		localTopicPartition = broker.localTopicManager.GetTopicPartition(t, p)
		if localTopicPartition == nil {
			localTopicPartition = topic.NewLocalPartition(t, p, true, nil)
			broker.localTopicManager.AddTopicPartition(t, localTopicPartition)
		}
	} else {
		// initMessage is nil here, so its fields must not be referenced in the error.
		response.Error = "missing init message on publish stream"
		glog.Errorf("missing init message on publish stream")
		return stream.Send(response)
	}

	// process each published message
	for {
		req, err := stream.Recv()
		if err != nil {
			return err
		}

		// Acknowledge the received message with its sequence number.
		sequence := req.GetSequence()
		response := &mq_pb.PublishResponse{
			AckSequence: sequence,
		}
		if dataMessage := req.GetData(); dataMessage != nil {
			print("+") // debug marker for each received data message
			localTopicPartition.Publish(dataMessage)
		}
		if err := stream.Send(response); err != nil {
			glog.Errorf("Error sending ack response: %v", err)
		}
	}
}
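// From the publisher's side, a session over this stream looks roughly like
// the following sketch (error handling omitted, request construction elided):
//
//	stream, _ := client.Publish(ctx) // client is a mq_pb.SeaweedMessagingClient
//	_ = stream.Send(initRequest)     // first: the init message with topic + partition
//	_ = stream.Send(dataRequest)     // then: data messages
//	resp, _ := stream.Recv()         // broker acks with resp.AckSequence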

// AssignTopicPartitions runs on the assigned broker, to execute the topic partition assignment
func (broker *MessageQueueBroker) AssignTopicPartitions(c context.Context, request *mq_pb.AssignTopicPartitionsRequest) (*mq_pb.AssignTopicPartitionsResponse, error) {
	ret := &mq_pb.AssignTopicPartitionsResponse{}
	self := pb.ServerAddress(fmt.Sprintf("%s:%d", broker.option.Ip, broker.option.Port))

	for _, brokerPartition := range request.BrokerPartitionAssignments {
		localPartition := topic.FromPbBrokerPartitionAssignment(self, brokerPartition)
		broker.localTopicManager.AddTopicPartition(
			topic.FromPbTopic(request.Topic),
			localPartition)
		if request.IsLeader {
			// The leader forwards the same assignment to each follower broker.
			for _, follower := range localPartition.FollowerBrokers {
				err := pb.WithBrokerGrpcClient(false, follower.String(), broker.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
					_, err := client.AssignTopicPartitions(context.Background(), request)
					return err
				})
				if err != nil {
					return ret, err
				}
			}
		}
	}

	return ret, nil
}