seaweedfs/weed/messaging/broker/broker_grpc_server_subscribe.go

package broker

import (
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
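
// Subscribe streams messages of one topic partition to a subscriber: it first replays
// entries already persisted on the filer, then tails the in-memory log buffer until the
// subscriber disconnects or a close message is seen.
//
// A minimal sketch of the expected initial request, assuming the generated field names match
// the ones read below (the literal values are only illustrative):
//
//	&messaging_pb.SubscriberMessage{
//		Init: &messaging_pb.SubscriberMessage_InitMessage{
//			Namespace:     "chat",
//			Topic:         "events",
//			Partition:     0,
//			StartPosition: messaging_pb.SubscriberMessage_InitMessage_EARLIEST,
//			SubscriberId:  "subscriber-1",
//		},
//	}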
func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {

	// process initial request
	in, err := stream.Recv()
	if err == io.EOF {
		return nil
	}
	if err != nil {
		return err
	}

	var processedTsNs int64
	var messageCount int64
	subscriberId := in.Init.SubscriberId

	// TODO look it up
	topicConfig := &messaging_pb.TopicConfiguration{
		// IsTransient: true,
	}
	// get lock
	tp := TopicPartition{
		Namespace: in.Init.Namespace,
		Topic:     in.Init.Topic,
		Partition: in.Init.Partition,
	}

	fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String())
	defer func() {
		fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs))
	}()

	lock := broker.topicManager.RequestLock(tp, topicConfig, false)
	defer broker.topicManager.ReleaseLock(tp, false)
	isConnected := true
	go func() {
		for isConnected {
			if _, err := stream.Recv(); err != nil {
				// println("disconnecting connection to", subscriberId, tp.String())
				isConnected = false
				lock.cond.Signal()
			}
		}
	}()

	lastReadTime := time.Now()
	switch in.Init.StartPosition {
	case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP:
		lastReadTime = time.Unix(0, in.Init.TimestampNs)
	case messaging_pb.SubscriberMessage_InitMessage_LATEST:
	case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
		lastReadTime = time.Unix(0, 0)
	}

	// how to process each message
	// an error returned will end the subscription
	eachMessageFn := func(m *messaging_pb.Message) error {
		err := stream.Send(&messaging_pb.BrokerMessage{
			Data: m,
		})
		if err != nil {
			glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
		}
		return err
	}

	eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
		m := &messaging_pb.Message{}
		if err = proto.Unmarshal(logEntry.Data, m); err != nil {
			glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
			return err
		}
		// fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
		if err = eachMessageFn(m); err != nil {
			glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
			return err
		}
		if m.IsClose {
			// println("processed EOF")
			return io.EOF
		}
		processedTsNs = logEntry.TsNs
		messageCount++
		return nil
	}

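	// Catch up from entries already persisted on the filer before tailing the in-memory log buffer.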
	if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil {
		if err != io.EOF {
			// println("stopping from persisted logs", err.Error())
			return err
		}
	}

	if processedTsNs != 0 {
		lastReadTime = time.Unix(0, processedTsNs)
	}

	// fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime)

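	// Tail the in-memory log buffer. When no more data is buffered, the wait callback blocks on the
	// lock's condition variable and reports isConnected back, so a disconnect ends the loop.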
	err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool {
		lock.Mutex.Lock()
		lock.cond.Wait()
		lock.Mutex.Unlock()
		return isConnected
	}, eachLogEntryFn)

	return err
}

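// readPersistedLogBuffer replays log entries that were already flushed to the filer, starting
// from startTime, and feeds each decoded entry to eachLogEntryFn.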
func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) {
	startTime = startTime.UTC()
	startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
	startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())

	sizeBuf := make([]byte, 4)
	startTsNs := startTime.UnixNano()

	topicDir := genTopicDir(tp.Namespace, tp.Topic)
	partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition)

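	// Day directories are named yyyy-MM-dd; within the start day, files that sort before the start
	// minute are skipped, and only files with this partition's ".part<NN>" suffix are read.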
	return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error {
		dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name)
		return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error {
			if dayEntry.Name == startDate {
				if strings.Compare(hourMinuteEntry.Name, startHourMinute) < 0 {
					return nil
				}
			}
			if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) {
				return nil
			}
			// println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name)
			chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks)
			defer chunkedFileReader.Close()
			if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
				chunkedFileReader.Close()
				if err == io.EOF {
					return err
				}
				return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err)
			}
			return nil
		}, "", false, 24*60)
	}, startDate, true, 366)
}