2018-10-31 08:11:19 +00:00
|
|
|
package sub
|
2018-09-17 07:27:56 +00:00
|
|
|
|
|
|
|
import (
|
2018-10-11 07:08:13 +00:00
|
|
|
"encoding/json"
|
2018-09-21 08:56:43 +00:00
|
|
|
"fmt"
|
2021-10-14 04:27:58 +00:00
|
|
|
"os"
|
2018-10-11 07:08:13 +00:00
|
|
|
"sync"
|
|
|
|
"time"
|
2018-09-21 08:56:43 +00:00
|
|
|
|
2018-09-17 07:27:56 +00:00
|
|
|
"github.com/Shopify/sarama"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2018-09-21 08:56:43 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
|
|
|
"github.com/golang/protobuf/proto"
|
2018-09-17 07:27:56 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// init registers the Kafka input with the global list of notification
// inputs so it can be selected by name ("kafka") from configuration.
func init() {
	NotificationInputs = append(NotificationInputs, &KafkaInput{})
}
|
|
|
|
|
|
|
|
// KafkaInput consumes filer event notifications from a Kafka topic and
// delivers them to the replication system via ReceiveMessage.
type KafkaInput struct {
	topic       string                       // Kafka topic the events are read from
	consumer    sarama.Consumer              // underlying sarama consumer; one partition consumer is started per partition
	messageChan chan *sarama.ConsumerMessage // fan-in channel filled by the per-partition goroutines
}
|
|
|
|
|
|
|
|
// GetName returns the configuration name of this input ("kafka"),
// used to select it in the notification settings.
func (k *KafkaInput) GetName() string {
	return "kafka"
}
|
|
|
|
|
2020-01-29 17:09:55 +00:00
|
|
|
func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error {
|
|
|
|
glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
|
|
|
|
glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
|
2018-09-17 07:27:56 +00:00
|
|
|
return k.initialize(
|
2020-01-29 17:09:55 +00:00
|
|
|
configuration.GetStringSlice(prefix+"hosts"),
|
|
|
|
configuration.GetString(prefix+"topic"),
|
|
|
|
configuration.GetString(prefix+"offsetFile"),
|
|
|
|
configuration.GetInt(prefix+"offsetSaveIntervalSeconds"),
|
2018-09-17 07:27:56 +00:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2018-09-23 08:34:40 +00:00
|
|
|
func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string, offsetSaveIntervalSeconds int) (err error) {
|
2018-09-17 07:27:56 +00:00
|
|
|
config := sarama.NewConfig()
|
|
|
|
config.Consumer.Return.Errors = true
|
|
|
|
k.consumer, err = sarama.NewConsumer(hosts, config)
|
2018-09-20 05:31:45 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
} else {
|
|
|
|
glog.V(0).Infof("connected to %v", hosts)
|
|
|
|
}
|
|
|
|
|
2018-09-17 07:27:56 +00:00
|
|
|
k.topic = topic
|
|
|
|
k.messageChan = make(chan *sarama.ConsumerMessage, 1)
|
|
|
|
|
|
|
|
partitions, err := k.consumer.Partitions(topic)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
2018-09-23 08:34:40 +00:00
|
|
|
progress := loadProgress(offsetFile)
|
|
|
|
if progress == nil || progress.Topic != topic {
|
|
|
|
progress = &KafkaProgress{
|
|
|
|
Topic: topic,
|
|
|
|
PartitionOffsets: make(map[int32]int64),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
progress.lastSaveTime = time.Now()
|
|
|
|
progress.offsetFile = offsetFile
|
|
|
|
progress.offsetSaveIntervalSeconds = offsetSaveIntervalSeconds
|
|
|
|
|
2018-09-17 07:27:56 +00:00
|
|
|
for _, partition := range partitions {
|
2018-09-23 08:34:40 +00:00
|
|
|
offset, found := progress.PartitionOffsets[partition]
|
|
|
|
if !found {
|
|
|
|
offset = sarama.OffsetOldest
|
|
|
|
} else {
|
|
|
|
offset += 1
|
|
|
|
}
|
|
|
|
partitionConsumer, err := k.consumer.ConsumePartition(topic, partition, offset)
|
2018-09-17 07:27:56 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case err := <-partitionConsumer.Errors():
|
|
|
|
fmt.Println(err)
|
|
|
|
case msg := <-partitionConsumer.Messages():
|
|
|
|
k.messageChan <- msg
|
2018-09-23 08:34:40 +00:00
|
|
|
if err := progress.setOffset(msg.Partition, msg.Offset); err != nil {
|
|
|
|
glog.Warningf("set kafka offset: %v", err)
|
|
|
|
}
|
2018-09-17 07:27:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-01-26 19:08:44 +00:00
|
|
|
func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) {
|
2018-09-17 07:27:56 +00:00
|
|
|
|
|
|
|
msg := <-k.messageChan
|
|
|
|
|
|
|
|
key = string(msg.Key)
|
|
|
|
message = &filer_pb.EventNotification{}
|
|
|
|
err = proto.Unmarshal(msg.Value, message)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
2018-09-23 08:34:40 +00:00
|
|
|
|
|
|
|
// KafkaProgress tracks how far each partition of a topic has been consumed,
// so consumption can resume where it left off after a restart. The exported
// fields are persisted as JSON to offsetFile; the unexported fields control
// when and where the file is written.
type KafkaProgress struct {
	Topic            string          `json:"topic"`
	PartitionOffsets map[int32]int64 `json:"partitionOffsets"`
	offsetFile                string    // path of the JSON progress file on disk
	lastSaveTime              time.Time // when the progress file was last written
	offsetSaveIntervalSeconds int       // minimum seconds between saves (throttles disk writes)
	sync.Mutex                          // guards PartitionOffsets and the save logic in setOffset
}
|
|
|
|
|
|
|
|
func loadProgress(offsetFile string) *KafkaProgress {
|
|
|
|
progress := &KafkaProgress{}
|
2021-10-14 04:27:58 +00:00
|
|
|
data, err := os.ReadFile(offsetFile)
|
2018-09-23 08:34:40 +00:00
|
|
|
if err != nil {
|
|
|
|
glog.Warningf("failed to read kafka progress file: %s", offsetFile)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
err = json.Unmarshal(data, progress)
|
|
|
|
if err != nil {
|
|
|
|
glog.Warningf("failed to read kafka progress message: %s", string(data))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return progress
|
|
|
|
}
|
|
|
|
|
|
|
|
func (progress *KafkaProgress) saveProgress() error {
|
|
|
|
data, err := json.Marshal(progress)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to marshal progress: %v", err)
|
|
|
|
}
|
2022-02-05 05:32:27 +00:00
|
|
|
err = util.WriteFile(progress.offsetFile, data, 0640)
|
2018-09-23 08:34:40 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to save progress to %s: %v", progress.offsetFile, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
progress.lastSaveTime = time.Now()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (progress *KafkaProgress) setOffset(parition int32, offset int64) error {
|
|
|
|
progress.Lock()
|
|
|
|
defer progress.Unlock()
|
|
|
|
|
|
|
|
progress.PartitionOffsets[parition] = offset
|
|
|
|
if int(time.Now().Sub(progress.lastSaveTime).Seconds()) > progress.offsetSaveIntervalSeconds {
|
|
|
|
return progress.saveProgress()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|