package sub

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log"
	"github.com/golang/protobuf/proto"
)

func init() {
	NotificationInputs = append(NotificationInputs, &KafkaInput{})
}
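
// KafkaInput subscribes to a Kafka topic and surfaces filer event
// notifications published there.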
type KafkaInput struct {
	topic       string
	consumer    sarama.Consumer
	messageChan chan *sarama.ConsumerMessage
}
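
// GetName returns the configuration name of this input, "kafka".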
func (k *KafkaInput) GetName() string {
	return "kafka"
}
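
// Initialize reads the Kafka settings found under the given key prefix and
// starts the consumer. A minimal sketch of a matching configuration block
// (the section name and the enabled flag are assumptions; the key names are
// exactly the ones read below):
//
//	[notification.kafka]
//	enabled = true
//	hosts = ["localhost:9092"]
//	topic = "seaweedfs_filer"
//	offsetFile = "./last.offset"
//	offsetSaveIntervalSeconds = 10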
func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error {
	log.Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
	log.Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
	return k.initialize(
		configuration.GetStringSlice(prefix+"hosts"),
		configuration.GetString(prefix+"topic"),
		configuration.GetString(prefix+"offsetFile"),
		configuration.GetInt(prefix+"offsetSaveIntervalSeconds"),
	)
}
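
// initialize connects to the brokers, restores per-partition offsets from the
// progress file, and starts one goroutine per partition that forwards
// messages into messageChan.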
func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string, offsetSaveIntervalSeconds int) (err error) {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	k.consumer, err = sarama.NewConsumer(hosts, config)
	if err != nil {
		return fmt.Errorf("failed to connect to kafka %v: %v", hosts, err)
	}
	log.Infof("connected to %v", hosts)

	k.topic = topic
	k.messageChan = make(chan *sarama.ConsumerMessage, 1)

	partitions, err := k.consumer.Partitions(topic)
	if err != nil {
		return fmt.Errorf("failed to list partitions of topic %s: %v", topic, err)
	}

	// Resume from the saved progress, or start fresh if none exists or the
	// saved progress belongs to a different topic.
	progress := loadProgress(offsetFile)
	if progress == nil || progress.Topic != topic {
		progress = &KafkaProgress{
			Topic:            topic,
			PartitionOffsets: make(map[int32]int64),
		}
	}
	progress.lastSaveTime = time.Now()
	progress.offsetFile = offsetFile
	progress.offsetSaveIntervalSeconds = offsetSaveIntervalSeconds

	for _, partition := range partitions {
		offset, found := progress.PartitionOffsets[partition]
		if !found {
			offset = sarama.OffsetOldest
		} else {
			// The saved offset was already processed, so resume at the next one.
			offset++
		}
		partitionConsumer, err := k.consumer.ConsumePartition(topic, partition, offset)
		if err != nil {
			return fmt.Errorf("failed to consume topic %s partition %d: %v", topic, partition, err)
		}
		// Fan each partition into the shared message channel, recording the
		// offset of every delivered message.
		go func() {
			for {
				select {
				case err := <-partitionConsumer.Errors():
					log.Warnf("kafka consume %s: %v", topic, err)
				case msg := <-partitionConsumer.Messages():
					k.messageChan <- msg
					if err := progress.setOffset(msg.Partition, msg.Offset); err != nil {
						log.Warnf("set kafka offset: %v", err)
					}
				}
			}
		}()
	}

	return nil
}
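
// ReceiveMessage blocks until the next Kafka message arrives, then decodes
// its value as a filer_pb.EventNotification.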
func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) {

	msg := <-k.messageChan

	key = string(msg.Key)
	message = &filer_pb.EventNotification{}
	err = proto.Unmarshal(msg.Value, message)

	return
}
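
// KafkaProgress records the last processed offset of each partition and is
// periodically persisted as JSON so consumption can resume where it left off
// after a restart. The embedded mutex guards PartitionOffsets, which is
// updated concurrently by the per-partition consumer goroutines.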
type KafkaProgress struct {
	Topic                     string          `json:"topic"`
	PartitionOffsets          map[int32]int64 `json:"partitionOffsets"`
	offsetFile                string
	lastSaveTime              time.Time
	offsetSaveIntervalSeconds int
	sync.Mutex
}
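
// loadProgress reads saved progress from offsetFile. It returns nil when the
// file is missing or unparsable, in which case the caller starts fresh.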
func loadProgress(offsetFile string) *KafkaProgress {
	progress := &KafkaProgress{}
	data, err := ioutil.ReadFile(offsetFile)
	if err != nil {
		log.Warnf("failed to read kafka progress file: %s", offsetFile)
		return nil
	}
	if err = json.Unmarshal(data, progress); err != nil {
		log.Warnf("failed to parse kafka progress data: %s", string(data))
		return nil
	}
	return progress
}
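
// saveProgress persists the current offsets to the progress file and resets
// the save timer.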
func (progress *KafkaProgress) saveProgress() error {
	data, err := json.Marshal(progress)
	if err != nil {
		return fmt.Errorf("failed to marshal progress: %v", err)
	}
	err = ioutil.WriteFile(progress.offsetFile, data, 0640)
	if err != nil {
		return fmt.Errorf("failed to save progress to %s: %v", progress.offsetFile, err)
	}

	progress.lastSaveTime = time.Now()
	return nil
}
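
// setOffset records the latest processed offset of a partition and flushes
// all progress to disk at most once per offsetSaveIntervalSeconds.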
func (progress *KafkaProgress) setOffset(partition int32, offset int64) error {
	progress.Lock()
	defer progress.Unlock()

	progress.PartitionOffsets[partition] = offset
	if int(time.Since(progress.lastSaveTime).Seconds()) > progress.offsetSaveIntervalSeconds {
		return progress.saveProgress()
	}
	return nil
}