package weed_server

import (
	"fmt"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
)

const (
	// MaxUnsyncedEvents: when this many events have been filtered out without
	// sending anything, send an empty notification carrying the latest timestamp.
	MaxUnsyncedEvents = 1e3
)

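// SubscribeMetadata streams metadata change events to a subscriber: it first
// catches up from the persisted metadata logs on disk, then tails the in-memory
// log buffer aggregated from peer filers, falling back to the on-disk read
// whenever the buffer reports ResumeFromDiskError.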
func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error {

	peerAddress := findClientAddress(stream.Context(), 0)

	alreadyKnown, clientName := fs.addClient(req.ClientName, peerAddress, req.ClientId)
	if alreadyKnown {
		return fmt.Errorf("duplicated subscription detected for client %s id %d", clientName, req.ClientId)
	}
	defer fs.deleteClient(clientName, req.ClientId)

	lastReadTime := time.Unix(0, req.SinceNs)
	glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

	eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName)
	eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)

	var processedTsNs int64
	var readPersistedLogErr error
	var readInMemoryLogErr error
	var isDone bool

	for {
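		// First catch up from the metadata logs persisted on disk.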
		glog.V(4).Infof("read on disk %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
		if readPersistedLogErr != nil {
			return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
		}
		if isDone {
			return nil
		}

		if processedTsNs != 0 {
			lastReadTime = time.Unix(0, processedTsNs)
		}

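		// Then tail the in-memory aggregated log buffer; ResumeFromDiskError means the
		// requested position is no longer in memory, so loop back to the on-disk read.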
		glog.V(4).Infof("read in memory %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		lastReadTime, isDone, readInMemoryLogErr = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData("aggMeta:"+clientName, lastReadTime, req.UntilNs, func() bool {
			fs.filer.MetaAggregator.ListenersLock.Lock()
			fs.filer.MetaAggregator.ListenersCond.Wait()
			fs.filer.MetaAggregator.ListenersLock.Unlock()
			return true
		}, eachLogEntryFn)
		if readInMemoryLogErr != nil {
			if readInMemoryLogErr == log_buffer.ResumeFromDiskError {
				continue
			}
			glog.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr)
			if readInMemoryLogErr != log_buffer.ResumeError {
				break
			}
		}
		if isDone {
			return nil
		}

		time.Sleep(1127 * time.Millisecond)
	}

	return readInMemoryLogErr
}

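// SubscribeLocalMetadata is like SubscribeMetadata but serves only this filer's
// own metadata log: it catches up from the locally persisted logs and then tails
// the local in-memory log buffer, without aggregating events from peer filers.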
func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeLocalMetadataServer) error {

	peerAddress := findClientAddress(stream.Context(), 0)

	alreadyKnown, clientName := fs.addClient(req.ClientName, peerAddress, req.ClientId)
	if alreadyKnown {
		return fmt.Errorf("duplicated local subscription detected for client %s id %d", clientName, req.ClientId)
	}
	defer fs.deleteClient(clientName, req.ClientId)

	lastReadTime := time.Unix(0, req.SinceNs)
	glog.V(0).Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

	eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName)
	eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn)

	var processedTsNs int64
	var readPersistedLogErr error
	var readInMemoryLogErr error
	var isDone bool

	for {
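		// First catch up from the locally persisted metadata logs.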
		// println("reading from persisted logs ...")
		glog.V(0).Infof("read on disk %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn)
		if readPersistedLogErr != nil {
			glog.V(0).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, req.PathPrefix, lastReadTime, readPersistedLogErr)
			return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
		}
		if isDone {
			return nil
		}

		if processedTsNs != 0 {
			lastReadTime = time.Unix(0, processedTsNs)
		} else {
			if readInMemoryLogErr == log_buffer.ResumeFromDiskError {
				time.Sleep(1127 * time.Millisecond)
				continue
			}
		}

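		// Then tail the local in-memory log buffer; on ResumeFromDiskError fall back
		// to the on-disk read at the top of the loop.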
		glog.V(0).Infof("read in memory %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)

		lastReadTime, isDone, readInMemoryLogErr = fs.filer.LocalMetaLogBuffer.LoopProcessLogData("localMeta:"+clientName, lastReadTime, req.UntilNs, func() bool {
			fs.listenersLock.Lock()
			fs.listenersCond.Wait()
			fs.listenersLock.Unlock()
			return true
		}, eachLogEntryFn)
		if readInMemoryLogErr != nil {
			if readInMemoryLogErr == log_buffer.ResumeFromDiskError {
				continue
			}
			glog.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr)
			if readInMemoryLogErr != log_buffer.ResumeError {
				break
			}
		}
		if isDone {
			return nil
		}
	}

	return readInMemoryLogErr
}

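// eachLogEntryFn adapts an event-notification callback to the raw log entry
// format: each log entry is unmarshalled into a SubscribeMetadataResponse and
// its directory, notification, and timestamp are passed on.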
func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) func(logEntry *filer_pb.LogEntry) error {
	return func(logEntry *filer_pb.LogEntry) error {
		event := &filer_pb.SubscribeMetadataResponse{}
		if err := proto.Unmarshal(logEntry.Data, event); err != nil {
			glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
			return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
		}

		if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil {
			return err
		}

		return nil
	}
}

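// eachEventNotificationFn builds the per-subscriber callback that filters and
// forwards events: it drops events already signed by the subscriber, adds this
// filer's signature, skips internal meta-log entries, applies the requested path
// prefix filters, and streams the remaining events to the client.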
func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer, clientName string) func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error {
	filtered := 0

	return func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error {
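		// If too many events have been filtered out without sending anything, emit an
		// empty notification carrying the latest timestamp so the subscriber can still
		// advance its resume position.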
		defer func() {
			if filtered > MaxUnsyncedEvents {
				if err := stream.Send(&filer_pb.SubscribeMetadataResponse{
					EventNotification: &filer_pb.EventNotification{},
					TsNs:              tsNs,
				}); err == nil {
					filtered = 0
				}
			}
		}()

		filtered++

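		// Skip events that already carry the subscriber's own signature, and make sure
		// every forwarded event carries this filer's signature so the origin of a
		// change can be traced.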
		foundSelf := false
		for _, sig := range eventNotification.Signatures {
			if sig == req.Signature && req.Signature != 0 {
				return nil
			}
			if sig == fs.filer.Signature {
				foundSelf = true
			}
		}
		if !foundSelf {
			eventNotification.Signatures = append(eventNotification.Signatures, fs.filer.Signature)
		}

		// get the complete path to the file or directory
		var entryName string
		if eventNotification.OldEntry != nil {
			entryName = eventNotification.OldEntry.Name
		} else if eventNotification.NewEntry != nil {
			entryName = eventNotification.NewEntry.Name
		}

		fullpath := util.Join(dirPath, entryName)

		// skip filer internal meta logs
		if strings.HasPrefix(fullpath, filer.SystemLogDir) {
			return nil
		}

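		// Filter by the requested path prefixes; for moves, also accept events whose
		// new parent path falls under the watched prefix.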
		if hasPrefixIn(fullpath, req.PathPrefixes) {
			// good
		} else {
			if !strings.HasPrefix(fullpath, req.PathPrefix) {
				if eventNotification.NewParentPath != "" {
					newFullPath := util.Join(eventNotification.NewParentPath, entryName)
					if !strings.HasPrefix(newFullPath, req.PathPrefix) {
						return nil
					}
				} else {
					return nil
				}
			}
		}
		message := &filer_pb.SubscribeMetadataResponse{
			Directory:         dirPath,
			EventNotification: eventNotification,
			TsNs:              tsNs,
		}
		// println("sending", dirPath, entryName)
		if err := stream.Send(message); err != nil {
			glog.V(0).Infof("=> client %v: %+v", clientName, err)
			return err
		}
		filtered = 0
		return nil
	}
}

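// hasPrefixIn reports whether text starts with any of the given prefixes.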
func hasPrefixIn(text string, prefixes []string) bool {
	for _, p := range prefixes {
		if strings.HasPrefix(text, p) {
			return true
		}
	}
	return false
}

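// addClient derives the subscriber's listener name and, for a non-zero client id,
// reports whether that id is already registered as a known listener, so duplicate
// subscriptions can be rejected by the caller.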
func (fs *FilerServer) addClient(clientType string, clientAddress string, clientId int32) (alreadyKnown bool, clientName string) {
	clientName = clientType + "@" + clientAddress
	glog.V(0).Infof("+ listener %v", clientName)
	if clientId != 0 {
		fs.knownListenersLock.Lock()
		_, alreadyKnown = fs.knownListeners[clientId]
		fs.knownListenersLock.Unlock()
	}
	return
}

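// deleteClient unregisters the subscriber's client id when the subscription ends.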
func (fs *FilerServer) deleteClient(clientName string, clientId int32) {
	glog.V(0).Infof("- listener %v", clientName)
	if clientId != 0 {
		fs.knownListenersLock.Lock()
		delete(fs.knownListeners, clientId)
		fs.knownListenersLock.Unlock()
	}
}