package filer

import (
	"context"
	"fmt"
	"io"
	"math"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/notification"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)
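
// NotifyUpdateEvent publishes a metadata change for oldEntry/newEntry to the
// configured notification queue (if any) and records it in the local meta log.
// Changes under SystemLogDir are skipped, and this filer's signature is appended
// to the event signatures if not already present, so consumers can tell which
// filers have already seen the event.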
func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
	var fullpath string
	if oldEntry != nil {
		fullpath = string(oldEntry.FullPath)
	} else if newEntry != nil {
		fullpath = string(newEntry.FullPath)
	} else {
		return
	}

	// println("fullpath:", fullpath)

	if strings.HasPrefix(fullpath, SystemLogDir) {
		return
	}

	foundSelf := false
	for _, sig := range signatures {
		if sig == f.Signature {
			foundSelf = true
		}
	}
	if !foundSelf {
		signatures = append(signatures, f.Signature)
	}

	newParentPath := ""
	if newEntry != nil {
		newParentPath, _ = newEntry.FullPath.DirAndName()
	}
	eventNotification := &filer_pb.EventNotification{
		OldEntry:           oldEntry.ToProtoEntry(),
		NewEntry:           newEntry.ToProtoEntry(),
		DeleteChunks:       deleteChunks,
		NewParentPath:      newParentPath,
		IsFromOtherCluster: isFromOtherCluster,
		Signatures:         signatures,
	}

	if notification.Queue != nil {
		glog.V(3).Infof("notifying entry update %v", fullpath)
		if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
			// log and drop the message
			glog.Error(err)
		}
	}

	f.logMetaEvent(ctx, fullpath, eventNotification)
}
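
// logMetaEvent wraps the event in a filer_pb.SubscribeMetadataResponse stamped
// with the current time, marshals it, and appends it to the local meta log
// buffer keyed by the entry's parent directory.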
func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {
	dir, _ := util.FullPath(fullpath).DirAndName()

	event := &filer_pb.SubscribeMetadataResponse{
		Directory:         dir,
		EventNotification: eventNotification,
		TsNs:              time.Now().UnixNano(),
	}
	data, err := proto.Marshal(event)
	if err != nil {
		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
		return
	}

	f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
}
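
// logFlushFunc persists a flushed meta log buffer to a per-minute log file under
// SystemLogDir, named "yyyy-mm-dd/hh-mm.<unique file id>", retrying until the
// append succeeds.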
func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
	if len(buf) == 0 {
		return
	}

	startTime, stopTime = startTime.UTC(), stopTime.UTC()

	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x", SystemLogDir,
		startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFileId,
		// startTime.Second(), startTime.Nanosecond(),
	)

	for {
		if err := f.appendToFile(targetFile, buf); err != nil {
			glog.V(1).Infof("log write failed %s: %v", targetFile, err)
			time.Sleep(737 * time.Millisecond)
		} else {
			break
		}
	}
}
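
// ReadPersistedLogBuffer replays persisted meta log files starting from startTime.
// It walks the per-day directories under SystemLogDir and the per-minute log files
// inside them, invoking eachLogEntryFn for every log entry newer than startTime,
// and returns the timestamp (in nanoseconds) of the last entry processed.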
func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
	startTime = startTime.UTC()
	startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
	startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute())

	sizeBuf := make([]byte, 4)
	startTsNs := startTime.UnixNano()

	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "")
	if listDayErr != nil {
		return lastTsNs, fmt.Errorf("failed to list logs by day: %v", listDayErr)
	}
	for _, dayEntry := range dayEntries {
		// println("checking day", dayEntry.FullPath)
		hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
		if listHourMinuteErr != nil {
			return lastTsNs, fmt.Errorf("failed to list logs for day %s: %v", dayEntry.Name(), listHourMinuteErr)
		}
		for _, hourMinuteEntry := range hourMinuteEntries {
			// println("checking hh-mm", hourMinuteEntry.FullPath)
			if dayEntry.Name() == startDate {
				hourMinute := util.FileNameBase(hourMinuteEntry.Name())
				if strings.Compare(hourMinute, startHourMinute) < 0 {
					continue
				}
			}
			// println("processing", hourMinuteEntry.FullPath)
			chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
			if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
				chunkedFileReader.Close()
				if err == io.EOF {
					continue
				}
				return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
			}
			chunkedFileReader.Close()
		}
	}

	return lastTsNs, nil
}
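
// ReadEachLogEntry reads length-prefixed filer_pb.LogEntry records from r: each
// record is a 4-byte size followed by that many bytes of marshaled protobuf.
// Entries with TsNs <= ns are skipped; for the rest, eachLogEntryFn is called and
// lastTsNs tracks the timestamp of the last entry handled. Reading continues
// until r returns an error (typically io.EOF).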
func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
	for {
		n, err := r.Read(sizeBuf)
		if err != nil {
			return lastTsNs, err
		}
		if n != 4 {
			return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
		}
		size := util.BytesToUint32(sizeBuf)
		// println("entry size", size)
		entryData := make([]byte, size)
		n, err = r.Read(entryData)
		if err != nil {
			return lastTsNs, err
		}
		if n != int(size) {
			return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
		}
		logEntry := &filer_pb.LogEntry{}
		if err = proto.Unmarshal(entryData, logEntry); err != nil {
			return lastTsNs, err
		}
		if logEntry.TsNs <= ns {
			continue
		}
		// println("each log: ", logEntry.TsNs)
		if err := eachLogEntryFn(logEntry); err != nil {
			return lastTsNs, err
		} else {
			lastTsNs = logEntry.TsNs
		}
	}
}