seaweedfs/weed/filer/meta_aggregator.go

281 lines
8.4 KiB
Go
Raw Normal View History

2020-09-01 07:21:19 +00:00
package filer
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"io"
"strings"
"sync"
"sync/atomic"
"time"
"google.golang.org/grpc"
2022-08-17 19:05:07 +00:00
"google.golang.org/protobuf/proto"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
)
type MetaAggregator struct {
2021-11-06 21:23:35 +00:00
filer *Filer
self pb.ServerAddress
isLeader bool
grpcDialOption grpc.DialOption
MetaLogBuffer *log_buffer.LogBuffer
peerStatues map[pb.ServerAddress]int
2021-11-06 21:23:35 +00:00
peerStatuesLock sync.Mutex
// notifying clients
ListenersLock sync.Mutex
ListenersCond *sync.Cond
}
// MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk.
// The old data comes from what each LocalMetadata persisted on disk.
2021-11-06 21:23:35 +00:00
func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
t := &MetaAggregator{
2021-11-06 21:23:35 +00:00
filer: filer,
self: self,
grpcDialOption: grpcDialOption,
peerStatues: make(map[pb.ServerAddress]int),
}
t.ListenersCond = sync.NewCond(&t.ListenersLock)
2024-01-15 08:20:12 +00:00
t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, nil, func() {
t.ListenersCond.Broadcast()
})
return t
}
func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startFrom time.Time) {
2021-11-06 21:23:35 +00:00
address := pb.ServerAddress(update.Address)
if update.IsAdd {
// every filer should subscribe to a new filer
if ma.setActive(address, true) {
2022-07-12 09:56:19 +00:00
go ma.loopSubscribeToOneFiler(ma.filer, ma.self, address, startFrom)
}
2021-11-06 21:23:35 +00:00
} else {
ma.setActive(address, false)
}
}
func (ma *MetaAggregator) setActive(address pb.ServerAddress, isActive bool) (notDuplicated bool) {
2021-11-06 21:23:35 +00:00
ma.peerStatuesLock.Lock()
defer ma.peerStatuesLock.Unlock()
if isActive {
if _, found := ma.peerStatues[address]; found {
ma.peerStatues[address] += 1
} else {
ma.peerStatues[address] = 1
notDuplicated = true
}
2021-11-06 21:23:35 +00:00
} else {
if _, found := ma.peerStatues[address]; found {
delete(ma.peerStatues, address)
}
2021-11-06 21:23:35 +00:00
}
return
2021-11-06 21:23:35 +00:00
}
func (ma *MetaAggregator) isActive(address pb.ServerAddress) (isActive bool) {
2021-11-06 21:23:35 +00:00
ma.peerStatuesLock.Lock()
defer ma.peerStatuesLock.Unlock()
var count int
count, isActive = ma.peerStatues[address]
return count > 0 && isActive
}
2022-07-12 09:56:19 +00:00
func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time) {
lastTsNs := startFrom.UnixNano()
for {
2022-07-12 09:56:19 +00:00
glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs)
nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs)
if !ma.isActive(peer) {
glog.V(0).Infof("stop subscribing remote %s meta change", peer)
return
}
if err != nil {
errLvl := glog.Level(0)
if strings.Contains(err.Error(), "duplicated local subscription detected") {
errLvl = glog.Level(4)
}
glog.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err)
}
if lastTsNs < nextLastTsNs {
lastTsNs = nextLastTsNs
}
time.Sleep(1733 * time.Millisecond)
}
}
func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom int64) (int64, error) {
2020-09-06 05:52:15 +00:00
/*
Each filer reads the "filer.store.id", which is the store's signature when filer starts.
2020-09-06 05:52:15 +00:00
When reading from other filers' local meta changes:
* if the received change does not contain signature from self, apply the change to current filer store.
2020-09-06 05:52:15 +00:00
Upon connecting to other filers, need to remember their signature and their offsets.
2020-09-06 05:52:15 +00:00
*/
2020-09-06 05:52:15 +00:00
var maybeReplicateMetadataChange func(*filer_pb.SubscribeMetadataResponse)
lastPersistTime := time.Now()
lastTsNs := startFrom
2020-09-06 07:29:16 +00:00
peerSignature, err := ma.readFilerStoreSignature(peer)
if err != nil {
return lastTsNs, fmt.Errorf("connecting to peer filer %s: %v", peer, err)
}
2020-07-13 07:05:20 +00:00
2021-03-09 22:13:48 +00:00
// when filer store is not shared by multiple filers
2020-09-06 07:29:16 +00:00
if peerSignature != f.Signature {
2020-09-06 07:50:38 +00:00
if prevTsNs, err := ma.readOffset(f, peer, peerSignature); err == nil {
lastTsNs = prevTsNs
defer func(prevTsNs int64) {
if lastTsNs != prevTsNs && lastTsNs != lastPersistTime.UnixNano() {
if err := ma.updateOffset(f, peer, peerSignature, lastTsNs); err == nil {
glog.V(0).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
} else {
glog.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
}
}
}(prevTsNs)
}
2020-07-13 07:05:20 +00:00
2020-09-06 07:29:16 +00:00
glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
2020-09-06 07:50:51 +00:00
var counter int64
2020-09-06 19:10:37 +00:00
var synced bool
maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
if err := Replay(f.Store, event); err != nil {
2020-09-06 07:29:16 +00:00
glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
return
}
2020-09-06 07:50:51 +00:00
counter++
2020-09-06 07:12:41 +00:00
if lastPersistTime.Add(time.Minute).Before(time.Now()) {
2020-09-06 07:29:16 +00:00
if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
2020-09-06 07:12:41 +00:00
if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
2020-09-06 07:50:51 +00:00
glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
2020-09-12 11:08:03 +00:00
} else if !synced {
2020-09-06 19:10:37 +00:00
synced = true
glog.V(0).Infof("synced with %s", peer)
2020-09-06 07:12:41 +00:00
}
lastPersistTime = time.Now()
2020-09-06 07:50:51 +00:00
counter = 0
} else {
2020-09-06 07:29:16 +00:00
glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
}
}
}
}
processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
data, err := proto.Marshal(event)
if err != nil {
glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
return err
}
dir := event.Directory
2020-07-08 06:17:17 +00:00
// println("received meta change", dir, "size", len(data))
2021-09-26 18:54:13 +00:00
ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
if maybeReplicateMetadataChange != nil {
maybeReplicateMetadataChange(event)
}
return nil
}
glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId)
err = pb.WithFilerClient(true, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
atomic.AddInt32(&ma.filer.UniqueFilerEpoch, 1)
stream, err := client.SubscribeLocalMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "filer:" + string(self),
PathPrefix: "/",
SinceNs: lastTsNs,
ClientId: ma.filer.UniqueFilerId,
ClientEpoch: atomic.LoadInt32(&ma.filer.UniqueFilerEpoch),
})
if err != nil {
return fmt.Errorf("subscribe: %v", err)
}
for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}
if err := processEventFn(resp); err != nil {
return fmt.Errorf("process %v: %v", resp, err)
}
f.onMetadataChangeEvent(resp)
lastTsNs = resp.TsNs
}
})
return lastTsNs, err
}
func (ma *MetaAggregator) readFilerStoreSignature(peer pb.ServerAddress) (sig int32, err error) {
err = pb.WithFilerClient(false, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return err
}
2020-09-06 07:29:16 +00:00
sig = resp.Signature
return nil
})
return
}
2020-09-12 11:08:03 +00:00
const (
	// MetaOffsetPrefix prefixes the KV keys under which per-peer sync offsets are stored.
	MetaOffsetPrefix = "Meta"
)
func GetPeerMetaOffsetKey(peerSignature int32) []byte {
2020-09-12 11:08:03 +00:00
key := []byte(MetaOffsetPrefix + "xxxx")
2020-09-06 07:29:16 +00:00
util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
return key
}
func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignature int32) (lastTsNs int64, err error) {
key := GetPeerMetaOffsetKey(peerSignature)
2020-09-06 07:29:16 +00:00
value, err := f.Store.KvGet(context.Background(), key)
if err != nil {
return 0, fmt.Errorf("readOffset %s : %v", peer, err)
}
lastTsNs = int64(util.BytesToUint64(value))
glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
return
}
func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSignature int32, lastTsNs int64) (err error) {
2020-09-06 07:29:16 +00:00
key := GetPeerMetaOffsetKey(peerSignature)
value := make([]byte, 8)
util.Uint64toBytes(value, uint64(lastTsNs))
2020-09-06 07:29:16 +00:00
err = f.Store.KvPut(context.Background(), key, value)
if err != nil {
return fmt.Errorf("updateOffset %s : %v", peer, err)
}
glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
return
}