Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00.
commit 868f7875d7 (parent 26eff062c8)

refactor
@@ -1,20 +1,30 @@
 package lock_manager
 
 import (
+	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
+	"time"
 )
 
 type DistributedLockManager struct {
 	lockManager *LockManager
+	LockRing    *LockRing
 }
 
 func NewDistributedLockManager() *DistributedLockManager {
 	return &DistributedLockManager{
 		lockManager: NewLockManager(),
+		LockRing:    NewLockRing(time.Second * 5),
 	}
 }
 
-func (dlm *DistributedLockManager) Lock(host pb.ServerAddress, key string, expiredAtNs int64, token string, servers []pb.ServerAddress) (renewToken string, movedTo pb.ServerAddress, err error) {
+func (dlm *DistributedLockManager) Lock(host pb.ServerAddress, key string, expiredAtNs int64, token string) (renewToken string, movedTo pb.ServerAddress, err error) {
+	servers := dlm.LockRing.GetSnapshot()
+	if servers == nil {
+		err = fmt.Errorf("no lock server found")
+		return
+	}
+
 	server := hashKeyToServer(key, servers)
 	if server != host {
 		movedTo = server
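With this change, callers stop passing a server list into Lock: the manager resolves ownership from its own LockRing snapshot, and reports a redirect through movedTo when the key hashes to a different filer. A minimal sketch of the new call shape; the address, key, and durations are illustrative, not from the source:

package main

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager"
	"github.com/seaweedfs/seaweedfs/weed/pb"
)

func main() {
	dlm := lock_manager.NewDistributedLockManager()
	self := pb.ServerAddress("filer1:8888") // illustrative address

	// Membership now flows through the embedded ring, not the call site.
	dlm.LockRing.AddServer(self)

	expiredAtNs := time.Now().Add(10 * time.Second).UnixNano()
	renewToken, movedTo, err := dlm.Lock(self, "/some/lock/key", expiredAtNs, "")
	if err != nil {
		fmt.Println(err) // "no lock server found" while the ring has no snapshot
		return
	}
	if movedTo != "" {
		fmt.Println("key belongs to", movedTo, "- retry there")
		return
	}
	fmt.Println("locked; renew with token", renewToken)
}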
@@ -24,7 +34,13 @@ func (dlm *DistributedLockManager) Lock(host pb.ServerAddress, key string, expir
 		return
 	}
 
-func (dlm *DistributedLockManager) Unlock(host pb.ServerAddress, key string, token string, servers []pb.ServerAddress) (movedTo pb.ServerAddress, err error) {
+func (dlm *DistributedLockManager) Unlock(host pb.ServerAddress, key string, token string) (movedTo pb.ServerAddress, err error) {
+	servers := dlm.LockRing.GetSnapshot()
+	if servers == nil {
+		err = fmt.Errorf("no lock server found")
+		return
+	}
+
 	server := hashKeyToServer(key, servers)
 	if server != host {
 		movedTo = server
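Unlock mirrors Lock: both resolve ownership with hashKeyToServer, whose body sits outside these hunks. The idea is that every key deterministically maps to one server in the current snapshot, so any filer can compute the owner locally and answer with movedTo instead of proxying. A plausible sketch of that placement under this assumption; the real hash function may differ:

package lock_manager

import (
	"hash/fnv"

	"github.com/seaweedfs/seaweedfs/weed/pb"
)

// hashKeyToServerSketch is illustrative only: it captures the deterministic
// key-to-owner mapping that Lock and Unlock rely on, not the actual
// SeaweedFS implementation of hashKeyToServer.
func hashKeyToServerSketch(key string, servers []pb.ServerAddress) pb.ServerAddress {
	h := fnv.New32a()
	h.Write([]byte(key))
	return servers[h.Sum32()%uint32(len(servers))]
}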
@@ -49,7 +49,7 @@ type Filer struct {
 	Signature     int32
 	FilerConf     *FilerConf
 	RemoteStorage *FilerRemoteStorage
-	LockRing      *lock_manager.LockRing
+	Dlm           *lock_manager.DistributedLockManager
 }
 
 func NewFiler(masters map[string]pb.ServerAddress, grpcDialOption grpc.DialOption, filerHost pb.ServerAddress,
@@ -61,7 +61,7 @@ func NewFiler(masters map[string]pb.ServerAddress, grpcDialOption grpc.DialOptio
 		FilerConf:     NewFilerConf(),
 		RemoteStorage: NewFilerRemoteStorage(),
 		UniqueFilerId: util.RandomInt32(),
-		LockRing:      lock_manager.NewLockRing(time.Second * 5),
+		Dlm:           lock_manager.NewDistributedLockManager(),
 	}
 	if f.UniqueFilerId < 0 {
 		f.UniqueFilerId = -f.UniqueFilerId
@@ -120,9 +120,9 @@ func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*maste
 		address := pb.ServerAddress(update.Address)
 
 		if update.IsAdd {
-			f.LockRing.AddServer(address)
+			f.Dlm.LockRing.AddServer(address)
 		} else {
-			f.LockRing.RemoveServer(address)
+			f.Dlm.LockRing.RemoveServer(address)
 		}
 		f.MetaAggregator.OnPeerUpdate(update, startFrom)
 	})
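On the filer side the refactor inverts ownership: Filer no longer holds a bare LockRing, it holds the whole DistributedLockManager, and peer updates from the master feed the ring inside it. A short sketch of the ring API as it appears across these hunks; the callback body and addresses are illustrative, and the reading of the 5-second parameter as a snapshot settle interval is an assumption:

package main

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager"
	"github.com/seaweedfs/seaweedfs/weed/pb"
)

func main() {
	// 5s matches the value used in the diff; its exact semantics are assumed.
	ring := lock_manager.NewLockRing(time.Second * 5)

	// Invoked when membership changes settle into a new snapshot; this is
	// the hook the filer server attaches OnDlmChangeSnapshot to.
	ring.SetTakeSnapshotCallback(func(snapshot []pb.ServerAddress) {
		fmt.Println("new snapshot:", snapshot)
	})

	ring.AddServer(pb.ServerAddress("filer1:8888"))    // illustrative
	ring.RemoveServer(pb.ServerAddress("filer2:8888")) // illustrative
}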
@@ -13,15 +13,10 @@ import (
 func (fs *FilerServer) Lock(ctx context.Context, req *filer_pb.LockRequest) (resp *filer_pb.LockResponse, err error) {
 
 	resp = &filer_pb.LockResponse{}
-	snapshot := fs.filer.LockRing.GetSnapshot()
-	if snapshot == nil {
-		resp.Error = "no lock server found"
-		return
-	}
 
 	var movedTo pb.ServerAddress
 	expiredAtNs := time.Now().Add(time.Duration(req.SecondsToLock) * time.Second).UnixNano()
-	resp.RenewToken, movedTo, err = fs.dlm.Lock(fs.option.Host, req.Name, expiredAtNs, req.PreviousLockToken, snapshot)
+	resp.RenewToken, movedTo, err = fs.filer.Dlm.Lock(fs.option.Host, req.Name, expiredAtNs, req.PreviousLockToken)
 
 	if err != nil {
 		resp.Error = fmt.Sprintf("%v", err)
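The gRPC handler now delegates straight to the filer's Dlm instead of checking the ring itself. A sketch of the matching client call, using only request and response fields visible in this diff; the filer address and lock name are illustrative, and redirect handling is elided because the response field carrying the new owner sits outside the shown hunks:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

func lockOnce(filer pb.ServerAddress, dialOption grpc.DialOption) error {
	return pb.WithFilerClient(false, 0, filer, dialOption, func(client filer_pb.SeaweedFilerClient) error {
		resp, err := client.Lock(context.Background(), &filer_pb.LockRequest{
			Name:          "/topics/t1", // illustrative lock name
			SecondsToLock: 10,
		})
		if err != nil {
			return err
		}
		if resp.Error != "" {
			return fmt.Errorf("lock: %s", resp.Error)
		}
		fmt.Println("renew token:", resp.RenewToken)
		return nil
	})
}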
@@ -37,13 +32,8 @@ func (fs *FilerServer) Lock(ctx context.Context, req *filer_pb.LockRequest) (res
 func (fs *FilerServer) Unlock(ctx context.Context, req *filer_pb.UnlockRequest) (resp *filer_pb.UnlockResponse, err error) {
 
 	resp = &filer_pb.UnlockResponse{}
-	snapshot := fs.filer.LockRing.GetSnapshot()
-	if snapshot == nil {
-		resp.Error = "no lock server found"
-		return
-	}
 
-	_, err = fs.dlm.Unlock(fs.option.Host, req.Name, req.LockToken, snapshot)
+	_, err = fs.filer.Dlm.Unlock(fs.option.Host, req.Name, req.LockToken)
 	if err != nil {
 		resp.Error = fmt.Sprintf("%v", err)
 	}
@@ -56,7 +46,7 @@ func (fs *FilerServer) Unlock(ctx context.Context, req *filer_pb.UnlockRequest)
 func (fs *FilerServer) TransferLocks(ctx context.Context, req *filer_pb.TransferLocksRequest) (*filer_pb.TransferLocksResponse, error) {
 
 	for _, lock := range req.Locks {
-		fs.dlm.InsertLock(lock.Name, lock.ExpiredAtNs, lock.RenewToken)
+		fs.filer.Dlm.InsertLock(lock.Name, lock.ExpiredAtNs, lock.RenewToken)
 	}
 
 	return &filer_pb.TransferLocksResponse{}, nil
@@ -64,13 +54,13 @@ func (fs *FilerServer) TransferLocks(ctx context.Context, req *filer_pb.Transfer
 }
 
 func (fs *FilerServer) OnDlmChangeSnapshot(snapshot []pb.ServerAddress) {
-	locks := fs.dlm.SelectNotOwnedLocks(fs.option.Host, snapshot)
+	locks := fs.filer.Dlm.SelectNotOwnedLocks(fs.option.Host, snapshot)
 	if len(locks) == 0 {
 		return
 	}
 
 	for _, lock := range locks {
-		server := fs.dlm.CalculateTargetServer(lock.Key, snapshot)
+		server := fs.filer.Dlm.CalculateTargetServer(lock.Key, snapshot)
 		if err := pb.WithFilerClient(false, 0, server, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 			_, err := client.TransferLocks(context.Background(), &filer_pb.TransferLocksRequest{
 				Locks: []*filer_pb.Lock{
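Read together, the last three hunks form the re-homing protocol: when the ring snapshot changes, each filer selects the locks it no longer owns, computes each lock's new owner, and hands the lock over via TransferLocks, whose receiver re-inserts it into its own Dlm. A condensed sketch assembled from those hunks; the per-lock wire fields are left unset here because the hunk is cut off before they are populated:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// rehome mirrors OnDlmChangeSnapshot: move every lock this host no longer
// owns to the server the new snapshot assigns it to.
func rehome(dlm *lock_manager.DistributedLockManager, self pb.ServerAddress,
	snapshot []pb.ServerAddress, dialOption grpc.DialOption) {

	for _, lock := range dlm.SelectNotOwnedLocks(self, snapshot) {
		target := dlm.CalculateTargetServer(lock.Key, snapshot)
		if err := pb.WithFilerClient(false, 0, target, dialOption, func(client filer_pb.SeaweedFilerClient) error {
			_, err := client.TransferLocks(context.Background(), &filer_pb.TransferLocksRequest{
				// Per-lock fields (Name, ExpiredAtNs, RenewToken on the
				// receiving side) are populated in code beyond this hunk.
				Locks: []*filer_pb.Lock{},
			})
			return err
		}); err != nil {
			fmt.Println("transfer failed:", err)
		}
	}
}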
@@ -3,7 +3,6 @@ package weed_server
 import (
 	"context"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager"
 	"net/http"
 	"os"
 	"sync"
@@ -95,9 +94,6 @@ type FilerServer struct {
 	// track known metadata listeners
 	knownListenersLock sync.Mutex
 	knownListeners     map[int32]int32
-
-	// distributed lock manager
-	dlm *lock_manager.DistributedLockManager
 }
 
 func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) {
@@ -185,8 +181,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 		fs.filer.Shutdown()
 	})
 
-	fs.dlm = lock_manager.NewDistributedLockManager()
-	fs.filer.LockRing.SetTakeSnapshotCallback(fs.OnDlmChangeSnapshot)
+	fs.filer.Dlm.LockRing.SetTakeSnapshotCallback(fs.OnDlmChangeSnapshot)
 
 	return fs, nil
 }