package filer2

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"google.golang.org/grpc"

	"github.com/karlseguin/ccache"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

const PaginationSize = 1024 * 256

var (
	OS_UID = uint32(os.Getuid())
	OS_GID = uint32(os.Getgid())
)
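
// Filer manages file system metadata for a filer server. It wraps a FilerStore
// backend, keeps a small in-memory cache of directory entries, and uses a
// MasterClient to stay connected to the master servers.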
type Filer struct {
	store               *FilerStoreWrapper
	directoryCache      *ccache.Cache
	MasterClient        *wdclient.MasterClient
	fileIdDeletionQueue *util.UnboundedQueue
	GrpcDialOption      grpc.DialOption
	DirBucketsPath      string
	FsyncBuckets        []string
	buckets             *FilerBuckets
	Cipher              bool
	MetaLogBuffer       *log_buffer.LogBuffer
	metaLogCollection   string
	metaLogReplication  string
}
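
// NewFiler wires up a Filer that talks to the given masters, sets up the
// one-minute metadata log buffer flushed through f.logFlushFunc, and starts the
// background loop that processes queued file id deletions. The store itself
// must be attached separately via SetStore.
//
// A minimal usage sketch (addresses, ports, and the store value are
// illustrative only):
//
//	f := NewFiler([]string{"localhost:9333"}, grpcDialOption, "localhost", 18888, "", "000", func() {})
//	f.SetStore(someFilerStore) // someFilerStore stands in for any concrete FilerStore
//	go f.KeepConnectedToMaster()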
func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
	f := &Filer{
		directoryCache:      ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
		MasterClient:        wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters),
		fileIdDeletionQueue: util.NewUnboundedQueue(),
		GrpcDialOption:      grpcDialOption,
	}
	f.MetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
	f.metaLogCollection = collection
	f.metaLogReplication = replication

	go f.loopProcessingDeletion()

	return f
}
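
// SetStore attaches the backing FilerStore, wrapping it in a FilerStoreWrapper
// before use.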
func (f *Filer) SetStore(store FilerStore) {
	f.store = NewFilerStoreWrapper(store)
}
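
// DisableDirectoryCache turns off the in-memory directory entry cache; lookups
// then always go to the store.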
func (f *Filer) DisableDirectoryCache() {
	f.directoryCache = nil
}
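
// GetMaster returns the address of the currently connected master server.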
func (f *Filer) GetMaster() string {
	return f.MasterClient.GetMaster()
}
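
// KeepConnectedToMaster keeps the MasterClient connected to the master servers.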
func (f *Filer) KeepConnectedToMaster() {
	f.MasterClient.KeepConnectedToMaster()
}
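
// BeginTransaction, CommitTransaction and RollbackTransaction delegate to the
// underlying FilerStore, letting callers group related metadata operations
// where the store supports transactions.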
func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
	return f.store.BeginTransaction(ctx)
}

func (f *Filer) CommitTransaction(ctx context.Context) error {
	return f.store.CommitTransaction(ctx)
}

func (f *Filer) RollbackTransaction(ctx context.Context) error {
	return f.store.RollbackTransaction(ctx)
}
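
// CreateEntry adds entry to the filer, creating any missing parent directories
// along the way. If the entry already exists it is updated, unless o_excl is
// set, in which case an EEXIST error is returned. The change is published via
// NotifyUpdateEvent, and chunks that are no longer referenced are handed to
// deleteChunksIfNotNew.
//
// An illustrative call (the path and attributes are example values only):
//
//	err := f.CreateEntry(ctx, &Entry{
//		FullPath: util.FullPath("/some/dir/file.txt"),
//		Attr:     Attr{Mtime: time.Now(), Crtime: time.Now(), Mode: 0644, Uid: OS_UID, Gid: OS_GID},
//	}, false, false)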
func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool) error {

	if string(entry.FullPath) == "/" {
		return nil
	}

	dirParts := strings.Split(string(entry.FullPath), "/")

	// fmt.Printf("directory parts: %+v\n", dirParts)

	var lastDirectoryEntry *Entry
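
	// walk the path from the root down, creating any missing parent directories
	// and caching each directory entry that is visited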
	for i := 1; i < len(dirParts); i++ {
		dirPath := "/" + util.Join(dirParts[:i]...)
		// fmt.Printf("%d directory: %+v\n", i, dirPath)

		// first check local cache
		dirEntry := f.cacheGetDirectory(dirPath)

		// not found, check the store directly
		if dirEntry == nil {
			glog.V(4).Infof("find uncached directory: %s", dirPath)
			dirEntry, _ = f.FindEntry(ctx, util.FullPath(dirPath))
		} else {
			// glog.V(4).Infof("found cached directory: %s", dirPath)
		}

		// no such existing directory
		if dirEntry == nil {

			// create the directory
			now := time.Now()

			dirEntry = &Entry{
				FullPath: util.FullPath(dirPath),
				Attr: Attr{
					Mtime:       now,
					Crtime:      now,
					Mode:        os.ModeDir | entry.Mode | 0110,
					Uid:         entry.Uid,
					Gid:         entry.Gid,
					Collection:  entry.Collection,
					Replication: entry.Replication,
					UserName:    entry.UserName,
					GroupNames:  entry.GroupNames,
				},
			}

			glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
			mkdirErr := f.store.InsertEntry(ctx, dirEntry)
			if mkdirErr != nil {
				if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
					glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
					return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
				}
			} else {
				f.maybeAddBucket(dirEntry)
				f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster)
			}

		} else if !dirEntry.IsDirectory() {
			glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
			return fmt.Errorf("%s is a file", dirPath)
		}

		// cache the directory entry
		f.cacheSetDirectory(dirPath, dirEntry, i)

		// remember the direct parent directory entry
		if i == len(dirParts)-1 {
			lastDirectoryEntry = dirEntry
		}

	}
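
	// the direct parent must exist at this point; otherwise the entry has nowhere to go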
	if lastDirectoryEntry == nil {
		glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
		return fmt.Errorf("parent folder not found: %v", entry.FullPath)
	}

	/*
		if !hasWritePermission(lastDirectoryEntry, entry) {
			glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
				lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
			return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
		}
	*/
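
	// insert a brand-new entry, or update the existing one unless the caller asked for exclusive creation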
	oldEntry, _ := f.FindEntry(ctx, entry.FullPath)

	glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl)
	if oldEntry == nil {
		if err := f.store.InsertEntry(ctx, entry); err != nil {
			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
		}
	} else {
		if o_excl {
			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
		}
		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
			glog.Errorf("update entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
		}
	}

	f.maybeAddBucket(entry)
	f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster)

	f.deleteChunksIfNotNew(oldEntry, entry)

	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)

	return nil
}
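
// UpdateEntry replaces the stored entry, refusing to change a directory into a
// file or a file into a directory.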
func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
	if oldEntry != nil {
		if oldEntry.IsDirectory() && !entry.IsDirectory() {
			glog.Errorf("existing %s is a directory", entry.FullPath)
			return fmt.Errorf("existing %s is a directory", entry.FullPath)
		}
		if !oldEntry.IsDirectory() && entry.IsDirectory() {
			glog.Errorf("existing %s is a file", entry.FullPath)
			return fmt.Errorf("existing %s is a file", entry.FullPath)
		}
	}
	return f.store.UpdateEntry(ctx, entry)
}
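
// FindEntry looks up a single entry by its full path. The root directory is
// synthesized rather than read from the store, and an entry whose TTL has
// expired is removed and reported as not found.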
func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {

	now := time.Now()

	if string(p) == "/" {
		return &Entry{
			FullPath: p,
			Attr: Attr{
				Mtime:  now,
				Crtime: now,
				Mode:   os.ModeDir | 0755,
				Uid:    OS_UID,
				Gid:    OS_GID,
			},
		}, nil
	}
	entry, err = f.store.FindEntry(ctx, p)
	if entry != nil && entry.TtlSec > 0 {
		if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
			// the entry has expired: delete it (p is already the entry's own full path) and report not found
			f.store.DeleteEntry(ctx, p)
			return nil, filer_pb.ErrNotFound
		}
	}
	return
}
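
// ListDirectoryEntries lists the children of directory p, re-querying the store
// when expired entries were skipped so that up to limit live entries are
// returned.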
func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
	if strings.HasSuffix(string(p), "/") && len(p) > 1 {
		p = p[0 : len(p)-1]
	}

	var makeupEntries []*Entry
	entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
	for expiredCount > 0 && err == nil {
		makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount)
		if err == nil {
			entries = append(entries, makeupEntries...)
		}
	}

	return entries, err
}
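
// doListDirectoryEntries performs one listing pass against the store, dropping
// and deleting entries whose TTL has expired and reporting how many were
// skipped so the caller can fetch replacements.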
func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) {
	listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
	if listErr != nil {
		return listedEntries, expiredCount, "", listErr
	}
	for _, entry := range listedEntries {
		lastFileName = entry.Name()
		if entry.TtlSec > 0 {
			if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
				f.store.DeleteEntry(ctx, p.Child(entry.Name()))
				expiredCount++
				continue
			}
		}
		entries = append(entries, entry)
	}
	return
}
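
// cacheDelDirectory removes a directory entry from the in-memory cache. The
// root directory is left alone.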
func (f *Filer) cacheDelDirectory(dirpath string) {

	if dirpath == "/" {
		return
	}

	if f.directoryCache == nil {
		return
	}
	f.directoryCache.Delete(dirpath)
}
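
// cacheGetDirectory returns the cached entry for dirpath, or nil if caching is
// disabled or the entry is not present.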
func (f *Filer) cacheGetDirectory(dirpath string) *Entry {

	if f.directoryCache == nil {
		return nil
	}
	item := f.directoryCache.Get(dirpath)
	if item == nil {
		return nil
	}
	return item.Value().(*Entry)
}
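
// cacheSetDirectory caches a directory entry. Directories close to the root get
// a longer TTL (60 minutes at level 0, shrinking by 6 minutes per level up to
// level 9); deeper levels fall back to the default 60 minutes.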
func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) {

	if f.directoryCache == nil {
		return
	}

	minutes := 60
	if level < 10 {
		minutes -= level * 6
	}

	f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute)
}
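
// Shutdown stops the metadata log buffer and shuts down the underlying store.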
func (f *Filer) Shutdown() {
	f.MetaLogBuffer.Shutdown()
	f.store.Shutdown()
}