Mirror of https://github.com/seaweedfs/seaweedfs.git

filer: remove replication, collection, disk_type info from entry metadata

This metadata can change over time and is not used, so it is dropped from the stored entry attributes.

Commit 4fd5f96598 (parent cbf46de5f4)
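In practice, placement settings are no longer persisted with an entry; they are re-derived on each write from the filer's path rules and defaults. A minimal sketch of entry construction after this commit (illustrative, not taken from the diff; only the filer_pb types are real):

package example

import (
    "time"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// buildEntry shows the slimmed-down attributes: TtlSec still lives on the
// entry, while replication, collection, and disk type are left for the
// filer to resolve per write.
func buildEntry(name string, ttlSec int32) *filer_pb.Entry {
    return &filer_pb.Entry{
        Name: name,
        Attributes: &filer_pb.FuseAttributes{
            Crtime: time.Now().Unix(),
            Mtime:  time.Now().Unix(),
            TtlSec: ttlSec,
        },
    }
}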
@@ -166,14 +166,11 @@ message FuseAttributes {
     uint32 gid = 5;
     int64 crtime = 6; // unix time in seconds
     string mime = 7;
-    string replication = 8;
-    string collection = 9;
     int32 ttl_sec = 10;
     string user_name = 11; // for hdfs
     repeated string group_name = 12; // for hdfs
     string symlink_target = 13;
     bytes md5 = 14;
-    string disk_type = 15;
     uint32 rdev = 16;
     uint64 inode = 17;
 }
@@ -410,16 +410,14 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
         Entry: &filer_pb.Entry{
             Name: fileName,
             Attributes: &filer_pb.FuseAttributes{
                 Crtime:   time.Now().Unix(),
                 Mtime:    time.Now().Unix(),
                 Gid:      task.gid,
                 Uid:      task.uid,
                 FileSize: uint64(task.fileSize),
                 FileMode: uint32(task.fileMode),
                 Mime:     mimeType,
-                Replication: *worker.options.replication,
-                Collection:  *worker.options.collection,
                 TtlSec:   worker.options.ttlSec,
             },
             Chunks: chunks,
         },
@@ -547,16 +545,14 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
         Entry: &filer_pb.Entry{
             Name: fileName,
             Attributes: &filer_pb.FuseAttributes{
                 Crtime:   time.Now().Unix(),
                 Mtime:    time.Now().Unix(),
                 Gid:      task.gid,
                 Uid:      task.uid,
                 FileSize: uint64(task.fileSize),
                 FileMode: uint32(task.fileMode),
                 Mime:     mimeType,
-                Replication: replication,
-                Collection:  collection,
                 TtlSec:   worker.options.ttlSec,
             },
             Chunks: manifestedChunks,
         },
@@ -15,10 +15,7 @@ type Attr struct {
     Uid           uint32 // owner uid
     Gid           uint32 // group gid
     Mime          string // mime type
-    Replication   string // replication
-    Collection    string // collection name
     TtlSec        int32  // ttl in seconds
-    DiskType      string
     UserName      string
     GroupNames    []string
     SymlinkTarget string
@@ -39,10 +39,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
     Uid:           entry.Uid,
     Gid:           entry.Gid,
     Mime:          entry.Mime,
-    Collection:    entry.Attr.Collection,
-    Replication:   entry.Attr.Replication,
     TtlSec:        entry.Attr.TtlSec,
-    DiskType:      entry.Attr.DiskType,
     UserName:      entry.Attr.UserName,
     GroupName:     entry.Attr.GroupNames,
     SymlinkTarget: entry.Attr.SymlinkTarget,
@@ -67,10 +64,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
     t.Uid = attr.Uid
     t.Gid = attr.Gid
     t.Mime = attr.Mime
-    t.Collection = attr.Collection
-    t.Replication = attr.Replication
     t.TtlSec = attr.TtlSec
-    t.DiskType = attr.DiskType
     t.UserName = attr.UserName
     t.GroupNames = attr.GroupName
     t.SymlinkTarget = attr.SymlinkTarget
@@ -37,8 +37,6 @@ type Filer struct {
     fileIdDeletionQueue *util.UnboundedQueue
     GrpcDialOption      grpc.DialOption
     DirBucketsPath      string
-    FsyncBuckets        []string
-    buckets             *FilerBuckets
     Cipher              bool
     LocalMetaLogBuffer  *log_buffer.LogBuffer
     metaLogCollection   string
@@ -217,7 +215,6 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
         }
     }

-    f.maybeAddBucket(entry)
     f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)

     f.deleteChunksIfNotNew(oldEntry, entry)
@@ -254,15 +251,13 @@ func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, di
         dirEntry = &Entry{
             FullPath: util.FullPath(dirPath),
             Attr: Attr{
                 Mtime:  now,
                 Crtime: now,
                 Mode:   os.ModeDir | entry.Mode | 0111,
                 Uid:    entry.Uid,
                 Gid:    entry.Gid,
-                Collection:  entry.Collection,
-                Replication: entry.Replication,
                 UserName:   entry.UserName,
                 GroupNames: entry.GroupNames,
             },
         }

@@ -274,7 +269,6 @@ func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, di
             return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
         }
     } else {
-        f.maybeAddBucket(dirEntry)
         f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
     }

@@ -1,76 +1,9 @@
 package filer

 import (
-    "context"
-    "math"
     "strings"
-    "sync"
-
-    "github.com/chrislusf/seaweedfs/weed/glog"
-    "github.com/chrislusf/seaweedfs/weed/util"
 )

-type BucketName string
-type BucketOption struct {
-    Name        BucketName
-    Replication string
-    fsync       bool
-}
-type FilerBuckets struct {
-    dirBucketsPath string
-    buckets        map[BucketName]*BucketOption
-    sync.RWMutex
-}
-
-func (f *Filer) LoadBuckets() {
-
-    f.buckets = &FilerBuckets{
-        buckets: make(map[BucketName]*BucketOption),
-    }
-
-    limit := int64(math.MaxInt32)
-
-    entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "", "")
-
-    if err != nil {
-        glog.V(1).Infof("no buckets found: %v", err)
-        return
-    }
-
-    shouldFsyncMap := make(map[string]bool)
-    for _, bucket := range f.FsyncBuckets {
-        shouldFsyncMap[bucket] = true
-    }
-
-    glog.V(1).Infof("buckets found: %d", len(entries))
-
-    f.buckets.Lock()
-    for _, entry := range entries {
-        _, shouldFsnyc := shouldFsyncMap[entry.Name()]
-        f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
-            Name:        BucketName(entry.Name()),
-            Replication: entry.Replication,
-            fsync:       shouldFsnyc,
-        }
-    }
-    f.buckets.Unlock()
-
-}
-
-func (f *Filer) ReadBucketOption(buketName string) (replication string, fsync bool) {
-
-    f.buckets.RLock()
-    defer f.buckets.RUnlock()
-
-    option, found := f.buckets.buckets[BucketName(buketName)]
-
-    if !found {
-        return "", false
-    }
-    return option.Replication, option.fsync
-
-}
-
 func (f *Filer) isBucket(entry *Entry) bool {
     if !entry.IsDirectory() {
         return false
@@ -83,43 +16,6 @@ func (f *Filer) isBucket(entry *Entry) bool {
         return false
     }

-    f.buckets.RLock()
-    defer f.buckets.RUnlock()
-
-    _, found := f.buckets.buckets[BucketName(dirName)]
-
-    return found
-
-}
-
-func (f *Filer) maybeAddBucket(entry *Entry) {
-    if !entry.IsDirectory() {
-        return
-    }
-    parent, dirName := entry.FullPath.DirAndName()
-    if parent != f.DirBucketsPath {
-        return
-    }
-    f.addBucket(dirName, &BucketOption{
-        Name:        BucketName(dirName),
-        Replication: entry.Replication,
-    })
-}
-
-func (f *Filer) addBucket(buketName string, bucketOption *BucketOption) {
-
-    f.buckets.Lock()
-    defer f.buckets.Unlock()
-
-    f.buckets.buckets[BucketName(buketName)] = bucketOption
-
-}
-
-func (f *Filer) deleteBucket(buketName string) {
-
-    f.buckets.Lock()
-    defer f.buckets.Unlock()
-
-    delete(f.buckets.buckets, BucketName(buketName))
-
-}
+    return true
 }
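With the cached bucket options gone, bucket detection reduces to a pure path check. A sketch of the surviving isBucket; the middle of the function is not shown in this diff, so the DirAndName check below is an assumption modeled on the removed maybeAddBucket:

// Sketch only — the body between the two hunks above is reconstructed.
func (f *Filer) isBucket(entry *Entry) bool {
    if !entry.IsDirectory() {
        return false
    }
    parent, _ := entry.FullPath.DirAndName()
    if parent != f.DirBucketsPath {
        return false
    }
    return true
}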
@@ -60,7 +60,6 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
     if isDeleteCollection {
         collectionName := entry.Name()
         f.doDeleteCollection(collectionName)
-        f.deleteBucket(collectionName)
     }

     return nil
@@ -53,12 +53,10 @@ func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, conte
         Name:        name,
         IsDirectory: false,
         Attributes: &filer_pb.FuseAttributes{
             Mtime:    time.Now().Unix(),
             Crtime:   time.Now().Unix(),
             FileMode: uint32(0644),
-            Collection:  "",
-            Replication: "",
             FileSize: uint64(len(content)),
         },
         Content: content,
     },
@@ -58,16 +58,14 @@ func (wfs *WFS) Mknod(cancel <-chan struct{}, in *fuse.MknodIn, name string, out
     Name:        name,
     IsDirectory: false,
     Attributes: &filer_pb.FuseAttributes{
         Mtime:    now,
         Crtime:   now,
         FileMode: uint32(fileMode),
         Uid:      in.Uid,
         Gid:      in.Gid,
-        Collection:  wfs.option.Collection,
-        Replication: wfs.option.Replication,
         TtlSec:   wfs.option.TtlSec,
         Rdev:     in.Rdev,
         Inode:    inode,
     },
 }

@@ -139,7 +139,6 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status {
         entry.Attributes.Crtime = time.Now().Unix()
     }
     entry.Attributes.Mtime = time.Now().Unix()
-    entry.Attributes.Collection, entry.Attributes.Replication = fh.dirtyPages.GetStorageOptions()
 }

 request := &filer_pb.CreateEntryRequest{
(The identical change appears again here, applied to a second copy of filer.proto in the repository.)

@@ -166,14 +166,11 @@ message FuseAttributes {
     uint32 gid = 5;
     int64 crtime = 6; // unix time in seconds
     string mime = 7;
-    string replication = 8;
-    string collection = 9;
     int32 ttl_sec = 10;
     string user_name = 11; // for hdfs
     repeated string group_name = 12; // for hdfs
     string symlink_target = 13;
     bytes md5 = 14;
-    string disk_type = 15;
     uint32 rdev = 16;
     uint64 inode = 17;
 }
(File diff suppressed because it is too large.)
@@ -151,8 +151,6 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
     newEntry := filer.FromPbEntry(req.Directory, req.Entry)
     newEntry.Chunks = chunks
     newEntry.TtlSec = so.TtlSeconds
-    newEntry.Collection = so.Collection
-    newEntry.DiskType = so.DiskType

     createErr := fs.filer.CreateEntry(ctx, newEntry, req.OExcl, req.IsFromOtherCluster, req.Signatures, req.SkipCheckParentDirectory)

@@ -218,10 +216,10 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry

     if newEntry.Attributes != nil {
         so, _ := fs.detectStorageOption(fullpath,
-            newEntry.Attributes.Collection,
-            newEntry.Attributes.Replication,
+            "",
+            "",
             newEntry.Attributes.TtlSec,
-            newEntry.Attributes.DiskType,
+            "",
             "",
             "",
             "",
@@ -266,7 +264,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
     }

     entry.Chunks = append(entry.Chunks, req.Chunks...)
-    so, err := fs.detectStorageOption(string(fullpath), entry.Collection, entry.Replication, entry.TtlSec, entry.DiskType, "", "", "")
+    so, err := fs.detectStorageOption(string(fullpath), "", "", entry.TtlSec, "", "", "", "")
     if err != nil {
         glog.Warningf("detectStorageOption: %v", err)
         return &filer_pb.AppendToEntryResponse{}, err
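The eight positional arguments of detectStorageOption are easy to misread. A hypothetical wrapper making the narrowed call explicit; the parameter order is inferred from the call sites in this diff, and the wrapper name is not part of the commit:

// storageOptionForAppend passes "" for collection, replication, and disk
// type, so placement now resolves only from filer.conf path rules and
// server defaults.
func (fs *FilerServer) storageOptionForAppend(fullpath util.FullPath, ttlSec int32) (*operation.StorageOption, error) {
    return fs.detectStorageOption(string(fullpath), "", "", ttlSec, "", "", "", "")
}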
@@ -174,8 +174,6 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
     }
     fs.filer.AggregateFromPeers(option.Host, existingNodes, startFromTime)

-    fs.filer.LoadBuckets()
-
     fs.filer.LoadFilerConf()

     fs.filer.LoadRemoteStorageConfAndMapping()
@@ -200,10 +200,9 @@ func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication
     }

     // required by buckets folder
-    bucketDefaultCollection, bucketDefaultReplication, fsync := "", "", false
+    bucketDefaultCollection := ""
     if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") {
         bucketDefaultCollection = fs.filer.DetectBucket(util.FullPath(requestURI))
-        bucketDefaultReplication, fsync = fs.filer.ReadBucketOption(bucketDefaultCollection)
     }

     if ttlSeconds == 0 {
@@ -215,14 +214,14 @@ func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication
     }

     return &operation.StorageOption{
-        Replication:       util.Nvl(qReplication, rule.Replication, bucketDefaultReplication, fs.option.DefaultReplication),
+        Replication:       util.Nvl(qReplication, rule.Replication, fs.option.DefaultReplication),
         Collection:        util.Nvl(qCollection, rule.Collection, bucketDefaultCollection, fs.option.Collection),
         DataCenter:        util.Nvl(dataCenter, rule.DataCenter, fs.option.DataCenter),
         Rack:              util.Nvl(rack, rule.Rack, fs.option.Rack),
         DataNode:          util.Nvl(dataNode, rule.DataNode, fs.option.DataNode),
         TtlSeconds:        ttlSeconds,
         DiskType:          util.Nvl(diskType, rule.DiskType),
-        Fsync:             fsync || rule.Fsync,
+        Fsync:             rule.Fsync,
         VolumeGrowthCount: rule.VolumeGrowthCount,
     }, nil
 }
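util.Nvl drives the fallback chains above; a minimal equivalent, assuming first-non-empty semantics:

// Nvl returns the first non-empty string, or "" if all are empty.
func Nvl(values ...string) string {
    for _, value := range values {
        if value != "" {
            return value
        }
    }
    return ""
}

With bucketDefaultReplication removed from the chain, a bucket can no longer supply its own replication default: only the query parameter, the filer.conf rule, and the server-wide default remain.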
@@ -201,18 +201,15 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
     entry = &filer.Entry{
         FullPath: util.FullPath(path),
         Attr: filer.Attr{
             Mtime: time.Now(),
             Crtime: time.Now(),
             Mode:   os.FileMode(mode),
             Uid:    OS_UID,
             Gid:    OS_GID,
-            Replication: so.Replication,
-            Collection:  so.Collection,
             TtlSec: so.TtlSeconds,
-            DiskType:    so.DiskType,
             Mime:     contentType,
             Md5:      md5bytes,
             FileSize: uint64(chunkOffset),
         },
         Content: content,
     }
@@ -73,17 +73,14 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht
     entry := &filer.Entry{
         FullPath: util.FullPath(path),
         Attr: filer.Attr{
             Mtime:  time.Now(),
             Crtime: time.Now(),
             Mode:   0660,
             Uid:    OS_UID,
             Gid:    OS_GID,
-            Replication: so.Replication,
-            Collection:  so.Collection,
             TtlSec: so.TtlSeconds,
-            DiskType:    so.DiskType,
             Mime:   pu.MimeType,
             Md5:    util.Base64Md5ToBytes(pu.ContentMd5),
         },
         Chunks: fileChunks,
     }
@@ -218,14 +218,12 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f
         Name:        name,
         IsDirectory: perm&os.ModeDir > 0,
         Attributes: &filer_pb.FuseAttributes{
             Mtime:    time.Now().Unix(),
             Crtime:   time.Now().Unix(),
             FileMode: uint32(perm),
             Uid:      fs.option.Uid,
             Gid:      fs.option.Gid,
-            Collection:  fs.option.Collection,
-            Replication: fs.option.Replication,
             TtlSec:   0,
         },
     },
     Signatures: []int32{fs.signature},
@@ -478,8 +476,6 @@ func (f *WebDavFile) Write(buf []byte) (int, error) {

     flushErr := f.fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
         f.entry.Attributes.Mtime = time.Now().Unix()
-        f.entry.Attributes.Collection = f.collection
-        f.entry.Attributes.Replication = f.replication

         request := &filer_pb.UpdateEntryRequest{
             Directory: dir,
@@ -34,9 +34,6 @@ func (c *commandS3BucketCreate) Do(args []string, commandEnv *CommandEnv, writer

     bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
     bucketName := bucketCommand.String("name", "", "bucket name")
-    replication := bucketCommand.String("replication", "", "replication setting for the bucket, if not set "+
-        "it will honor the value defined by the filer if it exists, "+
-        "else it will honor the value defined on the master")
     if err = bucketCommand.Parse(args); err != nil {
         return nil
     }
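After this hunk only the -name flag remains on s3.bucket.create. A self-contained sketch of the surviving flag handling (function name and wiring are illustrative):

package example

import "flag"

// parseBucketName mirrors the remaining parsing: -replication is gone.
func parseBucketName(args []string) (string, error) {
    bucketCommand := flag.NewFlagSet("s3.bucket.create", flag.ContinueOnError)
    bucketName := bucketCommand.String("name", "", "bucket name")
    if err := bucketCommand.Parse(args); err != nil {
        return "", err
    }
    return *bucketName, nil
}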
@@ -59,11 +56,9 @@ func (c *commandS3BucketCreate) Do(args []string, commandEnv *CommandEnv, writer
         Name:        *bucketName,
         IsDirectory: true,
         Attributes: &filer_pb.FuseAttributes{
             Mtime:    time.Now().Unix(),
             Crtime:   time.Now().Unix(),
             FileMode: uint32(0777 | os.ModeDir),
-            Collection:  *bucketName,
-            Replication: *replication,
         },
     }

@@ -67,9 +67,6 @@ func (c *commandS3BucketList) Do(args []string, commandEnv *CommandEnv, writer i
     if entry.Quota > 0 {
         fmt.Fprintf(writer, "\tquota:%d\tusage:%.2f%%", entry.Quota, float64(collectionSize)*100/float64(entry.Quota))
     }
-    if entry.Attributes.Replication != "" && entry.Attributes.Replication != "000" {
-        fmt.Fprintf(writer, "\treplication:%s", entry.Attributes.Replication)
-    }
     fmt.Fprintln(writer)
     return nil
 }, "", false, math.MaxUint32)