renaming

commit 3852307e94
parent 6c7fa567d4
@@ -146,15 +146,15 @@ func runFuse(cmd *Command, args []string) bool {
 				panic(fmt.Errorf("concurrentWriters: %s", err))
 			}
 		case "cacheDir":
-			mountOptions.cacheDir = &parameter.value
+			mountOptions.cacheDirForRead = &parameter.value
 		case "cacheCapacityMB":
 			if parsed, err := strconv.ParseInt(parameter.value, 0, 64); err == nil {
-				mountOptions.cacheSizeMB = &parsed
+				mountOptions.cacheSizeMBForRead = &parsed
 			} else {
 				panic(fmt.Errorf("cacheCapacityMB: %s", err))
 			}
 		case "cacheDirWrite":
-			mountOptions.cacheDirWrite = &parameter.value
+			mountOptions.cacheDirForWrite = &parameter.value
 		case "dataCenter":
 			mountOptions.dataCenter = &parameter.value
 		case "allowOthers":
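Note for readers of this hunk: the user-facing "-o" option names (cacheDir, cacheCapacityMB, cacheDirWrite) are unchanged; only the MountOptions fields they populate get the new ForRead/ForWrite names. Below is a minimal, self-contained sketch of that mapping; parseFuseOptions and the trimmed-down mountOptions struct are hypothetical stand-ins, not the actual runFuse code.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// mountOptions mirrors only the fields touched by this commit.
type mountOptions struct {
	cacheDirForRead    *string
	cacheDirForWrite   *string
	cacheSizeMBForRead *int64
}

// parseFuseOptions is a hypothetical helper: it maps the unchanged
// user-facing option names onto the renamed internal fields.
func parseFuseOptions(raw string, opts *mountOptions) error {
	for _, kv := range strings.Split(raw, ",") {
		name, value, _ := strings.Cut(kv, "=")
		switch name {
		case "cacheDir":
			opts.cacheDirForRead = &value
		case "cacheDirWrite":
			opts.cacheDirForWrite = &value
		case "cacheCapacityMB":
			parsed, err := strconv.ParseInt(value, 0, 64)
			if err != nil {
				return fmt.Errorf("cacheCapacityMB: %s", err)
			}
			opts.cacheSizeMBForRead = &parsed
		}
	}
	return nil
}

func main() {
	var opts mountOptions
	raw := "cacheDir=/var/cache/read,cacheDirWrite=/var/cache/write,cacheCapacityMB=1000"
	if err := parseFuseOptions(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Println(*opts.cacheDirForRead, *opts.cacheDirForWrite, *opts.cacheSizeMBForRead)
}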
@@ -17,9 +17,9 @@ type MountOptions struct {
 	ttlSec             *int
 	chunkSizeLimitMB   *int
 	concurrentWriters  *int
-	cacheDir           *string
-	cacheDirWrite      *string
-	cacheSizeMB        *int64
+	cacheDirForRead    *string
+	cacheDirForWrite   *string
+	cacheSizeMBForRead *int64
 	dataCenter         *string
 	allowOthers        *bool
 	umaskString        *string
@@ -55,9 +55,9 @@ func init() {
 	mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
 	mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
 	mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers")
-	mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
-	mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 0, "file chunk read cache capacity in MB")
-	mountOptions.cacheDirWrite = cmdMount.Flag.String("cacheDirWrite", os.TempDir(), "buffer writes mostly for large files")
+	mountOptions.cacheDirForRead = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
+	mountOptions.cacheSizeMBForRead = cmdMount.Flag.Int64("cacheCapacityMB", 0, "file chunk read cache capacity in MB")
+	mountOptions.cacheDirForWrite = cmdMount.Flag.String("cacheDirWrite", os.TempDir(), "buffer writes mostly for large files")
 	mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
 	mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
 	mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
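As in the previous hunk, only the Go variable names change here; the command-line flags are still registered as cacheDir, cacheCapacityMB and cacheDirWrite with the same defaults and help text, so the flags users pass keep the same spelling. A standalone sketch with the standard flag package, where the flag names, defaults and descriptions come from the hunk and everything else is illustrative:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Same user-facing flag names as before the rename; only the
	// variable names follow the new ForRead/ForWrite convention.
	cacheDirForRead := flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
	cacheSizeMBForRead := flag.Int64("cacheCapacityMB", 0, "file chunk read cache capacity in MB")
	cacheDirForWrite := flag.String("cacheDirWrite", os.TempDir(), "buffer writes mostly for large files")

	flag.Parse()
	fmt.Println(*cacheDirForRead, *cacheSizeMBForRead, *cacheDirForWrite)
}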
@@ -225,9 +225,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		DiskType:           types.ToDiskType(*option.diskType),
 		ChunkSizeLimit:     int64(chunkSizeLimitMB) * 1024 * 1024,
 		ConcurrentWriters:  *option.concurrentWriters,
-		CacheDir:           *option.cacheDir,
-		CacheSizeMB:        *option.cacheSizeMB,
-		CacheDirWrite:      *option.cacheDirWrite,
+		CacheDirForRead:    *option.cacheDirForRead,
+		CacheSizeMBForRead: *option.cacheSizeMBForRead,
+		CacheDirForWrite:   *option.cacheDirForWrite,
 		DataCenter:         *option.dataCenter,
 		Quota:              int64(*option.collectionQuota) * 1024 * 1024,
 		MountUid:           uid,
@@ -29,7 +29,7 @@ func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages {
 		fh: fh,
 	}

-	swapFileDir := fh.wfs.option.getTempFilePageDir()
+	swapFileDir := fh.wfs.option.getUniqueCacheDirForWrite()

 	dirtyPages.uploadPipeline = page_writer.NewUploadPipeline(fh.wfs.concurrentWriters, chunkSize,
 		dirtyPages.saveChunkedFileIntervalToStorage, fh.wfs.option.ConcurrentWriters, swapFileDir)
@@ -38,9 +38,9 @@ type Option struct {
 	DiskType           types.DiskType
 	ChunkSizeLimit     int64
 	ConcurrentWriters  int
-	CacheDir           string
-	CacheSizeMB        int64
-	CacheDirWrite      string
+	CacheDirForRead    string
+	CacheSizeMBForRead int64
+	CacheDirForWrite   string
 	DataCenter         string
 	Umask              os.FileMode
 	Quota              int64
@@ -57,8 +57,8 @@ type Option struct {
 	Cipher             bool // whether encrypt data on volume server
 	UidGidMapper       *meta_cache.UidGidMapper

-	uniqueCacheDir         string
-	uniqueCacheTempPageDir string
+	uniqueCacheDirForRead  string
+	uniqueCacheDirForWrite string
 }

 type WFS struct {
@@ -92,11 +92,11 @@ func NewSeaweedFileSystem(option *Option) *WFS {

 	wfs.option.filerIndex = int32(rand.Intn(len(option.FilerAddresses)))
 	wfs.option.setupUniqueCacheDirectory()
-	if option.CacheSizeMB > 0 {
-		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDir(), option.CacheSizeMB, 1024*1024)
+	if option.CacheSizeMBForRead > 0 {
+		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024)
 	}

-	wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDir(), "meta"), option.UidGidMapper,
+	wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDirForRead(), "meta"), option.UidGidMapper,
 		util.FullPath(option.FilerMountRootPath),
 		func(path util.FullPath) {
 			wfs.inodeToPath.MarkChildrenCached(path)
@@ -106,8 +106,8 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 		})
 	grace.OnInterrupt(func() {
 		wfs.metaCache.Shutdown()
-		os.RemoveAll(option.getUniqueCacheDir())
-		os.RemoveAll(option.getTempFilePageDir())
+		os.RemoveAll(option.getUniqueCacheDirForRead())
+		os.RemoveAll(option.getUniqueCacheDirForWrite())
 	})

 	if wfs.option.ConcurrentWriters > 0 {
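The interrupt hook now removes both per-mount cache trees: the read cache and the write (swap) cache. A rough standard-library stand-in for the grace.OnInterrupt behavior shown above, with placeholder directory paths in place of the getUniqueCacheDirForRead and getUniqueCacheDirForWrite results:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Placeholder paths standing in for getUniqueCacheDirForRead()
	// and getUniqueCacheDirForWrite().
	uniqueCacheDirForRead := "/tmp/seaweedfs-cache/abcd1234"
	uniqueCacheDirForWrite := "/tmp/seaweedfs-cache-write/abcd1234/swap"

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-c
		// Mirror of the shutdown hook: drop both cache trees.
		os.RemoveAll(uniqueCacheDirForRead)
		os.RemoveAll(uniqueCacheDirForWrite)
		os.Exit(0)
	}()

	fmt.Println("running; press Ctrl-C to clean up the cache directories")
	select {}
}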
@@ -193,16 +193,16 @@ func (wfs *WFS) getCurrentFiler() pb.ServerAddress {

 func (option *Option) setupUniqueCacheDirectory() {
 	cacheUniqueId := util.Md5String([]byte(option.MountDirectory + string(option.FilerAddresses[0]) + option.FilerMountRootPath + util.Version()))[0:8]
-	option.uniqueCacheDir = path.Join(option.CacheDir, cacheUniqueId)
-	os.MkdirAll(option.uniqueCacheDir, os.FileMode(0777)&^option.Umask)
-	option.uniqueCacheTempPageDir = filepath.Join(path.Join(option.CacheDirWrite, cacheUniqueId), "swap")
-	os.MkdirAll(option.uniqueCacheTempPageDir, os.FileMode(0777)&^option.Umask)
+	option.uniqueCacheDirForRead = path.Join(option.CacheDirForRead, cacheUniqueId)
+	os.MkdirAll(option.uniqueCacheDirForRead, os.FileMode(0777)&^option.Umask)
+	option.uniqueCacheDirForWrite = filepath.Join(path.Join(option.CacheDirForWrite, cacheUniqueId), "swap")
+	os.MkdirAll(option.uniqueCacheDirForWrite, os.FileMode(0777)&^option.Umask)
 }

-func (option *Option) getTempFilePageDir() string {
-	return option.uniqueCacheTempPageDir
+func (option *Option) getUniqueCacheDirForWrite() string {
+	return option.uniqueCacheDirForWrite
 }

-func (option *Option) getUniqueCacheDir() string {
-	return option.uniqueCacheDir
+func (option *Option) getUniqueCacheDirForRead() string {
+	return option.uniqueCacheDirForRead
 }
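For orientation: setupUniqueCacheDirectory derives an eight-character id from an md5 hash of the mount parameters, then creates one directory for the read cache and a swap subdirectory for write buffering. A self-contained sketch of the same layout, using crypto/md5 in place of util.Md5String and example values standing in for MountDirectory, FilerAddresses[0], FilerMountRootPath and util.Version():

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"os"
	"path"
	"path/filepath"
)

func main() {
	// Example mount parameters; the real values come from the Option struct.
	mountDirectory := "/mnt/seaweedfs"
	filerAddress := "localhost:8888"
	filerMountRootPath := "/"
	version := "example-version-string"

	// Eight-character per-mount id, as in setupUniqueCacheDirectory.
	sum := md5.Sum([]byte(mountDirectory + filerAddress + filerMountRootPath + version))
	cacheUniqueId := hex.EncodeToString(sum[:])[0:8]

	cacheDirForRead := os.TempDir()  // -cacheDir
	cacheDirForWrite := os.TempDir() // -cacheDirWrite
	umask := os.FileMode(0022)

	// Read cache: <cacheDir>/<id>; write buffer: <cacheDirWrite>/<id>/swap.
	uniqueCacheDirForRead := path.Join(cacheDirForRead, cacheUniqueId)
	uniqueCacheDirForWrite := filepath.Join(path.Join(cacheDirForWrite, cacheUniqueId), "swap")

	os.MkdirAll(uniqueCacheDirForRead, os.FileMode(0777)&^umask)
	os.MkdirAll(uniqueCacheDirForWrite, os.FileMode(0777)&^umask)

	fmt.Println("read cache: ", uniqueCacheDirForRead)
	fmt.Println("write cache:", uniqueCacheDirForWrite)
}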