filer: add option to cache small files to filer store

Chris Lu 2020-12-04 22:39:43 -08:00
parent d171d9f988
commit 263eb29e9f
4 changed files with 6 additions and 1 deletion

@@ -42,6 +42,7 @@ type FilerOptions struct {
 	cipher *bool
 	peers *string
 	metricsHttpPort *int
+	cacheToFilerLimit *int
 	// default leveldb directory, used in "weed server" mode
 	defaultLevelDbDirectory *string
@@ -65,6 +66,7 @@ func init() {
 	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
 	f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+	f.cacheToFilerLimit = cmdFiler.Flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
 	// start s3 on filer
 	filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
@@ -149,6 +151,7 @@ func (fo *FilerOptions) startFiler() {
 		Host: *fo.ip,
 		Port: uint32(*fo.port),
 		Cipher: *fo.cipher,
+		CacheToFilerLimit: int64(*fo.cacheToFilerLimit),
 		Filers: peers,
 	})
 	if nfs_err != nil {
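
A minimal, self-contained sketch of the flag plumbing these hunks add: an integer flag with default 0 is registered on the filer command and widened to int64 when the server option is built, so something like weed filer -cacheToFilerLimit=1024 would enable inlining for bodies under 1 KiB. The FilerOptions/FilerOption types below are simplified stand-ins, not the real SeaweedFS definitions.

package main

import (
	"flag"
	"fmt"
)

// Simplified stand-ins for the structs touched in this diff.
type FilerOptions struct {
	cacheToFilerLimit *int
}

type FilerOption struct {
	CacheToFilerLimit int64
}

func main() {
	f := FilerOptions{}
	// Same shape as the cmdFiler registration above: an int flag, default 0.
	f.cacheToFilerLimit = flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
	flag.Parse()

	// Same shape as startFiler(): the parsed value is widened to int64.
	option := &FilerOption{CacheToFilerLimit: int64(*f.cacheToFilerLimit)}
	fmt.Println("cacheToFilerLimit =", option.CacheToFilerLimit)
}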

@@ -94,6 +94,7 @@ func init() {
 	filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
 	filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
 	filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
+	filerOptions.cacheToFilerLimit = cmdServer.Flag.Int("filer.cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
 	serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
 	serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")

@@ -55,6 +55,7 @@ type FilerOption struct {
 	Port uint32
 	recursiveDelete bool
 	Cipher bool
+	CacheToFilerLimit int64
 	Filers []string
 }

@@ -236,7 +236,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request
 			break
 		}
 	}
-	if chunkOffset < 2048 {
+	if chunkOffset < fs.option.CacheToFilerLimit {
 		smallContent = content
 	}
 	return fileChunks, md5Hash, chunkOffset, nil, smallContent
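
The behavioral change is in this last hunk: the hardcoded 2048-byte threshold for keeping small upload bodies becomes the configured CacheToFilerLimit, and because the new flag defaults to 0 (and chunkOffset is never negative), inlining small content into the filer store is effectively off unless the operator opts in. A rough sketch of the idea, assuming a simplified read loop rather than the real uploadReaderToChunks:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readAndMaybeInline is a stand-in for the small-content decision in
// uploadReaderToChunks: accumulate the upload, track the total offset,
// and only keep the bytes in memory when the total stays below the limit.
func readAndMaybeInline(r io.Reader, cacheToFilerLimit int64) (chunkOffset int64, smallContent []byte, err error) {
	var buf bytes.Buffer
	chunk := make([]byte, 1024)
	for {
		n, readErr := r.Read(chunk)
		if n > 0 {
			buf.Write(chunk[:n])
			chunkOffset += int64(n)
		}
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			return chunkOffset, nil, readErr
		}
	}
	// Same comparison as the changed line: strictly smaller than the limit.
	// With the default limit of 0 this is never true, so nothing is inlined.
	if chunkOffset < cacheToFilerLimit {
		smallContent = buf.Bytes()
	}
	return chunkOffset, smallContent, nil
}

func main() {
	offset, content, _ := readAndMaybeInline(bytes.NewReader([]byte("hello")), 2048)
	fmt.Println(offset, content != nil) // 5 true with the old hardcoded 2048
}

In other words, setting cacheToFilerLimit to 2048 reproduces the previous behavior, while leaving it at the default of 0 skips the filer-store cache entirely.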