Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)
filer: add option to cache small files to filer store
parent d171d9f988
commit 263eb29e9f
@@ -42,6 +42,7 @@ type FilerOptions struct {
 	cipher                  *bool
 	peers                   *string
 	metricsHttpPort         *int
+	cacheToFilerLimit       *int

 	// default leveldb directory, used in "weed server" mode
 	defaultLevelDbDirectory *string
@@ -65,6 +66,7 @@ func init() {
 	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
 	f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
+	f.cacheToFilerLimit = cmdFiler.Flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")

 	// start s3 on filer
 	filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
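For illustration only (not part of this commit): with the flag registered as above, starting the filer with something like `weed filer -cacheToFilerLimit=1024` would allow uploads smaller than 1024 bytes to be kept inline in the filer store, while the default of 0, judging by the check in the upload handler further down, disables inline caching entirely.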
@@ -149,6 +151,7 @@ func (fo *FilerOptions) startFiler() {
 		Host:               *fo.ip,
 		Port:               uint32(*fo.port),
 		Cipher:             *fo.cipher,
+		CacheToFilerLimit:  int64(*fo.cacheToFilerLimit),
 		Filers:             peers,
 	})
 	if nfs_err != nil {
@@ -94,6 +94,7 @@ func init() {
 	filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
 	filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
 	filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
+	filerOptions.cacheToFilerLimit = cmdServer.Flag.Int("filer.cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")

 	serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
 	serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
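In "weed server" mode the same option is registered with the filer. prefix, so an equivalent (illustrative) invocation would be `weed server -filer.cacheToFilerLimit=1024`.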
@@ -55,6 +55,7 @@ type FilerOption struct {
 	Port               uint32
 	recursiveDelete    bool
 	Cipher             bool
+	CacheToFilerLimit  int64
 	Filers             []string
 }

@@ -236,7 +236,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 			break
 		}
 	}
-	if chunkOffset < 2048 {
+	if chunkOffset < fs.option.CacheToFilerLimit {
 		smallContent = content
 	}
 	return fileChunks, md5Hash, chunkOffset, nil, smallContent
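The last hunk replaces the hard-coded 2048-byte threshold with the configurable limit. Below is a minimal, self-contained sketch of that decision; the helper name shouldCacheToFiler is illustrative and not part of the commit.

package main

import "fmt"

// shouldCacheToFiler mirrors the check in uploadReaderToChunks: before this
// commit the threshold was the literal 2048; after it, the threshold is the
// value of the -cacheToFilerLimit flag, which defaults to 0 (never cache inline).
func shouldCacheToFiler(uploadedBytes, cacheToFilerLimit int64) bool {
	return uploadedBytes < cacheToFilerLimit
}

func main() {
	fmt.Println(shouldCacheToFiler(1500, 0))    // false: default 0 disables inline caching
	fmt.Println(shouldCacheToFiler(1500, 2048)) // true: small file content kept in the filer store
	fmt.Println(shouldCacheToFiler(4096, 2048)) // false: larger uploads go to volume servers as chunks
}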