filer: rename cacheToFilerLimit to saveToFilerLimit

Short-circuit small-file uploads: content below the limit is saved directly in the filer store instead of being uploaded to a volume server.
Chris Lu 2021-01-10 23:14:46 -08:00
parent f0d3b3bf93
commit 1efb51ba84
4 changed files with 35 additions and 33 deletions
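
The net effect: when an upload's first chunk is already smaller than the (renamed) saveToFilerLimit, its bytes are kept inline in the filer store entry and never sent to a volume server. A minimal standalone sketch of that decision, using hypothetical helper names rather than the actual SeaweedFS code:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    )

    // saveSmallUpload mimics the commit's short circuit: read one chunk;
    // if it is below the limit, hand the bytes back as inline content so
    // the caller can store them in the filer store and skip the
    // volume-server upload entirely.
    func saveSmallUpload(r io.Reader, chunkSize int64, saveToFilerLimit int) ([]byte, error) {
    	data, err := io.ReadAll(io.LimitReader(r, chunkSize))
    	if err != nil {
    		return nil, err
    	}
    	if len(data) < saveToFilerLimit {
    		return data, nil // small: keep inline, no volume-server round trip
    	}
    	return nil, nil // large: fall back to the normal chunked upload path
    }

    func main() {
    	inline, _ := saveSmallUpload(bytes.NewReader([]byte("hello")), 4<<20, 1024)
    	fmt.Printf("inline=%q\n", inline) // inline="hello"
    }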


@@ -42,7 +42,7 @@ type FilerOptions struct {
 	cipher                  *bool
 	peers                   *string
 	metricsHttpPort         *int
-	cacheToFilerLimit       *int
+	saveToFilerLimit        *int
 	defaultLevelDbDirectory *string
 }
@@ -64,7 +64,7 @@ func init() {
 	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
 	f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
-	f.cacheToFilerLimit = cmdFiler.Flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
+	f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
 	f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")

 	// start s3 on filer
@@ -139,18 +139,18 @@ func (fo *FilerOptions) startFiler() {
 		Masters:            strings.Split(*fo.masters, ","),
 		Collection:         *fo.collection,
 		DefaultReplication: *fo.defaultReplicaPlacement,
-		DisableDirListing:  *fo.disableDirListing,
-		MaxMB:              *fo.maxMB,
-		DirListingLimit:    *fo.dirListingLimit,
-		DataCenter:         *fo.dataCenter,
-		Rack:               *fo.rack,
-		DefaultLevelDbDir:  defaultLevelDbDirectory,
-		DisableHttp:        *fo.disableHttp,
-		Host:               *fo.ip,
-		Port:               uint32(*fo.port),
-		Cipher:             *fo.cipher,
-		CacheToFilerLimit:  int64(*fo.cacheToFilerLimit),
-		Filers:             peers,
+		DisableDirListing:  *fo.disableDirListing,
+		MaxMB:              *fo.maxMB,
+		DirListingLimit:    *fo.dirListingLimit,
+		DataCenter:         *fo.dataCenter,
+		Rack:               *fo.rack,
+		DefaultLevelDbDir:  defaultLevelDbDirectory,
+		DisableHttp:        *fo.disableHttp,
+		Host:               *fo.ip,
+		Port:               uint32(*fo.port),
+		Cipher:             *fo.cipher,
+		SaveToFilerLimit:   *fo.saveToFilerLimit,
+		Filers:             peers,
 	})
 	if nfs_err != nil {
 		glog.Fatalf("Filer startup error: %v", nfs_err)
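
With the rename in place, the threshold is set at filer startup; the value is in bytes, and the default of 0 leaves the short circuit disabled. For example (hypothetical invocation):

    weed filer -saveToFilerLimit=1024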


@@ -95,7 +95,7 @@ func init() {
 	filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
 	filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
 	filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
-	filerOptions.cacheToFilerLimit = cmdServer.Flag.Int("filer.cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
+	filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
 	serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
 	serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
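
The combined server command exposes the same knob under the filer. prefix, e.g. (hypothetical invocation, assuming the usual weed server flag form):

    weed server -filer -filer.saveToFilerLimit=1024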


@@ -46,18 +46,18 @@ type FilerOption struct {
 	Collection         string
 	DefaultReplication string
 	DisableDirListing  bool
-	MaxMB              int
-	DirListingLimit    int
-	DataCenter         string
-	Rack               string
-	DefaultLevelDbDir  string
-	DisableHttp        bool
-	Host               string
-	Port               uint32
-	recursiveDelete    bool
-	Cipher             bool
-	CacheToFilerLimit  int64
-	Filers             []string
+	MaxMB              int
+	DirListingLimit    int
+	DataCenter         string
+	Rack               string
+	DefaultLevelDbDir  string
+	DisableHttp        bool
+	Host               string
+	Port               uint32
+	recursiveDelete    bool
+	Cipher             bool
+	SaveToFilerLimit   int
+	Filers             []string
 }

 type FilerServer struct {
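
Besides the rename, the field type changes from int64 to int to match the *int flag, which is why the startFiler assignment above drops its conversion:

    // before: CacheToFilerLimit: int64(*fo.cacheToFilerLimit),
    // after:  SaveToFilerLimit:  *fo.saveToFilerLimit,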


@@ -207,7 +207,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))

 	chunkOffset := int64(0)
-	var smallContent, content []byte
+	var smallContent []byte

 	for {
 		limitedReader := io.LimitReader(partReader, int64(chunkSize))
@@ -216,6 +216,13 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 		if err != nil {
 			return nil, nil, 0, err, nil
 		}
+		if chunkOffset == 0 {
+			if len(data) < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
+				smallContent = data
+				chunkOffset += int64(len(data))
+				break
+			}
+		}
 		dataReader := util.NewBytesReader(data)

 		// retry to assign a different file id
@@ -242,8 +249,6 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 			return nil, nil, 0, uploadErr, nil
 		}

-		content = data
-
 		// if last chunk exhausted the reader exactly at the border
 		if uploadResult.Size == 0 {
 			break
@@ -263,9 +268,6 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 		}
 	}

-	if chunkOffset < fs.option.CacheToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && chunkOffset < 4*1024 {
-		smallContent = content
-	}

 	return fileChunks, md5Hash, chunkOffset, nil, smallContent
 }
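
One subtlety in the new first-chunk check: Go's && binds tighter than ||, so the condition accepts content that is either below SaveToFilerLimit, or below 4 KiB when the path lives under filer.DirectoryEtcRoot (the filer's internal configuration tree). An equivalent, explicitly parenthesized restatement (a sketch, not the committed code):

    // Inline the first chunk when either branch holds:
    saveInline := len(data) < fs.option.SaveToFilerLimit ||
    	(strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024)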