improve "ls -al" performance for large directory

Chris Lu 2018-11-08 07:37:34 -08:00
parent 6e11923551
commit cbd94b18a5
4 changed files with 35 additions and 7 deletions
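
Context for the diff below: the FUSE layer caches directory entries in a TTL cache (github.com/karlseguin/ccache, already imported in wfs.go) keyed by full path, and before this commit every entry was cached for a fixed 300-1000 ms. The change introduces an EntryCacheTtl mount option (defaulting to 3 s) and, for directory listings, a TTL that grows with the number of entries, so the per-entry Lookup/Attr calls issued by "ls -al" can be served from cache. Below is a minimal sketch of the Set/Get-with-TTL pattern being tuned, assuming the ccache v1 API; the key and value are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache"
)

func main() {
	// An LRU cache with a per-item TTL, like wfs.listDirectoryEntriesCache.
	cache := ccache.New(ccache.Configure().MaxSize(10000))

	// Entries are stored under their full path; this commit changes the
	// duration passed here from a fixed value to a size-dependent one.
	cache.Set("/some/dir/some-file", "entry placeholder", 10*time.Second)

	if item := cache.Get("/some/dir/some-file"); item != nil && !item.Expired() {
		fmt.Println("cache hit:", item.Value())
	}
}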

View file

@@ -5,13 +5,14 @@ package command
 import (
 	"fmt"
 	"runtime"
+	"strings"
+	"time"

 	"bazil.org/fuse"
 	"bazil.org/fuse/fs"
 	"github.com/chrislusf/seaweedfs/weed/filesys"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"strings"
 )

 func runMount(cmd *Command, args []string) bool {
@@ -72,6 +73,7 @@ func runMount(cmd *Command, args []string) bool {
 		ChunkSizeLimit: int64(*mountOptions.chunkSizeLimitMB) * 1024 * 1024,
 		DataCenter: *mountOptions.dataCenter,
 		DirListingLimit: *mountOptions.dirListingLimit,
+		EntryCacheTtl: 3 * time.Second,
 	}))
 	if err != nil {
 		fuse.Unmount(*mountOptions.dir)
View file

@@ -70,7 +70,7 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
 			dir.attributes = resp.Entry.Attributes
 		}
-		dir.wfs.listDirectoryEntriesCache.Set(dir.Path, resp.Entry, 300*time.Millisecond)
+		dir.wfs.listDirectoryEntriesCache.Set(dir.Path, resp.Entry, dir.wfs.option.EntryCacheTtl)
 		return nil
 	})
@@ -210,7 +210,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
 		entry = resp.Entry
-		dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, 1000*time.Millisecond)
+		dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, dir.wfs.option.EntryCacheTtl)
 		return nil
 	})
@@ -252,6 +252,8 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 			return fuse.EIO
 		}

+		cacheTtl := estimatedCacheTtl(len(resp.Entries))
+
 		for _, entry := range resp.Entries {
 			if entry.IsDirectory {
 				dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir}
@@ -260,7 +262,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 				dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File}
 				ret = append(ret, dirent)
 			}
-			dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, 300*time.Millisecond)
+			dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl)
 		}
 		return nil
@@ -337,3 +339,24 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
 	})
 }
+
+func estimatedCacheTtl(numEntries int) time.Duration {
+	if numEntries < 100 {
+		// 30 ms per entry
+		return 3 * time.Second
+	}
+	if numEntries < 1000 {
+		// 10 ms per entry
+		return 10 * time.Second
+	}
+	if numEntries < 10000 {
+		// 10 ms per entry
+		return 100 * time.Second
+	}
+	if numEntries < 100000 {
+		// 10 ms per entry
+		return 1000 * time.Second
+	}
+	// 2 ms per entry
+	return time.Duration(numEntries*2) * time.Millisecond
+}
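
To make the heuristic above concrete, here is a small standalone sketch; estimatedCacheTtl is copied from the hunk above and the directory sizes are arbitrary examples:

package main

import (
	"fmt"
	"time"
)

// Copied from the hunk above: larger listings get a longer cache TTL.
func estimatedCacheTtl(numEntries int) time.Duration {
	if numEntries < 100 {
		return 3 * time.Second
	}
	if numEntries < 1000 {
		return 10 * time.Second
	}
	if numEntries < 10000 {
		return 100 * time.Second
	}
	if numEntries < 100000 {
		return 1000 * time.Second
	}
	return time.Duration(numEntries*2) * time.Millisecond // 2 ms per entry
}

func main() {
	for _, n := range []int{50, 500, 5000, 50000, 500000} {
		fmt.Printf("%6d entries -> TTL %v\n", n, estimatedCacheTtl(n))
	}
	// Prints:
	//     50 entries -> TTL 3s
	//    500 entries -> TTL 10s
	//   5000 entries -> TTL 1m40s
	//  50000 entries -> TTL 16m40s
	// 500000 entries -> TTL 16m40s
}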

View file

@@ -153,7 +153,7 @@ func (file *File) maybeLoadAttributes(ctx context.Context) error {
 		glog.V(1).Infof("file attr %v %+v: %d", file.fullpath(), file.entry.Attributes, filer2.TotalSize(file.entry.Chunks))
-		file.wfs.listDirectoryEntriesCache.Set(file.fullpath(), file.entry, 300*time.Millisecond)
+		file.wfs.listDirectoryEntriesCache.Set(file.fullpath(), file.entry, file.wfs.option.EntryCacheTtl)
 		return nil
 	})

View file

@@ -1,14 +1,16 @@
 package filesys

 import (
+	"fmt"
+	"sync"
+	"time"
+
 	"bazil.org/fuse"
 	"bazil.org/fuse/fs"
-	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/karlseguin/ccache"
-	"sync"
 )

 type Option struct {
@@ -20,6 +22,7 @@ type Option struct {
 	ChunkSizeLimit int64
 	DataCenter string
 	DirListingLimit int
+	EntryCacheTtl time.Duration
 }

 type WFS struct {
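
On a FUSE mount, "ls -al" results in a ReadDirAll followed by per-entry Lookup/Attr calls, which is why keeping the listed entries cached for a size-scaled TTL helps large directories (the Get side of the cache is not shown in this diff). Callers set the new field when building the Option; the mount command in the first file passes a fixed 3-second default. A minimal sketch of that wiring, with unrelated fields elided; the constructor name NewSeaweedFileSystem is an assumption, taken from the surrounding code rather than this diff:

// Sketch only: other Option fields and the FUSE mount/serve setup are omitted.
opt := &filesys.Option{
	DirListingLimit: *mountOptions.dirListingLimit,
	EntryCacheTtl:   3 * time.Second, // how long a cached entry stays valid
}
_ = filesys.NewSeaweedFileSystem(opt)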