Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

refactor
adjust for faster test

Commit 31fc7bb2e1, parent a37535cd9f
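In short, NewTieredChunkCache no longer takes its on-disk budget in megabytes: it now takes a count of size units plus the unit size in bytes, and the per-tier size thresholds are stored on the struct and derived from that unit instead of package-level constants. Production callers pass 1 MiB units so the existing CacheSizeMB option keeps its meaning, while the test can pass 1 KiB units to touch far less disk. A minimal usage sketch under those assumptions (the import path and the 8-unit budget are illustrative, not taken from this commit):

    package main

    import (
        "io/ioutil"
        "os"

        "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
    )

    func main() {
        dir, _ := ioutil.TempDir("", "chunkcache")
        defer os.RemoveAll(dir)

        // New signature: total disk budget = diskSizeInUnit * unitSize bytes.
        // With 1 MiB units, a megabyte-denominated option keeps its old meaning;
        // a test can pass 1 KiB units instead to keep the cache tiny.
        cache := chunk_cache.NewTieredChunkCache(256, dir, 8, 1024*1024)
        defer cache.Shutdown()
    }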
@@ -86,7 +86,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 	cacheDir := path.Join(option.CacheDir, cacheUniqueId)
 	if option.CacheSizeMB > 0 {
 		os.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask)
-		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB)
+		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
 	}
 	wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), option.UidGidMapper)

@@ -100,7 +100,7 @@ type WebDavFile struct {
 
 func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
 
-	chunkCache := chunk_cache.NewTieredChunkCache(256, option.CacheDir, option.CacheSizeMB)
+	chunkCache := chunk_cache.NewTieredChunkCache(256, option.CacheDir, option.CacheSizeMB, 1024*1024)
 	return &WebDavFileSystem{
 		option:     option,
 		chunkCache: chunkCache,

@@ -7,12 +7,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
-const (
-	memCacheSizeLimit     = 1024 * 1024
-	onDiskCacheSizeLimit0 = memCacheSizeLimit
-	onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit
-)
-
 type ChunkCache interface {
 	GetChunk(fileId string, minSize uint64) (data []byte)
 	SetChunk(fileId string, data []byte)
@@ -23,17 +17,21 @@ type TieredChunkCache struct {
 	memCache   *ChunkCacheInMemory
 	diskCaches []*OnDiskCacheLayer
 	sync.RWMutex
+	onDiskCacheSizeLimit0 uint64
+	onDiskCacheSizeLimit1 uint64
 }
 
-func NewTieredChunkCache(maxEntries int64, dir string, diskSizeMB int64) *TieredChunkCache {
+func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {
 
 	c := &TieredChunkCache{
 		memCache: NewChunkCacheInMemory(maxEntries),
 	}
 	c.diskCaches = make([]*OnDiskCacheLayer, 3)
-	c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
-	c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
-	c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)
+	c.onDiskCacheSizeLimit0 = uint64(unitSize)
+	c.onDiskCacheSizeLimit1 = 4 * c.onDiskCacheSizeLimit0
+	c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeInUnit*unitSize/4, 4)
+	c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeInUnit*unitSize/4, 4)
+	c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeInUnit*unitSize/2, 4)
 
 	return c
 }
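To see what the constructor's arithmetic means at the two kinds of call sites, here is a small standalone sketch (not code from the commit) that reproduces the limit and budget calculation for the 1 MiB production units and the 1 KiB units used by the adjusted test:

    package main

    import "fmt"

    // tierSizing mirrors the split in NewTieredChunkCache: limit0 is one unit,
    // limit1 is four units, and the byte budget is divided 1/4, 1/4, 1/2 across
    // the three on-disk layers. Illustrative helper, not part of the package.
    func tierSizing(diskSizeInUnit, unitSize int64) (limit0, limit1 uint64, budgets [3]int64) {
        limit0 = uint64(unitSize)
        limit1 = 4 * limit0
        total := diskSizeInUnit * unitSize
        budgets = [3]int64{total / 4, total / 4, total / 2}
        return
    }

    func main() {
        // Production-style call: CacheSizeMB=256 with 1 MiB units.
        fmt.Println(tierSizing(256, 1024*1024)) // limits 1 MiB / 4 MiB, budgets 64/64/128 MiB
        // Test call: 32 units of 1 KiB.
        fmt.Println(tierSizing(32, 1024)) // limits 1 KiB / 4 KiB, budgets 8/8/16 KiB
    }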
@@ -51,7 +49,7 @@ func (c *TieredChunkCache) GetChunk(fileId string, minSize uint64) (data []byte)
 
 func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byte) {
 
-	if minSize < memCacheSizeLimit {
+	if minSize < c.onDiskCacheSizeLimit0 {
 		data = c.memCache.GetChunk(fileId)
 		if len(data) >= int(minSize) {
 			return data
@@ -64,13 +62,13 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byte)
 		return nil
 	}
 
-	if minSize < onDiskCacheSizeLimit0 {
+	if minSize < c.onDiskCacheSizeLimit0 {
 		data = c.diskCaches[0].getChunk(fid.Key)
 		if len(data) >= int(minSize) {
 			return data
 		}
 	}
-	if minSize < onDiskCacheSizeLimit1 {
+	if minSize < c.onDiskCacheSizeLimit1 {
 		data = c.diskCaches[1].getChunk(fid.Key)
 		if len(data) >= int(minSize) {
 			return data
@@ -101,7 +99,7 @@ func (c *TieredChunkCache) SetChunk(fileId string, data []byte) {
 
 func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
 
-	if len(data) < memCacheSizeLimit {
+	if len(data) < int(c.onDiskCacheSizeLimit0) {
 		c.memCache.SetChunk(fileId, data)
 	}
 
@@ -111,9 +109,9 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) {
 		return
 	}
 
-	if len(data) < onDiskCacheSizeLimit0 {
+	if len(data) < int(c.onDiskCacheSizeLimit0) {
 		c.diskCaches[0].setChunk(fid.Key, data)
-	} else if len(data) < onDiskCacheSizeLimit1 {
+	} else if len(data) < int(c.onDiskCacheSizeLimit1) {
 		c.diskCaches[1].setChunk(fid.Key, data)
 	} else {
 		c.diskCaches[2].setChunk(fid.Key, data)

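The get and set paths above now route by size against the per-instance limits rather than package constants; conceptually the selection looks like the following standalone sketch (pickTier is a hypothetical helper written for illustration, not a function in the package):

    package main

    import "fmt"

    // pickTier chooses an on-disk layer the way doGetChunk/doSetChunk do after
    // this change: below one unit -> tier 0 (small chunks, also memory-cached),
    // below four units -> tier 1, everything else -> tier 2.
    func pickTier(size, limit0, limit1 uint64) int {
        switch {
        case size < limit0:
            return 0
        case size < limit1:
            return 1
        default:
            return 2
        }
    }

    func main() {
        limit0, limit1 := uint64(1024*1024), uint64(4*1024*1024) // 1 MiB units
        fmt.Println(pickTier(512*1024, limit0, limit1))     // 0
        fmt.Println(pickTier(2*1024*1024, limit0, limit1))  // 1
        fmt.Println(pickTier(16*1024*1024, limit0, limit1)) // 2
    }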
@@ -14,9 +14,9 @@ func TestOnDisk(t *testing.T) {
 	tmpDir, _ := ioutil.TempDir("", "c")
 	defer os.RemoveAll(tmpDir)
 
-	totalDiskSizeMb := int64(32)
+	totalDiskSizeInKB := int64(32)
 
-	cache := NewTieredChunkCache(0, tmpDir, totalDiskSizeMb)
+	cache := NewTieredChunkCache(0, tmpDir, totalDiskSizeInKB, 1024)
 
 	writeCount := 5
 	type test_data struct {
@@ -26,7 +26,7 @@ func TestOnDisk(t *testing.T) {
 	}
 	testData := make([]*test_data, writeCount)
 	for i := 0; i < writeCount; i++ {
-		buff := make([]byte, 1024*1024)
+		buff := make([]byte, 1024)
 		rand.Read(buff)
 		testData[i] = &test_data{
 			data: buff,
@@ -45,7 +45,7 @@ func TestOnDisk(t *testing.T) {
 
 	cache.Shutdown()
 
-	cache = NewTieredChunkCache(0, tmpDir, totalDiskSizeMb)
+	cache = NewTieredChunkCache(0, tmpDir, totalDiskSizeInKB, 1024)
 
 	for i := 0; i < writeCount; i++ {
 		data := cache.GetChunk(testData[i].fileId, testData[i].size)

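The speedup in the test comes entirely from scale: it still exercises the same write / Shutdown / reopen / read path, but now writes 1 KiB buffers into a cache built from 32 one-KiB units instead of 1 MiB buffers into a cache sized from 32 MB. A quick check of the data volumes involved (illustrative arithmetic only):

    package main

    import "fmt"

    func main() {
        writeCount := int64(5)
        // Before: five 1 MiB buffers per run.
        fmt.Println(writeCount*1024*1024, "bytes written before") // 5242880
        // After: five 1 KiB buffers per run.
        fmt.Println(writeCount*1024, "bytes written after") // 5120
    }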
@@ -14,11 +14,11 @@ type OnDiskCacheLayer struct {
 	diskCaches []*ChunkCacheVolume
 }
 
-func NewOnDiskCacheLayer(dir, namePrefix string, diskSizeMB int64, segmentCount int) *OnDiskCacheLayer {
+func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount int) *OnDiskCacheLayer {
 
-	volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
+	volumeCount, volumeSize := int(diskSize/(30000*1024*1024)), int64(30000*1024*1024)
 	if volumeCount < segmentCount {
-		volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
+		volumeCount, volumeSize = segmentCount, diskSize/int64(segmentCount)
 	}
 
 	c := &OnDiskCacheLayer{}
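With NewOnDiskCacheLayer now taking a size in plain bytes, its volume layout can be sketched on its own as below (volumeLayout is a hypothetical standalone helper mirroring the changed lines, not part of the package; 30000 MiB stays the per-volume cap):

    package main

    import "fmt"

    // volumeLayout mirrors the sizing in NewOnDiskCacheLayer after this change:
    // diskSize is in bytes, each volume is capped at 30000 MiB, and if that
    // yields fewer volumes than segmentCount the budget is split evenly
    // across segmentCount volumes instead.
    func volumeLayout(diskSize int64, segmentCount int) (volumeCount int, volumeSize int64) {
        const maxVolumeSize = int64(30000) * 1024 * 1024
        volumeCount, volumeSize = int(diskSize/maxVolumeSize), maxVolumeSize
        if volumeCount < segmentCount {
            volumeCount, volumeSize = segmentCount, diskSize/int64(segmentCount)
        }
        return
    }

    func main() {
        // The test's smallest layer: 8 KiB split across 4 segments of 2 KiB each.
        fmt.Println(volumeLayout(8*1024, 4))
        // A production-scale layer: 64 MiB still falls back to 4 segments of 16 MiB.
        fmt.Println(volumeLayout(64*1024*1024, 4))
    }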