From 0d331c1e3ae0d038ae972279a63d2ff9a70e25f4 Mon Sep 17 00:00:00 2001
From: Mike Tolman
Date: Fri, 5 Aug 2016 15:46:45 -0600
Subject: [PATCH] Revert "Changing needle_byte_cache so that it doesn't grow so big when larger files are added."

This reverts commit 87fee21ef597a8b1bac5352d1327c13f87eeb000.
---
 weed/storage/needle_byte_cache.go | 22 +++-------------------
 1 file changed, 3 insertions(+), 19 deletions(-)

diff --git a/weed/storage/needle_byte_cache.go b/weed/storage/needle_byte_cache.go
index 930ead81d..ae35a48ba 100644
--- a/weed/storage/needle_byte_cache.go
+++ b/weed/storage/needle_byte_cache.go
@@ -8,7 +8,6 @@ import (
 	"github.com/hashicorp/golang-lru"
 
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"github.com/chrislusf/seaweedfs/weed/glog"
 )
 
 var (
@@ -25,7 +24,7 @@ In caching, the string~[]byte mapping is cached
 */
 func init() {
 	bytesPool = util.NewBytesPool()
-	bytesCache, _ = lru.NewWithEvict(50, func(key interface{}, value interface{}) {
+	bytesCache, _ = lru.NewWithEvict(512, func(key interface{}, value interface{}) {
 		value.(*Block).decreaseReference()
 	})
 }
@@ -47,37 +46,22 @@ func (block *Block) increaseReference() {
 // get bytes from the LRU cache of []byte first, then from the bytes pool
 // when []byte in LRU cache is evicted, it will be put back to the bytes pool
 func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) {
-	//Skip the cache if we are looking for a block that is too big to fit in the cache (defaulting to 10MB)
-	cacheable := readSize <= (1024*1024*10)
-	if !cacheable {
-		glog.V(4).Infoln("Block too big to keep in cache. Size:", readSize)
-	}
-	cacheKey := string("")
-	if cacheable {
 	// check cache, return if found
-		cacheKey = fmt.Sprintf("%d:%d:%d", r.Fd(), offset >> 3, readSize)
+	cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize)
 	if obj, found := bytesCache.Get(cacheKey); found {
-			glog.V(4).Infoln("Found block in cache. Size:", readSize)
 		block = obj.(*Block)
 		block.increaseReference()
 		dataSlice = block.Bytes[0:readSize]
 		return dataSlice, block, nil
-		}
 	}
 	// get the []byte from pool
 	b := bytesPool.Get(readSize)
 	// refCount = 2, one by the bytesCache, one by the actual needle object
-	refCount := int32(1)
-	if cacheable {
-		refCount = 2
-	}
-	block = &Block{Bytes: b, refCount: refCount}
+	block = &Block{Bytes: b, refCount: 2}
 	dataSlice = block.Bytes[0:readSize]
 	_, err = r.ReadAt(dataSlice, offset)
-	if cacheable {
 	bytesCache.Add(cacheKey, block)
-	}
 	return dataSlice, block, err
 }
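
Note (editor's reference, not part of the patch): the behavior this revert restores is a plain LRU-plus-pool scheme with no size cutoff. Every read is keyed by (fd, offset>>3, readSize), kept in a 512-entry lru.NewWithEvict cache, and handed out with refCount = 2 so the buffer returns to the pool only after both the cache eviction and the needle release drop their references. Below is a minimal, self-contained Go sketch of that pattern; the names block, pool, and poolGet are illustrative stand-ins, not SeaweedFS APIs - only the lru.NewWithEvict/Get/Add calls and the refCount = 2 scheme come from the patch.

package main

import (
	"fmt"
	"sync/atomic"

	lru "github.com/hashicorp/golang-lru"
)

// block mirrors the reference-counted wrapper in the patch: the buffer goes
// back to the pool only when both the cache and the reader have released it.
type block struct {
	bytes    []byte
	refCount int32
}

// toy stand-in for util.BytesPool (assumed helper, not the SeaweedFS type)
var pool = make(chan []byte, 16)

func poolGet(size int) []byte {
	select {
	case b := <-pool:
		if cap(b) >= size {
			return b[:size]
		}
	default:
	}
	return make([]byte, size)
}

func (b *block) decreaseReference() {
	// last reference gone: hand the buffer back to the pool
	if atomic.AddInt32(&b.refCount, -1) == 0 {
		select {
		case pool <- b.bytes:
		default:
		}
	}
}

func main() {
	// 512-entry LRU whose eviction callback drops the cache's reference,
	// matching the lru.NewWithEvict(512, ...) call restored by the revert.
	cache, _ := lru.NewWithEvict(512, func(key interface{}, value interface{}) {
		value.(*block).decreaseReference()
	})

	readSize := 1024
	cacheKey := fmt.Sprintf("%d:%d:%d", 3, int64(4096)>>3, readSize)

	// refCount = 2: one reference held by the cache, one by the caller.
	blk := &block{bytes: poolGet(readSize), refCount: 2}
	cache.Add(cacheKey, blk)

	if obj, found := cache.Get(cacheKey); found {
		hit := obj.(*block)
		atomic.AddInt32(&hit.refCount, 1) // increaseReference on a cache hit
		fmt.Println("cache hit, refCount now", atomic.LoadInt32(&hit.refCount))
		hit.decreaseReference() // this caller is done with the hit
	}

	blk.decreaseReference() // original caller done; cache still holds its reference
}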