Change needle_byte_cache so that it does not grow excessively large when larger files are added.

This commit is contained in:
Mike Tolman 2016-08-05 15:14:24 -06:00
parent 761ef1c73e
commit 87fee21ef5

View file

@ -8,6 +8,7 @@ import (
"github.com/hashicorp/golang-lru" "github.com/hashicorp/golang-lru"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/glog"
) )
var ( var (
@ -24,7 +25,7 @@ In caching, the string~[]byte mapping is cached
*/ */
func init() { func init() {
bytesPool = util.NewBytesPool() bytesPool = util.NewBytesPool()
bytesCache, _ = lru.NewWithEvict(512, func(key interface{}, value interface{}) { bytesCache, _ = lru.NewWithEvict(50, func(key interface{}, value interface{}) {
value.(*Block).decreaseReference() value.(*Block).decreaseReference()
}) })
} }
@ -46,22 +47,37 @@ func (block *Block) increaseReference() {
// get bytes from the LRU cache of []byte first, then from the bytes pool // get bytes from the LRU cache of []byte first, then from the bytes pool
// when []byte in LRU cache is evicted, it will be put back to the bytes pool // when []byte in LRU cache is evicted, it will be put back to the bytes pool
func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) { func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) {
//Skip the cache if we are looking for a block that is too big to fit in the cache (defaulting to 10MB)
cacheable := readSize <= (1024*1024*10)
if !cacheable {
glog.V(4).Infoln("Block too big to keep in cache. Size:", readSize)
}
cacheKey := string("")
if cacheable {
// check cache, return if found // check cache, return if found
cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize) cacheKey = fmt.Sprintf("%d:%d:%d", r.Fd(), offset >> 3, readSize)
if obj, found := bytesCache.Get(cacheKey); found { if obj, found := bytesCache.Get(cacheKey); found {
glog.V(4).Infoln("Found block in cache. Size:", readSize)
block = obj.(*Block) block = obj.(*Block)
block.increaseReference() block.increaseReference()
dataSlice = block.Bytes[0:readSize] dataSlice = block.Bytes[0:readSize]
return dataSlice, block, nil return dataSlice, block, nil
}
} }
// get the []byte from pool // get the []byte from pool
b := bytesPool.Get(readSize) b := bytesPool.Get(readSize)
// refCount = 2, one by the bytesCache, one by the actual needle object // refCount = 2, one by the bytesCache, one by the actual needle object
block = &Block{Bytes: b, refCount: 2} refCount := int32(1)
if cacheable {
refCount = 2
}
block = &Block{Bytes: b, refCount: refCount}
dataSlice = block.Bytes[0:readSize] dataSlice = block.Bytes[0:readSize]
_, err = r.ReadAt(dataSlice, offset) _, err = r.ReadAt(dataSlice, offset)
if cacheable {
bytesCache.Add(cacheKey, block) bytesCache.Add(cacheKey, block)
}
return dataSlice, block, err return dataSlice, block, err
} }