seaweedfs/weed/storage/needle_byte_cache.go

package storage

import (
	"fmt"
	"os"
	"sync/atomic"

	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/hashicorp/golang-lru"
)

var (
	bytesCache *lru.Cache
	bytesPool  *util.BytesPool
)

/*
There is one level of caching and one level of pooling.

In pooling, every []byte is fetched from and returned to the shared bytesPool.

In caching, the mapping from a string key to a []byte block is kept in bytesCache.
*/
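
// Lifecycle sketch (informal): how a block's reference count moves.
//
//	bytesPool.Get -> &Block{refCount: 2} -> bytesCache.Add
//	needle releases its reference       -> refCount 2 -> 1
//	LRU eviction releases its reference -> refCount 1 -> 0 -> bytesPool.Put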

func init() {
	bytesPool = util.NewBytesPool()
	bytesCache, _ = lru.NewWithEvict(512, func(key interface{}, value interface{}) {
		// when a block is evicted from the LRU cache, drop the cache's reference
		value.(*Block).decreaseReference()
	})
}

// Block is a reference-counted []byte buffer; the bytes go back to bytesPool
// once the last reference is released.
type Block struct {
	Bytes    []byte
	refCount int32
}

func (block *Block) decreaseReference() {
	// return the bytes to the pool when the last reference is gone
	if atomic.AddInt32(&block.refCount, -1) == 0 {
		bytesPool.Put(block.Bytes)
	}
}

func (block *Block) increaseReference() {
	atomic.AddInt32(&block.refCount, 1)
}

// getBytesForFileBlock gets bytes from the LRU cache of []byte first, then
// from the bytes pool on a cache miss. When a []byte is evicted from the LRU
// cache, it is put back into the bytes pool.
func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) {
	// check the cache, return if found;
	// offset>>3 assumes needle offsets are 8-byte aligned on disk
	cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize)
	if obj, found := bytesCache.Get(cacheKey); found {
		block = obj.(*Block)
		block.increaseReference()
		dataSlice = block.Bytes[0:readSize]
		return dataSlice, block, nil
	}
	// cache miss: get a []byte from the pool and read from disk
	b := bytesPool.Get(readSize)
	// refCount = 2: one reference held by bytesCache, one by the actual needle object
	block = &Block{Bytes: b, refCount: 2}
	dataSlice = block.Bytes[0:readSize]
	_, err = r.ReadAt(dataSlice, offset)
	bytesCache.Add(cacheKey, block)
	return dataSlice, block, err
}

// ReleaseMemory drops the needle's reference to its raw block; the underlying
// bytes return to the pool once the cache's reference is also gone.
func (n *Needle) ReleaseMemory() {
	if n.rawBlock != nil {
		n.rawBlock.decreaseReference()
	}
}

func ReleaseBytes(b []byte) {
	bytesPool.Put(b)
}
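
// exampleBlockUsage is a minimal usage sketch (hypothetical caller; the file
// path and sizes are made up): read a block through the cache, use the bytes
// while holding a reference, then release it.
func exampleBlockUsage() error {
	f, err := os.Open("/tmp/sample.dat") // hypothetical data file
	if err != nil {
		return err
	}
	defer f.Close()

	// read 1024 bytes at offset 0; the returned block holds refCount = 2
	// (one reference for the cache, one for this caller)
	data, block, err := getBytesForFileBlock(f, 0, 1024)
	if err != nil {
		block.decreaseReference() // give back the caller's reference on error too
		return err
	}
	_ = data // consume the bytes while the reference is held

	// drop this caller's reference; the bytes go back to bytesPool only
	// after the LRU cache also evicts the block
	block.decreaseReference()
	return nil
}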