package chunk_cache

import (
	"fmt"
	"path"
	"sort"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
)
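
// OnDiskCacheLayer manages a group of same-sized chunk cache volumes on disk,
// ordered from most to least recently written so lookups probe the freshest
// volume first.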
type OnDiskCacheLayer struct {
	diskCaches []*ChunkCacheVolume
}
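
// NewOnDiskCacheLayer loads or creates the cache volume files under dir and
// returns a layer spanning them. For example (values are illustrative):
//
//	layer := NewOnDiskCacheLayer("/tmp/cache", "cache", 4*1024*1024*1024, 4)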
func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount int) *OnDiskCacheLayer {
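	// volumes default to 30000MiB (~30GB) each; if diskSize yields fewer than
	// segmentCount volumes, split diskSize evenly across segmentCount volumes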
	volumeCount, volumeSize := int(diskSize/(30000*1024*1024)), int64(30000*1024*1024)
	if volumeCount < segmentCount {
		volumeCount, volumeSize = segmentCount, diskSize/int64(segmentCount)
	}
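
	// load or create one volume file per segment, named namePrefix_i; a volume
	// that fails to open is logged and skipped rather than failing the layer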
	c := &OnDiskCacheLayer{}
	for i := 0; i < volumeCount; i++ {
		fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
		diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize)
		if err != nil {
			glog.Errorf("failed to add cache %s : %v", fileName, err)
		} else {
			c.diskCaches = append(c.diskCaches, diskCache)
		}
	}

	// keep the newest cache at the front
	sort.Slice(c.diskCaches, func(i, j int) bool {
		return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
	})

	return c
}
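
// setChunk writes a chunk into the newest (front) volume, recycling the
// oldest volume when the front one is full.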
func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {

	if len(c.diskCaches) == 0 {
		return
	}
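
	// the front volume is full: reset the oldest volume, which evicts its
	// contents, and rotate it to the front to receive new writes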
	if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
		t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
		if resetErr != nil {
			glog.Errorf("failed to reset cache file %s: %v", c.diskCaches[len(c.diskCaches)-1].fileName, resetErr)
			return
		}
		for i := len(c.diskCaches) - 1; i > 0; i-- {
			c.diskCaches[i] = c.diskCaches[i-1]
		}
		c.diskCaches[0] = t
	}

	if err := c.diskCaches[0].WriteNeedle(needleId, data); err != nil {
		glog.V(0).Infof("cache write %v size %d: %v", needleId, len(data), err)
	}

}
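
// getChunk scans the volumes from newest to oldest and returns the cached
// data for needleId, or nil when no volume holds it.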
func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {

	var err error

	for _, diskCache := range c.diskCaches {
		data, err = diskCache.GetNeedle(needleId)
		if err == storage.ErrorNotFound {
			continue
		}
		if err != nil {
			glog.Errorf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err)
			continue
		}
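		// a non-empty read is a cache hit; an empty one means keep scanning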
		if len(data) != 0 {
			return
		}
	}

	return nil

}
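
// getChunkSlice is like getChunk but reads only length bytes starting at
// offset within the cached needle.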
func (c *OnDiskCacheLayer) getChunkSlice(needleId types.NeedleId, offset, length uint64) (data []byte) {

	var err error

	for _, diskCache := range c.diskCaches {
		data, err = diskCache.getNeedleSlice(needleId, offset, length)
		if err == storage.ErrorNotFound {
			continue
		}
		if err != nil {
			glog.Errorf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err)
			continue
		}
		if len(data) != 0 {
			return
		}
	}

	return nil

}
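
// shutdown closes all cache volumes.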
func (c *OnDiskCacheLayer) shutdown() {

	for _, diskCache := range c.diskCaches {
		diskCache.Shutdown()
	}

}