seaweedfs/weed/filesys/dirty_page.go

package filesys

import (
	"bytes"
	"io"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// ContinuousDirtyPages holds the in-memory dirty (written but not yet
// flushed) byte ranges of one open file, and remembers which collection
// and replication its flushed chunks were assigned to.
type ContinuousDirtyPages struct {
	intervals   *ContinuousIntervals
	f           *File
	lock        sync.Mutex
	collection  string
	replication string
}

func newDirtyPages(file *File) *ContinuousDirtyPages {
	return &ContinuousDirtyPages{
		intervals: &ContinuousIntervals{},
		f:         file,
	}
}
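
// A rough usage sketch (illustrative only: the actual wiring lives in
// file.go/filehandle.go, and the fh/file names below are assumptions,
// not this package's API):
//
//	fh.dirtyPages = newDirtyPages(file)                       // on open
//	chunks, err := fh.dirtyPages.AddPage(off, p)              // on each write
//	chunks, err = fh.dirtyPages.saveExistingPagesToStorage()  // on flush/fsync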

var counter = int32(0)

// AddPage buffers a write of data bytes at the given file offset. A write
// larger than the chunk size limit bypasses the buffer and is flushed
// immediately; otherwise the write is merged into the in-memory intervals,
// and once the buffered total exceeds the limit the largest interval is
// saved to storage and returned as a chunk.
func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {

	glog.V(5).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)

	if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
		// this piece of data is more than the buffer can hold; flush and save it directly
		return pages.flushAndSave(offset, data)
	}

	pages.intervals.AddInterval(data, offset)

	var chunk *filer_pb.FileChunk
	var hasSavedData bool

	if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit {
		chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage()
		if hasSavedData {
			chunks = append(chunks, chunk)
		}
	}

	return
}
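
// For example, assuming ContinuousIntervals merges overlapping writes with
// the newest data winning (see dirty_page_interval.go), two calls
//
//	pages.AddPage(0, []byte("abcd")) // buffers [0,4)
//	pages.AddPage(2, []byte("XYZW")) // buffers [2,6), overwriting [2,4)
//
// leave a single dirty interval [0,6) containing "abXYZW".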

// flushAndSave writes out all buffered intervals, then saves the oversized
// write directly to storage as its own chunk.
func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {

	var chunk *filer_pb.FileChunk
	var newChunks []*filer_pb.FileChunk

	// flush existing
	if newChunks, err = pages.saveExistingPagesToStorage(); err == nil {
		if newChunks != nil {
			chunks = append(chunks, newChunks...)
		}
	} else {
		return
	}

	// flush the new page
	if chunk, err = pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))); err == nil {
		if chunk != nil {
			glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.FullPath(), pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
			chunks = append(chunks, chunk)
		}
	} else {
		// chunk is nil when saveToStorage fails, so log the requested range instead
		glog.V(0).Infof("%s/%s failed to flush [%d,%d): %v", pages.f.dir.FullPath(), pages.f.Name, offset, offset+int64(len(data)), err)
		return
	}

	return
}

// saveExistingPagesToStorage flushes every buffered interval, largest first,
// returning the chunks written so far and the first error encountered.
func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer_pb.FileChunk, err error) {

	var hasSavedData bool
	var chunk *filer_pb.FileChunk

	for {

		chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage()
		if !hasSavedData {
			return chunks, err
		}

		if err == nil {
			if chunk != nil {
				chunks = append(chunks, chunk)
			}
		} else {
			return
		}
	}
}

// saveExistingLargestPageToStorage removes the largest buffered interval and
// writes it to storage, retrying until the upload succeeds. Data beyond the
// current file size (e.g. after a truncate) is not saved.
func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *filer_pb.FileChunk, hasSavedData bool, err error) {

	maxList := pages.intervals.RemoveLargestIntervalLinkedList()
	if maxList == nil {
		return nil, false, nil
	}

	fileSize := int64(pages.f.entry.Attributes.FileSize)
	for {
		// cap the chunk at the current file size
		chunkSize := min(maxList.Size(), fileSize-maxList.Offset())
		if chunkSize == 0 {
			return
		}

		chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize)
		if err == nil {
			if chunk != nil {
				hasSavedData = true
			}
			glog.V(4).Infof("saveToStorage %s %s [%d,%d) of %d bytes", pages.f.fullpath(), chunk.GetFileIdString(), maxList.Offset(), maxList.Offset()+chunkSize, fileSize)
			return
		} else {
			glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+chunkSize, err)
			time.Sleep(5 * time.Second)
		}
	}
}
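
// Note the design choice above: a failed upload is retried every five
// seconds indefinitely rather than surfaced to the caller, so buffered
// dirty data is never dropped while the volume servers are unreachable.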

// saveToStorage uploads size bytes from reader as a new chunk at the given
// offset, remembering the collection and replication the chunk landed on.
func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) {

	dir, _ := pages.f.fullpath().DirAndName()

	reader = io.LimitReader(reader, size)
	chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(dir)(reader, pages.f.Name, offset)
	if err != nil {
		return nil, err
	}

	pages.collection, pages.replication = collection, replication

	return chunk, nil
}
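
// For reading convenience: saveDataAsChunk(dir) above is expected to return
// a writer function of roughly this shape (defined elsewhere in this
// package; inferred here from the call site, not copied from its source):
//
//	func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error)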

// maxUint64 returns the larger of two uint64 values.
func maxUint64(x, y uint64) uint64 {
	if x > y {
		return x
	}
	return y
}

// max returns the larger of two int64 values.
func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

// min returns the smaller of two int64 values.
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

// ReadDirtyDataAt copies any buffered dirty bytes overlapping data's range
// starting at startOffset into data, returning the largest end offset of
// dirty data copied.
func (pages *ContinuousDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
	return pages.intervals.ReadDataAt(data, startOffset)
}