package filesys

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// DirtyPage is one contiguous run of bytes that has been written to a file
// but not yet uploaded to a volume server.
type DirtyPage struct {
	Offset int64
	Data   []byte
}

// ContinuousDirtyPages buffers sequential writes to a single file so that
// they can be uploaded as one chunk instead of many small ones.
type ContinuousDirtyPages struct {
	sync.Mutex

	pages []*DirtyPage
	f     *File
}

// AddPage buffers one write. It returns a non-nil chunk whenever buffered
// data had to be saved to storage, so the caller can record the new chunk
// in the file's metadata.
func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunk *filer_pb.FileChunk, err error) {
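	// Three cheap cases are merged in memory: an exact replace of one buffered
	// page, an exact overwrite of all buffered bytes, and an append directly
	// after the buffered bytes. Any other pattern (a seek, a hole, an overlap)
	// flushes the buffer first so that it always stays continuous.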
	pages.Lock()
	defer pages.Unlock()

	isPerfectOverwrite := false
	isPerfectAppend := false

	if len(pages.pages) > 0 {
		lastPage := pages.pages[len(pages.pages)-1]
		if lastPage.Offset+int64(len(lastPage.Data)) == offset {
			// the write continues the buffered pages
			glog.V(4).Infof("%s/%s append [%d,%d)", pages.f.dir.Path, pages.f.Name, offset, offset+int64(len(data)))
			isPerfectAppend = true
		}
		if pages.pages[0].Offset == offset && pages.totalSize() == int64(len(data)) {
			glog.V(4).Infof("%s/%s overwrite [%d,%d)", pages.f.dir.Path, pages.f.Name, offset, offset+int64(len(data)))
			isPerfectOverwrite = true
		}
	} else {
		glog.V(4).Infof("%s/%s append [%d,%d)", pages.f.dir.Path, pages.f.Name, offset, offset+int64(len(data)))
		isPerfectAppend = true
	}

	isPerfectReplace := false
	for _, page := range pages.pages {
		if page.Offset == offset && len(page.Data) == len(data) {
			// the write lines up exactly with one buffered page
			glog.V(4).Infof("%s/%s replace [%d,%d)", pages.f.dir.Path, pages.f.Name, offset, offset+int64(len(data)))
			page.Data = data
			isPerfectReplace = true
		}
	}

	if isPerfectReplace {
		return nil, nil
	}

	if isPerfectAppend || isPerfectOverwrite {
		if isPerfectAppend {
			glog.V(4).Infof("%s/%s append2 [%d,%d)", pages.f.dir.Path, pages.f.Name, offset, offset+int64(len(data)))
			pages.pages = append(pages.pages, &DirtyPage{
				Offset: offset,
				Data:   data,
			})
		}

		if isPerfectOverwrite {
			glog.V(4).Infof("%s/%s overwrite2 [%d,%d)", pages.f.dir.Path, pages.f.Name, offset, offset+int64(len(data)))
			pages.pages = []*DirtyPage{{
				Offset: offset,
				Data:   data,
			}}
		}

		// flush early once the buffer reaches the configured chunk size
		if pages.f.wfs.chunkSizeLimit > 0 && pages.totalSize() >= pages.f.wfs.chunkSizeLimit {
			chunk, err = pages.saveToStorage(ctx)
			pages.pages = nil
			if err == nil && chunk != nil {
				glog.V(4).Infof("%s/%s over size limit [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
			}
		}

		return
	}

	// the write is not continuous with the buffer: save what is buffered,
	// then start a fresh buffer holding only this write
	chunk, err = pages.saveToStorage(ctx)

	if err == nil && chunk != nil {
		glog.V(4).Infof("%s/%s saved [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
	}

	pages.pages = []*DirtyPage{{
		Offset: offset,
		Data:   data,
	}}

	return
}

// FlushToStorage uploads all buffered pages as one chunk. The buffer is
// cleared only when the upload succeeds, so a failed flush can be retried.
func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) {
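	// saveToStorage returns a nil chunk when nothing is buffered, which is
	// why the result is checked for nil before logging below.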
	pages.Lock()
	defer pages.Unlock()

	if chunk, err = pages.saveToStorage(ctx); err == nil {
		pages.pages = nil
		if chunk != nil {
			glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
		}
	}
	return
}

// totalSize returns the number of bytes currently buffered.
func (pages *ContinuousDirtyPages) totalSize() (total int64) {
	for _, page := range pages.pages {
		total += int64(len(page.Data))
	}
	return
}

// saveToStorage asks the filer to assign a volume, uploads all buffered
// pages to that volume server as a single object, and returns the chunk
// metadata describing the upload. It does not modify pages.pages.
func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context) (*filer_pb.FileChunk, error) {
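	// Two steps: ask the filer to assign a file id and a volume server, then
	// POST the buffered bytes to that volume server as a single object.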
	if len(pages.pages) == 0 {
		return nil, nil
	}

	var fileId, host string

	if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: pages.f.wfs.replication,
			Collection:  pages.f.wfs.collection,
		}

		resp, err := client.AssignVolume(ctx, request)
		if err != nil {
			glog.V(0).Infof("assign volume failure %v: %v", request, err)
			return err
		}

		fileId, host = resp.FileId, resp.Url

		return nil
	}); err != nil {
		return nil, fmt.Errorf("filer assign volume: %v", err)
	}

	// stream all pages as one upload without concatenating them in memory
	var readers []io.Reader
	for _, page := range pages.pages {
		readers = append(readers, bytes.NewReader(page.Data))
	}

	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
	bufReader := io.MultiReader(readers...)
	uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "")
	if err != nil {
		glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err)
		return nil, fmt.Errorf("upload data: %v", err)
	}
	if uploadResult.Error != "" {
		glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, uploadResult.Error)
		return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	return &filer_pb.FileChunk{
		FileId: fileId,
		Offset: pages.pages[0].Offset,
		Size:   uint64(pages.totalSize()),
		Mtime:  time.Now().UnixNano(),
	}, nil
}
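// A minimal sketch of how a write path could drive ContinuousDirtyPages,
// assuming a *File f with its wfs and dir fields already initialized, and a
// hypothetical request req carrying Offset and Data (illustrative names only,
// not part of this file's API):
//
//	pages := &ContinuousDirtyPages{f: f}
//	chunk, err := pages.AddPage(ctx, req.Offset, req.Data) // may save a chunk
//	if err == nil && chunk != nil {
//		// record the new chunk in the file's metadata
//	}
//	// ... more writes ...
//	if chunk, err := pages.FlushToStorage(ctx); err == nil && chunk != nil {
//		// record the final chunk as well
//	}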