Mirror of https://github.com/seaweedfs/seaweedfs.git
Synced 2024-01-19 02:48:24 +00:00

Commit 79b8e6a8c3 (parent bc888226fc): add a placeholder for later merging of consecutive chunks
@@ -164,6 +164,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 	}
 
 	var entry *filer.Entry
+	var newChunks []*filer_pb.FileChunk
 	var mergedChunks []*filer_pb.FileChunk
 
 	isAppend := isAppend(r)
@@ -186,7 +187,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 			}
 			entry.FileSize += uint64(chunkOffset)
 		}
-		mergedChunks = append(entry.Chunks, fileChunks...)
+		newChunks = append(entry.Chunks, fileChunks...)
 
 		// TODO
 		if len(entry.Content) > 0 {
@@ -196,7 +197,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 
 	} else {
 		glog.V(4).Infoln("saving", path)
-		mergedChunks = fileChunks
+		newChunks = fileChunks
 		entry = &filer.Entry{
 			FullPath: util.FullPath(path),
 			Attr: filer.Attr{
@@ -217,6 +218,13 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		}
 	}
 
+	// maybe concatenate small chunks into one whole chunk
+	mergedChunks, replyerr = fs.maybeMergeChunks(so, newChunks)
+	if replyerr != nil {
+		glog.V(0).Infof("merge chunks %s: %v", r.RequestURI, replyerr)
+		mergedChunks = newChunks
+	}
+
 	// maybe compact entry chunks
 	mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks)
 	if replyerr != nil {
weed/server/filer_server_handlers_write_merge.go (new file, 11 additions)
@@ -0,0 +1,11 @@
+package weed_server
+
+import (
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func (fs *FilerServer) maybeMergeChunks(so *operation.StorageOption, inputChunks []*filer_pb.FileChunk) (mergedChunks []*filer_pb.FileChunk, err error) {
+	//TODO merge consecutive smaller chunks into a large chunk to reduce number of chunks
+	return inputChunks, nil
+}
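
The TODO above leaves the merging strategy itself open. As a rough illustration of the kind of logic it points at, the minimal standalone sketch below only identifies runs of byte-adjacent chunks that fall under a size threshold; it uses a simplified local chunk type rather than the real filer_pb.FileChunk, and the 1 MiB threshold, the groupConsecutiveSmallChunks helper, and the example file ids are illustrative assumptions, not part of this commit.

// A hypothetical sketch, not the SeaweedFS implementation: find runs of
// consecutive small chunks that could later be combined into one chunk.
package main

import (
	"fmt"
	"sort"
)

// chunk is a simplified stand-in for the offset/size bookkeeping that
// filer_pb.FileChunk carries.
type chunk struct {
	fileId string
	offset int64
	size   int64
}

// groupConsecutiveSmallChunks returns runs of chunks that are byte-adjacent
// (one chunk ends exactly where the next begins) and individually smaller
// than smallLimit. Each run is a candidate to be rewritten as one chunk.
func groupConsecutiveSmallChunks(chunks []chunk, smallLimit int64) [][]chunk {
	sorted := append([]chunk(nil), chunks...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].offset < sorted[j].offset })

	var groups [][]chunk
	var run []chunk
	flush := func() {
		if len(run) > 1 { // a run of one chunk gains nothing from merging
			groups = append(groups, run)
		}
		run = nil
	}
	for _, c := range sorted {
		if c.size >= smallLimit {
			flush() // large chunks are left alone and break any run
			continue
		}
		if len(run) > 0 && run[len(run)-1].offset+run[len(run)-1].size != c.offset {
			flush() // gap or overlap: start a new run
		}
		run = append(run, c)
	}
	flush()
	return groups
}

func main() {
	chunks := []chunk{
		{fileId: "1,ab", offset: 0, size: 4 << 10},
		{fileId: "2,cd", offset: 4 << 10, size: 4 << 10},
		{fileId: "3,ef", offset: 8 << 10, size: 4 << 10},
		{fileId: "4,gh", offset: 12 << 10, size: 8 << 20}, // already large
	}
	for _, run := range groupConsecutiveSmallChunks(chunks, 1<<20) {
		first, last := run[0], run[len(run)-1]
		fmt.Printf("could merge %d chunks covering bytes [%d, %d)\n",
			len(run), first.offset, last.offset+last.size)
	}
}

In the filer itself, turning each such run into a single chunk would additionally require reading the run's data back and uploading it as one new chunk that replaces the run in the entry, which is exactly the work this placeholder defers.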