mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
Merge pull request #2694 from banjiaojuhao/filer_http-write_with_offset
filer_http: support uploading file with offset
This commit is contained in:
commit 5f6f9e9a53
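For context, a minimal client-side sketch of what this change enables: a request can write data at a byte offset inside an existing file by passing the new offset query parameter. Only the offset (and the pre-existing op=append) query parameters come from this pull request; the filer address, path, and payload below are placeholders.

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    // Upload the first part of the file, then write a second part at byte
    // offset 4. Host and path are assumptions for illustration only.
    parts := []struct {
        url  string
        data string
    }{
        {"http://localhost:8888/buckets/demo/file.bin", "ABCD"},
        {"http://localhost:8888/buckets/demo/file.bin?offset=4", "WXYZ"},
    }
    for _, p := range parts {
        req, err := http.NewRequest(http.MethodPost, p.url, bytes.NewReader([]byte(p.data)))
        if err != nil {
            panic(err)
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
        fmt.Println(p.url, "->", resp.Status)
    }
}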
@@ -126,10 +126,6 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
 	return
 }
 
-func isAppend(r *http.Request) bool {
-	return r.URL.Query().Get("op") == "append"
-}
-
 func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {
 
 	// detect file mode
@@ -161,8 +157,11 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 
 	var entry *filer.Entry
 	var mergedChunks []*filer_pb.FileChunk
+
+	isAppend := r.URL.Query().Get("op") == "append"
+	isOffsetWrite := fileChunks[0].Offset > 0
 	// when it is an append
-	if isAppend(r) {
+	if isAppend || isOffsetWrite {
 		existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path))
 		if findErr != nil && findErr != filer_pb.ErrNotFound {
 			glog.V(0).Infof("failing to find %s: %v", path, findErr)
@@ -173,11 +172,13 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		entry.Mtime = time.Now()
 		entry.Md5 = nil
 		// adjust chunk offsets
-		for _, chunk := range fileChunks {
-			chunk.Offset += int64(entry.FileSize)
+		if isAppend {
+			for _, chunk := range fileChunks {
+				chunk.Offset += int64(entry.FileSize)
+			}
+			entry.FileSize += uint64(chunkOffset)
 		}
 		mergedChunks = append(entry.Chunks, fileChunks...)
-		entry.FileSize += uint64(chunkOffset)
 
 		// TODO
 		if len(entry.Content) > 0 {
@@ -215,6 +216,10 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		return
 	}
 	entry.Chunks = mergedChunks
+	if isOffsetWrite {
+		entry.Md5 = nil
+		entry.FileSize = entry.Size()
+	}
 
 	filerResult = &FilerPostResult{
 		Name: fileName,
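The hunk above is where an offset write lands in saveMetaData: the uploaded chunks are merged into the existing entry, the stored MD5 is invalidated, and the file size is recomputed via entry.Size(), since a chunk written at an offset may either extend past or fall entirely inside the previous length. Below is a minimal standalone sketch of that size rule, assuming entry.Size() reduces to the largest chunk end; the chunk type here is a stand-in, not filer_pb.FileChunk.

package main

import "fmt"

// chunk stands in for filer_pb.FileChunk; only the fields needed here.
type chunk struct {
    Offset int64
    Size   uint64
}

// sizeFromChunks returns the largest chunk end, which is what the entry's
// size must become after merging an offset write (an assumption about what
// entry.Size() computes, used only for illustration).
func sizeFromChunks(chunks []chunk) uint64 {
    var total uint64
    for _, c := range chunks {
        if end := uint64(c.Offset) + c.Size; end > total {
            total = end
        }
    }
    return total
}

func main() {
    base := chunk{Offset: 0, Size: 4 << 20} // existing 4 MiB file
    // A write at offset 1 MiB stays inside the file: size remains 4 MiB.
    fmt.Println(sizeFromChunks([]chunk{base, {Offset: 1 << 20, Size: 1 << 20}}))
    // A write at offset 8 MiB extends the file to 9 MiB.
    fmt.Println(sizeFromChunks([]chunk{base, {Offset: 8 << 20, Size: 1 << 20}}))
}

The remaining hunks are in uploadReaderToChunks, where the offset query parameter is parsed and used as the starting chunk offset.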
@@ -3,10 +3,12 @@ package weed_server
 import (
 	"bytes"
 	"crypto/md5"
+	"fmt"
 	"hash"
 	"io"
 	"net/http"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -28,6 +30,22 @@ var bufPool = sync.Pool{
 }
 
 func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
+	query := r.URL.Query()
+	isAppend := query.Get("op") == "append"
+
+	if query.Has("offset") {
+		offset := query.Get("offset")
+		offsetInt, err := strconv.ParseInt(offset, 10, 64)
+		if err != nil || offsetInt < 0 {
+			err = fmt.Errorf("invalid 'offset': '%s'", offset)
+			return nil, nil, 0, err, nil
+		}
+		if isAppend && offsetInt > 0 {
+			err = fmt.Errorf("cannot set offset when op=append")
+			return nil, nil, 0, err, nil
+		}
+		chunkOffset = offsetInt
+	}
 
 	md5Hash = md5.New()
 	var partReader = io.NopCloser(io.TeeReader(reader, md5Hash))
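The added block above enforces two rules before any data is read or uploaded: offset must parse as a non-negative integer, and a non-zero offset cannot be combined with op=append. A self-contained sketch of just that validation; parseOffset is a hypothetical helper for illustration, not a function in the codebase.

package main

import (
    "fmt"
    "net/url"
    "strconv"
)

// parseOffset mirrors the validation added to uploadReaderToChunks:
// "offset" must be a non-negative integer, and a non-zero offset cannot be
// combined with op=append. This is a standalone sketch, not the server code.
func parseOffset(query url.Values) (int64, error) {
    if !query.Has("offset") {
        return 0, nil
    }
    offset := query.Get("offset")
    offsetInt, err := strconv.ParseInt(offset, 10, 64)
    if err != nil || offsetInt < 0 {
        return 0, fmt.Errorf("invalid 'offset': '%s'", offset)
    }
    if query.Get("op") == "append" && offsetInt > 0 {
        return 0, fmt.Errorf("cannot set offset when op=append")
    }
    return offsetInt, nil
}

func main() {
    for _, raw := range []string{"offset=1024", "offset=-1", "op=append&offset=1024", "op=append"} {
        q, _ := url.ParseQuery(raw)
        off, err := parseOffset(q)
        fmt.Printf("%-24s -> offset=%d, err=%v\n", raw, off, err)
    }
}

Anything that fails these checks is returned as an error before any chunk is uploaded.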
@@ -63,7 +81,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 			bytesBufferLimitCond.Signal()
 			break
 		}
-		if chunkOffset == 0 && !isAppend(r) {
+		if chunkOffset == 0 && !isAppend {
 			if dataSize < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) {
 				chunkOffset += dataSize
 				smallContent = make([]byte, dataSize)