Merge pull request #2731 from banjiaojuhao/filer_add-back-isAppend

filer: add back isAppend function

Commit: b97a2cc08a

@@ -126,6 +126,10 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter
 	return
 }
 
+func isAppend(r *http.Request) bool {
+	return r.URL.Query().Get("op") == "append"
+}
+
 func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {
 
 	// detect file mode
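
For context, the restored helper only inspects the request's query string. Below is a minimal, self-contained sketch (standard library only; the filer URLs are hypothetical examples, not taken from this PR) of how the check behaves:

package main

import (
	"fmt"
	"net/http"
)

// isAppend reports whether a write request asked for append mode,
// mirroring the helper restored in the hunk above.
func isAppend(r *http.Request) bool {
	return r.URL.Query().Get("op") == "append"
}

func main() {
	// Hypothetical filer paths, used only to illustrate the query check.
	appendReq, _ := http.NewRequest(http.MethodPost, "http://localhost:8888/buckets/b/app.log?op=append", nil)
	plainReq, _ := http.NewRequest(http.MethodPost, "http://localhost:8888/buckets/b/app.log", nil)

	fmt.Println(isAppend(appendReq)) // true
	fmt.Println(isAppend(plainReq))  // false
}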

@@ -158,10 +162,9 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 	var entry *filer.Entry
 	var mergedChunks []*filer_pb.FileChunk
 
-	isAppend := r.URL.Query().Get("op") == "append"
 	isOffsetWrite := len(fileChunks) > 0 && fileChunks[0].Offset > 0
 	// when it is an append
-	if isAppend || isOffsetWrite {
+	if isAppend(r) || isOffsetWrite {
 		existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path))
 		if findErr != nil && findErr != filer_pb.ErrNotFound {
 			glog.V(0).Infof("failing to find %s: %v", path, findErr)

@@ -172,7 +175,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		entry.Mtime = time.Now()
 		entry.Md5 = nil
 		// adjust chunk offsets
-		if isAppend {
+		if isAppend(r) {
 			for _, chunk := range fileChunks {
 				chunk.Offset += int64(entry.FileSize)
 			}
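
The adjustment in the hunk above shifts the freshly uploaded chunks, whose offsets start at zero relative to the uploaded body, so that they land after the data already in the entry. A reduced sketch (the chunk struct and sizes below are illustrative stand-ins, not the filer_pb types):

package main

import "fmt"

// chunk is a stand-in for the Offset/Size fields of filer_pb.FileChunk.
type chunk struct {
	Offset int64
	Size   int64
}

func main() {
	existingFileSize := int64(100) // size of the entry being appended to
	fileChunks := []*chunk{        // chunks of the newly uploaded body
		{Offset: 0, Size: 64},
		{Offset: 64, Size: 36},
	}

	// Same adjustment as in the diff: shift every new chunk past the old data.
	for _, c := range fileChunks {
		c.Offset += existingFileSize
	}

	for _, c := range fileChunks {
		fmt.Printf("offset=%d size=%d\n", c.Offset, c.Size) // offsets become 100 and 164
	}
}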

@@ -31,7 +31,6 @@ var bufPool = sync.Pool{
 
 func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
 	query := r.URL.Query()
-	isAppend := query.Get("op") == "append"
 
 	if query.Has("offset") {
 		offset := query.Get("offset")

@@ -40,7 +39,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 			err = fmt.Errorf("invalid 'offset': '%s'", offset)
 			return nil, nil, 0, err, nil
 		}
-		if isAppend && offsetInt > 0 {
+		if isAppend(r) && offsetInt > 0 {
 			err = fmt.Errorf("cannot set offset when op=append")
 			return nil, nil, 0, err, nil
 		}
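
The rule enforced above is that an explicit non-zero offset and op=append cannot be combined. A standalone sketch of that validation over a raw query string (the function name validateWriteParams is a hypothetical helper for illustration, not server code):

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// validateWriteParams applies the same offset/append checks as the hunk above
// to a raw query string.
func validateWriteParams(rawQuery string) error {
	q, err := url.ParseQuery(rawQuery)
	if err != nil {
		return err
	}
	if q.Has("offset") {
		offsetInt, err := strconv.ParseInt(q.Get("offset"), 10, 64)
		if err != nil || offsetInt < 0 {
			return fmt.Errorf("invalid 'offset': '%s'", q.Get("offset"))
		}
		if q.Get("op") == "append" && offsetInt > 0 {
			return fmt.Errorf("cannot set offset when op=append")
		}
	}
	return nil
}

func main() {
	fmt.Println(validateWriteParams("offset=128"))           // <nil>
	fmt.Println(validateWriteParams("op=append&offset=128")) // cannot set offset when op=append
}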

@@ -81,7 +80,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 			bytesBufferLimitCond.Signal()
 			break
 		}
-		if chunkOffset == 0 && !isAppend {
+		if chunkOffset == 0 && !isAppend(r) {
 			if dataSize < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) {
 				chunkOffset += dataSize
 				smallContent = make([]byte, dataSize)
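
The last hunk keeps the inline-content shortcut out of append mode: only the first block of a non-append write that is under the configured limit, or that targets the filer's configuration tree, is stored inline in the entry rather than uploaded as a chunk. A reduced sketch of that decision (the filer.DirectoryEtcRoot constant is simplified to a plain "/etc/" prefix here, which is an assumption for illustration):

package main

import (
	"fmt"
	"strings"
)

// keepInline mirrors the condition in the hunk above using plain values:
// inline storage applies only to the first block of a non-append write that
// is small enough, or to paths under the assumed "/etc/" configuration tree.
func keepInline(chunkOffset int64, isAppend bool, dataSize int64, saveToFilerLimit int64, urlPath string) bool {
	if chunkOffset != 0 || isAppend {
		return false
	}
	return dataSize < saveToFilerLimit || strings.HasPrefix(urlPath, "/etc/")
}

func main() {
	fmt.Println(keepInline(0, false, 256, 1024, "/buckets/b/app.log"))  // true: small first write stays inline
	fmt.Println(keepInline(0, true, 256, 1024, "/buckets/b/app.log"))   // false: appends always go to chunks
	fmt.Println(keepInline(0, false, 4096, 1024, "/buckets/b/app.log")) // false: larger than the inline limit
}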