filer: avoid stuck uploader

fix https://github.com/chrislusf/seaweedfs/issues/1980

This reverts the file upload batch executor.
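The commit message does not spell out the exact trigger, but the removed batch-executor version has a shape that can wedge: its result-collecting goroutine stops on the first error without draining resultsChan (a channel with capacity 2), so chunk-upload tasks still queued on the shared limitedUploadProcessor block forever on their send. Because that processor is a process-wide pool sized to runtime.NumCPU(), the blocked tasks can starve every other upload on the filer. The following standalone sketch (illustrative only, not SeaweedFS code; the result type, producer count, and one-second watchdog are made up) reproduces that failure mode:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// result mimics the removed ChunkCreationResult: either a chunk or an error.
type result struct {
	chunk string
	err   error
}

func main() {
	// Bounded results channel, like make(chan *ChunkCreationResult, 2) in the removed code.
	results := make(chan result, 2)

	// Producers stand in for the chunk-upload tasks queued on the limited processor.
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i == 0 {
				results <- result{err: errors.New("upload failed")}
				return
			}
			time.Sleep(10 * time.Millisecond)
			results <- result{chunk: fmt.Sprintf("chunk-%d", i)} // blocks once the 2-slot buffer is full
		}(i)
	}

	// Consumer mirrors the removed result-processing goroutine:
	// it breaks on the first error and never drains the channel.
	for r := range results {
		if r.err != nil {
			break
		}
	}

	// Watchdog (illustrative only): the real code would simply stay stuck.
	done := make(chan struct{})
	go func() { wg.Wait(); close(done) }()
	select {
	case <-done:
		fmt.Println("all producers finished")
	case <-time.After(time.Second):
		fmt.Println("stuck: producers blocked on sends nobody will receive")
	}
}

The revert sidesteps the coordination problem entirely by reading and uploading chunks sequentially in a single goroutine, as the diff below shows.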
Chris Lu 2021-04-09 13:05:15 -07:00
parent 93f4146ffa
commit 0b82edc0d2

@@ -6,9 +6,7 @@ import (
 	"io"
 	"io/ioutil"
 	"net/http"
-	"runtime"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
@@ -20,143 +18,75 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-var (
-	limitedUploadProcessor = util.NewLimitedOutOfOrderProcessor(int32(runtime.NumCPU()))
-)
-
-func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, dataSize int64, err error, smallContent []byte) {
-
-	md5Hash = md5.New()
-	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
-
-	// save small content directly
-	if !isAppend(r) && ((0 < contentLength && contentLength < fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && contentLength < 4*1024) {
-		smallContent, err = ioutil.ReadAll(partReader)
-		dataSize = int64(len(smallContent))
-		return
-	}
-
-	resultsChan := make(chan *ChunkCreationResult, 2)
-
-	var waitForAllData sync.WaitGroup
-	waitForAllData.Add(1)
-	go func() {
-		// process upload results
-		defer waitForAllData.Done()
-		for result := range resultsChan {
-			if result.err != nil {
-				err = result.err
-				break
-			}
-
-			// Save to chunk manifest structure
-			fileChunks = append(fileChunks, result.chunk)
-		}
-	}()
-
-	var lock sync.Mutex
-	readOffset := int64(0)
-	var wg sync.WaitGroup
-
-	for err == nil {
-
-		wg.Add(1)
-		request := func() {
-			defer wg.Done()
-
-			var localOffset int64
-			// read from the input
-			lock.Lock()
-			localOffset = readOffset
-			limitedReader := io.LimitReader(partReader, int64(chunkSize))
-			data, readErr := ioutil.ReadAll(limitedReader)
-			readOffset += int64(len(data))
-			lock.Unlock()
-
-			// handle read errors
-			if readErr != nil {
-				if err == nil {
-					err = readErr
-				}
-				if readErr != io.EOF {
-					resultsChan <- &ChunkCreationResult{
-						err: readErr,
-					}
-				}
-				return
-			}
-			if len(data) == 0 {
-				readErr = io.EOF
-				if err == nil {
-					err = readErr
-				}
-				return
-			}
-
-			// upload
-			dataReader := util.NewBytesReader(data)
-			fileId, uploadResult, uploadErr := fs.doCreateChunk(w, r, so, dataReader, fileName, contentType)
-			if uploadErr != nil {
-				if err == nil {
-					err = uploadErr
-				}
-				resultsChan <- &ChunkCreationResult{
-					err: uploadErr,
-				}
-				return
-			}
-
-			glog.V(4).Infof("uploaded %s to %s [%d,%d)", fileName, fileId, localOffset, localOffset+int64(uploadResult.Size))
-
-			// send back uploaded file chunk
-			resultsChan <- &ChunkCreationResult{
-				chunk: uploadResult.ToPbFileChunk(fileId, localOffset),
-			}
-
-		}
-		limitedUploadProcessor.Execute(request)
-	}
-
-	go func() {
-		wg.Wait()
-		close(resultsChan)
-	}()
-
-	waitForAllData.Wait()
-
-	if err == io.EOF {
-		err = nil
-	}
-
-	return fileChunks, md5Hash, readOffset, err, nil
-}
-
-type ChunkCreationResult struct {
-	chunk *filer_pb.FileChunk
-	err   error
-}
-
-func (fs *FilerServer) doCreateChunk(w http.ResponseWriter, r *http.Request, so *operation.StorageOption, dataReader *util.BytesReader, fileName string, contentType string) (string, *operation.UploadResult, error) {
-	// retry to assign a different file id
-	var fileId, urlLocation string
-	var auth security.EncodedJwt
-	var assignErr, uploadErr error
-	var uploadResult *operation.UploadResult
-	for i := 0; i < 3; i++ {
-		// assign one file id for one chunk
-		fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
-		if assignErr != nil {
-			return "", nil, assignErr
-		}
-
-		// upload the chunk to the volume server
-		uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
-		if uploadErr != nil {
-			time.Sleep(251 * time.Millisecond)
-			continue
-		}
-		break
-	}
-	return fileId, uploadResult, uploadErr
-}
-
+func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
+	var fileChunks []*filer_pb.FileChunk
+
+	md5Hash := md5.New()
+	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
+
+	chunkOffset := int64(0)
+	var smallContent []byte
+
+	for {
+		limitedReader := io.LimitReader(partReader, int64(chunkSize))
+
+		data, err := ioutil.ReadAll(limitedReader)
+		if err != nil {
+			return nil, nil, 0, err, nil
+		}
+		if chunkOffset == 0 && !isAppend(r) {
+			if len(data) < int(fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 {
+				smallContent = data
+				chunkOffset += int64(len(data))
+				break
+			}
+		}
+
+		dataReader := util.NewBytesReader(data)
+
+		// retry to assign a different file id
+		var fileId, urlLocation string
+		var auth security.EncodedJwt
+		var assignErr, uploadErr error
+		var uploadResult *operation.UploadResult
+		for i := 0; i < 3; i++ {
+			// assign one file id for one chunk
+			fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
+			if assignErr != nil {
+				return nil, nil, 0, assignErr, nil
+			}
+
+			// upload the chunk to the volume server
+			uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
+			if uploadErr != nil {
+				time.Sleep(251 * time.Millisecond)
+				continue
+			}
+			break
+		}
+		if uploadErr != nil {
+			return nil, nil, 0, uploadErr, nil
+		}
+
+		// if last chunk exhausted the reader exactly at the border
+		if uploadResult.Size == 0 {
+			break
+		}
+
+		// Save to chunk manifest structure
+		fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
+		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))
+
+		// reset variables for the next chunk
+		chunkOffset = chunkOffset + int64(uploadResult.Size)
+
+		// if last chunk was not at full chunk size, but already exhausted the reader
+		if int64(uploadResult.Size) < int64(chunkSize) {
+			break
+		}
+	}
+
+	return fileChunks, md5Hash, chunkOffset, nil, smallContent
+}
+
 func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {
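One detail worth noting in the restored loop: ioutil.ReadAll never returns io.EOF (it treats EOF as successful completion), so an exhausted reader shows up as a zero-length read rather than an error. That is why termination hinges on the uploadResult.Size == 0 and Size < chunkSize checks instead of an explicit EOF test. A standalone sketch of the same read pattern (the chunk size and sample input are arbitrary):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	const chunkSize = 4
	reader := bytes.NewReader([]byte("abcdefghij")) // 10 bytes -> chunks of 4, 4, 2

	for {
		data, err := ioutil.ReadAll(io.LimitReader(reader, chunkSize))
		if err != nil {
			panic(err) // ReadAll never returns io.EOF; a real error is fatal here
		}
		if len(data) == 0 {
			// Exhausted exactly at a chunk border: one extra empty read,
			// the case the diff's uploadResult.Size == 0 check covers.
			fmt.Println("done: empty read at the border")
			break
		}
		fmt.Printf("chunk %q (%d bytes)\n", data, len(data))
		if len(data) < chunkSize {
			// A short chunk means the reader is already exhausted,
			// matching the Size < chunkSize early break in the diff.
			fmt.Println("done: short final chunk")
			break
		}
	}
}

With the 10-byte input the loop ends on a short final chunk; with an 8-byte input it would instead end on the empty read at the chunk border.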