diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go
index 55a56c6f5..9c4a7498b 100644
--- a/weed/replication/sink/s3sink/s3_sink.go
+++ b/weed/replication/sink/s3sink/s3_sink.go
@@ -127,29 +127,29 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures
 
 	parts := make([]*s3.CompletedPart, len(chunkViews))
 
-	var wg sync.WaitGroup
-	for chunkIndex, chunk := range chunkViews {
-		partId := chunkIndex + 1
-		wg.Add(1)
-		go func(chunk *filer.ChunkView, index int) {
-			defer wg.Done()
-			if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
-				err = uploadErr
-				glog.Errorf("uploadPart: %v", uploadErr)
-			} else {
-				parts[index] = part
-			}
-		}(chunk, chunkIndex)
-	}
-	wg.Wait()
-
-	// for small files
-	if len(entry.Content) > 0 {
-		parts = make([]*s3.CompletedPart, 1)
+	if len(parts) > 0 {
+		var wg sync.WaitGroup
+		for chunkIndex, chunk := range chunkViews {
+			partId := chunkIndex + 1
+			wg.Add(1)
+			go func(chunk *filer.ChunkView, index int) {
+				defer wg.Done()
+				if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
+					err = uploadErr
+					glog.Errorf("uploadPart: %v", uploadErr)
+				} else {
+					parts[index] = part
+				}
+			}(chunk, chunkIndex)
+		}
+		wg.Wait()
+	} else if len(entry.Content) > 0 {
+		// for small files
 		if part, uploadErr := s3sink.doUploadPart(key, uploadId, 1, bytes.NewReader(entry.Content)); uploadErr != nil {
 			err = uploadErr
 			glog.Errorf("uploadPart: %v", uploadErr)
 		} else {
+			parts = make([]*s3.CompletedPart, 1)
 			parts[0] = part
 		}
 	}
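
For readers skimming the hunk, here is a minimal, abbreviated sketch of the control flow it produces (not the full patched method; it reuses only names already visible in the diff, such as `chunkViews`, `parts`, `uploadId`, `err`, `s3sink.uploadPart`, and `s3sink.doUploadPart`, and drops the `glog` calls). The behavioral change is that the concurrent chunk-part uploads and the inline `entry.Content` path are now mutually exclusive, and the single-element `parts` slice is only allocated once the inline upload has actually succeeded:

```go
// Sketch only: condensed from the patched CreateEntry body shown in the diff above.
parts := make([]*s3.CompletedPart, len(chunkViews))

if len(parts) > 0 {
	// Chunked entry: upload each chunk view as its own part, concurrently.
	var wg sync.WaitGroup
	for chunkIndex, chunk := range chunkViews {
		partId := chunkIndex + 1
		wg.Add(1)
		go func(chunk *filer.ChunkView, index int) {
			defer wg.Done()
			if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
				err = uploadErr
			} else {
				parts[index] = part
			}
		}(chunk, chunkIndex)
	}
	wg.Wait()
} else if len(entry.Content) > 0 {
	// Small entry stored inline in the filer entry: upload the content as a
	// single part, and only size the completed-parts slice after success.
	if part, uploadErr := s3sink.doUploadPart(key, uploadId, 1, bytes.NewReader(entry.Content)); uploadErr != nil {
		err = uploadErr
	} else {
		parts = []*s3.CompletedPart{part}
	}
}
```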