remove ctx if possible

Chris Lu 2020-02-25 22:23:59 -08:00
parent ca4ca1ae6f
commit 97ab8a1976
26 changed files with 94 additions and 132 deletions
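Every file below repeats the same mechanical refactor: a ctx context.Context parameter that was only ever threaded down to a terminal gRPC or SDK call is dropped from the signature, and the callee now constructs context.Background() at the call site. A minimal sketch of the before/after shape, with illustrative names that are not from this commit:

package main

import (
    "context"
    "fmt"
)

// callRPC stands in for a generated gRPC client method that genuinely needs a context.
func callRPC(ctx context.Context, addr string) error {
    fmt.Println("calling", addr)
    return ctx.Err()
}

// Before: every caller up the chain had to create and thread a context through.
func fetchConfigBefore(ctx context.Context, addr string) error {
    return callRPC(ctx, addr)
}

// After: the context is created where the RPC happens, so the whole call
// chain above loses a parameter.
func fetchConfigAfter(addr string) error {
    return callRPC(context.Background(), addr)
}

func main() {
    _ = fetchConfigBefore(context.Background(), "filer:18888") // placeholder address
    _ = fetchConfigAfter("filer:18888")
}

The trade-off is that cancellation and deadlines can no longer propagate in from callers; as the removed "ctx := context.Background()" lines show, each threaded ctx originated from context.Background() anyway, so behavior is unchanged — which is what the commit title's "if possible" refers to.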

View file

@@ -107,9 +107,7 @@ func runCopy(cmd *Command, args []string) bool {
 	filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
 	copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
 
-	ctx := context.Background()
-
-	masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress)
+	masters, collection, replication, maxMB, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
 	if err != nil {
 		fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
 		return false
@@ -149,7 +147,7 @@ func runCopy(cmd *Command, args []string) bool {
 				filerHost:        filerUrl.Host,
 				filerGrpcAddress: filerGrpcAddress,
 			}
-			if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil {
+			if err := worker.copyFiles(fileCopyTaskChan); err != nil {
 				fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
 				return
 			}
@@ -160,9 +158,9 @@ func runCopy(cmd *Command, args []string) bool {
 	return true
 }
 
-func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
+func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
	err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
-		resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
+		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 		if err != nil {
 			return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
 		}
@@ -211,9 +209,9 @@ type FileCopyWorker struct {
 	filerGrpcAddress string
 }
 
-func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error {
+func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
 	for task := range fileCopyTaskChan {
-		if err := worker.doEachCopy(ctx, task); err != nil {
+		if err := worker.doEachCopy(task); err != nil {
 			return err
 		}
 	}
@@ -229,7 +227,7 @@ type FileCopyTask struct {
 	gid uint32
 }
 
-func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error {
+func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
 	f, err := os.Open(task.sourceLocation)
 	if err != nil {

View file

@@ -126,10 +126,9 @@ func (s3opt *S3Options) startS3Server() bool {
 	filerBucketsPath := "/buckets"
 
 	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
-	ctx := context.Background()
 
 	err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
-		resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
+		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 		if err != nil {
 			return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
 		}

View file

@@ -34,7 +34,7 @@ func (pages *ContinuousDirtyPages) releaseResource() {
 
 var counter = int32(0)
 
-func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
 
 	pages.lock.Lock()
 	defer pages.lock.Unlock()
@@ -43,7 +43,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da
 	if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
 		// this is more than what buffer can hold.
-		return pages.flushAndSave(ctx, offset, data)
+		return pages.flushAndSave(offset, data)
 	}
 
 	pages.intervals.AddInterval(data, offset)
@@ -61,7 +61,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da
 	return
 }
 
-func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
+func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
 
 	var chunk *filer_pb.FileChunk
 	var newChunks []*filer_pb.FileChunk
@@ -206,7 +206,7 @@ func min(x, y int64) int64 {
 	return y
 }
 
-func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int) {
+func (pages *ContinuousDirtyPages) ReadDirtyData(data []byte, startOffset int64) (offset int64, size int) {
 
 	pages.lock.Lock()
 	defer pages.lock.Unlock()

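For orientation, ContinuousDirtyPages is a mutex-guarded write buffer: AddPage records written byte ranges and ReadDirtyData serves reads that overlap them — neither needs a context. A much-reduced sketch of that shape, holding a single interval where the real type keeps an interval list and flushes chunks (names are illustrative):

package main

import (
    "fmt"
    "sync"
)

// dirtyPages buffers one written interval under a lock, in the spirit of
// ContinuousDirtyPages after this change: no context parameter anywhere.
type dirtyPages struct {
    lock sync.Mutex
    off  int64
    data []byte
}

func (p *dirtyPages) AddPage(offset int64, data []byte) {
    p.lock.Lock()
    defer p.lock.Unlock()
    p.off = offset
    p.data = append(p.data[:0], data...)
}

// ReadDirtyData copies any buffered bytes overlapping [startOffset, startOffset+len(buff)).
func (p *dirtyPages) ReadDirtyData(buff []byte, startOffset int64) (offset int64, size int) {
    p.lock.Lock()
    defer p.lock.Unlock()
    end := p.off + int64(len(p.data))
    readEnd := startOffset + int64(len(buff))
    if startOffset >= end || p.off >= readEnd {
        return 0, 0
    }
    from, to := max64(startOffset, p.off), min64(readEnd, end)
    copy(buff[from-startOffset:], p.data[from-p.off:to-p.off])
    return from, int(to - from)
}

func max64(x, y int64) int64 {
    if x > y {
        return x
    }
    return y
}

func min64(x, y int64) int64 {
    if x < y {
        return x
    }
    return y
}

func main() {
    p := &dirtyPages{}
    p.AddPage(100, []byte("hello"))
    buff := make([]byte, 8)
    offset, size := p.ReadDirtyData(buff, 98)
    fmt.Println(offset, size) // 100 5
}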
View file

@@ -53,9 +53,9 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
 
 	buff := make([]byte, req.Size)
 
-	totalRead, err := fh.readFromChunks(ctx, buff, req.Offset)
+	totalRead, err := fh.readFromChunks(buff, req.Offset)
 	if err == nil {
-		dirtyOffset, dirtySize := fh.readFromDirtyPages(ctx, buff, req.Offset)
+		dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset)
 		if totalRead+req.Offset < dirtyOffset+int64(dirtySize) {
 			totalRead = dirtyOffset + int64(dirtySize) - req.Offset
 		}
@@ -71,11 +71,11 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
 	return err
 }
 
-func (fh *FileHandle) readFromDirtyPages(ctx context.Context, buff []byte, startOffset int64) (offset int64, size int) {
-	return fh.dirtyPages.ReadDirtyData(ctx, buff, startOffset)
+func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) {
+	return fh.dirtyPages.ReadDirtyData(buff, startOffset)
 }
 
-func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset int64) (int64, error) {
+func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
 	// this value should come from the filer instead of the old f
 	if len(fh.f.entry.Chunks) == 0 {
@@ -106,7 +106,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 	fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize)))
 	// glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
 
-	chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
+	chunks, err := fh.dirtyPages.AddPage(req.Offset, req.Data)
 	if err != nil {
 		glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
 		return fuse.EIO

View file

@@ -60,8 +60,7 @@ func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error {
 	if err != nil {
 		return err
 	}
-	ctx := context.Background()
-	err = k.topic.Send(ctx, &pubsub.Message{
+	err = k.topic.Send(context.Background(), &pubsub.Message{
 		Body:     bytes,
 		Metadata: map[string]string{"key": key},
 	})

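The Go CDK API itself still takes a context as the first argument of Topic.Send; only the local variable was folded into the call. A hedged sketch of the same call shape using the in-memory driver (the topic URL and payload are assumptions, not from SeaweedFS):

package main

import (
    "context"
    "log"

    "gocloud.dev/pubsub"
    _ "gocloud.dev/pubsub/mempubsub" // in-memory driver, for the sketch only
)

func main() {
    ctx := context.Background()
    topic, err := pubsub.OpenTopic(ctx, "mem://events")
    if err != nil {
        log.Fatal(err)
    }
    defer topic.Shutdown(ctx)

    // Same shape as the diff: the context is built right at the Send call.
    if err := topic.Send(context.Background(), &pubsub.Message{
        Body:     []byte("hello"),
        Metadata: map[string]string{"key": "k1"},
    }); err != nil {
        log.Fatal(err)
    }
}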
View file

@@ -45,8 +45,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) {
 }
 
 func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
-	ctx := context.Background()
-	client, err := b2.NewClient(ctx, accountId, accountKey)
+	client, err := b2.NewClient(context.Background(), accountId, accountKey)
 	if err != nil {
 		return err
 	}

View file

@@ -50,7 +50,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
 	g.bucket = bucketName
 	g.dir = dir
 
-	ctx := context.Background()
 	// Creates a client.
 	if google_application_credentials == "" {
 		var found bool
@@ -59,7 +58,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
 			glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
 		}
 	}
-	client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials))
+	client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials))
 	if err != nil {
 		glog.Fatalf("Failed to create client: %v", err)
 	}

View file

@@ -113,7 +113,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		wg.Add(1)
 		go func(chunk *filer2.ChunkView) {
 			defer wg.Done()
-			if part, uploadErr := s3sink.uploadPart(context.Background(), key, uploadId, partId, chunk); uploadErr != nil {
+			if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
 				err = uploadErr
 			} else {
 				parts = append(parts, part)

View file

@@ -103,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
 }
 
 // To upload a part
-func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
 	var readSeeker io.ReadSeeker
 
-	readSeeker, err := s3sink.buildReadSeeker(ctx, chunk)
+	readSeeker, err := s3sink.buildReadSeeker(chunk)
 	if err != nil {
 		glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
 		return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
@@ -156,7 +156,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
 	return err
 }
 
-func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
 	fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
 	if err != nil {
 		return nil, err

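uploadPart bottoms out in aws-sdk-go's s3.UploadPart; the SDK's non-WithContext methods supply their own background context internally, so dropping the threaded ctx loses nothing here. A hedged sketch of that call shape — bucket, key, and upload ID are placeholders, not values from the commit:

package main

import (
    "bytes"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
    svc := s3.New(sess)

    resp, err := svc.UploadPart(&s3.UploadPartInput{
        Bucket:     aws.String("my-bucket"), // placeholder bucket
        Key:        aws.String("my-object"), // placeholder key
        UploadId:   aws.String("upload-id"), // from a prior CreateMultipartUpload
        PartNumber: aws.Int64(1),
        Body:       bytes.NewReader([]byte("part data")),
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println("uploaded part, etag:", aws.StringValue(resp.ETag))
}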
View file

@@ -1,7 +1,6 @@
 package s3api
 
 import (
-	"context"
 	"encoding/xml"
 	"fmt"
 	"path/filepath"
@@ -11,10 +10,11 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/google/uuid"
 
 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/google/uuid"
 )
 
 type InitiateMultipartUploadResult struct {
@@ -22,11 +22,11 @@ type InitiateMultipartUploadResult struct {
 	s3.CreateMultipartUploadOutput
 }
 
-func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
 	uploadId, _ := uuid.NewRandom()
 	uploadIdString := uploadId.String()
 
-	if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
+	if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
 		if entry.Extended == nil {
 			entry.Extended = make(map[string][]byte)
 		}
@@ -52,11 +52,11 @@ type CompleteMultipartUploadResult struct {
 	s3.CompleteMultipartUploadOutput
 }
 
-func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
 
 	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
 
-	entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0)
+	entries, err := s3a.list(uploadDirectory, "", "", false, 0)
 	if err != nil {
 		glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
 		return nil, ErrNoSuchUpload
@@ -96,7 +96,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
 		dirName = dirName[:len(dirName)-1]
 	}
 
-	err = s3a.mkFile(ctx, dirName, entryName, finalParts)
+	err = s3a.mkFile(dirName, entryName, finalParts)
 
 	if err != nil {
 		glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
@@ -112,22 +112,22 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
 		},
 	}
 
-	if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
+	if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
 		glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
 	}
 
 	return
 }
 
-func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
+func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
 
-	exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
+	exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
 	if err != nil {
 		glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
 		return nil, ErrNoSuchUpload
 	}
 	if exists {
-		err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
+		err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
 	}
 	if err != nil {
 		glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
@@ -142,7 +142,7 @@ type ListMultipartUploadsResult struct {
 	s3.ListMultipartUploadsOutput
 }
 
-func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
 
 	output = &ListMultipartUploadsResult{
 		ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{
@@ -155,7 +155,7 @@ func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.List
 		},
 	}
 
-	entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
+	entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
 	if err != nil {
 		glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
 		return
@@ -179,7 +179,7 @@ type ListPartsResult struct {
 	s3.ListPartsOutput
 }
 
-func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
 	output = &ListPartsResult{
 		ListPartsOutput: s3.ListPartsOutput{
 			Bucket: input.Bucket,
@@ -190,8 +190,7 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts
 		},
 	}
 
-	entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
-		"", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
+	entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
 	if err != nil {
 		glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
 		return nil, ErrNoSuchUpload

View file

@@ -12,7 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
-func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
+func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
 
 	return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		entry := &filer_pb.Entry{
@@ -46,7 +46,7 @@ func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, d
 	})
 }
 
-func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
+func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
 
 	return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		entry := &filer_pb.Entry{
@@ -77,7 +77,7 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, 
 	})
 }
 
-func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {
+func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {
 
 	err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -90,7 +90,7 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s
 		}
 
 		glog.V(4).Infof("read directory: %v", request)
-		stream, err := client.ListEntries(ctx, request)
+		stream, err := client.ListEntries(context.Background(), request)
 		if err != nil {
 			glog.V(0).Infof("read directory %v: %v", request, err)
 			return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err)
@@ -117,7 +117,7 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s
 }
 
-func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error {
+func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDirectory, isDeleteData, isRecursive bool) error {
 
 	return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -129,7 +129,7 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr
 		}
 
 		glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
-		if _, err := client.DeleteEntry(ctx, request); err != nil {
+		if _, err := client.DeleteEntry(context.Background(), request); err != nil {
 			glog.V(0).Infof("delete entry %v: %v", request, err)
 			return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
 		}
@@ -139,13 +139,11 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr
 }
 
-func (s3a *S3ApiServer) streamRemove(ctx context.Context, quiet bool,
-	fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool),
-	respFn func(err string)) error {
+func (s3a *S3ApiServer) streamRemove(quiet bool, fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool), respFn func(err string)) error {
 
 	return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
-		stream, err := client.StreamDeleteEntries(ctx)
+		stream, err := client.StreamDeleteEntries(context.Background())
 		if err != nil {
 			glog.V(0).Infof("stream delete entry: %v", err)
 			return fmt.Errorf("stream delete entry: %v", err)
@@ -194,7 +192,7 @@ func (s3a *S3ApiServer) streamRemove(ctx context.Context, quiet bool,
 }
 
-func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
+func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
 
 	err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -204,7 +202,7 @@ func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, 
 		}
 
 		glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
-		resp, err := client.LookupDirectoryEntry(ctx, request)
+		resp, err := client.LookupDirectoryEntry(context.Background(), request)
 		if err != nil {
 			glog.V(0).Infof("exists entry %v: %v", request, err)
 			return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)

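list() shows the standard way to consume a gRPC server stream: start it with a context, then call Recv until io.EOF. A self-contained sketch of that loop, with a small interface standing in for the generated filer_pb stream client:

package main

import (
    "fmt"
    "io"
)

// entryStream abstracts a generated gRPC server-stream client: Recv returns
// the next message, or io.EOF once the server has sent everything.
type entryStream interface {
    Recv() (name string, err error)
}

// drain collects every entry from the stream — the same loop shape as list().
func drain(s entryStream) ([]string, error) {
    var entries []string
    for {
        name, err := s.Recv()
        if err == io.EOF {
            return entries, nil
        }
        if err != nil {
            return nil, err
        }
        entries = append(entries, name)
    }
}

// fakeStream is a tiny in-memory stand-in so the sketch runs.
type fakeStream struct{ names []string }

func (f *fakeStream) Recv() (string, error) {
    if len(f.names) == 0 {
        return "", io.EOF
    }
    n := f.names[0]
    f.names = f.names[1:]
    return n, nil
}

func main() {
    entries, _ := drain(&fakeStream{names: []string{"a", "b"}})
    fmt.Println(entries)
}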
View file

@@ -32,7 +32,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
 
 	var response ListAllMyBucketsResult
 
-	entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32)
+	entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)
 
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
@@ -66,7 +66,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
 	bucket := vars["bucket"]
 
 	// create the folder for bucket, but lazily create actual collection
-	if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil {
+	if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
 		return
 	}
@@ -79,7 +79,6 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
-	ctx := context.Background()
 
 	err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		// delete collection
@@ -88,14 +87,14 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
 		}
 
 		glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
-		if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil {
+		if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil {
 			return fmt.Errorf("delete collection %s: %v", bucket, err)
 		}
 
 		return nil
 	})
 
-	err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true)
+	err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true)
 
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
@@ -110,8 +109,6 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
 
-	ctx := context.Background()
-
 	err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.LookupDirectoryEntryRequest{
@@ -120,7 +117,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
 		}
 
 		glog.V(1).Infof("lookup bucket: %v", request)
-		if resp, err := client.LookupDirectoryEntry(ctx, request); err != nil || resp.Entry == nil {
+		if resp, err := client.LookupDirectoryEntry(context.Background(), request); err != nil || resp.Entry == nil {
 			return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
 		}

View file

@@ -1,7 +1,6 @@
 package s3api
 
 import (
-	"context"
 	"crypto/md5"
 	"encoding/json"
 	"encoding/xml"
@@ -170,7 +169,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
 	var deletedObjects []ObjectIdentifier
 	var deleteErrors []DeleteError
 
-	s3a.streamRemove(context.Background(), deleteObjects.Quiet, func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool) {
+	s3a.streamRemove(deleteObjects.Quiet, func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool) {
 		if index >= len(deleteObjects.Objects) {
 			finished = true
 			return

View file

@@ -1,7 +1,6 @@
 package s3api
 
 import (
-	"context"
 	"fmt"
 	"net/http"
 	"net/url"
@@ -27,7 +26,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
 	bucket = vars["bucket"]
 	object = vars["object"]
 
-	response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{
+	response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{
 		Bucket: aws.String(bucket),
 		Key:    objectKey(aws.String(object)),
 	})
@@ -52,7 +51,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r 
 	// Get upload id.
 	uploadID, _, _, _ := getObjectResources(r.URL.Query())
 
-	response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{
+	response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{
 		Bucket:   aws.String(bucket),
 		Key:      objectKey(aws.String(object)),
 		UploadId: aws.String(uploadID),
@@ -78,7 +77,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
 	// Get upload id.
 	uploadID, _, _, _ := getObjectResources(r.URL.Query())
 
-	response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{
+	response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{
 		Bucket:   aws.String(bucket),
 		Key:      objectKey(aws.String(object)),
 		UploadId: aws.String(uploadID),
@@ -113,7 +112,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
 		}
 	}
 
-	response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{
+	response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{
 		Bucket:       aws.String(bucket),
 		Delimiter:    aws.String(delimiter),
 		EncodingType: aws.String(encodingType),
@@ -150,7 +149,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
 		return
 	}
 
-	response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{
+	response, errCode := s3a.listObjectParts(&s3.ListPartsInput{
 		Bucket:   aws.String(bucket),
 		Key:      objectKey(aws.String(object)),
 		MaxParts: aws.Int64(int64(maxParts)),
@@ -176,10 +175,8 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
 	rAuthType := getRequestAuthType(r)
 
-	ctx := context.Background()
-
 	uploadID := r.URL.Query().Get("uploadId")
-	exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true)
+	exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
 	if !exists {
 		writeErrorResponse(w, ErrNoSuchUpload, r.URL)
 		return

View file

@@ -43,9 +43,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 		marker = startAfter
 	}
 
-	ctx := context.Background()
-
-	response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
+	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
 
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
@@ -63,8 +61,6 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
 
-	ctx := context.Background()
-
 	originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
 
 	if maxKeys < 0 {
@@ -76,7 +72,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)
+	response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
 
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
@@ -86,7 +82,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 	writeSuccessResponseXML(w, encodeResponse(response))
 }
 
-func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {
+func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {
 
 	// convert full path prefix into directory name and prefix for entry name
 	dir, prefix := filepath.Split(originalPrefix)
@@ -105,7 +101,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr
 			InclusiveStartFrom: false,
 		}
 
-		stream, err := client.ListEntries(ctx, request)
+		stream, err := client.ListEntries(context.Background(), request)
 		if err != nil {
 			return fmt.Errorf("list buckets: %v", err)
 		}

View file

@@ -232,9 +232,8 @@ func (fs *FilerServer) StreamDeleteEntries(stream filer_pb.SeaweedFiler_StreamDe
 		if err != nil {
 			return fmt.Errorf("receive delete entry request: %v", err)
 		}
-		ctx := context.Background()
 
 		fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))
-		err = fs.filer.DeleteEntryMetaAndData(ctx, fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData)
+		err = fs.filer.DeleteEntryMetaAndData(context.Background(), fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData)
 		resp := &filer_pb.DeleteEntryResponse{}
 		if err != nil {
 			resp.Error = err.Error()

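This is the server half of a streaming RPC: StreamDeleteEntries loops on Recv, performs each delete with a fresh context.Background(), and sends one response per request. A generic sketch of that handler shape; the interface and types stand in for the generated bidirectional stream, they are not SeaweedFS's:

package main

import (
    "fmt"
    "io"
)

// deleteStream abstracts the generated bidirectional stream: Recv yields the
// next request (io.EOF once the client closes), Send reports one result back.
type deleteStream interface {
    Recv() (path string, err error)
    Send(errMsg string) error
}

// serveDeletes mirrors the handler's loop: receive, handle, respond, repeat.
func serveDeletes(stream deleteStream, handle func(path string) error) error {
    for {
        path, err := stream.Recv()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return fmt.Errorf("receive delete entry request: %v", err)
        }
        var msg string // empty means success, like DeleteEntryResponse.Error
        if err := handle(path); err != nil {
            msg = err.Error()
        }
        if err := stream.Send(msg); err != nil {
            return err
        }
    }
}

// memStream is a tiny in-memory stand-in so the sketch runs.
type memStream struct {
    in  []string
    out []string
}

func (m *memStream) Recv() (string, error) {
    if len(m.in) == 0 {
        return "", io.EOF
    }
    p := m.in[0]
    m.in = m.in[1:]
    return p, nil
}

func (m *memStream) Send(msg string) error {
    m.out = append(m.out, msg)
    return nil
}

func main() {
    s := &memStream{in: []string{"/a", "/b"}}
    _ = serveDeletes(s, func(path string) error { return nil })
    fmt.Println(len(s.out), "responses")
}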
View file

@@ -42,7 +42,7 @@ func (vs *VolumeServer) heartbeat() {
 				continue
 			}
 			vs.store.MasterAddress = master
-			newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
+			newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
 			if err != nil {
 				glog.V(0).Infof("heartbeat error: %v", err)
 				time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
@@ -53,16 +53,16 @@ func (vs *VolumeServer) heartbeat() {
 	}
 }
 
-func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
+func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
 
-	grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption)
+	grpcConection, err := util.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption)
 	if err != nil {
 		return "", fmt.Errorf("fail to dial %s : %v", masterNode, err)
 	}
 	defer grpcConection.Close()
 
 	client := master_pb.NewSeaweedClient(grpcConection)
-	stream, err := client.SendHeartbeat(ctx)
+	stream, err := client.SendHeartbeat(context.Background())
 	if err != nil {
 		glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err)
 		return "", err

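doHeartbeat now builds its own context at the dial site; util.GrpcDial is SeaweedFS's wrapper, but the underlying shape is plain grpc.DialContext. A hedged sketch of that shape — the address is a placeholder, and grpc.WithInsecure matches the gRPC API of this commit's era:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc"
)

// dialMaster mirrors doHeartbeat's shape after the change: the context is
// created at the dial site rather than threaded in from the caller.
func dialMaster(masterGrpcAddress string) (*grpc.ClientConn, error) {
    conn, err := grpc.DialContext(context.Background(), masterGrpcAddress, grpc.WithInsecure())
    if err != nil {
        return nil, fmt.Errorf("fail to dial %s : %v", masterGrpcAddress, err)
    }
    return conn, nil
}

func main() {
    conn, err := dialMaster("localhost:19333") // placeholder address
    if err != nil {
        fmt.Println(err)
        return
    }
    defer conn.Close()
    fmt.Println("dialed", conn.Target())
}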
View file

@@ -55,15 +55,15 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 		// println("source:", volFileInfoResp.String())
 
 		// copy ecx file
-		if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
+		if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
 			return err
 		}
 
-		if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
+		if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
 			return err
 		}
 
-		if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
+		if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
 			return err
 		}
 
@@ -95,10 +95,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 	}, err
 }
 
-func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32,
-	compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool, ignoreSourceFileNotFound bool) error {
+func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error {
 
-	copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
+	copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
 		VolumeId:           vid,
 		Ext:                ext,
 		CompactionRevision: compactRevision,

View file

@@ -110,7 +110,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
 
 		// copy ec data slices
 		for _, shardId := range req.ShardIds {
-			if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
+			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
 				return err
 			}
 		}
@@ -118,7 +118,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
 		if req.CopyEcxFile {
 
 			// copy ecx file
-			if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil {
+			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil {
 				return err
 			}
 			return nil
@@ -126,14 +126,14 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
 
 		if req.CopyEcjFile {
 			// copy ecj file
-			if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil {
+			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil {
 				return err
 			}
 		}
 
 		if req.CopyVifFile {
 			// copy vif file
-			if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil {
+			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil {
 				return err
 			}
 		}

View file

@@ -1,7 +1,6 @@
 package weed_server
 
 import (
-	"context"
 	"errors"
 	"fmt"
 	"net/http"
@@ -98,7 +97,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 
 	ecVolume, hasEcVolume := vs.store.FindEcVolume(volumeId)
 	if hasEcVolume {
-		count, err := vs.store.DeleteEcShardNeedle(context.Background(), ecVolume, n, cookie)
+		count, err := vs.store.DeleteEcShardNeedle(ecVolume, n, cookie)
 		writeDeleteResult(err, count, w, r)
 		return
 	}

View file

@@ -1,7 +1,6 @@
 package shell
 
 import (
-	"context"
 	"flag"
 	"fmt"
 	"io"
@@ -160,11 +159,9 @@ func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack {
 
 func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error {
 
-	ctx := context.Background()
-
 	fmt.Printf("balanceEcVolumes %s\n", collection)
 
-	if err := deleteDuplicatedEcShards(ctx, commandEnv, allEcNodes, collection, applyBalancing); err != nil {
+	if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil {
 		return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err)
 	}
@@ -179,7 +176,7 @@ func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*E
 	return nil
 }
 
-func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error {
+func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error {
 	// vid => []ecNode
 	vidLocations := collectVolumeIdToEcNodes(allEcNodes)
 	// deduplicate ec shards

View file

@@ -107,7 +107,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) 
 	}
 
 	// balance the ec shards to current cluster
-	err = spreadEcShards(context.Background(), commandEnv, vid, collection, locations)
+	err = spreadEcShards(commandEnv, vid, collection, locations)
 	if err != nil {
 		return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err)
 	}
@@ -149,7 +149,7 @@ func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, 
 }
 
-func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
+func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
 
 	allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "")
 	if err != nil {

View file

@@ -1,7 +1,6 @@
 package shell
 
 import (
-	"context"
 	"fmt"
 	"io"
@@ -43,11 +42,9 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i
 	v := util.GetViper()
 	notification.LoadConfiguration(v, "notification.")
 
-	ctx := context.Background()
-
 	var dirCount, fileCount uint64
 
-	err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
+	err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
 
 		if entry.IsDirectory {
 			dirCount++

View file

@@ -1,7 +1,6 @@
 package shell
 
 import (
-	"context"
 	"flag"
 	"fmt"
 	"io"
@@ -59,8 +58,6 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 		return parseErr
 	}
 
-	ctx := context.Background()
-
 	t := time.Now()
 
 	fileName := *outputFileName
 	if fileName == "" {
@@ -89,7 +86,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 
 	var dirCount, fileCount uint64
 
-	err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
+	err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) {
 
 		protoMessage := &filer_pb.FullEntry{
 			Dir:   string(parentPath),
@@ -128,8 +125,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 	return err
 }
 
-func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
-	parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
+func doTraverseBFS(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
 
 	K := 5
@@ -151,7 +147,7 @@ func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.Fil
 			continue
 		}
 		dir := t.(filer2.FullPath)
-		processErr := processOneDirectory(ctx, writer, filerClient, dir, queue, &jobQueueWg, fn)
+		processErr := processOneDirectory(writer, filerClient, dir, queue, &jobQueueWg, fn)
 		if processErr != nil {
 			err = processErr
 		}
@@ -164,9 +160,7 @@ func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.Fil
 	return
 }
 
-func processOneDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient,
-	parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup,
-	fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
+func processOneDirectory(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) {
 
 	return filer2.ReadDirAllEntries(filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) {

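doTraverseBFS drives a fixed pool of workers over a queue of directories, with a WaitGroup counting directories that are queued but not yet processed. A self-contained sketch of that scheme with K = 5 as in the diff; the bounded channel and in-memory tree are simplifications (the real code uses util.Queue):

package main

import (
    "fmt"
    "sync"
)

// traverseBFS visits every directory reachable from root using K workers
// pulling from a shared queue; jobQueueWg counts directories not yet processed.
func traverseBFS(root string, children func(dir string) []string, visit func(dir string)) {
    const K = 5
    queue := make(chan string, 1024) // sketch: bounded buffer instead of an unbounded queue
    var jobQueueWg sync.WaitGroup

    jobQueueWg.Add(1)
    queue <- root

    var workers sync.WaitGroup
    for i := 0; i < K; i++ {
        workers.Add(1)
        go func() {
            defer workers.Done()
            for dir := range queue {
                visit(dir)
                for _, child := range children(dir) {
                    jobQueueWg.Add(1) // count the child before enqueuing it
                    queue <- child
                }
                jobQueueWg.Done() // this directory is fully processed
            }
        }()
    }

    jobQueueWg.Wait() // every queued directory has been processed
    close(queue)      // release the workers
    workers.Wait()
}

func main() {
    tree := map[string][]string{"/": {"/a", "/b"}, "/a": {"/a/c"}}
    traverseBFS("/", func(d string) []string { return tree[d] }, func(d string) {
        fmt.Println("visit", d)
    })
}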
View file

@@ -1,7 +1,6 @@
 package shell
 
 import (
-	"context"
 	"fmt"
 	"io"
 	"strings"
@@ -37,9 +36,7 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ
 
 	dir, name := filer2.FullPath(path).DirAndName()
 
-	ctx := context.Background()
-
-	dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1)
+	dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1)
 
 	if terr == nil {
 		fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount)
@@ -49,7 +46,7 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ
 }
 
-func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
+func treeTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) {
 
 	prefix.addMarker(level)
@@ -65,7 +62,7 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient fi
 		if entry.IsDirectory {
 			directoryCount++
 			subDir := dir.Child(entry.Name)
-			dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1)
+			dirCount, fCount, terr := treeTraverseDirectory(writer, filerClient, subDir, "", prefix, level+1)
 			directoryCount += dirCount
 			fileCount += fCount
 			err = terr

View file

@@ -12,7 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 )
 
-func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) {
+func (s *Store) DeleteEcShardNeedle(ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) {
 
 	count, err := s.ReadEcShardNeedle(ecVolume.VolumeId, n)