mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

commit 55bab1b456 (parent cece860bfd)

    add context.Context
@@ -106,14 +106,14 @@ func runCopy(cmd *Command, args []string) bool {
 	copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
 
 	for _, fileOrDir := range fileOrDirs {
-		if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) {
+		if !doEachCopy(context.Background(), fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) {
 			return false
 		}
 	}
 	return true
 }
 
-func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, path string) bool {
+func doEachCopy(ctx context.Context, fileOrDir string, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, path string) bool {
 	f, err := os.Open(fileOrDir)
 	if err != nil {
 		fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err)
@@ -131,7 +131,7 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDia
 	if mode.IsDir() {
 		files, _ := ioutil.ReadDir(fileOrDir)
 		for _, subFileOrDir := range files {
-			if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, grpcDialOption, path+fi.Name()+"/") {
+			if !doEachCopy(ctx, fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, grpcDialOption, path+fi.Name()+"/") {
 				return false
 			}
 		}
@@ -153,13 +153,13 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDia
 	}
 
 	if chunkCount == 1 {
-		return uploadFileAsOne(filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi)
+		return uploadFileAsOne(ctx, filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi)
 	}
 
-	return uploadFileInChunks(filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi, chunkCount, chunkSize)
+	return uploadFileInChunks(ctx, filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi, chunkCount, chunkSize)
 }
 
-func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo) bool {
+func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo) bool {
 
 	// upload the file content
 	fileName := filepath.Base(f.Name())
@@ -204,7 +204,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc.
 		fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName)
 	}
 
-	if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	if err := withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: urlFolder,
 			Entry: &filer_pb.Entry{
@@ -225,7 +225,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc.
 			},
 		}
 
-		if _, err := client.CreateEntry(context.Background(), request); err != nil {
+		if _, err := client.CreateEntry(ctx, request); err != nil {
 			return fmt.Errorf("update fh: %v", err)
 		}
 		return nil
@@ -237,7 +237,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc.
 	return true
 }
 
-func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool {
+func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool {
 
 	fileName := filepath.Base(f.Name())
 	mimeType := detectMimeType(f)
@@ -281,7 +281,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption gr
 		fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
 	}
 
-	if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	if err := withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: urlFolder,
 			Entry: &filer_pb.Entry{
@@ -302,7 +302,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption gr
 			},
 		}
 
-		if _, err := client.CreateEntry(context.Background(), request); err != nil {
+		if _, err := client.CreateEntry(ctx, request); err != nil {
 			return fmt.Errorf("update fh: %v", err)
 		}
 		return nil
@@ -332,9 +332,9 @@ func detectMimeType(f *os.File) string {
 	return mimeType
 }
 
-func withFilerClient(filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
+func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
 
-	grpcConnection, err := util.GrpcDial(filerAddress, grpcDialOption)
+	grpcConnection, err := util.GrpcDial(ctx, filerAddress, grpcDialOption)
 	if err != nil {
 		return fmt.Errorf("fail to dial %s: %v", filerAddress, err)
 	}
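The hunks above show the shape of the whole change: the command entry point mints a single context.Background() and every helper between it and the gRPC stubs grows a ctx parameter. A minimal, self-contained sketch of that pattern follows; the names copyOneFile and dialAndDo are hypothetical stand-ins, not SeaweedFS APIs.

package main

import (
	"context"
	"fmt"
	"os"
)

func main() {
	ctx := context.Background() // root context, created once at the top

	for _, path := range os.Args[1:] {
		if err := copyOneFile(ctx, path); err != nil {
			fmt.Fprintf(os.Stderr, "copy %s: %v\n", path, err)
			os.Exit(1)
		}
	}
}

// copyOneFile receives ctx as its first parameter, mirroring how
// doEachCopy, uploadFileAsOne, and uploadFileInChunks gain a ctx
// argument in the diff above.
func copyOneFile(ctx context.Context, path string) error {
	// ... open the file, then hand ctx to the RPC layer ...
	return dialAndDo(ctx, func() error { return nil })
}

func dialAndDo(ctx context.Context, fn func() error) error {
	// A real implementation would pass ctx to grpc.DialContext and to
	// each stub call, so cancellation reaches in-flight RPCs.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return fn()
	}
}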
@@ -1,6 +1,7 @@
 package command
 
 import (
+	"context"
 	"strings"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -116,7 +117,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 			} else {
 				glog.V(1).Infof("modify: %s", key)
 			}
-			if err = replicator.Replicate(key, m); err != nil {
+			if err = replicator.Replicate(context.Background(), key, m); err != nil {
 				glog.Errorf("replicate %s: %+v", key, err)
 			} else {
 				glog.V(1).Infof("replicated %s", key)
@@ -50,6 +50,8 @@ func TestCreateFileAndList(t *testing.T) {
 	filer.SetStore(store)
 	filer.DisableDirectoryCache()
 
+	ctx := context.Background()
+
 	entry1 := &filer2.Entry{
 		FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"),
 		Attr: filer2.Attr{
@@ -68,11 +70,11 @@ func TestCreateFileAndList(t *testing.T) {
 		},
 	}
 
-	filer.CreateEntry(entry1)
-	filer.CreateEntry(entry2)
+	filer.CreateEntry(ctx, entry1)
+	filer.CreateEntry(ctx, entry2)
 
 	// checking the 2 files
-	entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100)
+	entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "", false, 100)
 
 	if err != nil {
 		t.Errorf("list entries: %v", err)
@@ -95,21 +97,21 @@ func TestCreateFileAndList(t *testing.T) {
 	}
 
 	// checking the offset
-	entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100)
+	entries, err = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100)
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking one upper directory
-	entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
+	entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100)
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking root directory
-	entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100)
+	entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -125,18 +127,18 @@ func TestCreateFileAndList(t *testing.T) {
 			Gid: 5678,
 		},
 	}
-	filer.CreateEntry(entry3)
+	filer.CreateEntry(ctx, entry3)
 
 	// checking one upper directory
-	entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
+	entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100)
 	if len(entries) != 2 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// delete file and count
-	filer.DeleteEntryMetaAndData(context.Background(), file3Path, false, false)
-	entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100)
+	filer.DeleteEntryMetaAndData(ctx, file3Path, false, false)
+	entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100)
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -29,7 +29,7 @@ var _ = fs.NodeRemover(&Dir{})
 var _ = fs.NodeRenamer(&Dir{})
 var _ = fs.NodeSetattrer(&Dir{})
 
-func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
+func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
 
 	// https://github.com/bazil/fuse/issues/196
 	attr.Valid = time.Second
@@ -56,7 +56,7 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
 
 	parent, name := filepath.Split(dir.Path)
 
-	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.LookupDirectoryEntryRequest{
 			Directory: parent,
@@ -64,7 +64,7 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
 		}
 
 		glog.V(1).Infof("read dir %s attr: %v", dir.Path, request)
-		resp, err := client.LookupDirectoryEntry(context, request)
+		resp, err := client.LookupDirectoryEntry(ctx, request)
 		if err != nil {
 			if err == filer2.ErrNotFound {
 				return nil
@@ -132,7 +132,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
 	glog.V(1).Infof("create: %v", request)
 
 	if request.Entry.IsDirectory {
-		if err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+		if err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 			if _, err := client.CreateEntry(ctx, request); err != nil {
 				glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err)
 				return fuse.EIO
@@ -155,7 +155,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
 
 func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
 
-	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.CreateEntryRequest{
 			Directory: dir.Path,
@@ -199,7 +199,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
 	}
 
 	if entry == nil {
-		err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+		err = dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 			request := &filer_pb.LookupDirectoryEntryRequest{
 				Directory: dir.Path,
@@ -243,7 +243,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
 
 func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 
-	err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err = dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		paginationLimit := 1024
 		remaining := dir.wfs.option.DirListingLimit
@@ -306,7 +306,7 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
 func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error {
 
 	var entry *filer_pb.Entry
-	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.LookupDirectoryEntryRequest{
 			Directory: dir.Path,
@@ -329,9 +329,9 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro
 		return err
 	}
 
-	dir.wfs.deleteFileChunks(entry.Chunks)
+	dir.wfs.deleteFileChunks(ctx, entry.Chunks)
 
-	return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.DeleteEntryRequest{
 			Directory: dir.Path,
@@ -355,7 +355,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro
 
 func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error {
 
-	return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.DeleteEntryRequest{
 			Directory: dir.Path,
@@ -401,7 +401,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
 	}
 
 	parentDir, name := filer2.FullPath(dir.Path).DirAndName()
-	return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.UpdateEntryRequest{
 			Directory: parentDir,
@@ -35,7 +35,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
 		},
 	}
 
-	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 		if _, err := client.CreateEntry(ctx, request); err != nil {
 			glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err)
 			return fuse.EIO
@@ -15,7 +15,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
 
 	newDir := newDirectory.(*Dir)
 
-	return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		// find existing entry
 		request := &filer_pb.LookupDirectoryEntryRequest{
@@ -167,7 +167,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte
 	var fileId, host string
 	var auth security.EncodedJwt
 
-	if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	if err := pages.f.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.AssignVolumeRequest{
 			Count: 1,
@@ -109,7 +109,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
 		return nil
 	}
 
-	return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return file.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.UpdateEntryRequest{
 			Directory: file.dir.Path,
@@ -144,7 +144,7 @@ func (file *File) maybeLoadAttributes(ctx context.Context) error {
 		file.setEntry(entry)
 		// glog.V(1).Infof("file attr read cached %v attributes", file.Name)
 	} else {
-		err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+		err := file.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {

 			request := &filer_pb.LookupDirectoryEntryRequest{
 				Name: file.Name,
@@ -73,7 +73,7 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
 
 	vid2Locations := make(map[string]*filer_pb.Locations)
 
-	err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err := fh.f.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
 		resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
@@ -197,7 +197,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
 		return nil
 	}
 
-	return fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return fh.f.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		if fh.f.entry.Attributes != nil {
 			fh.f.entry.Attributes.Mime = fh.contentType
@@ -221,7 +221,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
 		chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks)
 		fh.f.entry.Chunks = chunks
 		// fh.f.entryViewCache = nil
-		fh.f.wfs.deleteFileChunks(garbages)
+		fh.f.wfs.deleteFileChunks(ctx, garbages)
 
 		if _, err := client.CreateEntry(ctx, request); err != nil {
 			return fmt.Errorf("update fh: %v", err)
@@ -73,9 +73,9 @@ func (wfs *WFS) Root() (fs.Node, error) {
 	return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil
 }
 
-func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+func (wfs *WFS) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
 
-	return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+	return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
 		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
 		return fn(client)
 	}, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
@@ -133,7 +133,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
 
 	if wfs.stats.lastChecked < time.Now().Unix()-20 {
 
-		err := wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+		err := wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 			request := &filer_pb.StatisticsRequest{
 				Collection: wfs.option.Collection,
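Here the ctx now flows into util.WithCachedGrpcClient, so the FUSE request's context reaches the connection layer. A sketch of what a context-aware cached-connection helper can look like; this is an assumption about its shape, not the SeaweedFS implementation, and only the ctx-first convention and argument order are taken from the diff above.

package util

import (
	"context"
	"sync"

	"google.golang.org/grpc"
)

var (
	cacheMu   sync.Mutex
	connCache = make(map[string]*grpc.ClientConn)
)

func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
	cacheMu.Lock()
	conn, ok := connCache[address]
	if !ok {
		var err error
		// grpc.DialContext honors ctx while establishing the connection,
		// which is what the added ctx parameter buys at this layer.
		conn, err = grpc.DialContext(ctx, address, opts...)
		if err != nil {
			cacheMu.Unlock()
			return err
		}
		connCache[address] = conn
	}
	cacheMu.Unlock()

	return fn(conn)
}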
@@ -5,7 +5,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
-func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
+func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) {
 	if len(chunks) == 0 {
 		return
 	}
@@ -15,8 +15,8 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
 		fileIds = append(fileIds, chunk.FileId)
 	}
 
-	wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-		deleteFileIds(context.Background(), wfs.option.GrpcDialOption, client, fileIds)
+	wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+		deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds)
 		return nil
 	})
 }
@@ -1,6 +1,7 @@
 package operation
 
 import (
+	"context"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -13,12 +14,14 @@ import (
 
 func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error {
 
+	ctx := context.Background()
+
 	grpcAddress, err := toVolumeServerGrpcAddress(volumeServer)
 	if err != nil {
 		return err
 	}
 
-	return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+	return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
 		client := volume_server_pb.NewVolumeServerClient(grpcConnection)
 		return fn(client)
 	}, grpcAddress, grpcDialOption)
@@ -37,12 +40,14 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
 
 func withMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error {
 
+	ctx := context.Background()
+
 	masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0)
 	if parseErr != nil {
 		return fmt.Errorf("failed to parse master grpc %v", masterServer)
 	}
 
-	return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
+	return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
 		client := master_pb.NewSeaweedClient(grpcConnection)
 		return fn(client)
 	}, masterGrpcAddress, grpcDialOption)
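These two wrappers take the transitional form this kind of refactor often uses: the exported signature is unchanged, a context.Background() is minted locally, and only the layers below accept ctx so far; a later pass can lift ctx into the signature itself. A reduced sketch of that intermediate shape, with hypothetical names:

package main

import (
	"context"
	"fmt"
)

func withServerClient(server string, fn func() error) error {
	// Transitional: callers do not supply a ctx yet, so start from a
	// root context here instead of plumbing nil downward. A later pass
	// can lift ctx into this signature too.
	ctx := context.Background()
	return withCachedClient(ctx, server, fn)
}

func withCachedClient(ctx context.Context, server string, fn func() error) error {
	// Stand-in for the dial-and-call layer that already takes ctx.
	if err := ctx.Err(); err != nil {
		return fmt.Errorf("dial %s: %w", server, err)
	}
	return fn()
}

func main() {
	_ = withServerClient("localhost:19333", func() error { return nil })
}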
@@ -1,6 +1,7 @@
 package replication
 
 import (
+	"context"
 	"path/filepath"
 	"strings"
 
@@ -29,7 +30,7 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin
 	}
 }
 
-func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) error {
+func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error {
 	if !strings.HasPrefix(key, r.source.Dir) {
 		glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
 		return nil
@@ -39,23 +40,23 @@ func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification)
 	key = newKey
 	if message.OldEntry != nil && message.NewEntry == nil {
 		glog.V(4).Infof("deleting %v", key)
-		return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks)
+		return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks)
 	}
 	if message.OldEntry == nil && message.NewEntry != nil {
 		glog.V(4).Infof("creating %v", key)
-		return r.sink.CreateEntry(key, message.NewEntry)
+		return r.sink.CreateEntry(ctx, key, message.NewEntry)
 	}
 	if message.OldEntry == nil && message.NewEntry == nil {
 		glog.V(0).Infof("weird message %+v", message)
 		return nil
 	}
 
-	foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewEntry, message.DeleteChunks)
+	foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewEntry, message.DeleteChunks)
 	if foundExisting {
 		glog.V(4).Infof("updated %v", key)
 		return err
 	}
 
 	glog.V(4).Infof("creating missing %v", key)
-	return r.sink.CreateEntry(key, message.NewEntry)
+	return r.sink.CreateEntry(ctx, key, message.NewEntry)
 }
@@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
 	return nil
 }
 
-func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
 
 	key = cleanKey(key)
 
@@ -78,8 +78,6 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo
 		key = key + "/"
 	}
 
-	ctx := context.Background()
-
 	if _, err := g.containerURL.NewBlobURL(key).Delete(ctx,
 		azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil {
 		return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err)
@@ -89,7 +87,7 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo
 
 }
 
-func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
 
 	key = cleanKey(key)
 
@@ -100,8 +98,6 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	totalSize := filer2.TotalSize(entry.Chunks)
 	chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
 
-	ctx := context.Background()
-
 	// Create a URL that references a to-be-created blob in your
 	// Azure Storage account's container.
 	appendBlobURL := g.containerURL.NewAppendBlobURL(key)
@@ -113,7 +109,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 
 	for _, chunk := range chunkViews {
 
-		fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
+		fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
 		if err != nil {
 			return err
 		}
@@ -136,7 +132,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 
 }
 
-func (g *AzureSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
 	key = cleanKey(key)
 	// TODO improve efficiency
 	return false, nil
@@ -58,7 +58,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error {
 	return nil
 }
 
-func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
 
 	key = cleanKey(key)
 
@@ -66,8 +66,6 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool)
 		key = key + "/"
 	}
 
-	ctx := context.Background()
-
 	bucket, err := g.client.Bucket(ctx, g.bucket)
 	if err != nil {
 		return err
@@ -79,7 +77,7 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool)
 
 }
 
-func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
 
 	key = cleanKey(key)
 
@@ -90,8 +88,6 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	totalSize := filer2.TotalSize(entry.Chunks)
 	chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
 
-	ctx := context.Background()
-
 	bucket, err := g.client.Bucket(ctx, g.bucket)
 	if err != nil {
 		return err
@@ -102,7 +98,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 
 	for _, chunk := range chunkViews {
 
-		fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
+		fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
 		if err != nil {
 			return err
 		}
@@ -128,7 +124,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 
 }
 
-func (g *B2Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
 
 	key = cleanKey(key)
 
@@ -13,7 +13,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
 	if len(sourceChunks) == 0 {
 		return
 	}
@@ -22,7 +22,7 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic
 		wg.Add(1)
 		go func(chunk *filer_pb.FileChunk) {
 			defer wg.Done()
-			replicatedChunk, e := fs.replicateOneChunk(chunk)
+			replicatedChunk, e := fs.replicateOneChunk(ctx, chunk)
 			if e != nil {
 				err = e
 			}
@@ -34,9 +34,9 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic
 	return
 }
 
-func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
 
-	fileId, err := fs.fetchAndWrite(sourceChunk)
+	fileId, err := fs.fetchAndWrite(ctx, sourceChunk)
 	if err != nil {
 		return nil, fmt.Errorf("copy %s: %v", sourceChunk.FileId, err)
 	}
@@ -51,9 +51,9 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_
 	}, nil
 }
 
-func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
 
-	filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.FileId)
+	filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.FileId)
 	if err != nil {
 		return "", fmt.Errorf("read part %s: %v", sourceChunk.FileId, err)
 	}
@@ -62,7 +62,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
 	var host string
 	var auth security.EncodedJwt
 
-	if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.AssignVolumeRequest{
 			Count: 1,
@@ -72,7 +72,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
 			DataCenter: fs.dataCenter,
 		}
 
-		resp, err := client.AssignVolume(context.Background(), request)
+		resp, err := client.AssignVolume(ctx, request)
 		if err != nil {
 			glog.V(0).Infof("assign volume failure %v: %v", request, err)
 			return err
@@ -103,9 +103,9 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
 	return
 }
 
-func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
 
-	grpcConnection, err := util.GrpcDial(fs.grpcAddress, fs.grpcDialOption)
+	grpcConnection, err := util.GrpcDial(ctx, fs.grpcAddress, fs.grpcDialOption)
 	if err != nil {
 		return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
 	}
@@ -63,8 +63,8 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string,
 	return nil
 }
 
-func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
-	return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+	return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		dir, name := filer2.FullPath(key).DirAndName()
 
@@ -75,7 +75,7 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo
 		}
 
 		glog.V(1).Infof("delete entry: %v", request)
-		_, err := client.DeleteEntry(context.Background(), request)
+		_, err := client.DeleteEntry(ctx, request)
 		if err != nil {
 			glog.V(0).Infof("delete entry %s: %v", key, err)
 			return fmt.Errorf("delete entry %s: %v", key, err)
@@ -85,12 +85,11 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo
 	})
 }
 
-func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
 
-	return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		dir, name := filer2.FullPath(key).DirAndName()
-		ctx := context.Background()
 
 		// look up existing entry
 		lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
@@ -105,7 +104,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 			}
 		}
 
-		replicatedChunks, err := fs.replicateChunks(entry.Chunks)
+		replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks)
 
 		if err != nil {
 			glog.V(0).Infof("replicate entry chunks %s: %v", key, err)
@@ -134,15 +133,13 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	})
 }
 
-func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
-
-	ctx := context.Background()
+func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
 
 	dir, name := filer2.FullPath(key).DirAndName()
 
 	// read existing entry
 	var existingEntry *filer_pb.Entry
-	err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.LookupDirectoryEntryRequest{
 			Directory: dir,
@@ -186,7 +183,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
 	}
 
 	// replicate the chunks that are new in the source
-	replicatedChunks, err := fs.replicateChunks(newChunks)
+	replicatedChunks, err := fs.replicateChunks(ctx, newChunks)
 	if err != nil {
 		return true, fmt.Errorf("replicte %s chunks error: %v", key, err)
 	}
@@ -194,7 +191,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry,
 	}
 
 	// save updated meta data
-	return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.UpdateEntryRequest{
 			Directory: dir,
@@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
 	return nil
 }
 
-func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
 
 	if isDirectory {
 		key = key + "/"
 	}
 
-	if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil {
+	if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil {
 		return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err)
 	}
 
@@ -83,7 +83,7 @@ func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool)
 
 }
 
-func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
 
 	if entry.IsDirectory {
 		return nil
@@ -92,13 +92,11 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 	totalSize := filer2.TotalSize(entry.Chunks)
 	chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
 
-	ctx := context.Background()
-
 	wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx)
 
 	for _, chunk := range chunkViews {
 
-		fileUrl, err := g.filerSource.LookupFileId(chunk.FileId)
+		fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId)
 		if err != nil {
 			return err
 		}
@@ -121,7 +119,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
 
 }
 
-func (g *GcsSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
 	// TODO improve efficiency
 	return false, nil
 }
@@ -1,6 +1,7 @@
 package sink
 
 import (
+	"context"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/source"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -9,9 +10,9 @@ import (
 type ReplicationSink interface {
 	GetName() string
 	Initialize(configuration util.Configuration) error
-	DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error
-	CreateEntry(key string, entry *filer_pb.Entry) error
-	UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
+	DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
+	CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error
+	UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
 	GetSinkToDirectory() string
 	SetSourceFiler(s *source.FilerSource)
 }
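This interface change is what forces every sink (Azure, B2, GCS, S3, filer) to gain a ctx parameter at once: all implementations must match the new signatures, and the replicator can then cancel all sink I/O through a single context. A reduced sketch with the types trimmed so it stands alone; Entry stands in for *filer_pb.Entry, and nullSink is a hypothetical illustration, not part of the codebase.

package sink

import "context"

type Entry struct{ Name string }

type ReplicationSink interface {
	GetName() string
	DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error
	CreateEntry(ctx context.Context, key string, entry *Entry) error
	UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error)
}

// A no-op sink demonstrating that implementations simply accept and
// forward ctx; any blocking call inside should watch ctx.Done().
type nullSink struct{}

func (nullSink) GetName() string { return "null" }
func (nullSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
	return ctx.Err()
}
func (nullSink) CreateEntry(ctx context.Context, key string, entry *Entry) error { return ctx.Err() }
func (nullSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *Entry, deleteIncludeChunks bool) (bool, error) {
	return false, ctx.Err()
}

// Compile-time check that nullSink satisfies the interface.
var _ ReplicationSink = nullSink{}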
@@ -1,6 +1,7 @@
 package S3Sink
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"sync"
@@ -76,7 +77,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, buc
 	return nil
 }
 
-func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
+func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
 
 	key = cleanKey(key)
 
@@ -88,7 +89,7 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b
 
 }
 
-func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
+func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error {
 
 	key = cleanKey(key)
 
@@ -111,7 +112,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		wg.Add(1)
 		go func(chunk *filer2.ChunkView) {
 			defer wg.Done()
-			if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil {
+			if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil {
 				err = uploadErr
 			} else {
 				parts = append(parts, part)
@@ -125,11 +126,11 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
 		return err
 	}
 
-	return s3sink.completeMultipartUpload(key, uploadId, parts)
+	return s3sink.completeMultipartUpload(ctx, key, uploadId, parts)
 
 }
 
-func (s3sink *S3Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
+func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) {
	key = cleanKey(key)
 	// TODO improve efficiency
 	return false, nil
@@ -2,6 +2,7 @@ package S3Sink
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 
@@ -81,7 +82,7 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error {
 }
 
 // To complete multipart upload
-func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.CompletedPart) error {
+func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId string, parts []*s3.CompletedPart) error {
 	input := &s3.CompleteMultipartUploadInput{
 		Bucket: aws.String(s3sink.bucket),
 		Key:    aws.String(key),
@@ -102,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.
 }
 
 // To upload a part
-func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
+func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) {
 	var readSeeker io.ReadSeeker
 
-	readSeeker, err := s3sink.buildReadSeeker(chunk)
+	readSeeker, err := s3sink.buildReadSeeker(ctx, chunk)
 	if err != nil {
 		glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
 		return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
@@ -155,8 +156,8 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
 	return err
 }
 
-func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) {
-	fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId)
+func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) {
+	fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId)
 	if err != nil {
 		return nil, err
 	}
@@ -39,16 +39,16 @@ func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) {
 	return nil
 }
 
-func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
+func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) {
 
 	vid2Locations := make(map[string]*filer_pb.Locations)
 
 	vid := volumeId(part)
 
-	err = fs.withFilerClient(fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 
 		glog.V(4).Infof("read lookup volume id locations: %v", vid)
-		resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
+		resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
 			VolumeIds: []string{vid},
 		})
 		if err != nil {
@@ -77,9 +77,9 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
 	return
 }
 
-func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
+func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) {
 
-	fileUrl, err := fs.LookupFileId(part)
+	fileUrl, err := fs.LookupFileId(ctx, part)
 	if err != nil {
 		return "", nil, nil, err
 	}
@@ -89,9 +89,9 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade
 	return filename, header, readCloser, err
 }
 
-func (fs *FilerSource) withFilerClient(grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
+func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
 
-	grpcConnection, err := util.GrpcDial(fs.grpcAddress, grpcDialOption)
+	grpcConnection, err := util.GrpcDial(ctx, fs.grpcAddress, grpcDialOption)
 	if err != nil {
 		return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
 	}
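With ctx now reaching LookupFileId and ReadPart, a caller can bound any single lookup with a deadline and have the cancellation reach the underlying RPC. A runnable sketch of that payoff, with hypothetical names; the timings are illustrative only.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func lookupFileId(ctx context.Context, fileId string) (string, error) {
	// Stand-in for the gRPC LookupVolume call, which would receive the
	// same ctx and abort when it is cancelled.
	select {
	case <-time.After(50 * time.Millisecond):
		return "http://volume-server/" + fileId, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	if _, err := lookupFileId(ctx, "3,01637037d6"); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("lookup timed out, as expected under a 10ms deadline")
	}
}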
@@ -1,6 +1,7 @@
 package s3api
 
 import (
+	"context"
 	"encoding/xml"
 	"fmt"
 	"path/filepath"
@@ -21,11 +22,11 @@ type InitiateMultipartUploadResult struct {
 	s3.CreateMultipartUploadOutput
 }
 
-func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
 	uploadId, _ := uuid.NewV4()
 	uploadIdString := uploadId.String()
 
-	if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
+	if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
 		if entry.Extended == nil {
 			entry.Extended = make(map[string][]byte)
 		}
@@ -51,11 +52,11 @@ type CompleteMultipartUploadResult struct {
 	s3.CompleteMultipartUploadOutput
 }
 
-func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
+func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
 
 	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
 
-	entries, err := s3a.list(uploadDirectory, "", "", false, 0)
+	entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0)
 	if err != nil {
 		glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
 		return nil, ErrNoSuchUpload
@@ -90,7 +91,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 	}
 	dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)
 
-	err = s3a.mkFile(dirName, entryName, finalParts)
+	err = s3a.mkFile(ctx, dirName, entryName, finalParts)
 
 	if err != nil {
 		glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
@@ -105,22 +106,22 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 		},
 	}
 
-	if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
+	if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil {
 		glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
 	}
 
 	return
 }
 
-func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
+func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
 
-	exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
+	exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
 	if err != nil {
 		glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
 		return nil, ErrNoSuchUpload
 	}
 	if exists {
-		err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
+		err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
 	}
 	if err != nil {
 		glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
@@ -135,7 +136,7 @@ type ListMultipartUploadsResult struct {
 	s3.ListMultipartUploadsOutput
 }
 
-func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
 
 	output = &ListMultipartUploadsResult{
 		ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{
@@ -148,7 +149,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
 		},
 	}
 
-	entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
+	entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
 	if err != nil {
 		glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
 		return
@@ -172,7 +173,7 @@ type ListPartsResult struct {
 	s3.ListPartsOutput
 }
 
-func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
+func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
 	output = &ListPartsResult{
 		ListPartsOutput: s3.ListPartsOutput{
 			Bucket: input.Bucket,
@@ -183,7 +184,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
 		},
 	}
 
-	entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
+	entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
 		"", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
 	if err != nil {
 		glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
@@ -10,8 +10,8 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
-func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
-	return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error {
+	return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		entry := &filer_pb.Entry{
 			Name: dirName,
@@ -35,7 +35,7 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun
 		}
 
 		glog.V(1).Infof("mkdir: %v", request)
-		if _, err := client.CreateEntry(context.Background(), request); err != nil {
+		if _, err := client.CreateEntry(ctx, request); err != nil {
 			return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
 		}
 
@@ -43,8 +43,8 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun
 	})
 }
 
-func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
-	return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
+	return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		entry := &filer_pb.Entry{
 			Name: fileName,
@@ -65,7 +65,7 @@ func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chun
 		}
 
 		glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
-		if _, err := client.CreateEntry(context.Background(), request); err != nil {
+		if _, err := client.CreateEntry(ctx, request); err != nil {
 			return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
 		}
 
@@ -73,9 +73,9 @@ func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chun
 	})
 }
 
-func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {
+func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {
 
-	err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.ListEntriesRequest{
 			Directory: parentDirectoryPath,
@@ -86,7 +86,7 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl
 		}
 
 		glog.V(4).Infof("read directory: %v", request)
-		resp, err := client.ListEntries(context.Background(), request)
+		resp, err := client.ListEntries(ctx, request)
 		if err != nil {
 			return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err)
 		}
@@ -100,11 +100,9 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl
 
 }
 
-func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error {
+func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error {
 
-	return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
-		ctx := context.Background()
+	return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.DeleteEntryRequest{
 			Directory: parentDirectoryPath,
@@ -123,11 +121,9 @@ func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirec
 
 }
 
-func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
+func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) {
 
-	err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-
-		ctx := context.Background()
+	err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.LookupDirectoryEntryRequest{
 			Directory: parentDirectoryPath,
@ -31,7 +31,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques

var response ListAllMyBucketsResult

entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)
entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32)

if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)

@ -65,7 +65,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
bucket := vars["bucket"]

// create the folder for bucket, but lazily create actual collection
if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil {
if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
return
}

@ -78,9 +78,8 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
vars := mux.Vars(r)
bucket := vars["bucket"]

err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

ctx := context.Background()
ctx := context.Background()
err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {

// delete collection
deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{

@ -95,7 +94,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
return nil
})

err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true)
err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true)

if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)

@ -110,7 +109,9 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
vars := mux.Vars(r)
bucket := vars["bucket"]

err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
ctx := context.Background()

err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {

request := &filer_pb.LookupDirectoryEntryRequest{
Directory: s3a.option.BucketsPath,

@ -118,7 +119,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
}

glog.V(1).Infof("lookup bucket: %v", request)
if _, err := client.LookupDirectoryEntry(context.Background(), request); err != nil {
if _, err := client.LookupDirectoryEntry(ctx, request); err != nil {
return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err)
}
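
In the bucket handlers the context now originates at the request boundary: each handler creates one root context (or passes context.Background() inline) and reuses it for the collection delete, the filer RPC, and the directory removal, so all of them could later share a single deadline. A hedged sketch of that handler shape, where doDelete is a hypothetical stand-in for a helper like s3a.rm; note that http.Request.Context() would be the cancellation-aware alternative, though this commit starts from context.Background().

    package main

    import (
    	"context"
    	"net/http"
    )

    // doDelete is a hypothetical stand-in for a context-aware helper like s3a.rm.
    func doDelete(ctx context.Context, bucket string) error { return nil }

    // One root context per request, shared by every downstream call. Using
    // r.Context() instead would tie the work to the client connection.
    func deleteBucketHandler(w http.ResponseWriter, r *http.Request) {
    	ctx := context.Background()
    	if err := doDelete(ctx, "demo-bucket"); err != nil {
    		http.Error(w, err.Error(), http.StatusInternalServerError)
    		return
    	}
    	w.WriteHeader(http.StatusNoContent)
    }

    func main() {
    	http.HandleFunc("/", deleteBucketHandler)
    	// http.ListenAndServe(":8333", nil) // left commented so the sketch exits
    }
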
@ -2,6 +2,7 @@ package s3api

import (
"bytes"
"context"
"encoding/base64"
"encoding/xml"
"fmt"

@ -35,9 +36,9 @@ func encodeResponse(response interface{}) []byte {
return bytesBuffer.Bytes()
}

func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {

grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption)
grpcConnection, err := util.GrpcDial(ctx, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption)
if err != nil {
return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err)
}
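
Because withFilerClient now receives ctx and hands it to util.GrpcDial, a single deadline set by the caller can bound both the dial and every RPC issued inside the callback. A small runnable sketch under that assumption; the withFilerClient below is a simplified stand-in, not the real method.

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // withFilerClient mirrors the reshaped helper: ctx first, then the callback.
    // The stand-in just forwards ctx, so a deadline set by the caller is visible
    // to everything the callback does.
    func withFilerClient(ctx context.Context, fn func(ctx context.Context) error) error {
    	// In the real helper, ctx also bounds util.GrpcDial via grpc.DialContext.
    	return fn(ctx)
    }

    func main() {
    	// One deadline now covers the dial plus every RPC issued in the callback.
    	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    	defer cancel()

    	err := withFilerClient(ctx, func(ctx context.Context) error {
    		deadline, ok := ctx.Deadline()
    		fmt.Println("deadline set:", ok, deadline)
    		return nil
    	})
    	fmt.Println("err:", err)
    }
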
@ -1,6 +1,7 @@
package s3api

import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"

@ -25,7 +26,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
bucket = vars["bucket"]
object = vars["object"]

response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{
response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
})

@ -50,7 +51,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())

response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{
response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
UploadId: aws.String(uploadID),

@ -76,7 +77,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
// Get upload id.
uploadID, _, _, _ := getObjectResources(r.URL.Query())

response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{
response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
UploadId: aws.String(uploadID),

@ -111,7 +112,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
}
}

response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{
response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{
Bucket: aws.String(bucket),
Delimiter: aws.String(delimiter),
EncodingType: aws.String(encodingType),

@ -148,7 +149,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
return
}

response, errCode := s3a.listObjectParts(&s3.ListPartsInput{
response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
MaxParts: aws.Int64(int64(maxParts)),

@ -174,8 +175,10 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ

rAuthType := getRequestAuthType(r)

ctx := context.Background()

uploadID := r.URL.Query().Get("uploadId")
exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true)
if !exists {
writeErrorResponse(w, ErrNoSuchUpload, r.URL)
return

@ -44,7 +44,9 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
marker = startAfter
}

response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
ctx := context.Background()

response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)

if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)

@ -62,6 +64,8 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r)
bucket := vars["bucket"]

ctx := context.Background()

originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())

if maxKeys < 0 {

@ -73,7 +77,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
return
}

response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker)
response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker)

if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)

@ -83,13 +87,13 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
writeSuccessResponseXML(w, encodeResponse(response))
}

func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {
func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) {

// convert full path prefix into directory name and prefix for entry name
dir, prefix := filepath.Split(originalPrefix)

// check filer
err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {

request := &filer_pb.ListEntriesRequest{
Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir),

@ -99,7 +103,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys
InclusiveStartFrom: false,
}

resp, err := client.ListEntries(context.Background(), request)
resp, err := client.ListEntries(ctx, request)
if err != nil {
return fmt.Errorf("list buckets: %v", err)
}

@ -36,7 +36,7 @@ func (vs *VolumeServer) heartbeat() {
glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress)
continue
}
newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
glog.V(0).Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)

@ -45,16 +45,16 @@ func (vs *VolumeServer) heartbeat() {
}
}

func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {

grpcConection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption)
grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption)
if err != nil {
return "", fmt.Errorf("fail to dial %s : %v", masterNode, err)
}
defer grpcConection.Close()

client := master_pb.NewSeaweedClient(grpcConection)
stream, err := client.SendHeartbeat(context.Background())
stream, err := client.SendHeartbeat(ctx)
if err != nil {
glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err)
return "", err
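
Threading ctx into doHeartbeat and client.SendHeartbeat means the long-lived heartbeat stream can now be torn down by whoever owns the context, rather than only by a transport error. The loop below is a hypothetical miniature of that behavior, not the volume server's actual code.

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // doHeartbeat stands in for the volume server's loop: a long-lived stream
    // that now exits promptly when the caller's ctx is canceled.
    func doHeartbeat(ctx context.Context, pulse time.Duration) error {
    	ticker := time.NewTicker(pulse)
    	defer ticker.Stop()
    	for {
    		select {
    		case <-ctx.Done():
    			return ctx.Err() // stream torn down together with the context
    		case <-ticker.C:
    			fmt.Println("sent heartbeat")
    		}
    	}
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
    	defer cancel()
    	fmt.Println("heartbeat ended:", doHeartbeat(ctx, 50*time.Millisecond))
    }
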
@ -1,6 +1,7 @@
package util

import (
"context"
"fmt"
"strconv"
"strings"

@ -33,7 +34,7 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server {
return grpc.NewServer(options...)
}

func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
// opts = append(opts, grpc.WithBlock())
// opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second)))
var options []grpc.DialOption

@ -48,10 +49,10 @@ func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
options = append(options, opt)
}
}
return grpc.Dial(address, options...)
return grpc.DialContext(ctx, address, options...)
}

func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {

grpcClientsLock.Lock()

@ -61,7 +62,7 @@ func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts
return fn(existingConnection)
}

grpcConnection, err := GrpcDial(address, opts...)
grpcConnection, err := GrpcDial(ctx, address, opts...)
if err != nil {
grpcClientsLock.Unlock()
return fmt.Errorf("fail to dial %s: %v", address, err)
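
The substantive change in this file is grpc.Dial becoming grpc.DialContext. One caveat: without grpc.WithBlock() (which the commented-out line above suggests is off by default here), DialContext returns almost immediately and connects in the background, so the context chiefly governs blocking dials and, later, per-RPC calls made on the connection. A runnable sketch, assuming a gRPC release of this commit's era where grpc.WithInsecure() is still current; the address is made up for illustration.

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"google.golang.org/grpc"
    )

    func main() {
    	// The deadline is only enforced on the dial itself when blocking.
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	conn, err := grpc.DialContext(ctx, "localhost:19333",
    		grpc.WithInsecure(), // deprecated in newer gRPC releases
    		grpc.WithBlock(),    // opt in to blocking so the deadline applies
    	)
    	if err != nil {
    		fmt.Println("dial failed:", err) // expected if nothing listens there
    		return
    	}
    	defer conn.Close()
    	fmt.Println("connected")
    }
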
@ -53,7 +53,7 @@ func (mc *MasterClient) KeepConnectedToMaster() {
func (mc *MasterClient) tryAllMasters() {
for _, master := range mc.masters {
glog.V(0).Infof("Connecting to master %v", master)
gprcErr := withMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {

stream, err := client.KeepConnected(context.Background())
if err != nil {

@ -99,14 +99,14 @@ func (mc *MasterClient) tryAllMasters() {
}
}

func withMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error {
func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error {

masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0)
if parseErr != nil {
return fmt.Errorf("failed to parse master grpc %v", master)
}

grpcConnection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption)
grpcConnection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption)
if err != nil {
return fmt.Errorf("fail to dial %s: %v", master, err)
}