2016-07-21 06:45:55 +00:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
2019-04-05 07:04:00 +00:00
|
|
|
"context"
|
2016-07-21 06:45:55 +00:00
|
|
|
"fmt"
|
2019-04-05 07:04:00 +00:00
|
|
|
"io"
|
|
|
|
"net/http"
|
2016-07-21 06:45:55 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2018-05-30 07:54:56 +00:00
|
|
|
"strconv"
|
2019-04-05 07:04:00 +00:00
|
|
|
"strings"
|
2019-04-06 06:35:30 +00:00
|
|
|
"sync"
|
2018-05-30 07:54:56 +00:00
|
|
|
"time"
|
2019-06-05 08:30:24 +00:00
|
|
|
|
2020-01-29 17:09:55 +00:00
|
|
|
"google.golang.org/grpc"
|
|
|
|
|
2021-10-14 04:27:58 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
2019-06-05 08:30:24 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
2020-03-04 08:39:47 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb"
|
2019-06-05 08:30:24 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/security"
|
2020-03-09 08:02:01 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
2019-06-05 08:30:24 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
2021-10-14 04:27:58 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util/grace"
|
2019-06-05 08:30:24 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/wdclient"
|
2016-07-21 06:45:55 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// copy holds the flag-backed options shared by the command and all
	// copy workers. NOTE: the name shadows the builtin copy() inside
	// this package scope.
	copy CopyOptions

	// waitGroup tracks the file-copy worker goroutines started in
	// runCopy so the command can wait for them to drain the task channel.
	waitGroup sync.WaitGroup
)
|
|
|
|
|
|
|
|
// CopyOptions collects the command-line options of the filer.copy command.
// Pointer fields are bound to flags in init(); the value fields (masters,
// cipher, ttlSec, grpcDialOption) are resolved at runtime in runCopy from
// the filer configuration and the parsed flags.
type CopyOptions struct {
	include           *string // file name pattern (e.g. *.pdf) applied when copying directories
	replication       *string // replication type; falls back to the filer default when empty
	collection        *string // collection name; falls back to the filer default when empty
	ttl               *string // time to live, e.g. 1m, 1h, 1d, 1M, 1y
	diskType          *string // [hdd|ssd|<tag>] disk type or custom tag for volume assignment
	maxMB             *int    // files larger than this many MB are uploaded in chunks
	masterClient      *wdclient.MasterClient
	concurrenctFiles  *int // number of concurrent file-copy goroutines (field name keeps historical misspelling of "concurrent")
	concurrenctChunks *int // number of concurrent chunk-upload goroutines per file (sic)
	grpcDialOption    grpc.DialOption
	masters           []string // master addresses reported by the filer
	cipher            bool     // whether uploads are encrypted, as reported by the filer
	ttlSec            int32    // ttl parsed into seconds (see runCopy)
	checkSize         *bool    // when true, skip files whose size already matches the destination entry
	verbose           *bool    // print per-file details during copying
}
|
|
|
|
|
|
|
|
func init() {
|
2021-07-24 01:44:53 +00:00
|
|
|
cmdFilerCopy.Run = runCopy // break init cycle
|
|
|
|
cmdFilerCopy.IsDebug = cmdFilerCopy.Flag.Bool("debug", false, "verbose debug information")
|
|
|
|
copy.include = cmdFilerCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
|
|
|
|
copy.replication = cmdFilerCopy.Flag.String("replication", "", "replication type")
|
|
|
|
copy.collection = cmdFilerCopy.Flag.String("collection", "", "optional collection name")
|
|
|
|
copy.ttl = cmdFilerCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
|
|
|
|
copy.diskType = cmdFilerCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
|
|
|
copy.maxMB = cmdFilerCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
|
|
|
|
copy.concurrenctFiles = cmdFilerCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
|
|
|
|
copy.concurrenctChunks = cmdFilerCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
|
|
|
|
copy.checkSize = cmdFilerCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file")
|
|
|
|
copy.verbose = cmdFilerCopy.Flag.Bool("verbose", false, "print out details during copying")
|
2016-07-21 06:45:55 +00:00
|
|
|
}
|
|
|
|
|
2021-07-24 01:44:53 +00:00
|
|
|
var cmdFilerCopy = &Command{
|
2016-07-21 22:00:07 +00:00
|
|
|
UsageLine: "filer.copy file_or_dir1 [file_or_dir2 file_or_dir3] http://localhost:8888/path/to/a/folder/",
|
2016-07-21 06:45:55 +00:00
|
|
|
Short: "copy one or a list of files to a filer folder",
|
|
|
|
Long: `copy one or a list of files, or batch copy one whole folder recursively, to a filer folder
|
|
|
|
|
|
|
|
It can copy one or a list of files or folders.
|
|
|
|
|
|
|
|
If copying a whole folder recursively:
|
|
|
|
All files under the folder and subfolders will be copyed.
|
|
|
|
Optional parameter "-include" allows you to specify the file name patterns.
|
|
|
|
|
2018-09-28 08:58:34 +00:00
|
|
|
If "maxMB" is set to a positive number, files larger than it would be split into chunks.
|
2016-07-21 06:45:55 +00:00
|
|
|
|
2020-09-09 18:21:23 +00:00
|
|
|
`,
|
2016-07-21 06:45:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// runCopy is the entry point of the filer.copy command.
//
// The last argument is the destination filer folder URL (must end with "/");
// all preceding arguments are source files or directories. It merges the
// filer-reported defaults (collection, replication, maxMB, cipher) into the
// flag-backed options, then runs a producer goroutine that walks the sources
// and a pool of worker goroutines that perform the uploads.
// Returns false on usage/configuration errors so the caller prints usage.
func runCopy(cmd *Command, args []string) bool {

	util.LoadConfiguration("security", false)

	if len(args) <= 1 {
		return false
	}
	filerDestination := args[len(args)-1]
	fileOrDirs := args[0 : len(args)-1]

	filerAddress, urlPath, err := pb.ParseUrl(filerDestination)
	if err != nil {
		fmt.Printf("The last argument should be a URL on filer: %v\n", err)
		return false
	}
	if !strings.HasSuffix(urlPath, "/") {
		fmt.Printf("The last argument should be a folder and end with \"/\"\n")
		return false
	}

	copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

	masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerAddress)
	if err != nil {
		fmt.Printf("read from filer %s: %v\n", filerAddress, err)
		return false
	}
	// When copying into a bucket path (under dirBuckets), the collection
	// must match the bucket name; derive it or reject a mismatch.
	if strings.HasPrefix(urlPath, dirBuckets+"/") {
		restPath := urlPath[len(dirBuckets)+1:]
		if strings.Index(restPath, "/") > 0 {
			expectedBucket := restPath[:strings.Index(restPath, "/")]
			if *copy.collection == "" {
				*copy.collection = expectedBucket
			} else if *copy.collection != expectedBucket {
				fmt.Printf("destination %s uses collection \"%s\": unexpected collection \"%v\"\n", urlPath, expectedBucket, *copy.collection)
				return true
			}
		}
	}
	// Fall back to the filer's defaults for options left unset on the command line.
	if *copy.collection == "" {
		*copy.collection = collection
	}
	if *copy.replication == "" {
		*copy.replication = replication
	}
	if *copy.maxMB == 0 {
		*copy.maxMB = int(maxMB)
	}
	copy.masters = masters
	copy.cipher = cipher

	ttl, err := needle.ReadTTL(*copy.ttl)
	if err != nil {
		fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err)
		return false
	}
	copy.ttlSec = int32(ttl.Minutes()) * 60

	if *cmdFilerCopy.IsDebug {
		grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
	}

	fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles)

	// Producer: walk the sources and emit one task per file/directory.
	go func() {
		defer close(fileCopyTaskChan)
		for _, fileOrDir := range fileOrDirs {
			if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil {
				fmt.Fprintf(os.Stderr, "genFileCopyTask : %v\n", err)
				break
			}
		}
	}()
	// Consumers: a fixed pool of workers drains the task channel.
	for i := 0; i < *copy.concurrenctFiles; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			worker := FileCopyWorker{
				options:      &copy,
				filerAddress: filerAddress,
			}
			if err := worker.copyFiles(fileCopyTaskChan); err != nil {
				fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
				return
			}
		}()
	}
	waitGroup.Wait()

	return true
}
|
|
|
|
|
2021-09-13 05:47:52 +00:00
|
|
|
func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress pb.ServerAddress) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
|
2021-12-26 08:15:03 +00:00
|
|
|
err = pb.WithGrpcFilerClient(false, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
2020-02-26 06:23:59 +00:00
|
|
|
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
2019-06-23 08:57:35 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
|
|
|
|
}
|
|
|
|
masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
|
2020-10-25 18:21:33 +00:00
|
|
|
dirBuckets = resp.DirBuckets
|
2020-03-06 08:49:47 +00:00
|
|
|
cipher = resp.Cipher
|
2019-06-23 08:57:35 +00:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-04-06 06:35:30 +00:00
|
|
|
func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan FileCopyTask) error {
|
2016-07-21 06:45:55 +00:00
|
|
|
|
2019-04-06 06:35:30 +00:00
|
|
|
fi, err := os.Stat(fileOrDir)
|
2016-07-21 06:45:55 +00:00
|
|
|
if err != nil {
|
2020-12-14 17:39:33 +00:00
|
|
|
fmt.Fprintf(os.Stderr, "Error: read file %s: %v\n", fileOrDir, err)
|
2019-04-06 06:35:30 +00:00
|
|
|
return nil
|
2016-07-21 06:45:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mode := fi.Mode()
|
2019-04-07 18:31:50 +00:00
|
|
|
uid, gid := util.GetFileUidGid(fi)
|
2021-06-13 14:09:06 +00:00
|
|
|
fileSize := fi.Size()
|
|
|
|
if mode.IsDir() {
|
|
|
|
fileSize = 0
|
|
|
|
}
|
2019-04-07 18:31:50 +00:00
|
|
|
|
2019-04-06 06:35:30 +00:00
|
|
|
fileCopyTaskChan <- FileCopyTask{
|
|
|
|
sourceLocation: fileOrDir,
|
2020-10-30 17:46:31 +00:00
|
|
|
destinationUrlPath: destPath,
|
2021-06-13 14:09:06 +00:00
|
|
|
fileSize: fileSize,
|
2019-04-06 06:35:30 +00:00
|
|
|
fileMode: fi.Mode(),
|
2019-04-07 18:31:50 +00:00
|
|
|
uid: uid,
|
|
|
|
gid: gid,
|
2019-04-06 06:35:30 +00:00
|
|
|
}
|
|
|
|
|
2021-04-21 18:17:43 +00:00
|
|
|
if mode.IsDir() {
|
2021-10-14 04:27:58 +00:00
|
|
|
files, _ := os.ReadDir(fileOrDir)
|
2021-04-21 18:17:43 +00:00
|
|
|
for _, subFileOrDir := range files {
|
2021-05-13 04:45:39 +00:00
|
|
|
cleanedDestDirectory := filepath.Clean(destPath + fi.Name())
|
|
|
|
if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), cleanedDestDirectory+"/", fileCopyTaskChan); err != nil {
|
2021-04-21 18:17:43 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-06 06:35:30 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FileCopyWorker drains FileCopyTask items from a channel and uploads each
// one to a single destination filer.
type FileCopyWorker struct {
	options      *CopyOptions     // shared command-line / filer-derived settings
	filerAddress pb.ServerAddress // destination filer address
}
|
|
|
|
|
2020-02-26 06:23:59 +00:00
|
|
|
func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
|
2019-04-06 06:35:30 +00:00
|
|
|
for task := range fileCopyTaskChan {
|
2020-02-26 06:23:59 +00:00
|
|
|
if err := worker.doEachCopy(task); err != nil {
|
2019-04-06 06:35:30 +00:00
|
|
|
return err
|
|
|
|
}
|
2016-07-21 06:45:55 +00:00
|
|
|
}
|
2019-04-06 06:35:30 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FileCopyTask describes one local file or directory entry to be copied
// to a filer destination folder.
type FileCopyTask struct {
	sourceLocation     string // local path of the source file or directory
	destinationUrlPath string // destination folder path on the filer, ends with "/"
	fileSize           int64  // size in bytes; 0 for directories (see genFileCopyTask)
	fileMode           os.FileMode
	uid                uint32 // owner user id captured from the source file
	gid                uint32 // owner group id captured from the source file
}
|
|
|
|
|
2020-02-26 06:23:59 +00:00
|
|
|
func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
|
2019-04-06 06:35:30 +00:00
|
|
|
|
|
|
|
f, err := os.Open(task.sourceLocation)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Failed to open file %s: %v\n", task.sourceLocation, err)
|
|
|
|
if _, ok := err.(*os.PathError); ok {
|
|
|
|
fmt.Printf("skipping %s\n", task.sourceLocation)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer f.Close()
|
2016-07-21 06:45:55 +00:00
|
|
|
|
|
|
|
// this is a regular file
|
2019-04-06 06:35:30 +00:00
|
|
|
if *worker.options.include != "" {
|
|
|
|
if ok, _ := filepath.Match(*worker.options.include, filepath.Base(task.sourceLocation)); !ok {
|
|
|
|
return nil
|
2016-07-21 06:45:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-13 04:45:39 +00:00
|
|
|
if shouldCopy, err := worker.checkExistingFileFirst(task, f); err != nil {
|
|
|
|
return fmt.Errorf("check existing file: %v", err)
|
|
|
|
} else if !shouldCopy {
|
|
|
|
if *worker.options.verbose {
|
|
|
|
fmt.Printf("skipping copied file: %v\n", f.Name())
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-05-30 06:46:45 +00:00
|
|
|
// find the chunk count
|
2019-04-06 06:35:30 +00:00
|
|
|
chunkSize := int64(*worker.options.maxMB * 1024 * 1024)
|
2018-05-30 06:46:45 +00:00
|
|
|
chunkCount := 1
|
2019-04-06 06:35:30 +00:00
|
|
|
if chunkSize > 0 && task.fileSize > chunkSize {
|
|
|
|
chunkCount = int(task.fileSize/chunkSize) + 1
|
2018-05-30 06:46:45 +00:00
|
|
|
}
|
|
|
|
|
2018-05-30 08:05:26 +00:00
|
|
|
if chunkCount == 1 {
|
2020-02-26 05:50:12 +00:00
|
|
|
return worker.uploadFileAsOne(task, f)
|
2018-05-30 08:05:26 +00:00
|
|
|
}
|
|
|
|
|
2020-02-26 05:50:12 +00:00
|
|
|
return worker.uploadFileInChunks(task, f, chunkCount, chunkSize)
|
2018-05-30 08:05:26 +00:00
|
|
|
}
|
|
|
|
|
2021-05-13 04:45:39 +00:00
|
|
|
func (worker *FileCopyWorker) checkExistingFileFirst(task FileCopyTask, f *os.File) (shouldCopy bool, err error) {
|
|
|
|
|
|
|
|
shouldCopy = true
|
|
|
|
|
|
|
|
if !*worker.options.checkSize {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
fileStat, err := f.Stat()
|
|
|
|
if err != nil {
|
|
|
|
shouldCopy = false
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-12-26 08:15:03 +00:00
|
|
|
err = pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
2021-05-13 04:45:39 +00:00
|
|
|
|
|
|
|
request := &filer_pb.LookupDirectoryEntryRequest{
|
|
|
|
Directory: task.destinationUrlPath,
|
|
|
|
Name: filepath.Base(f.Name()),
|
|
|
|
}
|
|
|
|
|
|
|
|
resp, lookupErr := client.LookupDirectoryEntry(context.Background(), request)
|
|
|
|
if lookupErr != nil {
|
|
|
|
// mostly not found error
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if fileStat.Size() == int64(filer.FileSize(resp.Entry)) {
|
|
|
|
shouldCopy = false
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-02-26 05:50:12 +00:00
|
|
|
// uploadFileAsOne uploads the whole file as a single chunk and then creates
// the filer entry pointing at it. For empty files and directories no data is
// uploaded — only the entry is created. The assign+upload step is wrapped in
// util.Retry so transient failures are retried as a unit.
func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error {

	// upload the file content
	fileName := filepath.Base(f.Name())
	var mimeType string

	var chunks []*filer_pb.FileChunk
	var assignResult *filer_pb.AssignVolumeResponse
	var assignError error

	// Only regular, non-empty files have content to upload.
	if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 {

		mimeType = detectMimeType(f)
		// NOTE(review): the entire file is read into memory; acceptable here
		// because this path is only taken for files at most maxMB in size.
		data, err := io.ReadAll(f)
		if err != nil {
			return err
		}

		err = util.Retry("upload", func() error {
			// assign a volume
			assignErr := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

				request := &filer_pb.AssignVolumeRequest{
					Count:       1,
					Replication: *worker.options.replication,
					Collection:  *worker.options.collection,
					TtlSec:      worker.options.ttlSec,
					DiskType:    *worker.options.diskType,
					Path:        task.destinationUrlPath,
				}

				assignResult, assignError = client.AssignVolume(context.Background(), request)
				if assignError != nil {
					return fmt.Errorf("assign volume failure %v: %v", request, assignError)
				}
				if assignResult.Error != "" {
					return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
				}
				if assignResult.Location.Url == "" {
					return fmt.Errorf("assign volume failure %v: %v", request, assignResult)
				}
				return nil
			})
			if assignErr != nil {
				return assignErr
			}

			// upload data
			targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId
			uploadOption := &operation.UploadOption{
				UploadUrl:         targetUrl,
				Filename:          fileName,
				Cipher:            worker.options.cipher,
				IsInputCompressed: false,
				MimeType:          mimeType,
				PairMap:           nil,
				Jwt:               security.EncodedJwt(assignResult.Auth),
			}
			uploadResult, err := operation.UploadData(data, uploadOption)
			if err != nil {
				return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
			}
			if uploadResult.Error != "" {
				return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
			}
			if *worker.options.verbose {
				fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
			}

			fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName)
			chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))

			return nil
		})
		if err != nil {
			return fmt.Errorf("upload %v: %v\n", fileName, err)
		}

	}

	// Create (or overwrite) the filer entry referencing the uploaded chunk(s).
	if err := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
		request := &filer_pb.CreateEntryRequest{
			Directory: task.destinationUrlPath,
			Entry: &filer_pb.Entry{
				Name: fileName,
				Attributes: &filer_pb.FuseAttributes{
					Crtime:      time.Now().Unix(),
					Mtime:       time.Now().Unix(),
					Gid:         task.gid,
					Uid:         task.uid,
					FileSize:    uint64(task.fileSize),
					FileMode:    uint32(task.fileMode),
					Mime:        mimeType,
					Replication: *worker.options.replication,
					Collection:  *worker.options.collection,
					TtlSec:      worker.options.ttlSec,
				},
				Chunks: chunks,
			},
		}

		if err := filer_pb.CreateEntry(client, request); err != nil {
			return fmt.Errorf("update fh: %v", err)
		}
		return nil
	}); err != nil {
		return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName, err)
	}

	return nil
}
|
2018-05-30 06:46:45 +00:00
|
|
|
|
2020-02-26 05:50:12 +00:00
|
|
|
// uploadFileInChunks uploads the file as chunkCount chunks of chunkSize
// bytes, with at most concurrenctChunks uploads in flight at once, then
// creates the filer entry referencing all chunks (manifestized if needed).
// On any chunk failure it deletes the chunks that did succeed before
// returning the error, so no orphan data is left behind.
func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {

	fileName := filepath.Base(f.Name())
	mimeType := detectMimeType(f)

	chunksChan := make(chan *filer_pb.FileChunk, chunkCount)

	// Semaphore bounding the number of concurrent chunk uploads.
	concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks)
	var wg sync.WaitGroup
	// NOTE(review): uploadError is written by multiple goroutines and read
	// in the loop condition without synchronization — a data race under
	// -race; consider a mutex or errgroup. Left as-is in this doc pass.
	var uploadError error
	var collection, replication string

	fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount)
	for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ {
		wg.Add(1)
		concurrentChunks <- struct{}{}
		go func(i int64) {
			defer func() {
				wg.Done()
				<-concurrentChunks
			}()
			// assign a volume
			var assignResult *filer_pb.AssignVolumeResponse
			var assignError error
			err := util.Retry("assignVolume", func() error {
				return pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
					request := &filer_pb.AssignVolumeRequest{
						Count:       1,
						Replication: *worker.options.replication,
						Collection:  *worker.options.collection,
						TtlSec:      worker.options.ttlSec,
						DiskType:    *worker.options.diskType,
						Path:        task.destinationUrlPath + fileName,
					}

					assignResult, assignError = client.AssignVolume(context.Background(), request)
					if assignError != nil {
						return fmt.Errorf("assign volume failure %v: %v", request, assignError)
					}
					if assignResult.Error != "" {
						return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
					}
					return nil
				})
			})
			if err != nil {
				uploadError = fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
				return
			}

			targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId
			// Remember the first assigned collection/replication for the entry.
			if collection == "" {
				collection = assignResult.Collection
			}
			if replication == "" {
				replication = assignResult.Replication
			}

			uploadOption := &operation.UploadOption{
				UploadUrl:         targetUrl,
				Filename:          fileName + "-" + strconv.FormatInt(i+1, 10),
				Cipher:            worker.options.cipher,
				IsInputCompressed: false,
				MimeType:          "",
				PairMap:           nil,
				Jwt:               security.EncodedJwt(assignResult.Auth),
			}
			// SectionReader lets concurrent chunks read disjoint ranges of f.
			uploadResult, err, _ := operation.Upload(io.NewSectionReader(f, i*chunkSize, chunkSize), uploadOption)
			if err != nil {
				uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
				return
			}
			if uploadResult.Error != "" {
				uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
				return
			}
			chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize)

			fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
		}(i)
	}
	wg.Wait()
	close(chunksChan)

	var chunks []*filer_pb.FileChunk
	for chunk := range chunksChan {
		chunks = append(chunks, chunk)
	}

	if uploadError != nil {
		// Roll back: delete the chunks that did make it.
		var fileIds []string
		for _, chunk := range chunks {
			fileIds = append(fileIds, chunk.FileId)
		}
		operation.DeleteFiles(func() pb.ServerAddress {
			return pb.ServerAddress(copy.masters[0])
		}, false, worker.options.grpcDialOption, fileIds)
		return uploadError
	}

	// Collapse a long chunk list into manifest chunks when necessary.
	manifestedChunks, manifestErr := filer.MaybeManifestize(worker.saveDataAsChunk, chunks)
	if manifestErr != nil {
		return fmt.Errorf("create manifest: %v", manifestErr)
	}

	if err := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
		request := &filer_pb.CreateEntryRequest{
			Directory: task.destinationUrlPath,
			Entry: &filer_pb.Entry{
				Name: fileName,
				Attributes: &filer_pb.FuseAttributes{
					Crtime:      time.Now().Unix(),
					Mtime:       time.Now().Unix(),
					Gid:         task.gid,
					Uid:         task.uid,
					FileSize:    uint64(task.fileSize),
					FileMode:    uint32(task.fileMode),
					Mime:        mimeType,
					Replication: replication,
					Collection:  collection,
					TtlSec:      worker.options.ttlSec,
				},
				Chunks: manifestedChunks,
			},
		}

		if err := filer_pb.CreateEntry(client, request); err != nil {
			return fmt.Errorf("update fh: %v", err)
		}
		return nil
	}); err != nil {
		return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName, err)
	}

	fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName)

	return nil
}
|
2018-05-30 06:52:27 +00:00
|
|
|
|
|
|
|
func detectMimeType(f *os.File) string {
|
|
|
|
head := make([]byte, 512)
|
2019-01-17 01:17:19 +00:00
|
|
|
f.Seek(0, io.SeekStart)
|
2018-05-30 06:52:27 +00:00
|
|
|
n, err := f.Read(head)
|
2018-05-31 05:28:14 +00:00
|
|
|
if err == io.EOF {
|
|
|
|
return ""
|
|
|
|
}
|
2018-05-30 06:52:27 +00:00
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("read head of %v: %v\n", f.Name(), err)
|
2020-03-07 14:07:56 +00:00
|
|
|
return ""
|
2018-05-30 06:52:27 +00:00
|
|
|
}
|
2019-01-17 01:17:19 +00:00
|
|
|
f.Seek(0, io.SeekStart)
|
2018-05-30 06:52:27 +00:00
|
|
|
mimeType := http.DetectContentType(head[:n])
|
2020-03-07 14:07:56 +00:00
|
|
|
if mimeType == "application/octet-stream" {
|
|
|
|
return ""
|
|
|
|
}
|
2018-05-30 06:52:27 +00:00
|
|
|
return mimeType
|
|
|
|
}
|
2021-08-19 06:46:54 +00:00
|
|
|
|
|
|
|
// saveDataAsChunk assigns a volume from the filer (with retry), uploads the
// data from reader to the assigned location, and returns the resulting
// FileChunk at the given offset. It is passed to filer.MaybeManifestize as
// the chunk-saving callback when collapsing chunk lists into manifests.
func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {

	var fileId, host string
	var auth security.EncodedJwt

	// Step 1: ask the filer to assign a volume location for this chunk.
	if flushErr := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

		ctx := context.Background()

		assignErr := util.Retry("assignVolume", func() error {
			request := &filer_pb.AssignVolumeRequest{
				Count:       1,
				Replication: *worker.options.replication,
				Collection:  *worker.options.collection,
				TtlSec:      worker.options.ttlSec,
				DiskType:    *worker.options.diskType,
				Path:        name,
			}

			resp, err := client.AssignVolume(ctx, request)
			if err != nil {
				return fmt.Errorf("assign volume failure %v: %v", request, err)
			}
			if resp.Error != "" {
				return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
			}

			fileId, host, auth = resp.FileId, resp.Location.Url, security.EncodedJwt(resp.Auth)
			collection, replication = resp.Collection, resp.Replication

			return nil
		})
		if assignErr != nil {
			return assignErr
		}

		return nil
	}); flushErr != nil {
		return nil, collection, replication, fmt.Errorf("filerGrpcAddress assign volume: %v", flushErr)
	}

	// Step 2: upload the chunk data to the assigned volume server.
	uploadOption := &operation.UploadOption{
		UploadUrl:         fmt.Sprintf("http://%s/%s", host, fileId),
		Filename:          name,
		Cipher:            worker.options.cipher,
		IsInputCompressed: false,
		MimeType:          "",
		PairMap:           nil,
		Jwt:               auth,
	}
	uploadResult, flushErr, _ := operation.Upload(reader, uploadOption)
	if flushErr != nil {
		return nil, collection, replication, fmt.Errorf("upload data: %v", flushErr)
	}
	if uploadResult.Error != "" {
		return nil, collection, replication, fmt.Errorf("upload result: %v", uploadResult.Error)
	}
	return uploadResult.ToPbFileChunk(fileId, offset), collection, replication, nil
}
|