// Command filer.export iterates a filer store's file tree and exports metadata
// to another store, to a notification queue, or to stdout as a directory tree.
package command
import (
"context"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/spf13/viper"
)
// init wires the command's Run function after both cmdFilerExport and
// runFilerExport are declared; assigning here rather than in the var
// block avoids an initialization cycle between the two.
func init() {
	cmdFilerExport.Run = runFilerExport // break init cycle
}
var cmdFilerExport = &Command{
2019-01-17 01:17:19 +00:00
UsageLine: "filer.export -sourceStore=mysql -targetStore=cassandra",
2018-08-23 06:54:22 +00:00
Short: "export meta data in filer store",
Long: `Iterate the file tree and export all metadata out
Both source and target store:
* should be a store name already specified in filer.toml
* do not need to be enabled state
If target store is empty, only the directory tree will be listed.
If target store is "notification", the list of entries will be sent to notification.
This is usually used to bootstrap filer replication to a remote system.
2018-08-23 06:54:22 +00:00
`,
}
var (
// filerExportOutputFile = cmdFilerExport.Flag.String("output", "", "the output file. If empty, only list out the directory tree")
filerExportSourceStore = cmdFilerExport.Flag.String("sourceStore", "", "the source store name in filer.toml, default to currently enabled store")
filerExportTargetStore = cmdFilerExport.Flag.String("targetStore", "", "the target store name in filer.toml, or \"notification\" to export all files to message queue")
2018-11-04 20:07:33 +00:00
dir = cmdFilerExport.Flag.String("dir", "/", "only process files under this directory")
dirListLimit = cmdFilerExport.Flag.Int("dirListLimit", 100000, "limit directory list size")
dryRun = cmdFilerExport.Flag.Bool("dryRun", false, "not actually moving data")
verboseFilerExport = cmdFilerExport.Flag.Bool("v", false, "verbose entry details")
2018-08-23 06:54:22 +00:00
)
// statistics accumulates counts of entries visited during the export traversal.
type statistics struct {
	directoryCount int // number of directories visited
	fileCount      int // number of non-directory entries visited
}
func runFilerExport(cmd *Command, args []string) bool {
weed_server.LoadConfiguration("filer", true)
config := viper.GetViper()
2018-08-23 07:02:04 +00:00
var sourceStore, targetStore filer2.FilerStore
2018-08-23 06:54:22 +00:00
for _, store := range filer2.Stores {
if store.GetName() == *filerExportSourceStore || *filerExportSourceStore == "" && config.GetBool(store.GetName()+".enabled") {
2018-08-23 06:54:22 +00:00
viperSub := config.Sub(store.GetName())
if err := store.Initialize(viperSub); err != nil {
glog.Fatalf("Failed to initialize source store for %s: %+v",
2018-08-23 06:54:22 +00:00
store.GetName(), err)
} else {
sourceStore = store
}
break
}
}
2018-08-23 07:02:04 +00:00
for _, store := range filer2.Stores {
if store.GetName() == *filerExportTargetStore {
viperSub := config.Sub(store.GetName())
if err := store.Initialize(viperSub); err != nil {
glog.Fatalf("Failed to initialize target store for %s: %+v",
2018-08-23 07:02:04 +00:00
store.GetName(), err)
} else {
targetStore = store
}
break
}
}
2018-08-23 06:54:22 +00:00
if sourceStore == nil {
glog.Errorf("Failed to find source store %s", *filerExportSourceStore)
println("existing data sources are:")
for _, store := range filer2.Stores {
println(" " + store.GetName())
}
return false
}
if targetStore == nil && *filerExportTargetStore != "" && *filerExportTargetStore != "notification" {
glog.Errorf("Failed to find target store %s", *filerExportTargetStore)
println("existing data sources are:")
for _, store := range filer2.Stores {
println(" " + store.GetName())
}
return false
}
2019-03-15 22:55:34 +00:00
ctx := context.Background()
2018-08-23 06:54:22 +00:00
stat := statistics{}
2018-08-23 07:02:04 +00:00
var fn func(level int, entry *filer2.Entry) error
if *filerExportTargetStore == "notification" {
weed_server.LoadConfiguration("notification", false)
v := viper.GetViper()
notification.LoadConfiguration(v.Sub("notification"))
fn = func(level int, entry *filer2.Entry) error {
printout(level, entry)
if *dryRun {
return nil
}
return notification.Queue.SendMessage(
string(entry.FullPath),
&filer_pb.EventNotification{
NewEntry: entry.ToProtoEntry(),
},
)
}
} else if targetStore == nil {
2018-08-23 07:02:04 +00:00
fn = printout
} else {
fn = func(level int, entry *filer2.Entry) error {
printout(level, entry)
if *dryRun {
return nil
}
2019-03-15 22:55:34 +00:00
return targetStore.InsertEntry(ctx, entry)
2018-08-23 07:02:04 +00:00
}
}
2019-03-15 22:55:34 +00:00
doTraverse(ctx, &stat, sourceStore, filer2.FullPath(*dir), 0, fn)
2018-08-23 06:54:22 +00:00
glog.Infof("processed %d directories, %d files", stat.directoryCount, stat.fileCount)
return true
}
2019-03-15 22:55:34 +00:00
func doTraverse(ctx context.Context, stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) {
2018-08-23 06:54:22 +00:00
limit := *dirListLimit
2018-08-23 06:54:22 +00:00
lastEntryName := ""
for {
2019-03-15 22:55:34 +00:00
entries, err := filerStore.ListDirectoryEntries(ctx, parentPath, lastEntryName, false, limit)
2018-08-23 06:54:22 +00:00
if err != nil {
break
}
for _, entry := range entries {
if fnErr := fn(level, entry); fnErr != nil {
glog.Errorf("failed to process entry: %s", entry.FullPath)
}
if entry.IsDirectory() {
stat.directoryCount++
2019-03-15 22:55:34 +00:00
doTraverse(ctx, stat, filerStore, entry.FullPath, level+1, fn)
2018-08-23 06:54:22 +00:00
} else {
stat.fileCount++
}
lastEntryName = entry.Name()
2018-08-23 06:54:22 +00:00
}
if len(entries) < limit {
break
}
}
}
func printout(level int, entry *filer2.Entry) error {
for i := 0; i < level; i++ {
if i == level-1 {
print("+-")
} else {
print("| ")
}
2018-08-23 06:54:22 +00:00
}
2018-11-21 23:12:47 +00:00
print(entry.FullPath.Name())
2018-11-23 08:26:15 +00:00
if *verboseFilerExport {
for _, chunk := range entry.Chunks {
print("[")
print(chunk.FileId)
print(",")
print(chunk.Offset)
print(",")
print(chunk.Size)
print(")")
}
2018-11-21 23:12:47 +00:00
}
println()
2018-08-23 06:54:22 +00:00
return nil
}