2016-06-03 01:09:14 +00:00
|
|
|
package command
|
2015-05-26 07:58:41 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2019-02-18 20:11:52 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/security"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/server"
|
|
|
|
"github.com/spf13/viper"
|
2015-05-26 07:58:41 +00:00
|
|
|
|
2016-06-03 01:09:14 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage"
|
2015-05-26 07:58:41 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// s holds the parsed command-line flag values for the backup
	// command; populated in init() and read by runBackup.
	s BackupOptions
)
|
|
|
|
|
|
|
|
// BackupOptions groups the command-line flags accepted by the backup
// command. Every field is a pointer returned by the flag package and
// is registered on cmdBackup's flag set in init().
type BackupOptions struct {
	master     *string // master location ("host:port") used to look up the volume
	collection *string // collection the volume belongs to ("" = default collection)
	dir        *string // local directory holding the backup .dat/.idx files
	volumeId   *int    // volume id to back up; -1 (the default) means "not set"
}
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
cmdBackup.Run = runBackup // break init cycle
|
|
|
|
s.master = cmdBackup.Flag.String("server", "localhost:9333", "SeaweedFS master location")
|
|
|
|
s.collection = cmdBackup.Flag.String("collection", "", "collection name")
|
|
|
|
s.dir = cmdBackup.Flag.String("dir", ".", "directory to store volume data files")
|
|
|
|
s.volumeId = cmdBackup.Flag.Int("volumeId", -1, "a volume id. The volume .dat and .idx files should already exist in the dir.")
|
|
|
|
}
|
|
|
|
|
|
|
|
// cmdBackup describes the "weed backup" subcommand: incrementally copy
// one volume's data from its volume server into a local directory.
// Its Run field is assigned in init() to avoid an initialization cycle.
var cmdBackup = &Command{
	UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333",
	Short:     "incrementally backup a volume to local folder",
	Long: `Incrementally backup volume data.

	It is expected that you use this inside a script, to loop through
	all possible volume ids that needs to be backup to local folder.

	The volume id does not need to exist locally or even remotely.
	This will help to backup future new volumes.

	Usually backing up is just copying the .dat (and .idx) files.
	But it's tricky to incrementally copy the differences.

	The complexity comes when there are multiple addition, deletion and compaction.
	This tool will handle them correctly and efficiently, avoiding unnecessary data transportation.
  `,
}
|
|
|
|
|
|
|
|
func runBackup(cmd *Command, args []string) bool {
|
2019-02-18 20:11:52 +00:00
|
|
|
|
|
|
|
weed_server.LoadConfiguration("security", false)
|
|
|
|
grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
|
|
|
|
|
2015-05-26 07:58:41 +00:00
|
|
|
if *s.volumeId == -1 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
vid := storage.VolumeId(*s.volumeId)
|
|
|
|
|
|
|
|
// find volume location, replication, ttl info
|
|
|
|
lookup, err := operation.Lookup(*s.master, vid.String())
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Error looking up volume %d: %v\n", vid, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
volumeServer := lookup.Locations[0].Url
|
|
|
|
|
2019-02-18 20:11:52 +00:00
|
|
|
stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
|
2015-05-26 07:58:41 +00:00
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Error get volume %d status: %v\n", vid, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
ttl, err := storage.ReadTTL(stats.Ttl)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
replication, err := storage.NewReplicaPlacementFromString(stats.Replication)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2017-01-08 19:01:46 +00:00
|
|
|
v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0)
|
2015-05-26 07:58:41 +00:00
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2019-03-25 16:16:12 +00:00
|
|
|
if v.SuperBlock.CompactRevision < uint16(stats.CompactRevision) {
|
|
|
|
if err = v.Compact(0); err != nil {
|
|
|
|
fmt.Printf("Compact Volume before synchronizing %v\n", err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if err = v.CommitCompact(); err != nil {
|
|
|
|
fmt.Printf("Commit Compact before synchronizing %v\n", err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
v.SuperBlock.CompactRevision = uint16(stats.CompactRevision)
|
2019-03-26 06:18:40 +00:00
|
|
|
v.DataFile().WriteAt(v.SuperBlock.Bytes(), 0)
|
2019-03-25 16:16:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if uint64(v.Size()) > stats.TailOffset {
|
|
|
|
// remove the old data
|
|
|
|
v.Destroy()
|
|
|
|
// recreate an empty volume
|
|
|
|
v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
defer v.Close()
|
|
|
|
|
|
|
|
if err := v.Follow(volumeServer, grpcDialOption); err != nil {
|
2015-05-26 07:58:41 +00:00
|
|
|
fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|