From e70f740deb210672a0d409628b839c56eb867d91 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 24 Jan 2019 19:38:04 -0800 Subject: [PATCH 001/450] set file attributes fix https://github.com/chrislusf/seaweedfs/issues/847 --- weed/filesys/file.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 4bb169a33..6c07345a0 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -74,10 +74,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f return err } - if file.isOpen { - return nil - } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { From 5668ed798dc2b2c6c0c88a6615ab4d5360024f10 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 26 Jan 2019 00:05:44 -0600 Subject: [PATCH 002/450] memory needle map mark size to be TombstoneFileSize fix https://github.com/chrislusf/seaweedfs/issues/850 --- weed/storage/needle/compact_map.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/storage/needle/compact_map.go b/weed/storage/needle/compact_map.go index 4816e0098..bca698407 100644 --- a/weed/storage/needle/compact_map.go +++ b/weed/storage/needle/compact_map.go @@ -76,9 +76,9 @@ func (cs *CompactSection) Delete(key NeedleId) uint32 { cs.Lock() ret := uint32(0) if i := cs.binarySearchValues(skey); i >= 0 { - if cs.values[i].Size > 0 { + if cs.values[i].Size > 0 && cs.values[i].Size != TombstoneFileSize { ret = cs.values[i].Size - cs.values[i].Size = 0 + cs.values[i].Size = TombstoneFileSize } } if v, found := cs.overflow.findOverflowEntry(skey); found { From 09471b46f90590cb1dede387a967f92c2250b9a1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 26 Jan 2019 00:10:29 -0600 Subject: [PATCH 003/450] fix related tests --- weed/storage/needle/compact_map_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/storage/needle/compact_map_test.go b/weed/storage/needle/compact_map_test.go index 8ed851b95..73231053e 100644 --- a/weed/storage/needle/compact_map_test.go +++ b/weed/storage/needle/compact_map_test.go @@ -68,7 +68,7 @@ func TestCompactMap(t *testing.T) { t.Fatal("key", i, "size", v.Size) } } else if i%37 == 0 { - if ok && v.Size > 0 { + if ok && v.Size != TombstoneFileSize { t.Fatal("key", i, "should have been deleted needle value", v) } } else if i%2 == 0 { @@ -81,7 +81,7 @@ func TestCompactMap(t *testing.T) { for i := uint32(10 * batch); i < 100*batch; i++ { v, ok := m.Get(NeedleId(i)) if i%37 == 0 { - if ok && v.Size > 0 { + if ok && v.Size != TombstoneFileSize { t.Fatal("key", i, "should have been deleted needle value", v) } } else if i%2 == 0 { From 834f414af971a7cdee57e97f7bfcd5174f44757c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 26 Jan 2019 00:15:42 -0600 Subject: [PATCH 004/450] add a timeout --- weed/server/raft_server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 68042da54..ca1792ac6 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -45,6 +45,7 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin transporter := raft.NewHTTPTransporter("/cluster", time.Second) transporter.Transport.MaxIdleConnsPerHost = 1024 transporter.Transport.IdleConnTimeout = time.Second + transporter.Transport.ResponseHeaderTimeout = time.Second glog.V(0).Infof("Starting RaftServer with %v", httpAddr) // Clear old cluster configurations if peers are changed From 
adcfe660342aae71a8a0faeec2099004f558982a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 26 Jan 2019 00:15:50 -0600 Subject: [PATCH 005/450] 1.24 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 9ddf07261..0f0efad86 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,5 @@ package util const ( - VERSION = "1.23" + VERSION = "1.24" ) From 40c8725ffa02767344184fe952f7799fe4250ef9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 28 Jan 2019 10:35:28 -0800 Subject: [PATCH 006/450] use the first entry to bootstrap master cluster fix https://github.com/chrislusf/seaweedfs/issues/851 --- weed/server/raft_server.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index ca1792ac6..c332da38e 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -3,7 +3,6 @@ package weed_server import ( "encoding/json" "io/ioutil" - "math/rand" "os" "path" "reflect" @@ -71,8 +70,8 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin for _, peer := range s.peers { s.raftServer.AddPeer(peer, "http://"+peer) } - time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond) - if s.raftServer.IsLogEmpty() { + + if s.raftServer.IsLogEmpty() && isTheFirstOne(httpAddr, s.peers) { // Initialize the server by joining itself. glog.V(0).Infoln("Initializing new cluster") @@ -129,3 +128,11 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, return oldPeers, !reflect.DeepEqual(peers, oldPeers) } + +func isTheFirstOne(self string, peers []string) bool { + sort.Strings(peers) + if len(peers)<=0{ + return true + } + return self == peers[0] +} From 7a493bbefa8aafe08ce9a7035c6fd0cf2e206e6c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 28 Jan 2019 10:36:16 -0800 Subject: [PATCH 007/450] better way to detect a master isLeader --- weed/topology/topology.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/topology/topology.go b/weed/topology/topology.go index 4242bfa05..ff23be1ff 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -50,6 +50,9 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls } func (t *Topology) IsLeader() bool { + if t.RaftServer!=nil { + return t.RaftServer.State() == raft.Leader + } if leader, e := t.Leader(); e == nil { return leader == t.RaftServer.Name() } From 221105eea3cb2cfb587870df0fe0e62c640c4b99 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 28 Jan 2019 11:46:46 -0800 Subject: [PATCH 008/450] Revert "use the first entry to bootstrap master cluster" This reverts commit 40c8725ffa02767344184fe952f7799fe4250ef9. 
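Note: patches 006 through 013 all chase the same problem from https://github.com/chrislusf/seaweedfs/issues/851 — when several masters start at the same time with empty raft logs, each one can initialize its own cluster. The original code only staggered startup with a random sleep; patch 006 replaced the sleep with a deterministic rule (only the lexicographically first peer bootstraps), this patch reverts that experiment, patch 009 tries seeding the sleep from a hash of the server's own address instead (reverted in patch 010), and patch 013 re-applies the deterministic rule. A minimal, self-contained sketch of that rule follows; the function name isBootstrapNode and the sample addresses are illustrative, not taken from the original code.

package main

import (
	"fmt"
	"sort"
)

// isBootstrapNode reports whether this node should self-initialize an empty
// raft cluster. Every master sorts the same peer list, so all of them agree
// on a single bootstrap node and the "everyone initializes itself" race is
// avoided.
func isBootstrapNode(self string, peers []string) bool {
	if len(peers) == 0 {
		// A standalone master has nothing to coordinate with.
		return true
	}
	sorted := append([]string(nil), peers...)
	sort.Strings(sorted)
	return self == sorted[0]
}

func main() {
	peers := []string{"master2:9333", "master1:9333", "master3:9333"}
	fmt.Println(isBootstrapNode("master1:9333", peers)) // true
	fmt.Println(isBootstrapNode("master2:9333", peers)) // false
}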
--- weed/server/raft_server.go | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index c332da38e..ca1792ac6 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -3,6 +3,7 @@ package weed_server import ( "encoding/json" "io/ioutil" + "math/rand" "os" "path" "reflect" @@ -70,8 +71,8 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin for _, peer := range s.peers { s.raftServer.AddPeer(peer, "http://"+peer) } - - if s.raftServer.IsLogEmpty() && isTheFirstOne(httpAddr, s.peers) { + time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond) + if s.raftServer.IsLogEmpty() { // Initialize the server by joining itself. glog.V(0).Infoln("Initializing new cluster") @@ -128,11 +129,3 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, return oldPeers, !reflect.DeepEqual(peers, oldPeers) } - -func isTheFirstOne(self string, peers []string) bool { - sort.Strings(peers) - if len(peers)<=0{ - return true - } - return self == peers[0] -} From 6230eb28a61e289eeb2eb7ef579c7d1716cbc016 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 28 Jan 2019 11:55:33 -0800 Subject: [PATCH 009/450] randomize based on self address fix #851 --- weed/server/raft_server.go | 2 ++ weed/util/randomizer.go | 9 +++++++++ 2 files changed, 11 insertions(+) create mode 100644 weed/util/randomizer.go diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index ca1792ac6..17b1f51b2 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -14,6 +14,7 @@ import ( "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" ) @@ -71,6 +72,7 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin for _, peer := range s.peers { s.raftServer.AddPeer(peer, "http://"+peer) } + rand.Seed(util.HashBytesToInt64([]byte(httpAddr))) time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond) if s.raftServer.IsLogEmpty() { // Initialize the server by joining itself. diff --git a/weed/util/randomizer.go b/weed/util/randomizer.go new file mode 100644 index 000000000..e5ccd3fb3 --- /dev/null +++ b/weed/util/randomizer.go @@ -0,0 +1,9 @@ +package util + +import "hash/fnv" + +func HashBytesToInt64(x []byte) int64 { + hash := fnv.New64() + hash.Write(x) + return int64(hash.Sum64()) +} From 133450759586e39d00182aafd86b8491a1c7483b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 28 Jan 2019 12:12:51 -0800 Subject: [PATCH 010/450] Revert "randomize based on self address" This reverts commit 6230eb28a61e289eeb2eb7ef579c7d1716cbc016. 
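Note: the change being reverted here (patch 009) kept the random sleep but seeded it from an FNV-64 hash of the master's own address, so that each node waits a different, repeatable amount of time before deciding to bootstrap an empty cluster. A staggered start narrows the race window but cannot rule out two empty masters initializing independently, which is presumably why the series settles on the deterministic first-peer rule again in patch 013. A rough sketch of the seeded-delay idea, assuming plain "host:port" address strings (the helper name startupDelay is illustrative; patch 009 itself seeds the global math/rand source via util.HashBytesToInt64):

package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
	"time"
)

// startupDelay derives the pre-bootstrap wait from the node's own address:
// the same address always yields the same delay, but different masters get
// different delays.
func startupDelay(httpAddr string) time.Duration {
	h := fnv.New64()
	h.Write([]byte(httpAddr))
	r := rand.New(rand.NewSource(int64(h.Sum64())))
	// Mirrors the original 1000ms + rand(3000ms) window.
	return time.Duration(1000+r.Int31n(3000)) * time.Millisecond
}

func main() {
	for _, addr := range []string{"master1:9333", "master2:9333", "master3:9333"} {
		fmt.Println(addr, startupDelay(addr))
	}
}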
--- weed/server/raft_server.go | 2 -- weed/util/randomizer.go | 9 --------- 2 files changed, 11 deletions(-) delete mode 100644 weed/util/randomizer.go diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 17b1f51b2..ca1792ac6 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -14,7 +14,6 @@ import ( "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" ) @@ -72,7 +71,6 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin for _, peer := range s.peers { s.raftServer.AddPeer(peer, "http://"+peer) } - rand.Seed(util.HashBytesToInt64([]byte(httpAddr))) time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond) if s.raftServer.IsLogEmpty() { // Initialize the server by joining itself. diff --git a/weed/util/randomizer.go b/weed/util/randomizer.go deleted file mode 100644 index e5ccd3fb3..000000000 --- a/weed/util/randomizer.go +++ /dev/null @@ -1,9 +0,0 @@ -package util - -import "hash/fnv" - -func HashBytesToInt64(x []byte) int64 { - hash := fnv.New64() - hash.Write(x) - return int64(hash.Sum64()) -} From aa5ccff6d20abcfa246ed11c28f5cb69ab71672f Mon Sep 17 00:00:00 2001 From: Sergey Date: Wed, 6 Feb 2019 18:59:15 +0500 Subject: [PATCH 011/450] fixing of typos --- weed/command/backup.go | 2 +- weed/command/upload.go | 2 +- weed/operation/compress.go | 2 +- weed/security/guard.go | 2 +- weed/topology/topology_vacuum.go | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/weed/command/backup.go b/weed/command/backup.go index 072aea75b..0641f2e5d 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -41,7 +41,7 @@ var cmdBackup = &Command{ But it's tricky to incrementally copy the differences. The complexity comes when there are multiple addition, deletion and compaction. - This tool will handle them correctly and efficiently, avoiding unnecessary data transporation. + This tool will handle them correctly and efficiently, avoiding unnecessary data transportation. `, } diff --git a/weed/command/upload.go b/weed/command/upload.go index f664c0e3a..244caaa4c 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -53,7 +53,7 @@ var cmdUpload = &Command{ All files under the folder and subfolders will be uploaded, each with its own file key. Optional parameter "-include" allows you to specify the file name patterns. - If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separatedly. + If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separately. The list of file ids of those chunks would be stored in an additional chunk, and this additional chunk's file id would be returned. 
`, diff --git a/weed/operation/compress.go b/weed/operation/compress.go index 65979d529..fedc877dd 100644 --- a/weed/operation/compress.go +++ b/weed/operation/compress.go @@ -30,7 +30,7 @@ func IsGzippable(ext, mtype string, data []byte) bool { return false } - // by file name extention + // by file name extension switch ext { case ".zip", ".rar", ".gz", ".bz2", ".xz": return false diff --git a/weed/security/guard.go b/weed/security/guard.go index dea3b12f2..fd9c8e0b3 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -158,5 +158,5 @@ func (g *Guard) checkJwt(w http.ResponseWriter, r *http.Request) error { } glog.V(1).Infof("No permission from %s", r.RemoteAddr) - return fmt.Errorf("No write permisson from %s", r.RemoteAddr) + return fmt.Errorf("No write permission from %s", r.RemoteAddr) } diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index d6b09314b..48a75ba9d 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -82,7 +82,7 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool { isCommitSuccess := true for _, dn := range locationlist.list { - glog.V(0).Infoln("Start Commiting vacuum", vid, "on", dn.Url()) + glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ VolumdId: uint32(vid), @@ -93,7 +93,7 @@ func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlis glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err) isCommitSuccess = false } else { - glog.V(0).Infof("Complete Commiting vacuum %d on %s", vid, dn.Url()) + glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url()) } if isCommitSuccess { vl.SetVolumeAvailable(dn, vid) From 744abc2690a551f1e76e4e12690754b322f383de Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 9 Feb 2019 12:51:14 -0800 Subject: [PATCH 012/450] log raft state changes --- weed/server/master_server.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/server/master_server.go b/weed/server/master_server.go index f22925e56..492bb76e9 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -98,6 +98,9 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.") } }) + ms.Topo.RaftServer.AddEventListener(raft.StateChangeEventType, func(e raft.Event) { + glog.V(0).Infof("state change: %+v", e) + }) if ms.Topo.IsLeader() { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!") } else { From 8afc63248449ee681dc964e07ab687a0f4cb8a9b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 9 Feb 2019 12:52:09 -0800 Subject: [PATCH 013/450] raft: use the first master to bootstrap the election --- weed/server/raft_server.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index ca1792ac6..c332da38e 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -3,7 +3,6 @@ package weed_server import ( "encoding/json" "io/ioutil" - "math/rand" "os" "path" "reflect" @@ -71,8 +70,8 @@ func NewRaftServer(r 
*mux.Router, peers []string, httpAddr string, dataDir strin for _, peer := range s.peers { s.raftServer.AddPeer(peer, "http://"+peer) } - time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond) - if s.raftServer.IsLogEmpty() { + + if s.raftServer.IsLogEmpty() && isTheFirstOne(httpAddr, s.peers) { // Initialize the server by joining itself. glog.V(0).Infoln("Initializing new cluster") @@ -129,3 +128,11 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, return oldPeers, !reflect.DeepEqual(peers, oldPeers) } + +func isTheFirstOne(self string, peers []string) bool { + sort.Strings(peers) + if len(peers)<=0{ + return true + } + return self == peers[0] +} From 501bd72b1c9a88cadb5182cf9c13c2d796cf775f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 9 Feb 2019 21:07:12 -0800 Subject: [PATCH 014/450] wip: add security.toml file --- weed/command/filer.go | 2 +- weed/command/filer_replication.go | 2 +- weed/command/master.go | 7 +++++-- weed/command/scaffold.go | 12 +++++++++++- 4 files changed, 18 insertions(+), 5 deletions(-) diff --git a/weed/command/filer.go b/weed/command/filer.go index 0c1950f96..f5fa6d50f 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -70,7 +70,7 @@ var cmdFiler = &Command{ The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. - The example filer.toml configuration file can be generated by "weed scaffold filer" + The example filer.toml configuration file can be generated by "weed scaffold -config=filer" `, } diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index 3384e4023..c24f63bf0 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -28,7 +28,7 @@ var cmdFilerReplicate = &Command{ filer.replicate listens on filer notifications. If any file is updated, it will fetch the updated content, and write to the other destination. - Run "weed scaffold -config replication" to generate a replication.toml file and customize the parameters. + Run "weed scaffold -config=replication" to generate a replication.toml file and customize the parameters. `, } diff --git a/weed/command/master.go b/weed/command/master.go index bd2267b9e..53c72852c 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -23,8 +23,11 @@ func init() { var cmdMaster = &Command{ UsageLine: "master -port=9333", Short: "start a master server", - Long: `start a master server to provide volume=>location mapping service - and sequence number of file ids + Long: `start a master server to provide volume=>location mapping service and sequence number of file ids + + The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. 
+ + The example security.toml configuration file can be generated by "weed scaffold -config=security" `, } diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index ec0723859..40e7437d2 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -19,7 +19,7 @@ var cmdScaffold = &Command{ var ( outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory") - config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication] the configuration file to generate") + config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security] the configuration file to generate") ) func runScaffold(cmd *Command, args []string) bool { @@ -32,6 +32,8 @@ func runScaffold(cmd *Command, args []string) bool { content = NOTIFICATION_TOML_EXAMPLE case "replication": content = REPLICATION_TOML_EXAMPLE + case "security": + content = SECURITY_TOML_EXAMPLE } if content == "" { println("need a valid -config option") @@ -239,5 +241,13 @@ b2_master_application_key = "" bucket = "mybucket" # an existing bucket directory = "/" # destination directory +` + + SECURITY_TOML_EXAMPLE = ` +# this file is read by master, volume server, and filer + +[jwt] +signing_key = "" + ` ) From 4ff4a147b258bb7787e492a74254f3993bb69d1a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 9 Feb 2019 21:56:32 -0800 Subject: [PATCH 015/450] cleanup security.Secret --- weed/command/benchmark.go | 9 +- weed/command/filer.go | 3 - weed/command/filer_copy.go | 6 - weed/command/master.go | 3 +- weed/command/server.go | 4 +- weed/command/upload.go | 8 +- weed/operation/data_struts.go | 1 - weed/operation/submit.go | 11 +- weed/pb/master.proto | 1 - weed/pb/master_pb/master.pb.go | 142 +++++++++----------- weed/security/guard.go | 4 +- weed/security/jwt.go | 18 +-- weed/server/filer_server.go | 3 +- weed/server/master_grpc_server.go | 1 - weed/server/master_server.go | 3 +- weed/server/volume_grpc_client_to_master.go | 4 - 16 files changed, 90 insertions(+), 131 deletions(-) diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 60fd88ccd..be76a3e2e 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -17,7 +17,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" ) @@ -35,7 +34,6 @@ type BenchmarkOptions struct { collection *string cpuprofile *string maxCpu *int - secretKey *string } var ( @@ -59,7 +57,6 @@ func init() { b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 
0 means all available CPUs") - b.secretKey = cmdBenchmark.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") sharedBytes = make([]byte, 1024) } @@ -188,7 +185,6 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { defer wait.Done() delayedDeleteChan := make(chan *delayedFile, 100) var waitForDeletions sync.WaitGroup - secret := security.Secret(*b.secretKey) for i := 0; i < 7; i++ { waitForDeletions.Add(1) @@ -198,8 +194,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { if df.enterTime.After(time.Now()) { time.Sleep(df.enterTime.Sub(time.Now())) } - if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid, - security.GenJwt(secret, df.fp.Fid)); e == nil { + if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid, ""); e == nil { s.completed++ } else { s.failed++ @@ -224,7 +219,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { } if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection - if _, err := fp.Upload(0, masterClient.GetMaster(), secret); err == nil { + if _, err := fp.Upload(0, masterClient.GetMaster(), ""); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} diff --git a/weed/command/filer.go b/weed/command/filer.go index f5fa6d50f..a07a67471 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -28,7 +28,6 @@ type FilerOptions struct { redirectOnRead *bool disableDirListing *bool maxMB *int - secretKey *string dirListingLimit *int dataCenter *string enableNotification *bool @@ -49,7 +48,6 @@ func init() { f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") - f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") } @@ -103,7 +101,6 @@ func (fo *FilerOptions) startFiler() { RedirectOnRead: *fo.redirectOnRead, DisableDirListing: *fo.disableDirListing, MaxMB: *fo.maxMB, - SecretKey: *fo.secretKey, DirListingLimit: *fo.dirListingLimit, DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 3638bcb27..af121ca1d 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -11,7 +11,6 @@ import ( "context" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "io" "net/http" @@ -31,9 +30,6 @@ type CopyOptions struct { collection *string ttl *string maxMB *int - secretKey *string - - secret security.Secret } func init() { @@ -46,7 +42,6 @@ func init() { copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") copy.maxMB = cmdCopy.Flag.Int("maxMB", 0, "split files larger than the limit") copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000") - copy.secretKey = 
cmdCopy.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") } var cmdCopy = &Command{ @@ -66,7 +61,6 @@ var cmdCopy = &Command{ } func runCopy(cmd *Command, args []string) bool { - copy.secret = security.Secret(*copy.secretKey) if len(args) <= 1 { return false } diff --git a/weed/command/master.go b/weed/command/master.go index 53c72852c..6f1373aa2 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -47,7 +47,6 @@ var ( mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") - masterSecureKey = cmdMaster.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file") @@ -75,7 +74,7 @@ func runMaster(cmd *Command, args []string) bool { ms := weed_server.NewMasterServer(r, *mport, *metaFolder, *volumeSizeLimitMB, *volumePreallocate, *mpulse, *defaultReplicaPlacement, *garbageThreshold, - masterWhiteList, *masterSecureKey, + masterWhiteList, ) listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport) diff --git a/weed/command/server.go b/weed/command/server.go index ba5305a97..2dd506772 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -58,7 +58,6 @@ var ( serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. 
No limit if empty.") serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") - serverSecureKey = cmdServer.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") masterGrpcPort = cmdServer.Flag.Int("master.port.grpc", 0, "master grpc server listen port, default to http port + 10000") @@ -96,7 +95,6 @@ func init() { } func runServer(cmd *Command, args []string) bool { - filerOptions.secretKey = serverSecureKey if *serverOptions.cpuprofile != "" { f, err := os.Create(*serverOptions.cpuprofile) if err != nil { @@ -170,7 +168,7 @@ func runServer(cmd *Command, args []string) bool { ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder, *masterVolumeSizeLimitMB, *masterVolumePreallocate, *pulseSeconds, *masterDefaultReplicaPlacement, *serverGarbageThreshold, - serverWhiteList, *serverSecureKey, + serverWhiteList, ) glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterPort) diff --git a/weed/command/upload.go b/weed/command/upload.go index 244caaa4c..df2cb9892 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -7,7 +7,6 @@ import ( "path/filepath" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/security" ) var ( @@ -23,7 +22,6 @@ type UploadOptions struct { dataCenter *string ttl *string maxMB *int - secretKey *string } func init() { @@ -37,7 +35,6 @@ func init() { upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit") - upload.secretKey = cmdUpload.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") } var cmdUpload = &Command{ @@ -60,7 +57,6 @@ var cmdUpload = &Command{ } func runUpload(cmd *Command, args []string) bool { - secret := security.Secret(*upload.secretKey) if len(args) == 0 { if *upload.dir == "" { return false @@ -79,7 +75,7 @@ func runUpload(cmd *Command, args []string) bool { } results, e := operation.SubmitFiles(*upload.master, parts, *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB, secret) + *upload.ttl, *upload.maxMB) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { @@ -98,7 +94,7 @@ func runUpload(cmd *Command, args []string) bool { } results, _ := operation.SubmitFiles(*upload.master, parts, *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB, secret) + *upload.ttl, *upload.maxMB) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) } diff --git a/weed/operation/data_struts.go b/weed/operation/data_struts.go index bfc53aa50..4980f9913 100644 --- a/weed/operation/data_struts.go +++ b/weed/operation/data_struts.go @@ -2,6 +2,5 @@ package operation type JoinResult struct { VolumeSizeLimit uint64 `json:"VolumeSizeLimit,omitempty"` - SecretKey string `json:"secretKey,omitempty"` Error string `json:"error,omitempty"` } diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 7a1a3085e..66a7a5f36 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -37,9 +37,7 @@ type SubmitResult struct { } func SubmitFiles(master string, files []FilePart, - 
replication string, collection string, dataCenter string, ttl string, maxMB int, - secret security.Secret, -) ([]SubmitResult, error) { + replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { results[index].FileName = file.FileName @@ -67,7 +65,7 @@ func SubmitFiles(master string, files []FilePart, file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, secret) + results[index].Size, err = file.Upload(maxMB, master, "") if err != nil { results[index].Error = err.Error() } @@ -110,8 +108,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) { return ret, nil } -func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (retSize uint32, err error) { - jwt := security.GenJwt(secret, fi.Fid) +func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) @@ -201,7 +198,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret } func upload_one_chunk(filename string, reader io.Reader, master, - fileUrl string, jwt security.EncodedJwt, +fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") uploadResult, uploadError := Upload(fileUrl, filename, reader, false, diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 544160c06..7aac4c392 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -36,7 +36,6 @@ message Heartbeat { message HeartbeatResponse { uint64 volumeSizeLimit = 1; - string secretKey = 2; string leader = 3; } diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 894f08471..124a4d263 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -145,7 +145,6 @@ func (m *Heartbeat) GetDeletedVids() []uint32 { type HeartbeatResponse struct { VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volumeSizeLimit" json:"volumeSizeLimit,omitempty"` - SecretKey string `protobuf:"bytes,2,opt,name=secretKey" json:"secretKey,omitempty"` Leader string `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` } @@ -161,13 +160,6 @@ func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 { return 0 } -func (m *HeartbeatResponse) GetSecretKey() string { - if m != nil { - return m.SecretKey - } - return "" -} - func (m *HeartbeatResponse) GetLeader() string { if m != nil { return m.Leader @@ -966,71 +958,71 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1055 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x4b, 0x6f, 0xe4, 0x44, - 0x10, 0x5e, 0x7b, 0x9e, 0xae, 0xd9, 0xc9, 0x4e, 0x3a, 0x11, 0xf2, 0xce, 0xbe, 0x06, 0x73, 0x19, - 0x04, 0x8a, 0x96, 0x70, 0x44, 0x08, 0xb1, 0xd1, 0x22, 0xa2, 0x04, 0x36, 0x38, 0xb0, 0x07, 0x2e, - 0xa6, 0x63, 0x57, 0xa2, 0x56, 0xfc, 0xa2, 0xbb, 0x27, 0x99, 0xd9, 0x0b, 0x47, 0xfe, 0x15, 0x17, - 0xb8, 0xf1, 0x53, 0xb8, 0xf1, 0x0b, 0x50, 0x3f, 0xec, 0xf1, 0x38, 0x09, 0x91, 0x90, 0xb8, 0xb5, - 0xbf, 0xae, 0xee, 0xaa, 0xfe, 0xbe, 0x7a, 0x18, 0x1e, 0x66, 0x54, 0x48, 0xe4, 0x7b, 0x25, 0x2f, - 0x64, 0x41, 0x3c, 0xf3, 0x15, 0x95, 
0x67, 0xc1, 0x5f, 0x2e, 0x78, 0x5f, 0x23, 0xe5, 0xf2, 0x0c, - 0xa9, 0x24, 0x5b, 0xe0, 0xb2, 0xd2, 0x77, 0x66, 0xce, 0xdc, 0x0b, 0x5d, 0x56, 0x12, 0x02, 0xdd, - 0xb2, 0xe0, 0xd2, 0x77, 0x67, 0xce, 0x7c, 0x1c, 0xea, 0x35, 0x79, 0x06, 0x50, 0x2e, 0xce, 0x52, - 0x16, 0x47, 0x0b, 0x9e, 0xfa, 0x1d, 0x6d, 0xeb, 0x19, 0xe4, 0x07, 0x9e, 0x92, 0x39, 0x4c, 0x32, - 0xba, 0x8c, 0xae, 0x8a, 0x74, 0x91, 0x61, 0x14, 0x17, 0x8b, 0x5c, 0xfa, 0x5d, 0x7d, 0x7c, 0x2b, - 0xa3, 0xcb, 0xb7, 0x1a, 0x3e, 0x50, 0x28, 0x99, 0xa9, 0xa8, 0x96, 0xd1, 0x39, 0x4b, 0x31, 0xba, - 0xc4, 0x95, 0xdf, 0x9b, 0x39, 0xf3, 0x6e, 0x08, 0x19, 0x5d, 0x7e, 0xc5, 0x52, 0x3c, 0xc2, 0x15, - 0x79, 0x01, 0xa3, 0x84, 0x4a, 0x1a, 0xc5, 0x98, 0x4b, 0xe4, 0x7e, 0x5f, 0xfb, 0x02, 0x05, 0x1d, - 0x68, 0x44, 0xc5, 0xc7, 0x69, 0x7c, 0xe9, 0x0f, 0xf4, 0x8e, 0x5e, 0xab, 0xf8, 0x68, 0x92, 0xb1, - 0x3c, 0xd2, 0x91, 0x0f, 0xb5, 0x6b, 0x4f, 0x23, 0x27, 0x2a, 0xfc, 0xcf, 0x61, 0x60, 0x62, 0x13, - 0xbe, 0x37, 0xeb, 0xcc, 0x47, 0xfb, 0x1f, 0xec, 0xd5, 0x6c, 0xec, 0x99, 0xf0, 0x0e, 0xf3, 0xf3, - 0x82, 0x67, 0x54, 0xb2, 0x22, 0xff, 0x06, 0x85, 0xa0, 0x17, 0x18, 0x56, 0x67, 0xc8, 0x63, 0x18, - 0xe6, 0x78, 0x1d, 0x5d, 0xb1, 0x44, 0xf8, 0x30, 0xeb, 0xcc, 0xc7, 0xe1, 0x20, 0xc7, 0xeb, 0xb7, - 0x2c, 0x11, 0xe4, 0x7d, 0x78, 0x98, 0x60, 0x8a, 0x12, 0x13, 0xb3, 0x3d, 0xd2, 0xdb, 0x23, 0x8b, - 0x29, 0x93, 0x40, 0xc0, 0x76, 0x4d, 0x76, 0x88, 0xa2, 0x2c, 0x72, 0x81, 0x64, 0x0e, 0x8f, 0xcc, - 0xed, 0xa7, 0xec, 0x1d, 0x1e, 0xb3, 0x8c, 0x49, 0xad, 0x40, 0x37, 0x6c, 0xc3, 0xe4, 0x29, 0x78, - 0x02, 0x63, 0x8e, 0xf2, 0x08, 0x57, 0x5a, 0x13, 0x2f, 0x5c, 0x03, 0xe4, 0x3d, 0xe8, 0xa7, 0x48, - 0x13, 0xe4, 0x56, 0x14, 0xfb, 0x15, 0xfc, 0xe1, 0x82, 0x7f, 0xd7, 0xc3, 0xb4, 0xe2, 0x89, 0xf6, - 0x37, 0x0e, 0x5d, 0x96, 0x28, 0x46, 0x05, 0x7b, 0x87, 0xfa, 0xf6, 0x6e, 0xa8, 0xd7, 0xe4, 0x39, - 0x40, 0x5c, 0xa4, 0x29, 0xc6, 0xea, 0xa0, 0xbd, 0xbc, 0x81, 0x28, 0xc6, 0xb5, 0x88, 0x6b, 0xb1, - 0xbb, 0xa1, 0xa7, 0x10, 0xa3, 0x73, 0xcd, 0x8b, 0x35, 0x30, 0x3a, 0x5b, 0x5e, 0x8c, 0xc9, 0xc7, - 0x40, 0x2a, 0xea, 0xce, 0x56, 0xb5, 0x61, 0x5f, 0x1b, 0x4e, 0xec, 0xce, 0xab, 0x55, 0x65, 0xfd, - 0x04, 0x3c, 0x8e, 0x34, 0x89, 0x8a, 0x3c, 0x5d, 0x69, 0xe9, 0x87, 0xe1, 0x50, 0x01, 0x6f, 0xf2, - 0x74, 0x45, 0x3e, 0x82, 0x6d, 0x8e, 0x65, 0xca, 0x62, 0x1a, 0x95, 0x29, 0x8d, 0x31, 0xc3, 0xbc, - 0xca, 0x82, 0x89, 0xdd, 0x38, 0xa9, 0x70, 0xe2, 0xc3, 0xe0, 0x0a, 0xb9, 0x50, 0xcf, 0xf2, 0xb4, - 0x49, 0xf5, 0x49, 0x26, 0xd0, 0x91, 0x32, 0xf5, 0x41, 0xa3, 0x6a, 0x19, 0x0c, 0xa0, 0xf7, 0x3a, - 0x2b, 0xe5, 0x2a, 0xf8, 0xcd, 0x81, 0x47, 0xa7, 0x8b, 0x12, 0xf9, 0xab, 0xb4, 0x88, 0x2f, 0x5f, - 0x2f, 0x25, 0xa7, 0xe4, 0x0d, 0x6c, 0x21, 0xa7, 0x62, 0xc1, 0x55, 0xec, 0x09, 0xcb, 0x2f, 0x34, - 0xa5, 0xa3, 0xfd, 0x79, 0x23, 0xb9, 0x5a, 0x67, 0xf6, 0x5e, 0x9b, 0x03, 0x07, 0xda, 0x3e, 0x1c, - 0x63, 0xf3, 0x73, 0xfa, 0x23, 0x8c, 0x37, 0xf6, 0x95, 0x30, 0x2a, 0xf1, 0xad, 0x54, 0x7a, 0xad, - 0x14, 0x2f, 0x29, 0x67, 0x72, 0x65, 0x0b, 0xd4, 0x7e, 0x29, 0x41, 0x6c, 0xfd, 0xa9, 0x3c, 0xec, - 0xe8, 0x3c, 0xf4, 0x0c, 0x72, 0x98, 0x88, 0xe0, 0x43, 0xd8, 0x39, 0x48, 0x19, 0xe6, 0xf2, 0x98, - 0x09, 0x89, 0x79, 0x88, 0x3f, 0x2f, 0x50, 0x48, 0xe5, 0x21, 0xa7, 0x19, 0xda, 0xf2, 0xd7, 0xeb, - 0xe0, 0x17, 0xd8, 0x32, 0xa9, 0x73, 0x5c, 0xc4, 0x3a, 0x6f, 0x14, 0x31, 0xaa, 0xee, 0x8d, 0x91, - 0x5a, 0xb6, 0x1a, 0x82, 0xdb, 0x6e, 0x08, 0xcd, 0x8a, 0xe9, 0xfc, 0x7b, 0xc5, 0x74, 0x6f, 0x56, - 0xcc, 0xf7, 0xb0, 0x73, 0x5c, 0x14, 0x97, 0x8b, 0xd2, 0x84, 0x51, 0xc5, 0xba, 0xf9, 0x42, 0x67, - 0xd6, 0x51, 0x3e, 0xeb, 0x17, 0xb6, 0x32, 0xd6, 0x6d, 0x67, 
0x6c, 0xf0, 0xb7, 0x03, 0xbb, 0x9b, - 0xd7, 0xda, 0x5a, 0xfc, 0x09, 0x76, 0xea, 0x7b, 0xa3, 0xd4, 0xbe, 0xd9, 0x38, 0x18, 0xed, 0xbf, - 0x6c, 0x88, 0x79, 0xdb, 0xe9, 0xaa, 0x7d, 0x24, 0x15, 0x59, 0xe1, 0xf6, 0x55, 0x0b, 0x11, 0xd3, - 0x25, 0x4c, 0xda, 0x66, 0x2a, 0xa1, 0x6b, 0xaf, 0x96, 0xd9, 0x61, 0x75, 0x92, 0x7c, 0x02, 0xde, - 0x3a, 0x10, 0x57, 0x07, 0xb2, 0xb3, 0x11, 0x88, 0xf5, 0xb5, 0xb6, 0x22, 0xbb, 0xd0, 0x43, 0xce, - 0x8b, 0xaa, 0x11, 0x98, 0x8f, 0xe0, 0x33, 0x18, 0xfe, 0x67, 0x15, 0x83, 0x3f, 0x1d, 0x18, 0x7f, - 0x29, 0x04, 0xbb, 0xa8, 0xd3, 0x65, 0x17, 0x7a, 0xa6, 0x4c, 0x4d, 0xb3, 0x32, 0x1f, 0x64, 0x06, - 0x23, 0x5b, 0x65, 0x0d, 0xea, 0x9b, 0xd0, 0xbd, 0xdd, 0xc4, 0x56, 0x5e, 0xd7, 0x84, 0x26, 0x65, - 0xda, 0x1e, 0x03, 0xbd, 0x3b, 0xc7, 0x40, 0xbf, 0x31, 0x06, 0x9e, 0x80, 0xa7, 0x0f, 0xe5, 0x45, - 0x82, 0x76, 0x3e, 0x0c, 0x15, 0xf0, 0x6d, 0x91, 0xe8, 0xb4, 0xae, 0x1e, 0x63, 0x85, 0x9f, 0x40, - 0xe7, 0xbc, 0x26, 0x5f, 0x2d, 0x2b, 0x8a, 0xdc, 0xbb, 0x28, 0xba, 0x31, 0xf9, 0x6a, 0x42, 0xba, - 0x4d, 0x42, 0x6a, 0x2d, 0x7a, 0x4d, 0x2d, 0x2e, 0x60, 0xfb, 0x54, 0x52, 0xc9, 0x84, 0x64, 0xb1, - 0xa8, 0x18, 0x6d, 0x71, 0xe7, 0xdc, 0xc7, 0x9d, 0x7b, 0x17, 0x77, 0x9d, 0x9a, 0xbb, 0xe0, 0x77, - 0x07, 0x48, 0xd3, 0x93, 0x7d, 0xee, 0xff, 0xe0, 0x4a, 0xd1, 0x23, 0x0b, 0x49, 0xd3, 0x48, 0x0f, - 0x10, 0x3b, 0x06, 0x34, 0xa2, 0x26, 0x98, 0x12, 0x64, 0x21, 0x30, 0x31, 0xbb, 0x66, 0x06, 0x0c, - 0x15, 0xa0, 0x37, 0x37, 0x47, 0x48, 0xbf, 0x35, 0x42, 0xf6, 0x7f, 0xed, 0xc0, 0xe0, 0x14, 0xe9, - 0x35, 0x62, 0x42, 0x0e, 0x61, 0x7c, 0x8a, 0x79, 0xb2, 0xfe, 0x69, 0xd9, 0x6d, 0x54, 0x43, 0x8d, - 0x4e, 0x9f, 0xde, 0x86, 0x56, 0xef, 0x0f, 0x1e, 0xcc, 0x9d, 0x97, 0x0e, 0x39, 0x81, 0xf1, 0x11, - 0x62, 0x79, 0x50, 0xe4, 0x39, 0xc6, 0x12, 0x13, 0xf2, 0xbc, 0x71, 0xe8, 0x96, 0x16, 0x39, 0x7d, - 0x7c, 0xe3, 0x5f, 0xa1, 0xaa, 0x28, 0x7b, 0xe3, 0x77, 0xf0, 0xb0, 0xd9, 0x19, 0x36, 0x2e, 0xbc, - 0xa5, 0x8f, 0x4d, 0x5f, 0xdc, 0xd3, 0x52, 0x82, 0x07, 0xe4, 0x0b, 0xe8, 0x9b, 0x5c, 0x25, 0x7e, - 0xc3, 0x78, 0xa3, 0x16, 0x37, 0xe2, 0xda, 0x4c, 0xec, 0xe0, 0x01, 0x39, 0x02, 0x58, 0x67, 0x00, - 0x69, 0xf2, 0x72, 0x23, 0x05, 0xa7, 0xcf, 0xee, 0xd8, 0xad, 0x2e, 0x3b, 0xeb, 0xeb, 0x3f, 0xc8, - 0x4f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x9f, 0x0a, 0x25, 0x51, 0x0a, 0x00, 0x00, + // 1043 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0x24, 0x35, + 0x10, 0xde, 0xee, 0xf9, 0xed, 0x9a, 0x4c, 0x76, 0xe2, 0x44, 0xa8, 0x77, 0x96, 0xdd, 0x1d, 0x9a, + 0xcb, 0x20, 0x50, 0xb4, 0x84, 0x23, 0x42, 0x88, 0x8d, 0x82, 0x88, 0x12, 0xd8, 0xd0, 0x61, 0xf7, + 0xc0, 0xa5, 0x71, 0xba, 0x2b, 0x91, 0x95, 0xfe, 0xc3, 0xf6, 0x24, 0x33, 0x7b, 0xe1, 0xc8, 0x5b, + 0x71, 0x81, 0x1b, 0x8f, 0xc2, 0x8d, 0x27, 0x40, 0xfe, 0xe9, 0x4e, 0x4f, 0x27, 0x21, 0x12, 0x12, + 0x37, 0xfb, 0x73, 0xd9, 0xae, 0xfa, 0xbe, 0xaa, 0xb2, 0x61, 0x23, 0xa3, 0x42, 0x22, 0xdf, 0x2d, + 0x79, 0x21, 0x0b, 0xe2, 0x99, 0x59, 0x54, 0x9e, 0x05, 0x7f, 0xb9, 0xe0, 0x7d, 0x83, 0x94, 0xcb, + 0x33, 0xa4, 0x92, 0x6c, 0x82, 0xcb, 0x4a, 0xdf, 0x99, 0x39, 0x73, 0x2f, 0x74, 0x59, 0x49, 0x08, + 0x74, 0xcb, 0x82, 0x4b, 0xdf, 0x9d, 0x39, 0xf3, 0x71, 0xa8, 0xc7, 0xe4, 0x19, 0x40, 0xb9, 0x38, + 0x4b, 0x59, 0x1c, 0x2d, 0x78, 0xea, 0x77, 0xb4, 0xad, 0x67, 0x90, 0x37, 0x3c, 0x25, 0x73, 0x98, + 0x64, 0x74, 0x19, 0x5d, 0x15, 0xe9, 0x22, 0xc3, 0x28, 0x2e, 0x16, 0xb9, 0xf4, 0xbb, 0x7a, 0xfb, + 0x66, 0x46, 0x97, 0x6f, 0x35, 0xbc, 0xaf, 0x50, 0x32, 0x53, 0x5e, 0x2d, 0xa3, 0x73, 0x96, 0x62, + 0x74, 0x89, 0x2b, 0xbf, 0x37, 0x73, 0xe6, 
0xdd, 0x10, 0x32, 0xba, 0xfc, 0x9a, 0xa5, 0x78, 0x84, + 0x2b, 0xf2, 0x02, 0x46, 0x09, 0x95, 0x34, 0x8a, 0x31, 0x97, 0xc8, 0xfd, 0xbe, 0xbe, 0x0b, 0x14, + 0xb4, 0xaf, 0x11, 0xe5, 0x1f, 0xa7, 0xf1, 0xa5, 0x3f, 0xd0, 0x2b, 0x7a, 0xac, 0xfc, 0xa3, 0x49, + 0xc6, 0xf2, 0x48, 0x7b, 0x3e, 0xd4, 0x57, 0x7b, 0x1a, 0x39, 0x51, 0xee, 0x7f, 0x01, 0x03, 0xe3, + 0x9b, 0xf0, 0xbd, 0x59, 0x67, 0x3e, 0xda, 0xfb, 0x70, 0xb7, 0x66, 0x63, 0xd7, 0xb8, 0x77, 0x98, + 0x9f, 0x17, 0x3c, 0xa3, 0x92, 0x15, 0xf9, 0xb7, 0x28, 0x04, 0xbd, 0xc0, 0xb0, 0xda, 0x43, 0x9e, + 0xc0, 0x30, 0xc7, 0xeb, 0xe8, 0x8a, 0x25, 0xc2, 0x87, 0x59, 0x67, 0x3e, 0x0e, 0x07, 0x39, 0x5e, + 0xbf, 0x65, 0x89, 0x20, 0x1f, 0xc0, 0x46, 0x82, 0x29, 0x4a, 0x4c, 0xcc, 0xf2, 0x48, 0x2f, 0x8f, + 0x2c, 0xa6, 0x4c, 0x82, 0x37, 0xb0, 0x55, 0x93, 0x1d, 0xa2, 0x28, 0x8b, 0x5c, 0x20, 0x99, 0xc3, + 0x63, 0x73, 0xfa, 0x29, 0x7b, 0x87, 0xc7, 0x2c, 0x63, 0x52, 0x2b, 0xd0, 0x0d, 0xdb, 0x30, 0x79, + 0x0f, 0xfa, 0x29, 0xd2, 0x04, 0xb9, 0xa5, 0xdd, 0xce, 0x82, 0x3f, 0x5c, 0xf0, 0xef, 0x73, 0x5d, + 0x6b, 0x9a, 0xe8, 0x13, 0xc7, 0xa1, 0xcb, 0x12, 0xc5, 0x99, 0x60, 0xef, 0x50, 0x6b, 0xda, 0x0d, + 0xf5, 0x98, 0x3c, 0x07, 0x88, 0x8b, 0x34, 0xc5, 0x58, 0x6d, 0xb4, 0x87, 0x37, 0x10, 0xc5, 0xa9, + 0x96, 0xe9, 0x46, 0xce, 0x6e, 0xe8, 0x29, 0xc4, 0x28, 0x59, 0x47, 0x6e, 0x0d, 0x8c, 0x92, 0x36, + 0x72, 0x63, 0xf2, 0x09, 0x90, 0x8a, 0x9c, 0xb3, 0x55, 0x6d, 0xd8, 0xd7, 0x86, 0x13, 0xbb, 0xf2, + 0x6a, 0x55, 0x59, 0x3f, 0x05, 0x8f, 0x23, 0x4d, 0xa2, 0x22, 0x4f, 0x57, 0x5a, 0xdc, 0x61, 0x38, + 0x54, 0xc0, 0xeb, 0x3c, 0x5d, 0x91, 0x8f, 0x61, 0x8b, 0x63, 0x99, 0xb2, 0x98, 0x46, 0x65, 0x4a, + 0x63, 0xcc, 0x30, 0xaf, 0x74, 0x9e, 0xd8, 0x85, 0x93, 0x0a, 0x27, 0x3e, 0x0c, 0xae, 0x90, 0x0b, + 0x15, 0x96, 0xa7, 0x4d, 0xaa, 0x29, 0x99, 0x40, 0x47, 0xca, 0xd4, 0x07, 0x8d, 0xaa, 0x61, 0x30, + 0x80, 0xde, 0x41, 0x56, 0xca, 0x55, 0xf0, 0x9b, 0x03, 0x8f, 0x4f, 0x17, 0x25, 0xf2, 0x57, 0x69, + 0x11, 0x5f, 0x1e, 0x2c, 0x25, 0xa7, 0xe4, 0x35, 0x6c, 0x22, 0xa7, 0x62, 0xc1, 0x95, 0xef, 0x09, + 0xcb, 0x2f, 0x34, 0xa5, 0xa3, 0xbd, 0x79, 0x23, 0x7d, 0x5a, 0x7b, 0x76, 0x0f, 0xcc, 0x86, 0x7d, + 0x6d, 0x1f, 0x8e, 0xb1, 0x39, 0x9d, 0xfe, 0x08, 0xe3, 0xb5, 0x75, 0x25, 0x8c, 0x4a, 0x6d, 0x2b, + 0x95, 0x1e, 0x2b, 0xc5, 0x4b, 0xca, 0x99, 0x5c, 0xd9, 0x12, 0xb4, 0x33, 0x25, 0x88, 0xad, 0x30, + 0x95, 0x69, 0x1d, 0x9d, 0x69, 0x9e, 0x41, 0x0e, 0x13, 0x11, 0x7c, 0x04, 0xdb, 0xfb, 0x29, 0xc3, + 0x5c, 0x1e, 0x33, 0x21, 0x31, 0x0f, 0xf1, 0xe7, 0x05, 0x0a, 0xa9, 0x6e, 0xc8, 0x69, 0x86, 0xb6, + 0xc0, 0xf5, 0x38, 0xf8, 0x05, 0x36, 0x4d, 0xea, 0x1c, 0x17, 0xb1, 0xce, 0x1b, 0x45, 0x8c, 0xaa, + 0x6c, 0x63, 0xa4, 0x86, 0xad, 0x92, 0x77, 0xdb, 0x25, 0xdf, 0xac, 0x89, 0xce, 0xbf, 0xd7, 0x44, + 0xf7, 0x76, 0x4d, 0xfc, 0x00, 0xdb, 0xc7, 0x45, 0x71, 0xb9, 0x28, 0x8d, 0x1b, 0x95, 0xaf, 0xeb, + 0x11, 0x3a, 0xb3, 0x8e, 0xba, 0xb3, 0x8e, 0xb0, 0x95, 0xb1, 0x6e, 0x3b, 0x63, 0x83, 0xbf, 0x1d, + 0xd8, 0x59, 0x3f, 0xd6, 0x56, 0xdb, 0x4f, 0xb0, 0x5d, 0x9f, 0x1b, 0xa5, 0x36, 0x66, 0x73, 0xc1, + 0x68, 0xef, 0x65, 0x43, 0xcc, 0xbb, 0x76, 0x57, 0x0d, 0x22, 0xa9, 0xc8, 0x0a, 0xb7, 0xae, 0x5a, + 0x88, 0x98, 0x2e, 0x61, 0xd2, 0x36, 0x53, 0x09, 0x5d, 0xdf, 0x6a, 0x99, 0x1d, 0x56, 0x3b, 0xc9, + 0xa7, 0xe0, 0xdd, 0x38, 0xe2, 0x6a, 0x47, 0xb6, 0xd7, 0x1c, 0xb1, 0x77, 0xdd, 0x58, 0x91, 0x1d, + 0xe8, 0x21, 0xe7, 0x45, 0xd5, 0x08, 0xcc, 0x24, 0xf8, 0x1c, 0x86, 0xff, 0x59, 0xc5, 0xe0, 0x4f, + 0x07, 0xc6, 0x5f, 0x09, 0xc1, 0x2e, 0xea, 0x74, 0xd9, 0x81, 0x9e, 0x29, 0x53, 0xd3, 0x8e, 0xcc, + 0x84, 0xcc, 0x60, 0x64, 0xab, 0xac, 0x41, 0x7d, 0x13, 0x7a, 0xb0, 
0x9b, 0xd8, 0xca, 0xeb, 0x1a, + 0xd7, 0xa4, 0x4c, 0xdb, 0x8d, 0xbe, 0x77, 0x6f, 0xa3, 0xef, 0x37, 0x1a, 0xfd, 0x53, 0xf0, 0xf4, + 0xa6, 0xbc, 0x48, 0xd0, 0xbe, 0x00, 0x43, 0x05, 0x7c, 0x57, 0x24, 0x3a, 0xad, 0xab, 0x60, 0xac, + 0xf0, 0x13, 0xe8, 0x9c, 0xd7, 0xe4, 0xab, 0x61, 0x45, 0x91, 0x7b, 0x1f, 0x45, 0xb7, 0xde, 0xb6, + 0x9a, 0x90, 0x6e, 0x93, 0x90, 0x5a, 0x8b, 0x5e, 0x53, 0x8b, 0x0b, 0xd8, 0x3a, 0x95, 0x54, 0x32, + 0x21, 0x59, 0x2c, 0x2a, 0x46, 0x5b, 0xdc, 0x39, 0x0f, 0x71, 0xe7, 0xde, 0xc7, 0x5d, 0xa7, 0xe6, + 0x2e, 0xf8, 0xdd, 0x01, 0xd2, 0xbc, 0xc9, 0x86, 0xfb, 0x3f, 0x5c, 0xa5, 0xe8, 0x91, 0x85, 0xa4, + 0x69, 0xa4, 0x1f, 0x10, 0xfb, 0x0c, 0x68, 0x44, 0xbd, 0x51, 0x4a, 0x90, 0x85, 0xc0, 0xc4, 0xac, + 0x9a, 0x37, 0x60, 0xa8, 0x00, 0xbd, 0xb8, 0xfe, 0x84, 0xf4, 0x5b, 0x4f, 0xc8, 0xde, 0xaf, 0x1d, + 0x18, 0x9c, 0x22, 0xbd, 0x46, 0x4c, 0xc8, 0x21, 0x8c, 0x4f, 0x31, 0x4f, 0x6e, 0xbe, 0x25, 0x3b, + 0x8d, 0x6a, 0xa8, 0xd1, 0xe9, 0xfb, 0x77, 0xa1, 0x55, 0xfc, 0xc1, 0xa3, 0xb9, 0xf3, 0xd2, 0x21, + 0x27, 0x30, 0x3e, 0x42, 0x2c, 0xf7, 0x8b, 0x3c, 0xc7, 0x58, 0x62, 0x42, 0x9e, 0x37, 0x36, 0xdd, + 0xd1, 0x22, 0xa7, 0x4f, 0x6e, 0xfd, 0x06, 0xaa, 0x8a, 0xb2, 0x27, 0x7e, 0x0f, 0x1b, 0xcd, 0xce, + 0xb0, 0x76, 0xe0, 0x1d, 0x7d, 0x6c, 0xfa, 0xe2, 0x81, 0x96, 0x12, 0x3c, 0x22, 0x5f, 0x42, 0xdf, + 0xe4, 0x2a, 0xf1, 0x1b, 0xc6, 0x6b, 0xb5, 0xb8, 0xe6, 0xd7, 0x7a, 0x62, 0x07, 0x8f, 0xc8, 0x11, + 0xc0, 0x4d, 0x06, 0x90, 0x26, 0x2f, 0xb7, 0x52, 0x70, 0xfa, 0xec, 0x9e, 0xd5, 0xea, 0xb0, 0xb3, + 0xbe, 0xfe, 0x23, 0x7e, 0xf6, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x77, 0xb3, 0x32, 0xc7, 0x33, + 0x0a, 0x00, 0x00, } diff --git a/weed/security/guard.go b/weed/security/guard.go index fd9c8e0b3..2ae4ec5a9 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -42,13 +42,13 @@ https://github.com/pkieltyka/jwtauth/blob/master/jwtauth.go */ type Guard struct { whiteList []string - SecretKey Secret + SecretKey SigningKey isActive bool } func NewGuard(whiteList []string, secretKey string) *Guard { - g := &Guard{whiteList: whiteList, SecretKey: Secret(secretKey)} + g := &Guard{whiteList: whiteList, SecretKey: SigningKey(secretKey)} g.isActive = len(g.whiteList) != 0 || len(g.SecretKey) != 0 return g } diff --git a/weed/security/jwt.go b/weed/security/jwt.go index 46b7efaaf..844ffb77b 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -11,10 +11,10 @@ import ( ) type EncodedJwt string -type Secret string +type SigningKey string -func GenJwt(secret Secret, fileId string) EncodedJwt { - if secret == "" { +func GenJwt(signingKey SigningKey, fileId string) EncodedJwt { + if signingKey == "" { return "" } @@ -23,7 +23,7 @@ func GenJwt(secret Secret, fileId string) EncodedJwt { ExpiresAt: time.Now().Add(time.Second * 10).Unix(), Subject: fileId, } - encoded, e := t.SignedString(secret) + encoded, e := t.SignedString(signingKey) if e != nil { glog.V(0).Infof("Failed to sign claims: %v", t.Claims) return "" @@ -55,20 +55,20 @@ func GetJwt(r *http.Request) EncodedJwt { return EncodedJwt(tokenStr) } -func EncodeJwt(secret Secret, claims *jwt.StandardClaims) (EncodedJwt, error) { - if secret == "" { +func EncodeJwt(signingKey SigningKey, claims *jwt.StandardClaims) (EncodedJwt, error) { + if signingKey == "" { return "", nil } t := jwt.New(jwt.GetSigningMethod("HS256")) t.Claims = claims - encoded, e := t.SignedString(secret) + encoded, e := t.SignedString(signingKey) return EncodedJwt(encoded), e } -func DecodeJwt(secret Secret, tokenString EncodedJwt) (token *jwt.Token, err error) { +func 
DecodeJwt(signingKey SigningKey, tokenString EncodedJwt) (token *jwt.Token, err error) { // check exp, nbf return jwt.Parse(string(tokenString), func(token *jwt.Token) (interface{}, error) { - return secret, nil + return signingKey, nil }) } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 9d70e4dac..c3c5072d0 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -28,7 +28,6 @@ type FilerOption struct { RedirectOnRead bool DisableDirListing bool MaxMB int - SecretKey string DirListingLimit int DataCenter string DefaultLevelDbDir string @@ -36,7 +35,7 @@ type FilerOption struct { type FilerServer struct { option *FilerOption - secret security.Secret + secret security.SigningKey filer *filer2.Filer } diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 93dce59d8..043a6ff51 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -67,7 +67,6 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) if err := stream.Send(&master_pb.HeartbeatResponse{ VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024, - SecretKey: string(ms.guard.SecretKey), }); err != nil { return err } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 492bb76e9..19849ace6 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -45,7 +45,6 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, defaultReplicaPlacement string, garbageThreshold float64, whiteList []string, - secureKey string, ) *MasterServer { var preallocateSize int64 @@ -67,7 +66,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, ms.vg = topology.NewDefaultVolumeGrowth() glog.V(0).Infoln("Volume Size Limit is", volumeSizeLimitMB, "MB") - ms.guard = security.NewGuard(whiteList, secureKey) + ms.guard = security.NewGuard(whiteList, signingKey) handleStaticResources2(r) r.HandleFunc("/", ms.uiStatusHandler) diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index bd3ffd7b3..25e9b1677 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -6,7 +6,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "golang.org/x/net/context" ) @@ -73,9 +72,6 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI if in.GetVolumeSizeLimit() != 0 { vs.store.VolumeSizeLimit = in.GetVolumeSizeLimit() } - if in.GetSecretKey() != "" { - vs.guard.SecretKey = security.Secret(in.GetSecretKey()) - } if in.GetLeader() != "" && masterNode != in.GetLeader() { glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode) newLeader = in.GetLeader() From 215cd27b37d504aca255a54283e77c8cff6692ab Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 14 Feb 2019 00:08:20 -0800 Subject: [PATCH 016/450] add authorizing fileId write access need to secure upload/update/delete for benchmark/filer/mount need to add secure grpc --- weed/command/scaffold.go | 10 ++-- weed/security/guard.go | 56 +++------------------ weed/security/jwt.go | 46 +++++++---------- weed/server/master_server.go | 5 ++ weed/server/master_server_handlers.go | 31 ++++++++++-- 
weed/server/volume_server.go | 24 ++++++--- weed/server/volume_server_handlers.go | 31 ++++++++++++ weed/server/volume_server_handlers_write.go | 14 +++++- 8 files changed, 125 insertions(+), 92 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 40e7437d2..22300d3ba 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -10,7 +10,7 @@ func init() { } var cmdScaffold = &Command{ - UsageLine: "scaffold [filer]", + UsageLine: "scaffold -config=[filer|notification|replication|security]", Short: "generate basic configuration files", Long: `Generate filer.toml with all possible configurations for you to customize. @@ -244,10 +244,14 @@ directory = "/" # destination directory ` SECURITY_TOML_EXAMPLE = ` +# Put this file to one of the location, with descending priority +# ./security.toml +# $HOME/.seaweedfs/security.toml +# /etc/seaweedfs/security.toml # this file is read by master, volume server, and filer -[jwt] -signing_key = "" +[jwt.signing] +key = "" ` ) diff --git a/weed/security/guard.go b/weed/security/guard.go index 2ae4ec5a9..84a415253 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -41,21 +41,21 @@ https://github.com/pkieltyka/jwtauth/blob/master/jwtauth.go */ type Guard struct { - whiteList []string - SecretKey SigningKey + whiteList []string + SigningKey SigningKey isActive bool } -func NewGuard(whiteList []string, secretKey string) *Guard { - g := &Guard{whiteList: whiteList, SecretKey: SigningKey(secretKey)} - g.isActive = len(g.whiteList) != 0 || len(g.SecretKey) != 0 +func NewGuard(whiteList []string, signingKey string) *Guard { + g := &Guard{whiteList: whiteList, SigningKey: SigningKey(signingKey)} + g.isActive = len(g.whiteList) != 0 || len(g.SigningKey) != 0 return g } func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { if !g.isActive { - //if no security needed, just skip all checkings + //if no security needed, just skip all checking return f } return func(w http.ResponseWriter, r *http.Request) { @@ -67,20 +67,6 @@ func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w } } -func (g *Guard) Secure(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { - if !g.isActive { - //if no security needed, just skip all checkings - return f - } - return func(w http.ResponseWriter, r *http.Request) { - if err := g.checkJwt(w, r); err != nil { - w.WriteHeader(http.StatusUnauthorized) - return - } - f(w, r) - } -} - func GetActualRemoteHost(r *http.Request) (host string, err error) { host = r.Header.Get("HTTP_X_FORWARDED_FOR") if host == "" { @@ -130,33 +116,3 @@ func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error { glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr) return fmt.Errorf("Not in whitelis: %s", r.RemoteAddr) } - -func (g *Guard) checkJwt(w http.ResponseWriter, r *http.Request) error { - if g.checkWhiteList(w, r) == nil { - return nil - } - - if len(g.SecretKey) == 0 { - return nil - } - - tokenStr := GetJwt(r) - - if tokenStr == "" { - return ErrUnauthorized - } - - // Verify the token - token, err := DecodeJwt(g.SecretKey, tokenStr) - if err != nil { - glog.V(1).Infof("Token verification error from %s: %v", r.RemoteAddr, err) - return ErrUnauthorized - } - if !token.Valid { - glog.V(1).Infof("Token invliad from %s: %v", r.RemoteAddr, tokenStr) - return ErrUnauthorized - } - - glog.V(1).Infof("No permission from %s", r.RemoteAddr) - return 
fmt.Errorf("No write permission from %s", r.RemoteAddr) -} diff --git a/weed/security/jwt.go b/weed/security/jwt.go index 844ffb77b..ba394c3bf 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -1,6 +1,7 @@ package security import ( + "fmt" "net/http" "strings" @@ -11,21 +12,28 @@ import ( ) type EncodedJwt string -type SigningKey string +type SigningKey []byte + +type SeaweedFileIdClaims struct { + Fid string `json:"fid"` + jwt.StandardClaims +} func GenJwt(signingKey SigningKey, fileId string) EncodedJwt { - if signingKey == "" { + if len(signingKey) == 0 { return "" } - t := jwt.New(jwt.GetSigningMethod("HS256")) - t.Claims = &jwt.StandardClaims{ - ExpiresAt: time.Now().Add(time.Second * 10).Unix(), - Subject: fileId, + claims := SeaweedFileIdClaims{ + fileId, + jwt.StandardClaims{ + ExpiresAt: time.Now().Add(time.Second * 10).Unix(), + }, } - encoded, e := t.SignedString(signingKey) + t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + encoded, e := t.SignedString([]byte(signingKey)) if e != nil { - glog.V(0).Infof("Failed to sign claims: %v", t.Claims) + glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e) return "" } return EncodedJwt(encoded) @@ -44,31 +52,15 @@ func GetJwt(r *http.Request) EncodedJwt { } } - // Get token from cookie - if tokenStr == "" { - cookie, err := r.Cookie("jwt") - if err == nil { - tokenStr = cookie.Value - } - } - return EncodedJwt(tokenStr) } -func EncodeJwt(signingKey SigningKey, claims *jwt.StandardClaims) (EncodedJwt, error) { - if signingKey == "" { - return "", nil - } - - t := jwt.New(jwt.GetSigningMethod("HS256")) - t.Claims = claims - encoded, e := t.SignedString(signingKey) - return EncodedJwt(encoded), e -} - func DecodeJwt(signingKey SigningKey, tokenString EncodedJwt) (token *jwt.Token, err error) { // check exp, nbf return jwt.Parse(string(tokenString), func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unknown token method") + } return signingKey, nil }) } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 19849ace6..06c959b92 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -15,6 +15,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" + "github.com/spf13/viper" ) type MasterServer struct { @@ -47,6 +48,10 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, whiteList []string, ) *MasterServer { + LoadConfiguration("security", false) + v := viper.GetViper() + signingKey := v.GetString("jwt.signing.key") + var preallocateSize int64 if preallocate { preallocateSize = int64(volumeSizeLimitMB) * (1 << 20) diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go index a797dddfc..c4149e0cf 100644 --- a/weed/server/master_server_handlers.go +++ b/weed/server/master_server_handlers.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage" ) @@ -40,12 +41,23 @@ func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volume return } -// Takes one volumeId only, can not do batch lookup +// If "fileId" is provided, this returns the fileId location and a JWT to update or delete the file. 
+// If "volumeId" is provided, this only returns the volumeId location func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request) { vid := r.FormValue("volumeId") - commaSep := strings.Index(vid, ",") - if commaSep > 0 { - vid = vid[0:commaSep] + if vid != "" { + // backward compatible + commaSep := strings.Index(vid, ",") + if commaSep > 0 { + vid = vid[0:commaSep] + } + } + fileId := r.FormValue("fileId") + if fileId != "" { + commaSep := strings.Index(fileId, ",") + if commaSep > 0 { + vid = fileId[0:commaSep] + } } vids := []string{vid} collection := r.FormValue("collection") //optional, but can be faster if too many collections @@ -54,6 +66,8 @@ func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request) httpStatus := http.StatusOK if location.Error != "" { httpStatus = http.StatusNotFound + } else { + ms.maybeAddJwtAuthorization(w, fileId) } writeJsonQuiet(w, r, httpStatus, location) } @@ -88,8 +102,17 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) } fid, count, dn, err := ms.Topo.PickForWrite(requestedCount, option) if err == nil { + ms.maybeAddJwtAuthorization(w, fid) writeJsonQuiet(w, r, http.StatusOK, operation.AssignResult{Fid: fid, Url: dn.Url(), PublicUrl: dn.PublicUrl, Count: count}) } else { writeJsonQuiet(w, r, http.StatusNotAcceptable, operation.AssignResult{Error: err.Error()}) } } + +func (ms *MasterServer) maybeAddJwtAuthorization(w http.ResponseWriter, fileId string) { + encodedJwt := security.GenJwt(ms.guard.SigningKey, fileId) + if encodedJwt == "" { + return + } + w.Header().Set("Authorization", "BEARER "+string(encodedJwt)) +} diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 0914e81b0..d8ff01766 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -6,6 +6,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/spf13/viper" ) type VolumeServer struct { @@ -31,6 +32,12 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, whiteList []string, fixJpgOrientation bool, readRedirect bool) *VolumeServer { + + LoadConfiguration("security", false) + v := viper.GetViper() + signingKey := v.GetString("jwt.signing.key") + enableUiAccess := v.GetBool("access.ui") + vs := &VolumeServer{ pulseSeconds: pulseSeconds, dataCenter: dataCenter, @@ -42,14 +49,17 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, vs.MasterNodes = masterNodes vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) - vs.guard = security.NewGuard(whiteList, "") + vs.guard = security.NewGuard(whiteList, signingKey) handleStaticResources(adminMux) - adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) - adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) - adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) - adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) - adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + if signingKey == "" || enableUiAccess { + // only expose the volume server details for safe environments + adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) + adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) + adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) + adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) + 
adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + } adminMux.HandleFunc("/", vs.privateStoreHandler) if publicMux != adminMux { // separated admin and public port @@ -69,5 +79,5 @@ func (vs *VolumeServer) Shutdown() { } func (vs *VolumeServer) jwt(fileId string) security.EncodedJwt { - return security.GenJwt(vs.guard.SecretKey, fileId) + return security.GenJwt(vs.guard.SigningKey, fileId) } diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go index 77b1274fd..0e9aaeb3b 100644 --- a/weed/server/volume_server_handlers.go +++ b/weed/server/volume_server_handlers.go @@ -3,6 +3,8 @@ package weed_server import ( "net/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" ) @@ -45,3 +47,32 @@ func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Req vs.GetOrHeadHandler(w, r) } } + +func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid string) bool { + + if len(vs.guard.SigningKey) == 0 { + return true + } + + tokenStr := security.GetJwt(r) + if tokenStr == "" { + glog.V(1).Infof("missing jwt from %s", r.RemoteAddr) + return false + } + + token, err := security.DecodeJwt(vs.guard.SigningKey, tokenStr) + if err != nil { + glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err) + return false + } + if !token.Valid { + glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) + return false + } + + if sc, ok := token.Claims.(*security.SeaweedFileIdClaims); ok { + return sc.Fid == vid+","+fid + } + glog.V(1).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr) + return false +} diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index fd93142e1..1cfd9187e 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -20,13 +20,20 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { writeJsonError(w, r, http.StatusBadRequest, e) return } - vid, _, _, _, _ := parseURLPath(r.URL.Path) + + vid, fid, _, _, _ := parseURLPath(r.URL.Path) volumeId, ve := storage.NewVolumeId(vid) if ve != nil { glog.V(0).Infoln("NewVolumeId error:", ve) writeJsonError(w, r, http.StatusBadRequest, ve) return } + + if !vs.maybeCheckJwtAuthorization(r, vid, fid) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + needle, originalSize, ne := storage.CreateNeedleFromRequest(r, vs.FixJpgOrientation) if ne != nil { writeJsonError(w, r, http.StatusBadRequest, ne) @@ -56,6 +63,11 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { volumeId, _ := storage.NewVolumeId(vid) n.ParsePath(fid) + if !vs.maybeCheckJwtAuthorization(r, vid, fid) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + // glog.V(2).Infof("volume %s deleting %s", vid, n) cookie := n.Cookie From a3b0e39b06dcc284ff255d9c2a3c1f55c05ad19c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 14 Feb 2019 07:07:38 -0800 Subject: [PATCH 017/450] weed mount deletion always goes to weed filer --- weed/filesys/wfs.go | 5 ----- weed/filesys/wfs_deletion.go | 36 ------------------------------------ 2 files changed, 41 deletions(-) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 969514a06..6778d7b31 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -46,8 +46,6 @@ type WFS struct { pathToHandleLock 
sync.Mutex bufPool sync.Pool - fileIdsDeletionChan chan []string - stats statsCache } type statsCache struct { @@ -65,11 +63,8 @@ func NewSeaweedFileSystem(option *Option) *WFS { return make([]byte, option.ChunkSizeLimit) }, }, - fileIdsDeletionChan: make(chan []string, 32), } - go wfs.loopProcessingDeletion() - return wfs } diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index f58ef24f4..b96b27ca6 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -2,39 +2,9 @@ package filesys import ( "context" - "time" - - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func (wfs *WFS) loopProcessingDeletion() { - - ticker := time.NewTicker(2 * time.Second) - - wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - var fileIds []string - for { - select { - case fids := <-wfs.fileIdsDeletionChan: - fileIds = append(fileIds, fids...) - if len(fileIds) >= 1024 { - glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - deleteFileIds(context.Background(), client, fileIds) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - deleteFileIds(context.Background(), client, fileIds) - fileIds = fileIds[:0] - } - } - } - }) - -} - func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { if len(chunks) == 0 { return @@ -45,12 +15,6 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { fileIds = append(fileIds, chunk.FileId) } - var async = false - if async { - wfs.fileIdsDeletionChan <- fileIds - return - } - wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { deleteFileIds(context.Background(), client, fileIds) return nil From 74fb237727267aa482ee07851f454ca03fbd1fdf Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Feb 2019 00:09:19 -0800 Subject: [PATCH 018/450] benchmark can work in secure mode --- other/java/client/src/main/proto/filer.proto | 1 + .../repeated_vacuum/repeated_vacuum.go | 4 +- weed/command/benchmark.go | 13 +- weed/command/filer_copy.go | 4 +- weed/operation/assign_file_id.go | 29 ++- weed/operation/delete_content.go | 2 +- weed/operation/submit.go | 4 +- weed/operation/upload_content.go | 6 +- weed/pb/filer.proto | 1 + weed/pb/filer_pb/filer.pb.go | 173 +++++++++--------- weed/pb/master.proto | 1 + weed/pb/master_pb/master.pb.go | 142 +++++++------- weed/security/jwt.go | 5 +- weed/server/common.go | 4 +- weed/server/filer_grpc_server.go | 1 + weed/server/filer_server_handlers_write.go | 11 +- .../filer_server_handlers_write_autochunk.go | 9 +- weed/server/master_grpc_server_volume.go | 2 + 18 files changed, 233 insertions(+), 179 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 6cd4df6b4..5cdcb6a97 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -139,6 +139,7 @@ message AssignVolumeResponse { string url = 2; string public_url = 3; int32 count = 4; + string auth = 5; } message LookupVolumeRequest { diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 7cc583f56..d551baddb 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -31,12 +31,12 @@ func main() { targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) - _, err = operation.Upload(targetUrl, 
fmt.Sprintf("test%d", i), reader, false, "", nil, "") + _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth) if err != nil { log.Fatalf("upload: %v", err) } - util.Delete(targetUrl, "") + util.Delete(targetUrl, assignResult.Auth) util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index be76a3e2e..47fdc69a0 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -17,6 +17,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" ) @@ -40,6 +41,7 @@ var ( b BenchmarkOptions sharedBytes []byte masterClient *wdclient.MasterClient + isSecure bool ) func init() { @@ -194,7 +196,11 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { if df.enterTime.After(time.Now()) { time.Sleep(df.enterTime.Sub(time.Now())) } - if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid, ""); e == nil { + var jwtAuthorization security.EncodedJwt + if isSecure { + jwtAuthorization = operation.LookupJwt(masterClient.GetMaster(), df.fp.Fid) + } + if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), jwtAuthorization); e == nil { s.completed++ } else { s.failed++ @@ -219,7 +225,10 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { } if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection - if _, err := fp.Upload(0, masterClient.GetMaster(), ""); err == nil { + if !isSecure && assignResult.Auth != "" { + isSecure = true + } + if _, err := fp.Upload(0, masterClient.GetMaster(), assignResult.Auth); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index af121ca1d..39d83c31e 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -173,7 +173,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid - uploadResult, err := operation.Upload(targetUrl, fileName, f, false, mimeType, nil, "") + uploadResult, err := operation.Upload(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth) if err != nil { fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) return false @@ -253,7 +253,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(f, chunkSize), - false, "application/octet-stream", nil, "") + false, "application/octet-stream", nil, assignResult.Auth) if err != nil { fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) return false diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 00e1caad5..acadc88c8 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,9 +3,12 @@ package operation import ( "context" "fmt" + "strings" "time" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) type VolumeAssignRequest struct { @@ -19,11 +22,12 
@@ type VolumeAssignRequest struct { } type AssignResult struct { - Fid string `json:"fid,omitempty"` - Url string `json:"url,omitempty"` - PublicUrl string `json:"publicUrl,omitempty"` - Count uint64 `json:"count,omitempty"` - Error string `json:"error,omitempty"` + Fid string `json:"fid,omitempty"` + Url string `json:"url,omitempty"` + PublicUrl string `json:"publicUrl,omitempty"` + Count uint64 `json:"count,omitempty"` + Error string `json:"error,omitempty"` + Auth security.EncodedJwt `json:"auth,omitempty"` } func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { @@ -63,6 +67,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque ret.Url = resp.Url ret.PublicUrl = resp.PublicUrl ret.Error = resp.Error + ret.Auth = security.EncodedJwt(resp.Auth) return nil @@ -81,3 +86,17 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque return ret, lastError } + +func LookupJwt(master string, fileId string) security.EncodedJwt { + + tokenStr := "" + + if h, e := util.Head(fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId)); e == nil { + bearer := h.Get("Authorization") + if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" { + tokenStr = bearer[7:] + } + } + + return security.EncodedJwt(tokenStr) +} diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 3e468e1a3..57fc0329e 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -48,7 +48,7 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin vid, _, err := ParseFileId(fileId) if err != nil { ret = append(ret, &volume_server_pb.DeleteResult{ - FileId: vid, + FileId: fileId, Status: http.StatusBadRequest, Error: err.Error()}, ) diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 66a7a5f36..21cc887cf 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -65,7 +65,7 @@ func SubmitFiles(master string, files []FilePart, file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, "") + results[index].Size, err = file.Upload(maxMB, master, ret.Auth) if err != nil { results[index].Error = err.Error() } @@ -167,7 +167,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re baseName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(fi.Reader, chunkSize), master, fileUrl, - jwt) + ret.Auth) if e != nil { // delete all uploaded chunks cm.DeleteChunks(master) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 030bf5889..be7b8e69c 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -58,9 +58,6 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error if isGzipped { h.Set("Content-Encoding", "gzip") } - if jwt != "" { - h.Set("Authorization", "BEARER "+string(jwt)) - } file_writer, cp_err := body_writer.CreatePart(h) if cp_err != nil { @@ -86,6 +83,9 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error for k, v := range pairMap { req.Header.Set(k, v) } + if jwt != "" { + req.Header.Set("Authorization", "BEARER "+string(jwt)) + } resp, post_err := client.Do(req) if post_err != nil { glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error()) diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 6cd4df6b4..5cdcb6a97 100644 
--- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -139,6 +139,7 @@ message AssignVolumeResponse { string url = 2; string public_url = 3; int32 count = 4; + string auth = 5; } message LookupVolumeRequest { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 6b4a27c0a..920261538 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -574,6 +574,7 @@ type AssignVolumeResponse struct { Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` } func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } @@ -609,6 +610,13 @@ func (m *AssignVolumeResponse) GetCount() int32 { return 0 } +func (m *AssignVolumeResponse) GetAuth() string { + if m != nil { + return m.Auth + } + return "" +} + type LookupVolumeRequest struct { VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` } @@ -1159,86 +1167,87 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1291 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x8f, 0xdc, 0x44, - 0x13, 0x8e, 0xe7, 0x2b, 0xe3, 0x9a, 0x99, 0xbc, 0xbb, 0x3d, 0xfb, 0x12, 0x6b, 0xb2, 0x1b, 0x26, - 0x86, 0xa0, 0x8d, 0x88, 0x46, 0x51, 0xe0, 0x90, 0x10, 0x21, 0x91, 0x6c, 0x36, 0x52, 0xa4, 0x4d, - 0x82, 0xbc, 0x09, 0x12, 0xe2, 0x60, 0x79, 0xed, 0x9e, 0xa1, 0xb5, 0x1e, 0x7b, 0x70, 0xb7, 0x37, - 0x09, 0x7f, 0x82, 0x0b, 0x57, 0x0e, 0x9c, 0xf8, 0x17, 0x5c, 0xf8, 0x3f, 0xdc, 0xb9, 0xa1, 0xae, - 0x6e, 0x7b, 0xda, 0x63, 0xef, 0x06, 0x84, 0x72, 0xeb, 0x7e, 0xaa, 0xba, 0xbe, 0xfa, 0xe9, 0x2a, - 0x1b, 0x06, 0x73, 0x16, 0xd3, 0x6c, 0xb6, 0xca, 0x52, 0x91, 0x92, 0x3e, 0x6e, 0xfc, 0xd5, 0x89, - 0xfb, 0x02, 0xae, 0x1d, 0xa5, 0xe9, 0x69, 0xbe, 0x7a, 0xcc, 0x32, 0x1a, 0x8a, 0x34, 0x7b, 0x7b, - 0x98, 0x88, 0xec, 0xad, 0x47, 0x7f, 0xc8, 0x29, 0x17, 0x64, 0x17, 0xec, 0xa8, 0x10, 0x38, 0xd6, - 0xd4, 0xda, 0xb7, 0xbd, 0x35, 0x40, 0x08, 0x74, 0x92, 0x60, 0x49, 0x9d, 0x16, 0x0a, 0x70, 0xed, - 0x1e, 0xc2, 0x6e, 0xb3, 0x41, 0xbe, 0x4a, 0x13, 0x4e, 0xc9, 0x4d, 0xe8, 0x52, 0x09, 0xa0, 0xb5, - 0xc1, 0xdd, 0xff, 0xcd, 0x8a, 0x50, 0x66, 0x4a, 0x4f, 0x49, 0xdd, 0xdf, 0x2d, 0x20, 0x47, 0x8c, - 0x0b, 0x09, 0x32, 0xca, 0xff, 0x59, 0x3c, 0x1f, 0x40, 0x6f, 0x95, 0xd1, 0x39, 0x7b, 0xa3, 0x23, - 0xd2, 0x3b, 0x72, 0x1b, 0xb6, 0xb9, 0x08, 0x32, 0xf1, 0x24, 0x4b, 0x97, 0x4f, 0x58, 0x4c, 0x9f, - 0xcb, 0xa0, 0xdb, 0xa8, 0x52, 0x17, 0x90, 0x19, 0x10, 0x96, 0x84, 0x71, 0xce, 0xd9, 0x19, 0x3d, - 0x2e, 0xa4, 0x4e, 0x67, 0x6a, 0xed, 0xf7, 0xbd, 0x06, 0x09, 0xd9, 0x81, 0x6e, 0xcc, 0x96, 0x4c, - 0x38, 0xdd, 0xa9, 0xb5, 0x3f, 0xf2, 0xd4, 0xc6, 0xfd, 0x0a, 0xc6, 0x95, 0xf8, 0x75, 0xfa, 0xb7, - 0xe0, 0x32, 0x55, 0x90, 0x63, 0x4d, 0xdb, 0x4d, 0x05, 0x28, 0xe4, 0xee, 0x2f, 0x2d, 0xe8, 0x22, - 0x54, 0xd6, 0xd9, 0x5a, 0xd7, 0x99, 0xdc, 0x80, 0x21, 0xe3, 0xfe, 0xba, 0x18, 0x2d, 0x8c, 0x6f, - 0xc0, 0x78, 0x59, 0x77, 0xf2, 0x29, 0xf4, 0xc2, 0xef, 0xf3, 0xe4, 0x94, 0x3b, 0x6d, 0x74, 0x35, - 0x5e, 0xbb, 0x92, 0xc9, 0x1e, 0x48, 0x99, 0xa7, 0x55, 0xc8, 0x3d, 0x80, 0x40, 0x88, 0x8c, 0x9d, - 0xe4, 0x82, 0x72, 0xcc, 0x76, 0x70, 0xd7, 0x31, 0x0e, 0xe4, 0x9c, 0x3e, 0x2c, 0xe5, 0x9e, 0xa1, - 0x4b, 0xee, 0x43, 0x9f, 
0xbe, 0x11, 0x34, 0x89, 0x68, 0xe4, 0x74, 0xd1, 0xd1, 0xde, 0x46, 0x4e, - 0xb3, 0x43, 0x2d, 0x57, 0x19, 0x96, 0xea, 0x93, 0x07, 0x30, 0xaa, 0x88, 0xc8, 0x16, 0xb4, 0x4f, - 0x69, 0x71, 0xb3, 0x72, 0x29, 0xab, 0x7b, 0x16, 0xc4, 0xb9, 0x22, 0xd9, 0xd0, 0x53, 0x9b, 0x2f, - 0x5a, 0xf7, 0x2c, 0xf7, 0x67, 0x0b, 0xb6, 0x0f, 0xcf, 0x68, 0x22, 0x9e, 0xa7, 0x82, 0xcd, 0x59, - 0x18, 0x08, 0x96, 0x26, 0xe4, 0x36, 0xd8, 0x69, 0x1c, 0xf9, 0x17, 0x72, 0xac, 0x9f, 0xc6, 0xda, - 0xdf, 0x6d, 0xb0, 0x13, 0xfa, 0x5a, 0x6b, 0xb7, 0xce, 0xd1, 0x4e, 0xe8, 0x6b, 0xa5, 0xfd, 0x11, - 0x8c, 0x22, 0x1a, 0x53, 0x41, 0xfd, 0xb2, 0xae, 0xb2, 0xe8, 0x43, 0x05, 0x62, 0x3d, 0xb9, 0xfb, - 0xab, 0x05, 0x76, 0x59, 0x5e, 0x72, 0x15, 0x2e, 0x4b, 0x73, 0x3e, 0x8b, 0x74, 0x52, 0x3d, 0xb9, - 0x7d, 0x1a, 0x49, 0xae, 0xa6, 0xf3, 0x39, 0xa7, 0x02, 0xdd, 0xb6, 0x3d, 0xbd, 0x93, 0x77, 0xcd, - 0xd9, 0x8f, 0x8a, 0x9e, 0x1d, 0x0f, 0xd7, 0xb2, 0x06, 0x4b, 0xc1, 0x96, 0x14, 0xaf, 0xa5, 0xed, - 0xa9, 0x0d, 0x19, 0x43, 0x97, 0xfa, 0x22, 0x58, 0x20, 0xef, 0x6c, 0xaf, 0x43, 0x5f, 0x06, 0x0b, - 0xf2, 0x31, 0x5c, 0xe1, 0x69, 0x9e, 0x85, 0xd4, 0x2f, 0xdc, 0xf6, 0x50, 0x3a, 0x54, 0xe8, 0x13, - 0x74, 0xee, 0xfe, 0xd9, 0x82, 0x2b, 0xd5, 0x1b, 0x25, 0xd7, 0xc0, 0xc6, 0x13, 0xe8, 0xdc, 0x42, - 0xe7, 0xd8, 0x25, 0x8e, 0x2b, 0x01, 0xb4, 0xcc, 0x00, 0x8a, 0x23, 0xcb, 0x34, 0x52, 0xf1, 0x8e, - 0xd4, 0x91, 0x67, 0x69, 0x44, 0xe5, 0x4d, 0xe6, 0x2c, 0xc2, 0x88, 0x47, 0x9e, 0x5c, 0x4a, 0x64, - 0xc1, 0x22, 0xfd, 0x4a, 0xe4, 0x52, 0xd6, 0x20, 0xcc, 0xd0, 0x6e, 0x4f, 0xd5, 0x40, 0xed, 0x64, - 0x0d, 0x96, 0x12, 0xbd, 0xac, 0x12, 0x93, 0x6b, 0x32, 0x85, 0x41, 0x46, 0x57, 0xb1, 0xbe, 0x66, - 0xa7, 0x8f, 0x22, 0x13, 0x22, 0xd7, 0x01, 0xc2, 0x34, 0x8e, 0x69, 0x88, 0x0a, 0x36, 0x2a, 0x18, - 0x88, 0xbc, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xa9, 0xb5, 0xdf, 0xf5, 0x7a, 0x42, 0xc4, - 0xc7, 0x34, 0x94, 0x79, 0xe4, 0x9c, 0x66, 0x3e, 0xbe, 0xb1, 0x01, 0x9e, 0xeb, 0x4b, 0x00, 0xbb, - 0xc1, 0x1e, 0xc0, 0x22, 0x4b, 0xf3, 0x95, 0x92, 0x0e, 0xa7, 0x6d, 0xd9, 0x72, 0x10, 0x41, 0xf1, - 0x4d, 0xb8, 0xc2, 0xdf, 0x2e, 0x63, 0x96, 0x9c, 0xfa, 0x22, 0xc8, 0x16, 0x54, 0x38, 0x23, 0x34, - 0x30, 0xd2, 0xe8, 0x4b, 0x04, 0xdd, 0x6f, 0x81, 0x1c, 0x64, 0x34, 0x10, 0xf4, 0x5f, 0x74, 0xd7, - 0xb2, 0x53, 0xb6, 0x2e, 0xec, 0x94, 0xff, 0x87, 0x71, 0xc5, 0xb4, 0x6a, 0x34, 0xd2, 0xe3, 0xab, - 0x55, 0xf4, 0xbe, 0x3c, 0x56, 0x4c, 0x6b, 0x8f, 0x3f, 0x59, 0x40, 0x1e, 0xe3, 0x4b, 0xf8, 0x6f, - 0x23, 0x44, 0x72, 0x58, 0xb6, 0x36, 0xf5, 0xd2, 0xa2, 0x40, 0x04, 0xba, 0xf9, 0x0e, 0x19, 0x57, - 0xf6, 0x1f, 0x07, 0x22, 0xd0, 0x0d, 0x30, 0xa3, 0x61, 0x9e, 0xc9, 0x7e, 0x8c, 0xbc, 0xc2, 0x06, - 0xe8, 0x15, 0x90, 0x0c, 0xb4, 0x12, 0x90, 0x0e, 0xf4, 0x37, 0x0b, 0xc6, 0x0f, 0x39, 0x67, 0x8b, - 0xe4, 0x9b, 0x34, 0xce, 0x97, 0xb4, 0x88, 0x74, 0x07, 0xba, 0x61, 0x9a, 0x27, 0x02, 0xa3, 0xec, - 0x7a, 0x6a, 0xb3, 0x41, 0xab, 0x56, 0x8d, 0x56, 0x1b, 0xc4, 0x6c, 0xd7, 0x89, 0x69, 0x10, 0xaf, - 0x53, 0x21, 0xde, 0x87, 0x30, 0x90, 0xe9, 0xf9, 0x21, 0x4d, 0x04, 0xcd, 0xf4, 0x3b, 0x06, 0x09, - 0x1d, 0x20, 0xe2, 0x9e, 0xc1, 0x4e, 0x35, 0x50, 0x3d, 0x45, 0xce, 0xed, 0x2a, 0xf2, 0xd5, 0x65, - 0xb1, 0x8e, 0x52, 0x2e, 0x25, 0x7f, 0x57, 0xf9, 0x49, 0xcc, 0x42, 0x5f, 0x0a, 0x54, 0x74, 0xb6, - 0x42, 0x5e, 0x65, 0xf1, 0x3a, 0xe7, 0x8e, 0x91, 0xb3, 0xfb, 0x39, 0x8c, 0xd5, 0x10, 0xaf, 0x16, - 0x68, 0x0f, 0xe0, 0x0c, 0x01, 0x9f, 0x45, 0x6a, 0x7e, 0xd9, 0x9e, 0xad, 0x90, 0xa7, 0x11, 0x77, - 0xbf, 0x04, 0xfb, 0x28, 0x55, 0x39, 0x73, 0x72, 0x07, 0xec, 0xb8, 0xd8, 0xe8, 0x51, 0x47, 0xd6, - 0x7c, 0x2a, 0xf4, 0xbc, 0xb5, 0x92, 0xfb, 0x00, 
0xfa, 0x05, 0x5c, 0xe4, 0x61, 0x9d, 0x97, 0x47, - 0x6b, 0x23, 0x0f, 0xf7, 0x0f, 0x0b, 0x76, 0xaa, 0x21, 0xeb, 0x52, 0xbd, 0x82, 0x51, 0xe9, 0xc2, - 0x5f, 0x06, 0x2b, 0x1d, 0xcb, 0x1d, 0x33, 0x96, 0xfa, 0xb1, 0x32, 0x40, 0xfe, 0x2c, 0x58, 0x29, - 0xf6, 0x0c, 0x63, 0x03, 0x9a, 0xbc, 0x84, 0xed, 0x9a, 0x4a, 0xc3, 0xf4, 0xba, 0x65, 0x4e, 0xaf, - 0xca, 0x04, 0x2e, 0x4f, 0x9b, 0x23, 0xed, 0x3e, 0x5c, 0x55, 0x84, 0x3d, 0x28, 0xf9, 0x55, 0xd4, - 0xbe, 0x4a, 0x43, 0x6b, 0x93, 0x86, 0xee, 0x04, 0x9c, 0xfa, 0x51, 0x4d, 0xf8, 0x05, 0x6c, 0x1f, - 0x8b, 0x40, 0x30, 0x2e, 0x58, 0x58, 0x7e, 0x4a, 0x6d, 0xf0, 0xd6, 0x7a, 0x57, 0x43, 0xad, 0x33, - 0x7f, 0x0b, 0xda, 0x42, 0x14, 0x9c, 0x92, 0x4b, 0x79, 0x0b, 0xc4, 0xf4, 0xa4, 0xef, 0xe0, 0x3d, - 0xb8, 0x92, 0x7c, 0x10, 0xa9, 0x08, 0x62, 0x35, 0xb0, 0x3a, 0x38, 0xb0, 0x6c, 0x44, 0x70, 0x62, - 0xa9, 0x9e, 0x1e, 0x29, 0x69, 0x57, 0x8d, 0x33, 0x09, 0xa0, 0x70, 0x0f, 0x00, 0x9f, 0x8f, 0x62, - 0x7e, 0x4f, 0x9d, 0x95, 0xc8, 0x81, 0x04, 0xee, 0xfe, 0xd5, 0x85, 0xe1, 0x31, 0x0d, 0x5e, 0x53, - 0x1a, 0xc9, 0x79, 0x99, 0x91, 0x45, 0xc1, 0xad, 0xea, 0x37, 0x2d, 0xb9, 0xb9, 0x49, 0xa2, 0xc6, - 0x8f, 0xe8, 0xc9, 0x27, 0xef, 0x52, 0xd3, 0xd7, 0x74, 0x89, 0x1c, 0xc1, 0xc0, 0xf8, 0x68, 0x24, - 0xbb, 0xc6, 0xc1, 0xda, 0xb7, 0xf0, 0x64, 0xef, 0x1c, 0xa9, 0x69, 0xcd, 0x98, 0x0c, 0xa6, 0xb5, - 0xfa, 0x2c, 0x32, 0xad, 0x35, 0x8d, 0x13, 0xb4, 0x66, 0x74, 0x7d, 0xd3, 0x5a, 0x7d, 0xce, 0x98, - 0xd6, 0x9a, 0x46, 0x05, 0x5a, 0x33, 0x5a, 0xb3, 0x69, 0xad, 0x3e, 0x42, 0x4c, 0x6b, 0x4d, 0xfd, - 0xfc, 0x12, 0x79, 0x01, 0x43, 0xb3, 0x4f, 0x12, 0xe3, 0x40, 0x43, 0xa3, 0x9f, 0x5c, 0x3f, 0x4f, - 0x6c, 0x1a, 0x34, 0xdb, 0x82, 0x69, 0xb0, 0xa1, 0x31, 0x9a, 0x06, 0x9b, 0xba, 0x89, 0x7b, 0x89, - 0x7c, 0x07, 0x5b, 0x9b, 0xcf, 0x93, 0xdc, 0xd8, 0x4c, 0xab, 0xf6, 0xea, 0x27, 0xee, 0x45, 0x2a, - 0xa5, 0xf1, 0xa7, 0x00, 0xeb, 0x57, 0x47, 0xae, 0xad, 0xcf, 0xd4, 0x5e, 0xfd, 0x64, 0xb7, 0x59, - 0x58, 0x98, 0x7a, 0x74, 0x1d, 0xb6, 0xb8, 0xa2, 0xfe, 0x9c, 0xcf, 0xc2, 0x98, 0xd1, 0x44, 0x3c, - 0x02, 0x7c, 0x05, 0x5f, 0xcb, 0x3f, 0xc7, 0x93, 0x1e, 0xfe, 0x40, 0x7e, 0xf6, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x8d, 0x38, 0xa9, 0x9f, 0x4f, 0x0e, 0x00, 0x00, + // 1301 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x8f, 0xd3, 0xc6, + 0x1b, 0xc7, 0x79, 0x23, 0x7e, 0x92, 0xf0, 0xdf, 0x9d, 0xec, 0xbf, 0x58, 0x61, 0x97, 0x06, 0xb7, + 0x54, 0x8b, 0x8a, 0x22, 0x44, 0x7b, 0x80, 0xa2, 0x4a, 0x85, 0x65, 0x91, 0x90, 0x16, 0xa8, 0xbc, + 0x50, 0xa9, 0xea, 0xc1, 0xf2, 0xda, 0x93, 0x30, 0x5a, 0xc7, 0x4e, 0x3d, 0xe3, 0x05, 0xfa, 0x11, + 0x7a, 0xe9, 0xa5, 0xd7, 0x1e, 0x7a, 0xea, 0xb7, 0xe8, 0xa5, 0xdf, 0xa7, 0xf7, 0xde, 0xaa, 0x79, + 0x66, 0xec, 0x8c, 0x63, 0x2f, 0xb4, 0xaa, 0xb8, 0xcd, 0xfc, 0x9e, 0xf7, 0x67, 0x9e, 0x17, 0x1b, + 0x06, 0x73, 0x16, 0xd3, 0x6c, 0xb6, 0xca, 0x52, 0x91, 0x92, 0x3e, 0x5e, 0xfc, 0xd5, 0x89, 0xfb, + 0x0c, 0xae, 0x1c, 0xa5, 0xe9, 0x69, 0xbe, 0x7a, 0xc8, 0x32, 0x1a, 0x8a, 0x34, 0x7b, 0x73, 0x98, + 0x88, 0xec, 0x8d, 0x47, 0xbf, 0xcf, 0x29, 0x17, 0x64, 0x17, 0xec, 0xa8, 0x20, 0x38, 0xd6, 0xd4, + 0xda, 0xb7, 0xbd, 0x35, 0x40, 0x08, 0x74, 0x92, 0x60, 0x49, 0x9d, 0x16, 0x12, 0xf0, 0xec, 0x1e, + 0xc2, 0x6e, 0xb3, 0x42, 0xbe, 0x4a, 0x13, 0x4e, 0xc9, 0x75, 0xe8, 0x52, 0x09, 0xa0, 0xb6, 0xc1, + 0xed, 0xff, 0xcd, 0x0a, 0x57, 0x66, 0x8a, 0x4f, 0x51, 0xdd, 0xdf, 0x2d, 0x20, 0x47, 0x8c, 0x0b, + 0x09, 0x32, 0xca, 0xff, 0x99, 0x3f, 0x1f, 0x40, 0x6f, 0x95, 0xd1, 0x39, 0x7b, 0xad, 0x3d, 0xd2, + 0x37, 0x72, 0x13, 0xb6, 0xb9, 0x08, 0x32, 0xf1, 0x28, 
0x4b, 0x97, 0x8f, 0x58, 0x4c, 0x9f, 0x4a, + 0xa7, 0xdb, 0xc8, 0x52, 0x27, 0x90, 0x19, 0x10, 0x96, 0x84, 0x71, 0xce, 0xd9, 0x19, 0x3d, 0x2e, + 0xa8, 0x4e, 0x67, 0x6a, 0xed, 0xf7, 0xbd, 0x06, 0x0a, 0xd9, 0x81, 0x6e, 0xcc, 0x96, 0x4c, 0x38, + 0xdd, 0xa9, 0xb5, 0x3f, 0xf2, 0xd4, 0xc5, 0xfd, 0x0a, 0xc6, 0x15, 0xff, 0x75, 0xf8, 0x37, 0xe0, + 0x22, 0x55, 0x90, 0x63, 0x4d, 0xdb, 0x4d, 0x09, 0x28, 0xe8, 0xee, 0x2f, 0x2d, 0xe8, 0x22, 0x54, + 0xe6, 0xd9, 0x5a, 0xe7, 0x99, 0x5c, 0x83, 0x21, 0xe3, 0xfe, 0x3a, 0x19, 0x2d, 0xf4, 0x6f, 0xc0, + 0x78, 0x99, 0x77, 0xf2, 0x29, 0xf4, 0xc2, 0x97, 0x79, 0x72, 0xca, 0x9d, 0x36, 0x9a, 0x1a, 0xaf, + 0x4d, 0xc9, 0x60, 0x0f, 0x24, 0xcd, 0xd3, 0x2c, 0xe4, 0x0e, 0x40, 0x20, 0x44, 0xc6, 0x4e, 0x72, + 0x41, 0x39, 0x46, 0x3b, 0xb8, 0xed, 0x18, 0x02, 0x39, 0xa7, 0xf7, 0x4b, 0xba, 0x67, 0xf0, 0x92, + 0xbb, 0xd0, 0xa7, 0xaf, 0x05, 0x4d, 0x22, 0x1a, 0x39, 0x5d, 0x34, 0xb4, 0xb7, 0x11, 0xd3, 0xec, + 0x50, 0xd3, 0x55, 0x84, 0x25, 0xfb, 0xe4, 0x1e, 0x8c, 0x2a, 0x24, 0xb2, 0x05, 0xed, 0x53, 0x5a, + 0xbc, 0xac, 0x3c, 0xca, 0xec, 0x9e, 0x05, 0x71, 0xae, 0x8a, 0x6c, 0xe8, 0xa9, 0xcb, 0x17, 0xad, + 0x3b, 0x96, 0xfb, 0xb3, 0x05, 0xdb, 0x87, 0x67, 0x34, 0x11, 0x4f, 0x53, 0xc1, 0xe6, 0x2c, 0x0c, + 0x04, 0x4b, 0x13, 0x72, 0x13, 0xec, 0x34, 0x8e, 0xfc, 0xb7, 0xd6, 0x58, 0x3f, 0x8d, 0xb5, 0xbd, + 0x9b, 0x60, 0x27, 0xf4, 0x95, 0xe6, 0x6e, 0x9d, 0xc3, 0x9d, 0xd0, 0x57, 0x8a, 0xfb, 0x23, 0x18, + 0x45, 0x34, 0xa6, 0x82, 0xfa, 0x65, 0x5e, 0x65, 0xd2, 0x87, 0x0a, 0xc4, 0x7c, 0x72, 0xf7, 0x57, + 0x0b, 0xec, 0x32, 0xbd, 0xe4, 0x32, 0x5c, 0x94, 0xea, 0x7c, 0x16, 0xe9, 0xa0, 0x7a, 0xf2, 0xfa, + 0x38, 0x92, 0xb5, 0x9a, 0xce, 0xe7, 0x9c, 0x0a, 0x34, 0xdb, 0xf6, 0xf4, 0x4d, 0xbe, 0x35, 0x67, + 0x3f, 0xa8, 0xf2, 0xec, 0x78, 0x78, 0x96, 0x39, 0x58, 0x0a, 0xb6, 0xa4, 0xf8, 0x2c, 0x6d, 0x4f, + 0x5d, 0xc8, 0x18, 0xba, 0xd4, 0x17, 0xc1, 0x02, 0xeb, 0xce, 0xf6, 0x3a, 0xf4, 0x79, 0xb0, 0x20, + 0x1f, 0xc3, 0x25, 0x9e, 0xe6, 0x59, 0x48, 0xfd, 0xc2, 0x6c, 0x0f, 0xa9, 0x43, 0x85, 0x3e, 0x42, + 0xe3, 0xee, 0x9f, 0x2d, 0xb8, 0x54, 0x7d, 0x51, 0x72, 0x05, 0x6c, 0x94, 0x40, 0xe3, 0x16, 0x1a, + 0xc7, 0x29, 0x71, 0x5c, 0x71, 0xa0, 0x65, 0x3a, 0x50, 0x88, 0x2c, 0xd3, 0x48, 0xf9, 0x3b, 0x52, + 0x22, 0x4f, 0xd2, 0x88, 0xca, 0x97, 0xcc, 0x59, 0x84, 0x1e, 0x8f, 0x3c, 0x79, 0x94, 0xc8, 0x82, + 0x45, 0xba, 0x4b, 0xe4, 0x51, 0xe6, 0x20, 0xcc, 0x50, 0x6f, 0x4f, 0xe5, 0x40, 0xdd, 0x64, 0x0e, + 0x96, 0x12, 0xbd, 0xa8, 0x02, 0x93, 0x67, 0x32, 0x85, 0x41, 0x46, 0x57, 0xb1, 0x7e, 0x66, 0xa7, + 0x8f, 0x24, 0x13, 0x22, 0x57, 0x01, 0xc2, 0x34, 0x8e, 0x69, 0x88, 0x0c, 0x36, 0x32, 0x18, 0x88, + 0x7c, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xa9, 0xb5, 0xdf, 0xf5, 0x7a, 0x42, 0xc4, 0xc7, + 0x34, 0x94, 0x71, 0xe4, 0x9c, 0x66, 0x3e, 0xf6, 0xd8, 0x00, 0xe5, 0xfa, 0x12, 0xc0, 0x69, 0xb0, + 0x07, 0xb0, 0xc8, 0xd2, 0x7c, 0xa5, 0xa8, 0xc3, 0x69, 0x5b, 0x8e, 0x1c, 0x44, 0x90, 0x7c, 0x1d, + 0x2e, 0xf1, 0x37, 0xcb, 0x98, 0x25, 0xa7, 0xbe, 0x08, 0xb2, 0x05, 0x15, 0xce, 0x08, 0x15, 0x8c, + 0x34, 0xfa, 0x1c, 0x41, 0xf7, 0x5b, 0x20, 0x07, 0x19, 0x0d, 0x04, 0xfd, 0x17, 0xd3, 0xb5, 0x9c, + 0x94, 0xad, 0xb7, 0x4e, 0xca, 0xff, 0xc3, 0xb8, 0xa2, 0x5a, 0x0d, 0x1a, 0x69, 0xf1, 0xc5, 0x2a, + 0x7a, 0x5f, 0x16, 0x2b, 0xaa, 0xb5, 0xc5, 0x9f, 0x2c, 0x20, 0x0f, 0xb1, 0x13, 0xfe, 0xdb, 0x0a, + 0x91, 0x35, 0x2c, 0x47, 0x9b, 0xea, 0xb4, 0x28, 0x10, 0x81, 0x1e, 0xbe, 0x43, 0xc6, 0x95, 0xfe, + 0x87, 0x81, 0x08, 0xf4, 0x00, 0xcc, 0x68, 0x98, 0x67, 0x72, 0x1e, 0x63, 0x5d, 0xe1, 0x00, 0xf4, + 0x0a, 0x48, 0x3a, 0x5a, 0x71, 0x48, 0x3b, 0xfa, 0x9b, 0x05, 0xe3, 0xfb, 0x9c, 
0xb3, 0x45, 0xf2, + 0x4d, 0x1a, 0xe7, 0x4b, 0x5a, 0x78, 0xba, 0x03, 0xdd, 0x30, 0xcd, 0x13, 0x81, 0x5e, 0x76, 0x3d, + 0x75, 0xd9, 0x28, 0xab, 0x56, 0xad, 0xac, 0x36, 0x0a, 0xb3, 0x5d, 0x2f, 0x4c, 0xa3, 0xf0, 0x3a, + 0x95, 0xc2, 0xfb, 0x10, 0x06, 0x32, 0x3c, 0x3f, 0xa4, 0x89, 0xa0, 0x99, 0xee, 0x63, 0x90, 0xd0, + 0x01, 0x22, 0xee, 0x8f, 0x16, 0xec, 0x54, 0x3d, 0xd5, 0x6b, 0xe4, 0xdc, 0xb1, 0x22, 0xdb, 0x2e, + 0x8b, 0xb5, 0x9b, 0xf2, 0x28, 0x0b, 0x78, 0x95, 0x9f, 0xc4, 0x2c, 0xf4, 0x25, 0x41, 0xb9, 0x67, + 0x2b, 0xe4, 0x45, 0x16, 0xaf, 0x83, 0xee, 0x98, 0x41, 0x13, 0xe8, 0x04, 0xb9, 0x78, 0x59, 0x8c, + 0x16, 0x79, 0x76, 0x3f, 0x87, 0xb1, 0xda, 0xec, 0xd5, 0xac, 0xed, 0x01, 0x9c, 0x21, 0xe0, 0xb3, + 0x48, 0x2d, 0x35, 0xdb, 0xb3, 0x15, 0xf2, 0x38, 0xe2, 0xee, 0x97, 0x60, 0x1f, 0xa5, 0x2a, 0x11, + 0x9c, 0xdc, 0x02, 0x3b, 0x2e, 0x2e, 0x7a, 0xff, 0x91, 0x75, 0x91, 0x15, 0x7c, 0xde, 0x9a, 0xc9, + 0xbd, 0x07, 0xfd, 0x02, 0x2e, 0x62, 0xb3, 0xce, 0x8b, 0xad, 0xb5, 0x11, 0x9b, 0xfb, 0x87, 0x05, + 0x3b, 0x55, 0x97, 0x75, 0xfa, 0x5e, 0xc0, 0xa8, 0x34, 0xe1, 0x2f, 0x83, 0x95, 0xf6, 0xe5, 0x96, + 0xe9, 0x4b, 0x5d, 0xac, 0x74, 0x90, 0x3f, 0x09, 0x56, 0xaa, 0xa4, 0x86, 0xb1, 0x01, 0x4d, 0x9e, + 0xc3, 0x76, 0x8d, 0xa5, 0x61, 0xa5, 0xdd, 0x30, 0x57, 0x5a, 0x65, 0x2d, 0x97, 0xd2, 0xe6, 0x9e, + 0xbb, 0x0b, 0x97, 0x55, 0x15, 0x1f, 0x94, 0x45, 0x57, 0xe4, 0xbe, 0x5a, 0x9b, 0xd6, 0x66, 0x6d, + 0xba, 0x13, 0x70, 0xea, 0xa2, 0xba, 0x0b, 0x16, 0xb0, 0x7d, 0x2c, 0x02, 0xc1, 0xb8, 0x60, 0x61, + 0xf9, 0x7d, 0xb5, 0x51, 0xcc, 0xd6, 0xbb, 0xa6, 0x6c, 0xbd, 0x1d, 0xb6, 0xa0, 0x2d, 0x44, 0x51, + 0x67, 0xf2, 0x28, 0x5f, 0x81, 0x98, 0x96, 0xf4, 0x1b, 0xbc, 0x07, 0x53, 0xb2, 0x1e, 0x44, 0x2a, + 0x82, 0x58, 0x6d, 0xb1, 0x0e, 0x6e, 0x31, 0x1b, 0x11, 0x5c, 0x63, 0x6a, 0xd0, 0x47, 0x8a, 0xda, + 0x55, 0x3b, 0x4e, 0x02, 0x48, 0xdc, 0x03, 0xc0, 0x96, 0x52, 0xdd, 0xd0, 0x53, 0xb2, 0x12, 0x39, + 0x90, 0xc0, 0xed, 0xbf, 0xba, 0x30, 0x3c, 0xa6, 0xc1, 0x2b, 0x4a, 0x23, 0xb9, 0x44, 0x33, 0xb2, + 0x28, 0x6a, 0xab, 0xfa, 0xa1, 0x4b, 0xae, 0x6f, 0x16, 0x51, 0xe3, 0x97, 0xf5, 0xe4, 0x93, 0x77, + 0xb1, 0xe9, 0x67, 0xba, 0x40, 0x8e, 0x60, 0x60, 0x7c, 0x49, 0x92, 0x5d, 0x43, 0xb0, 0xf6, 0x81, + 0x3c, 0xd9, 0x3b, 0x87, 0x6a, 0x6a, 0x33, 0xd6, 0x85, 0xa9, 0xad, 0xbe, 0xa0, 0x4c, 0x6d, 0x4d, + 0x3b, 0x06, 0xb5, 0x19, 0xab, 0xc0, 0xd4, 0x56, 0x5f, 0x3e, 0xa6, 0xb6, 0xa6, 0xfd, 0x81, 0xda, + 0x8c, 0x79, 0x6d, 0x6a, 0xab, 0xef, 0x15, 0x53, 0x5b, 0xd3, 0x90, 0xbf, 0x40, 0x9e, 0xc1, 0xd0, + 0x9c, 0x9d, 0xc4, 0x10, 0x68, 0x98, 0xfe, 0x93, 0xab, 0xe7, 0x91, 0x4d, 0x85, 0xe6, 0x58, 0x30, + 0x15, 0x36, 0x0c, 0x46, 0x53, 0x61, 0xd3, 0x34, 0x71, 0x2f, 0x90, 0xef, 0x60, 0x6b, 0xb3, 0x3d, + 0xc9, 0xb5, 0xcd, 0xb0, 0x6a, 0x5d, 0x3f, 0x71, 0xdf, 0xc6, 0x52, 0x2a, 0x7f, 0x0c, 0xb0, 0xee, + 0x3a, 0x72, 0x65, 0x2d, 0x53, 0xeb, 0xfa, 0xc9, 0x6e, 0x33, 0xb1, 0x50, 0xf5, 0xe0, 0x2a, 0x6c, + 0x71, 0x55, 0xfa, 0x73, 0x3e, 0x0b, 0x63, 0x46, 0x13, 0xf1, 0x00, 0xb0, 0x0b, 0xbe, 0x96, 0xbf, + 0x93, 0x27, 0x3d, 0xfc, 0xab, 0xfc, 0xec, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x6d, 0xf7, + 0x42, 0x64, 0x0e, 0x00, 0x00, } diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 7aac4c392..f03d1e3de 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -108,6 +108,7 @@ message AssignResponse { string public_url = 3; uint64 count = 4; string error = 5; + string auth = 6; } message StatisticsRequest { diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 124a4d263..0c73ff2c8 100644 --- a/weed/pb/master_pb/master.pb.go +++ 
b/weed/pb/master_pb/master.pb.go @@ -537,6 +537,7 @@ type AssignResponse struct { PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` Count uint64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` Error string `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"` + Auth string `protobuf:"bytes,6,opt,name=auth" json:"auth,omitempty"` } func (m *AssignResponse) Reset() { *m = AssignResponse{} } @@ -579,6 +580,13 @@ func (m *AssignResponse) GetError() string { return "" } +func (m *AssignResponse) GetAuth() string { + if m != nil { + return m.Auth + } + return "" +} + type StatisticsRequest struct { Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` @@ -958,71 +966,71 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1043 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0x24, 0x35, - 0x10, 0xde, 0xee, 0xf9, 0xed, 0x9a, 0x4c, 0x76, 0xe2, 0x44, 0xa8, 0x77, 0x96, 0xdd, 0x1d, 0x9a, - 0xcb, 0x20, 0x50, 0xb4, 0x84, 0x23, 0x42, 0x88, 0x8d, 0x82, 0x88, 0x12, 0xd8, 0xd0, 0x61, 0xf7, - 0xc0, 0xa5, 0x71, 0xba, 0x2b, 0x91, 0x95, 0xfe, 0xc3, 0xf6, 0x24, 0x33, 0x7b, 0xe1, 0xc8, 0x5b, - 0x71, 0x81, 0x1b, 0x8f, 0xc2, 0x8d, 0x27, 0x40, 0xfe, 0xe9, 0x4e, 0x4f, 0x27, 0x21, 0x12, 0x12, - 0x37, 0xfb, 0x73, 0xd9, 0xae, 0xfa, 0xbe, 0xaa, 0xb2, 0x61, 0x23, 0xa3, 0x42, 0x22, 0xdf, 0x2d, - 0x79, 0x21, 0x0b, 0xe2, 0x99, 0x59, 0x54, 0x9e, 0x05, 0x7f, 0xb9, 0xe0, 0x7d, 0x83, 0x94, 0xcb, - 0x33, 0xa4, 0x92, 0x6c, 0x82, 0xcb, 0x4a, 0xdf, 0x99, 0x39, 0x73, 0x2f, 0x74, 0x59, 0x49, 0x08, - 0x74, 0xcb, 0x82, 0x4b, 0xdf, 0x9d, 0x39, 0xf3, 0x71, 0xa8, 0xc7, 0xe4, 0x19, 0x40, 0xb9, 0x38, - 0x4b, 0x59, 0x1c, 0x2d, 0x78, 0xea, 0x77, 0xb4, 0xad, 0x67, 0x90, 0x37, 0x3c, 0x25, 0x73, 0x98, - 0x64, 0x74, 0x19, 0x5d, 0x15, 0xe9, 0x22, 0xc3, 0x28, 0x2e, 0x16, 0xb9, 0xf4, 0xbb, 0x7a, 0xfb, - 0x66, 0x46, 0x97, 0x6f, 0x35, 0xbc, 0xaf, 0x50, 0x32, 0x53, 0x5e, 0x2d, 0xa3, 0x73, 0x96, 0x62, - 0x74, 0x89, 0x2b, 0xbf, 0x37, 0x73, 0xe6, 0xdd, 0x10, 0x32, 0xba, 0xfc, 0x9a, 0xa5, 0x78, 0x84, - 0x2b, 0xf2, 0x02, 0x46, 0x09, 0x95, 0x34, 0x8a, 0x31, 0x97, 0xc8, 0xfd, 0xbe, 0xbe, 0x0b, 0x14, - 0xb4, 0xaf, 0x11, 0xe5, 0x1f, 0xa7, 0xf1, 0xa5, 0x3f, 0xd0, 0x2b, 0x7a, 0xac, 0xfc, 0xa3, 0x49, - 0xc6, 0xf2, 0x48, 0x7b, 0x3e, 0xd4, 0x57, 0x7b, 0x1a, 0x39, 0x51, 0xee, 0x7f, 0x01, 0x03, 0xe3, - 0x9b, 0xf0, 0xbd, 0x59, 0x67, 0x3e, 0xda, 0xfb, 0x70, 0xb7, 0x66, 0x63, 0xd7, 0xb8, 0x77, 0x98, - 0x9f, 0x17, 0x3c, 0xa3, 0x92, 0x15, 0xf9, 0xb7, 0x28, 0x04, 0xbd, 0xc0, 0xb0, 0xda, 0x43, 0x9e, - 0xc0, 0x30, 0xc7, 0xeb, 0xe8, 0x8a, 0x25, 0xc2, 0x87, 0x59, 0x67, 0x3e, 0x0e, 0x07, 0x39, 0x5e, - 0xbf, 0x65, 0x89, 0x20, 0x1f, 0xc0, 0x46, 0x82, 0x29, 0x4a, 0x4c, 0xcc, 0xf2, 0x48, 0x2f, 0x8f, - 0x2c, 0xa6, 0x4c, 0x82, 0x37, 0xb0, 0x55, 0x93, 0x1d, 0xa2, 0x28, 0x8b, 0x5c, 0x20, 0x99, 0xc3, - 0x63, 0x73, 0xfa, 0x29, 0x7b, 0x87, 0xc7, 0x2c, 0x63, 0x52, 0x2b, 0xd0, 0x0d, 0xdb, 0x30, 0x79, - 0x0f, 0xfa, 0x29, 0xd2, 0x04, 0xb9, 0xa5, 0xdd, 0xce, 0x82, 0x3f, 0x5c, 0xf0, 0xef, 0x73, 0x5d, - 0x6b, 0x9a, 0xe8, 0x13, 0xc7, 0xa1, 0xcb, 0x12, 0xc5, 0x99, 0x60, 0xef, 0x50, 0x6b, 0xda, 0x0d, - 0xf5, 0x98, 0x3c, 0x07, 0x88, 0x8b, 0x34, 0xc5, 0x58, 0x6d, 0xb4, 0x87, 0x37, 0x10, 0xc5, 0xa9, - 0x96, 0xe9, 0x46, 0xce, 0x6e, 0xe8, 0x29, 
0xc4, 0x28, 0x59, 0x47, 0x6e, 0x0d, 0x8c, 0x92, 0x36, - 0x72, 0x63, 0xf2, 0x09, 0x90, 0x8a, 0x9c, 0xb3, 0x55, 0x6d, 0xd8, 0xd7, 0x86, 0x13, 0xbb, 0xf2, - 0x6a, 0x55, 0x59, 0x3f, 0x05, 0x8f, 0x23, 0x4d, 0xa2, 0x22, 0x4f, 0x57, 0x5a, 0xdc, 0x61, 0x38, - 0x54, 0xc0, 0xeb, 0x3c, 0x5d, 0x91, 0x8f, 0x61, 0x8b, 0x63, 0x99, 0xb2, 0x98, 0x46, 0x65, 0x4a, - 0x63, 0xcc, 0x30, 0xaf, 0x74, 0x9e, 0xd8, 0x85, 0x93, 0x0a, 0x27, 0x3e, 0x0c, 0xae, 0x90, 0x0b, - 0x15, 0x96, 0xa7, 0x4d, 0xaa, 0x29, 0x99, 0x40, 0x47, 0xca, 0xd4, 0x07, 0x8d, 0xaa, 0x61, 0x30, - 0x80, 0xde, 0x41, 0x56, 0xca, 0x55, 0xf0, 0x9b, 0x03, 0x8f, 0x4f, 0x17, 0x25, 0xf2, 0x57, 0x69, - 0x11, 0x5f, 0x1e, 0x2c, 0x25, 0xa7, 0xe4, 0x35, 0x6c, 0x22, 0xa7, 0x62, 0xc1, 0x95, 0xef, 0x09, - 0xcb, 0x2f, 0x34, 0xa5, 0xa3, 0xbd, 0x79, 0x23, 0x7d, 0x5a, 0x7b, 0x76, 0x0f, 0xcc, 0x86, 0x7d, - 0x6d, 0x1f, 0x8e, 0xb1, 0x39, 0x9d, 0xfe, 0x08, 0xe3, 0xb5, 0x75, 0x25, 0x8c, 0x4a, 0x6d, 0x2b, - 0x95, 0x1e, 0x2b, 0xc5, 0x4b, 0xca, 0x99, 0x5c, 0xd9, 0x12, 0xb4, 0x33, 0x25, 0x88, 0xad, 0x30, - 0x95, 0x69, 0x1d, 0x9d, 0x69, 0x9e, 0x41, 0x0e, 0x13, 0x11, 0x7c, 0x04, 0xdb, 0xfb, 0x29, 0xc3, - 0x5c, 0x1e, 0x33, 0x21, 0x31, 0x0f, 0xf1, 0xe7, 0x05, 0x0a, 0xa9, 0x6e, 0xc8, 0x69, 0x86, 0xb6, - 0xc0, 0xf5, 0x38, 0xf8, 0x05, 0x36, 0x4d, 0xea, 0x1c, 0x17, 0xb1, 0xce, 0x1b, 0x45, 0x8c, 0xaa, - 0x6c, 0x63, 0xa4, 0x86, 0xad, 0x92, 0x77, 0xdb, 0x25, 0xdf, 0xac, 0x89, 0xce, 0xbf, 0xd7, 0x44, - 0xf7, 0x76, 0x4d, 0xfc, 0x00, 0xdb, 0xc7, 0x45, 0x71, 0xb9, 0x28, 0x8d, 0x1b, 0x95, 0xaf, 0xeb, - 0x11, 0x3a, 0xb3, 0x8e, 0xba, 0xb3, 0x8e, 0xb0, 0x95, 0xb1, 0x6e, 0x3b, 0x63, 0x83, 0xbf, 0x1d, - 0xd8, 0x59, 0x3f, 0xd6, 0x56, 0xdb, 0x4f, 0xb0, 0x5d, 0x9f, 0x1b, 0xa5, 0x36, 0x66, 0x73, 0xc1, - 0x68, 0xef, 0x65, 0x43, 0xcc, 0xbb, 0x76, 0x57, 0x0d, 0x22, 0xa9, 0xc8, 0x0a, 0xb7, 0xae, 0x5a, - 0x88, 0x98, 0x2e, 0x61, 0xd2, 0x36, 0x53, 0x09, 0x5d, 0xdf, 0x6a, 0x99, 0x1d, 0x56, 0x3b, 0xc9, - 0xa7, 0xe0, 0xdd, 0x38, 0xe2, 0x6a, 0x47, 0xb6, 0xd7, 0x1c, 0xb1, 0x77, 0xdd, 0x58, 0x91, 0x1d, - 0xe8, 0x21, 0xe7, 0x45, 0xd5, 0x08, 0xcc, 0x24, 0xf8, 0x1c, 0x86, 0xff, 0x59, 0xc5, 0xe0, 0x4f, - 0x07, 0xc6, 0x5f, 0x09, 0xc1, 0x2e, 0xea, 0x74, 0xd9, 0x81, 0x9e, 0x29, 0x53, 0xd3, 0x8e, 0xcc, - 0x84, 0xcc, 0x60, 0x64, 0xab, 0xac, 0x41, 0x7d, 0x13, 0x7a, 0xb0, 0x9b, 0xd8, 0xca, 0xeb, 0x1a, - 0xd7, 0xa4, 0x4c, 0xdb, 0x8d, 0xbe, 0x77, 0x6f, 0xa3, 0xef, 0x37, 0x1a, 0xfd, 0x53, 0xf0, 0xf4, - 0xa6, 0xbc, 0x48, 0xd0, 0xbe, 0x00, 0x43, 0x05, 0x7c, 0x57, 0x24, 0x3a, 0xad, 0xab, 0x60, 0xac, - 0xf0, 0x13, 0xe8, 0x9c, 0xd7, 0xe4, 0xab, 0x61, 0x45, 0x91, 0x7b, 0x1f, 0x45, 0xb7, 0xde, 0xb6, - 0x9a, 0x90, 0x6e, 0x93, 0x90, 0x5a, 0x8b, 0x5e, 0x53, 0x8b, 0x0b, 0xd8, 0x3a, 0x95, 0x54, 0x32, - 0x21, 0x59, 0x2c, 0x2a, 0x46, 0x5b, 0xdc, 0x39, 0x0f, 0x71, 0xe7, 0xde, 0xc7, 0x5d, 0xa7, 0xe6, - 0x2e, 0xf8, 0xdd, 0x01, 0xd2, 0xbc, 0xc9, 0x86, 0xfb, 0x3f, 0x5c, 0xa5, 0xe8, 0x91, 0x85, 0xa4, - 0x69, 0xa4, 0x1f, 0x10, 0xfb, 0x0c, 0x68, 0x44, 0xbd, 0x51, 0x4a, 0x90, 0x85, 0xc0, 0xc4, 0xac, - 0x9a, 0x37, 0x60, 0xa8, 0x00, 0xbd, 0xb8, 0xfe, 0x84, 0xf4, 0x5b, 0x4f, 0xc8, 0xde, 0xaf, 0x1d, - 0x18, 0x9c, 0x22, 0xbd, 0x46, 0x4c, 0xc8, 0x21, 0x8c, 0x4f, 0x31, 0x4f, 0x6e, 0xbe, 0x25, 0x3b, - 0x8d, 0x6a, 0xa8, 0xd1, 0xe9, 0xfb, 0x77, 0xa1, 0x55, 0xfc, 0xc1, 0xa3, 0xb9, 0xf3, 0xd2, 0x21, - 0x27, 0x30, 0x3e, 0x42, 0x2c, 0xf7, 0x8b, 0x3c, 0xc7, 0x58, 0x62, 0x42, 0x9e, 0x37, 0x36, 0xdd, - 0xd1, 0x22, 0xa7, 0x4f, 0x6e, 0xfd, 0x06, 0xaa, 0x8a, 0xb2, 0x27, 0x7e, 0x0f, 0x1b, 0xcd, 0xce, - 0xb0, 0x76, 0xe0, 0x1d, 0x7d, 0x6c, 0xfa, 0xe2, 0x81, 0x96, 0x12, 
0x3c, 0x22, 0x5f, 0x42, 0xdf, - 0xe4, 0x2a, 0xf1, 0x1b, 0xc6, 0x6b, 0xb5, 0xb8, 0xe6, 0xd7, 0x7a, 0x62, 0x07, 0x8f, 0xc8, 0x11, - 0xc0, 0x4d, 0x06, 0x90, 0x26, 0x2f, 0xb7, 0x52, 0x70, 0xfa, 0xec, 0x9e, 0xd5, 0xea, 0xb0, 0xb3, - 0xbe, 0xfe, 0x23, 0x7e, 0xf6, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x77, 0xb3, 0x32, 0xc7, 0x33, - 0x0a, 0x00, 0x00, + // 1056 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0xe4, 0x44, + 0x10, 0x8e, 0x3d, 0xbf, 0xae, 0xc9, 0x64, 0x27, 0x9d, 0x08, 0x79, 0x67, 0xd9, 0xdd, 0xc1, 0x5c, + 0x06, 0x81, 0xa2, 0x25, 0x1c, 0x11, 0x42, 0x6c, 0x14, 0x44, 0x94, 0xc0, 0x06, 0x87, 0xdd, 0x03, + 0x17, 0xd3, 0xb1, 0x2b, 0xa1, 0x15, 0xff, 0xe1, 0x6e, 0x27, 0x33, 0x7b, 0xe1, 0xc8, 0x03, 0xf0, + 0x3e, 0x5c, 0xe0, 0xc6, 0xa3, 0x70, 0xe3, 0x09, 0x50, 0xff, 0xd8, 0xf1, 0x38, 0x19, 0x22, 0x21, + 0x71, 0xeb, 0xfe, 0xba, 0xba, 0xab, 0xfa, 0xfb, 0xaa, 0xaa, 0x1b, 0x36, 0x13, 0xca, 0x05, 0x16, + 0x7b, 0x79, 0x91, 0x89, 0x8c, 0x38, 0x7a, 0x16, 0xe4, 0xe7, 0xde, 0x5f, 0x36, 0x38, 0x5f, 0x21, + 0x2d, 0xc4, 0x39, 0x52, 0x41, 0xb6, 0xc0, 0x66, 0xb9, 0x6b, 0xcd, 0xac, 0xb9, 0xe3, 0xdb, 0x2c, + 0x27, 0x04, 0xba, 0x79, 0x56, 0x08, 0xd7, 0x9e, 0x59, 0xf3, 0xb1, 0xaf, 0xc6, 0xe4, 0x29, 0x40, + 0x5e, 0x9e, 0xc7, 0x2c, 0x0c, 0xca, 0x22, 0x76, 0x3b, 0xca, 0xd6, 0xd1, 0xc8, 0xeb, 0x22, 0x26, + 0x73, 0x98, 0x24, 0x74, 0x11, 0x5c, 0x67, 0x71, 0x99, 0x60, 0x10, 0x66, 0x65, 0x2a, 0xdc, 0xae, + 0xda, 0xbe, 0x95, 0xd0, 0xc5, 0x1b, 0x05, 0x1f, 0x48, 0x94, 0xcc, 0x64, 0x54, 0x8b, 0xe0, 0x82, + 0xc5, 0x18, 0x5c, 0xe1, 0xd2, 0xed, 0xcd, 0xac, 0x79, 0xd7, 0x87, 0x84, 0x2e, 0xbe, 0x64, 0x31, + 0x1e, 0xe3, 0x92, 0x3c, 0x87, 0x51, 0x44, 0x05, 0x0d, 0x42, 0x4c, 0x05, 0x16, 0x6e, 0x5f, 0xf9, + 0x02, 0x09, 0x1d, 0x28, 0x44, 0xc6, 0x57, 0xd0, 0xf0, 0xca, 0x1d, 0xa8, 0x15, 0x35, 0x96, 0xf1, + 0xd1, 0x28, 0x61, 0x69, 0xa0, 0x22, 0x1f, 0x2a, 0xd7, 0x8e, 0x42, 0x4e, 0x65, 0xf8, 0x9f, 0xc1, + 0x40, 0xc7, 0xc6, 0x5d, 0x67, 0xd6, 0x99, 0x8f, 0xf6, 0xdf, 0xdf, 0xab, 0xd9, 0xd8, 0xd3, 0xe1, + 0x1d, 0xa5, 0x17, 0x59, 0x91, 0x50, 0xc1, 0xb2, 0xf4, 0x6b, 0xe4, 0x9c, 0x5e, 0xa2, 0x5f, 0xed, + 0x21, 0x8f, 0x61, 0x98, 0xe2, 0x4d, 0x70, 0xcd, 0x22, 0xee, 0xc2, 0xac, 0x33, 0x1f, 0xfb, 0x83, + 0x14, 0x6f, 0xde, 0xb0, 0x88, 0x93, 0xf7, 0x60, 0x33, 0xc2, 0x18, 0x05, 0x46, 0x7a, 0x79, 0xa4, + 0x96, 0x47, 0x06, 0x93, 0x26, 0xde, 0x6b, 0xd8, 0xae, 0xc9, 0xf6, 0x91, 0xe7, 0x59, 0xca, 0x91, + 0xcc, 0xe1, 0x91, 0x3e, 0xfd, 0x8c, 0xbd, 0xc5, 0x13, 0x96, 0x30, 0xa1, 0x14, 0xe8, 0xfa, 0x6d, + 0x98, 0xbc, 0x03, 0xfd, 0x18, 0x69, 0x84, 0x85, 0xa1, 0xdd, 0xcc, 0xbc, 0x3f, 0x6c, 0x70, 0xd7, + 0x85, 0xae, 0x34, 0x8d, 0xd4, 0x89, 0x63, 0xdf, 0x66, 0x91, 0xe4, 0x8c, 0xb3, 0xb7, 0xa8, 0x34, + 0xed, 0xfa, 0x6a, 0x4c, 0x9e, 0x01, 0x84, 0x59, 0x1c, 0x63, 0x28, 0x37, 0x9a, 0xc3, 0x1b, 0x88, + 0xe4, 0x54, 0xc9, 0x74, 0x2b, 0x67, 0xd7, 0x77, 0x24, 0xa2, 0x95, 0xac, 0x6f, 0x6e, 0x0c, 0xb4, + 0x92, 0xe6, 0xe6, 0xda, 0xe4, 0x23, 0x20, 0x15, 0x39, 0xe7, 0xcb, 0xda, 0xb0, 0xaf, 0x0c, 0x27, + 0x66, 0xe5, 0xe5, 0xb2, 0xb2, 0x7e, 0x02, 0x4e, 0x81, 0x34, 0x0a, 0xb2, 0x34, 0x5e, 0x2a, 0x71, + 0x87, 0xfe, 0x50, 0x02, 0xaf, 0xd2, 0x78, 0x49, 0x3e, 0x84, 0xed, 0x02, 0xf3, 0x98, 0x85, 0x34, + 0xc8, 0x63, 0x1a, 0x62, 0x82, 0x69, 0xa5, 0xf3, 0xc4, 0x2c, 0x9c, 0x56, 0x38, 0x71, 0x61, 0x70, + 0x8d, 0x05, 0x97, 0xd7, 0x72, 0x94, 0x49, 0x35, 0x25, 0x13, 0xe8, 0x08, 0x11, 0xbb, 0xa0, 0x50, + 0x39, 0xf4, 0x06, 0xd0, 0x3b, 0x4c, 0x72, 0xb1, 0xf4, 0x7e, 0xb3, 0xe0, 0xd1, 0x59, 0x99, 0x63, + 0xf1, 0x32, 0xce, 0xc2, 
0xab, 0xc3, 0x85, 0x28, 0x28, 0x79, 0x05, 0x5b, 0x58, 0x50, 0x5e, 0x16, + 0x32, 0xf6, 0x88, 0xa5, 0x97, 0x8a, 0xd2, 0xd1, 0xfe, 0xbc, 0x91, 0x3e, 0xad, 0x3d, 0x7b, 0x87, + 0x7a, 0xc3, 0x81, 0xb2, 0xf7, 0xc7, 0xd8, 0x9c, 0x4e, 0xbf, 0x87, 0xf1, 0xca, 0xba, 0x14, 0x46, + 0xa6, 0xb6, 0x91, 0x4a, 0x8d, 0xa5, 0xe2, 0x39, 0x2d, 0x98, 0x58, 0x9a, 0x12, 0x34, 0x33, 0x29, + 0x88, 0xa9, 0x30, 0x99, 0x69, 0x1d, 0x95, 0x69, 0x8e, 0x46, 0x8e, 0x22, 0xee, 0x7d, 0x00, 0x3b, + 0x07, 0x31, 0xc3, 0x54, 0x9c, 0x30, 0x2e, 0x30, 0xf5, 0xf1, 0xa7, 0x12, 0xb9, 0x90, 0x1e, 0x52, + 0x9a, 0xa0, 0x29, 0x70, 0x35, 0xf6, 0x7e, 0x86, 0x2d, 0x9d, 0x3a, 0x27, 0x59, 0xa8, 0xf2, 0x46, + 0x12, 0x23, 0x2b, 0x5b, 0x1b, 0xc9, 0x61, 0xab, 0xe4, 0xed, 0x76, 0xc9, 0x37, 0x6b, 0xa2, 0xf3, + 0xef, 0x35, 0xd1, 0xbd, 0x5b, 0x13, 0xdf, 0xc1, 0xce, 0x49, 0x96, 0x5d, 0x95, 0xb9, 0x0e, 0xa3, + 0x8a, 0x75, 0xf5, 0x86, 0xd6, 0xac, 0x23, 0x7d, 0xd6, 0x37, 0x6c, 0x65, 0xac, 0xdd, 0xce, 0x58, + 0xef, 0x6f, 0x0b, 0x76, 0x57, 0x8f, 0x35, 0xd5, 0xf6, 0x03, 0xec, 0xd4, 0xe7, 0x06, 0xb1, 0xb9, + 0xb3, 0x76, 0x30, 0xda, 0x7f, 0xd1, 0x10, 0xf3, 0xbe, 0xdd, 0x55, 0x83, 0x88, 0x2a, 0xb2, 0xfc, + 0xed, 0xeb, 0x16, 0xc2, 0xa7, 0x0b, 0x98, 0xb4, 0xcd, 0x64, 0x42, 0xd7, 0x5e, 0x0d, 0xb3, 0xc3, + 0x6a, 0x27, 0xf9, 0x18, 0x9c, 0xdb, 0x40, 0x6c, 0x15, 0xc8, 0xce, 0x4a, 0x20, 0xc6, 0xd7, 0xad, + 0x15, 0xd9, 0x85, 0x1e, 0x16, 0x45, 0x56, 0x35, 0x02, 0x3d, 0xf1, 0x3e, 0x85, 0xe1, 0x7f, 0x56, + 0xd1, 0xfb, 0xd3, 0x82, 0xf1, 0x17, 0x9c, 0xb3, 0xcb, 0x3a, 0x5d, 0x76, 0xa1, 0xa7, 0xcb, 0x54, + 0xb7, 0x23, 0x3d, 0x21, 0x33, 0x18, 0x99, 0x2a, 0x6b, 0x50, 0xdf, 0x84, 0x1e, 0xec, 0x26, 0xa6, + 0xf2, 0xba, 0x3a, 0x34, 0x21, 0xe2, 0x76, 0xa3, 0xef, 0xad, 0x6d, 0xf4, 0xfd, 0x46, 0xa3, 0x7f, + 0x02, 0x8e, 0xda, 0x94, 0x66, 0x11, 0x9a, 0x17, 0x60, 0x28, 0x81, 0x6f, 0xb2, 0x08, 0xbd, 0x5f, + 0x2d, 0xd8, 0xaa, 0x6e, 0x63, 0x94, 0x9f, 0x40, 0xe7, 0xa2, 0x66, 0x5f, 0x0e, 0x2b, 0x8e, 0xec, + 0x75, 0x1c, 0xdd, 0x79, 0xdc, 0x6a, 0x46, 0xba, 0x4d, 0x46, 0x6a, 0x31, 0x7a, 0x0d, 0x31, 0x64, + 0xc8, 0xb4, 0x14, 0x3f, 0x56, 0x21, 0xcb, 0xb1, 0x77, 0x09, 0xdb, 0x67, 0x82, 0x0a, 0xc6, 0x05, + 0x0b, 0x79, 0x45, 0x73, 0x8b, 0x50, 0xeb, 0x21, 0x42, 0xed, 0x75, 0x84, 0x76, 0x6a, 0x42, 0xbd, + 0xdf, 0x2d, 0x20, 0x4d, 0x4f, 0x86, 0x82, 0xff, 0xc1, 0x95, 0xa4, 0x4c, 0x64, 0x82, 0xc6, 0x81, + 0x7a, 0x55, 0xcc, 0xdb, 0xa0, 0x10, 0xf9, 0x70, 0x49, 0x95, 0x4a, 0x8e, 0x91, 0x5e, 0xd5, 0x0f, + 0xc3, 0x50, 0x02, 0x6a, 0x71, 0xf5, 0x5d, 0xe9, 0xb7, 0xde, 0x95, 0xfd, 0x5f, 0x3a, 0x30, 0x38, + 0x43, 0x7a, 0x83, 0x18, 0x91, 0x23, 0x18, 0x9f, 0x61, 0x1a, 0xdd, 0xfe, 0x55, 0x76, 0x1b, 0x25, + 0x52, 0xa3, 0xd3, 0x77, 0xef, 0x43, 0xab, 0xfb, 0x7b, 0x1b, 0x73, 0xeb, 0x85, 0x45, 0x4e, 0x61, + 0x7c, 0x8c, 0x98, 0x1f, 0x64, 0x69, 0x8a, 0xa1, 0xc0, 0x88, 0x3c, 0x6b, 0x6c, 0xba, 0xa7, 0x6f, + 0x4e, 0x1f, 0xdf, 0xf9, 0x22, 0x54, 0x65, 0x66, 0x4e, 0xfc, 0x16, 0x36, 0x9b, 0xed, 0x62, 0xe5, + 0xc0, 0x7b, 0x9a, 0xdb, 0xf4, 0xf9, 0x03, 0x7d, 0xc6, 0xdb, 0x20, 0x9f, 0x43, 0x5f, 0xe7, 0x2f, + 0x71, 0x1b, 0xc6, 0x2b, 0x05, 0xba, 0x12, 0xd7, 0x6a, 0xb2, 0x7b, 0x1b, 0xe4, 0x18, 0xe0, 0x36, + 0x03, 0x48, 0x93, 0x97, 0x3b, 0x29, 0x38, 0x7d, 0xba, 0x66, 0xb5, 0x3a, 0xec, 0xbc, 0xaf, 0x3e, + 0x8e, 0x9f, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x25, 0xfc, 0xb7, 0x54, 0x48, 0x0a, 0x00, 0x00, } diff --git a/weed/security/jwt.go b/weed/security/jwt.go index ba394c3bf..45a77f093 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -4,7 +4,6 @@ import ( "fmt" "net/http" "strings" - "time" 
"github.com/chrislusf/seaweedfs/weed/glog" @@ -57,10 +56,10 @@ func GetJwt(r *http.Request) EncodedJwt { func DecodeJwt(signingKey SigningKey, tokenString EncodedJwt) (token *jwt.Token, err error) { // check exp, nbf - return jwt.Parse(string(tokenString), func(token *jwt.Token) (interface{}, error) { + return jwt.ParseWithClaims(string(tokenString), &SeaweedFileIdClaims{}, func(token *jwt.Token) (interface{}, error) { if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { return nil, fmt.Errorf("unknown token method") } - return signingKey, nil + return []byte(signingKey), nil }) } diff --git a/weed/server/common.go b/weed/server/common.go index d88abfdc8..c9f17aa86 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -13,7 +13,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" @@ -83,7 +82,6 @@ func debug(params ...interface{}) { } func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string) { - jwt := security.GetJwt(r) m := make(map[string]interface{}) if r.Method != "POST" { writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!")) @@ -125,7 +123,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, jwt) + uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 06589e3c6..9a83ee1a6 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -233,6 +233,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol Count: int32(assignResult.Count), Url: assignResult.Url, PublicUrl: assignResult.PublicUrl, + Auth: string(assignResult.Auth), }, err } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 32f481e74..7cdbddde2 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "net/http" "net/url" + "os" "strconv" "strings" "time" @@ -14,8 +15,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "os" ) var ( @@ -31,7 +32,7 @@ type FilerPostResult struct { Url string `json:"url,omitempty"` } -func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, err error) { +func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, auth security.EncodedJwt, err error) { ar := &operation.VolumeAssignRequest{ Count: 1, Replication: replication, @@ -59,6 +60,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, } fileId = assignResult.Fid urlLocation = "http://" + assignResult.Url + "/" + 
assignResult.Fid + auth = assignResult.Auth return } @@ -82,7 +84,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } - fileId, urlLocation, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) if err != nil || fileId == "" || urlLocation == "" { glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) @@ -115,6 +117,9 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { Host: r.Host, ContentLength: r.ContentLength, } + if auth != "" { + request.Header.Set("Authorization", "BEARER "+string(auth)) + } resp, do_err := util.Do(request) if do_err != nil { glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, do_err, r.Method) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 4b1745aaa..b9c0691c7 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -14,6 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -105,14 +106,14 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { writtenChunks = writtenChunks + 1 - fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) if assignErr != nil { return nil, assignErr } // upload the chunk to the volume server chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId) + uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId, auth) if uploadErr != nil { return nil, uploadErr } @@ -175,11 +176,11 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte return } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string) (err error) { +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { err = nil ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId)) + uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) if uploadResult != nil { glog.V(0).Infoln("Chunk upload result. 
Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) } diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index ae0819d2d..6e9cd512d 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -6,6 +6,7 @@ import ( "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -92,6 +93,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest Url: dn.Url(), PublicUrl: dn.PublicUrl, Count: count, + Auth: string(security.GenJwt(ms.guard.SigningKey, fid)), }, nil } From 7103c1ab7eb4e879d4b7a20e31cac2a288fe04a9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Feb 2019 00:09:48 -0800 Subject: [PATCH 019/450] go fmt --- weed/operation/submit.go | 2 +- weed/server/raft_server.go | 2 +- weed/topology/topology.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 21cc887cf..374927495 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -198,7 +198,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re } func upload_one_chunk(filename string, reader io.Reader, master, -fileUrl string, jwt security.EncodedJwt, + fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") uploadResult, uploadError := Upload(fileUrl, filename, reader, false, diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index c332da38e..7afef0b15 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -131,7 +131,7 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, func isTheFirstOne(self string, peers []string) bool { sort.Strings(peers) - if len(peers)<=0{ + if len(peers) <= 0 { return true } return self == peers[0] diff --git a/weed/topology/topology.go b/weed/topology/topology.go index ff23be1ff..736a4c89b 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -50,7 +50,7 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls } func (t *Topology) IsLeader() bool { - if t.RaftServer!=nil { + if t.RaftServer != nil { return t.RaftServer.State() == raft.Leader } if leader, e := t.Leader(); e == nil { From 157c0f7c011816a817e56da7fd0fb7fa3a337aee Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Feb 2019 09:59:22 -0800 Subject: [PATCH 020/450] add jwt for writes --- weed/filesys/dirty_page.go | 8 +++++--- weed/replication/sink/filersink/fetch_write.go | 6 ++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 696296e62..69f652ead 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -4,13 +4,14 @@ import ( "bytes" "context" "fmt" + "sync" "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "sync" + "github.com/chrislusf/seaweedfs/weed/security" ) type ContinuousDirtyPages struct { @@ -164,6 +165,7 @@ func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Contex func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { 
var fileId, host string + var auth security.EncodedJwt if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -181,7 +183,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte return err } - fileId, host = resp.FileId, resp.Url + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) return nil }); err != nil { @@ -190,7 +192,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "") + uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload data: %v", err) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index c14566723..e632164a4 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -9,6 +9,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -59,6 +60,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri defer readCloser.Close() var host string + var auth security.EncodedJwt if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -76,7 +78,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri return err } - fileId, host = resp.FileId, resp.Url + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) return nil }); err != nil { @@ -88,7 +90,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) uploadResult, err := operation.Upload(fileUrl, filename, readCloser, - "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, "") + "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) return "", fmt.Errorf("upload data: %v", err) From 2ec6a679c2e86c437d5fd826bc119b6b9855f740 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Feb 2019 10:00:27 -0800 Subject: [PATCH 021/450] avoid "fchmod failed" during cp --- weed/filesys/file.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 6c07345a0..812137fe2 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -105,6 +105,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f file.entry.Attributes.Mtime = req.Mtime.Unix() } + if file.isOpen { + return nil + } + return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ From ad257ae1793f0631794334532f2c2b5f849600f5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 16 Feb 2019 08:55:23 -0800 Subject: [PATCH 022/450] simplify isLeader() logic --- weed/topology/topology.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/weed/topology/topology.go b/weed/topology/topology.go index 
ff23be1ff..3bf827d82 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -53,9 +53,6 @@ func (t *Topology) IsLeader() bool { if t.RaftServer!=nil { return t.RaftServer.State() == raft.Leader } - if leader, e := t.Leader(); e == nil { - return leader == t.RaftServer.Name() - } return false } From 98a03b38e5798fb75fe35174da297b3528b9750e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 16 Feb 2019 12:23:35 -0800 Subject: [PATCH 023/450] avoid util package depends on security package --- unmaintained/repeated_vacuum/repeated_vacuum.go | 2 +- weed/command/benchmark.go | 2 +- weed/topology/store_replicate.go | 2 +- weed/util/http_util.go | 4 +--- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index d551baddb..95201f3e8 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -36,7 +36,7 @@ func main() { log.Fatalf("upload: %v", err) } - util.Delete(targetUrl, assignResult.Auth) + util.Delete(targetUrl, string(assignResult.Auth)) util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 47fdc69a0..93359b243 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -200,7 +200,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { if isSecure { jwtAuthorization = operation.LookupJwt(masterClient.GetMaster(), df.fp.Fid) } - if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), jwtAuthorization); e == nil { + if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil { s.completed++ } else { s.failed++ diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index c73fb706a..3967bb3e3 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -102,7 +102,7 @@ func ReplicatedDelete(masterNode string, store *storage.Store, if needToReplicate { //send to other replica locations if r.FormValue("type") != "replicate" { if err = distributedOperation(masterNode, store, volumeId, func(location operation.Location) error { - return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", jwt) + return util.Delete("http://"+location.Url+r.URL.Path+"?type=replicate", string(jwt)) }); err != nil { ret = 0 } diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 21e0a678d..a465a7b7f 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -12,8 +12,6 @@ import ( "net/url" "strings" "time" - - "github.com/chrislusf/seaweedfs/weed/security" ) var ( @@ -97,7 +95,7 @@ func Head(url string) (http.Header, error) { return r.Header, nil } -func Delete(url string, jwt security.EncodedJwt) error { +func Delete(url string, jwt string) error { req, err := http.NewRequest("DELETE", url, nil) if jwt != "" { req.Header.Set("Authorization", "BEARER "+string(jwt)) From 55761ae806bc7cc8ab34424508aee5481131b941 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 16 Feb 2019 12:49:58 -0800 Subject: [PATCH 024/450] skip nil options --- weed/util/grpc_client_server.go | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index d029d21ae..7fa650855 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -17,24 +17,37 @@ var ( grpcClientsLock 
sync.Mutex ) -func NewGrpcServer() *grpc.Server { - return grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{ +func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { + var options []grpc.ServerOption + options = append(options, grpc.KeepaliveParams(keepalive.ServerParameters{ Time: 10 * time.Second, // wait time before ping if no activity Timeout: 20 * time.Second, // ping timeout }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: 60 * time.Second, // min time a client should wait before sending a ping })) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } + return grpc.NewServer(options...) } func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { // opts = append(opts, grpc.WithBlock()) // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) - opts = append(opts, grpc.WithInsecure()) - opts = append(opts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 30 * time.Second, // client ping server if no activity for this long - Timeout: 20 * time.Second, - })) - + var options []grpc.DialOption + options = append(options, + grpc.WithInsecure(), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 30 * time.Second, // client ping server if no activity for this long + Timeout: 20 * time.Second, + })) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } return grpc.Dial(address, opts...) } From 77b9af531d18e10b04b49b069b5f26a329ed4902 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Feb 2019 12:11:52 -0800 Subject: [PATCH 025/450] adding grpc mutual tls --- .../repeated_vacuum/repeated_vacuum.go | 8 ++- weed/command/backup.go | 11 +++- weed/command/benchmark.go | 14 +++- weed/command/filer.go | 6 +- weed/command/filer_copy.go | 49 ++++++++------ weed/command/filer_replication.go | 1 + weed/command/master.go | 7 +- weed/command/mount_std.go | 7 ++ weed/command/s3.go | 6 ++ weed/command/scaffold.go | 26 ++++++++ weed/command/server.go | 8 ++- weed/command/upload.go | 11 +++- weed/command/volume.go | 7 +- weed/filer2/filer.go | 7 +- weed/filer2/filer_deletion.go | 4 +- weed/filer2/leveldb/leveldb_store_test.go | 4 +- weed/filer2/memdb/memdb_store_test.go | 4 +- weed/filesys/filehandle.go | 5 +- weed/filesys/wfs.go | 3 +- weed/filesys/wfs_deletion.go | 2 +- weed/operation/assign_file_id.go | 5 +- weed/operation/chunked_file.go | 5 +- weed/operation/delete_content.go | 15 +++-- weed/operation/grpc_client.go | 8 +-- weed/operation/lookup.go | 5 +- weed/operation/stats.go | 5 +- weed/operation/submit.go | 19 +++--- weed/operation/sync_volume.go | 9 +-- .../replication/sink/filersink/fetch_write.go | 2 +- weed/replication/sink/filersink/filer_sink.go | 19 ++++-- weed/replication/source/filer_source.go | 15 +++-- weed/s3api/s3api_handlers.go | 2 +- weed/s3api/s3api_server.go | 2 + weed/security/tls.go | 66 +++++++++++++++++++ weed/server/common.go | 5 +- weed/server/filer_grpc_server.go | 4 +- weed/server/filer_server.go | 13 ++-- weed/server/filer_server_handlers_write.go | 2 +- weed/server/master_grpc_server_volume.go | 2 +- weed/server/master_server.go | 7 +- weed/server/master_server_handlers.go | 2 +- weed/server/master_server_handlers_admin.go | 10 +-- weed/server/volume_grpc_client_to_master.go | 11 +++- weed/server/volume_server.go | 18 ++--- weed/server/volume_server_handlers_write.go | 2 +- weed/storage/volume_sync.go | 21 +++--- weed/topology/allocate_volume.go | 5 +- weed/topology/topology_event_handling.go | 5 +- 
weed/topology/topology_vacuum.go | 31 ++++----- weed/topology/volume_growth.go | 17 ++--- weed/util/grpc_client_server.go | 4 +- weed/wdclient/masterclient.go | 29 ++++---- weed/wdclient/wdclient.go | 15 ----- 53 files changed, 382 insertions(+), 188 deletions(-) create mode 100644 weed/security/tls.go delete mode 100644 weed/wdclient/wdclient.go diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 95201f3e8..90bdeb5e8 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -4,6 +4,9 @@ import ( "bytes" "flag" "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" "log" "math/rand" @@ -19,8 +22,11 @@ var ( func main() { flag.Parse() + weed_server.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + for i := 0; i < *repeat; i++ { - assignResult, err := operation.Assign(*master, &operation.VolumeAssignRequest{Count: 1}) + assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) if err != nil { log.Fatalf("assign: %v", err) } diff --git a/weed/command/backup.go b/weed/command/backup.go index 0641f2e5d..86391f9c4 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -2,6 +2,9 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/storage" @@ -46,6 +49,10 @@ var cmdBackup = &Command{ } func runBackup(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + if *s.volumeId == -1 { return false } @@ -59,7 +66,7 @@ func runBackup(cmd *Command, args []string) bool { } volumeServer := lookup.Locations[0].Url - stats, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid)) + stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid)) if err != nil { fmt.Printf("Error get volume %d status: %v\n", vid, err) return true @@ -81,7 +88,7 @@ func runBackup(cmd *Command, args []string) bool { return true } - if err := v.Synchronize(volumeServer); err != nil { + if err := v.Synchronize(volumeServer, grpcDialOption); err != nil { fmt.Printf("Error synchronizing volume %d: %v\n", vid, err) return true } diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 93359b243..44601e567 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -4,6 +4,9 @@ import ( "bufio" "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" + "google.golang.org/grpc" "io" "math" "math/rand" @@ -35,6 +38,7 @@ type BenchmarkOptions struct { collection *string cpuprofile *string maxCpu *int + grpcDialOption grpc.DialOption } var ( @@ -101,6 +105,10 @@ var ( ) func runBenchmark(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) if *b.maxCpu < 1 { *b.maxCpu = runtime.NumCPU() @@ -115,7 +123,7 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - masterClient = 
wdclient.NewMasterClient(context.Background(), "benchmark", strings.Split(*b.masters, ",")) + masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ",")) go masterClient.KeepConnectedToMaster() masterClient.WaitUntilConnected() @@ -223,12 +231,12 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { Count: 1, Collection: *b.collection, } - if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil { + if assignResult, err := operation.Assign(masterClient.GetMaster(), b.grpcDialOption, ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection if !isSecure && assignResult.Auth != "" { isSecure = true } - if _, err := fp.Upload(0, masterClient.GetMaster(), assignResult.Auth); err == nil { + if _, err := fp.Upload(0, masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} diff --git a/weed/command/filer.go b/weed/command/filer.go index a07a67471..478b7d6bf 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -1,6 +1,8 @@ package command import ( + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" "net/http" "strconv" "strings" @@ -75,6 +77,8 @@ var cmdFiler = &Command{ func runFiler(cmd *Command, args []string) bool { + weed_server.LoadConfiguration("security", false) + f.startFiler() return true @@ -141,7 +145,7 @@ func (fo *FilerOptions) startFiler() { if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer() + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 39d83c31e..650757442 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -2,6 +2,10 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" + "google.golang.org/grpc" "io/ioutil" "net/url" "os" @@ -23,13 +27,14 @@ var ( ) type CopyOptions struct { - filerGrpcPort *int - master *string - include *string - replication *string - collection *string - ttl *string - maxMB *int + filerGrpcPort *int + master *string + include *string + replication *string + collection *string + ttl *string + maxMB *int + grpcDialOption grpc.DialOption } func init() { @@ -61,6 +66,9 @@ var cmdCopy = &Command{ } func runCopy(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + if len(args) <= 1 { return false } @@ -95,16 +103,17 @@ func runCopy(cmd *Command, args []string) bool { } filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) + copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") for _, fileOrDir := range fileOrDirs { - if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, urlPath) { + if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) { return false } } return true } -func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path string) bool { +func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, path string) bool { f, err := os.Open(fileOrDir) if err != nil { 
fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err) @@ -122,7 +131,7 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path st if mode.IsDir() { files, _ := ioutil.ReadDir(fileOrDir) for _, subFileOrDir := range files { - if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, path+fi.Name()+"/") { + if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, grpcDialOption, path+fi.Name()+"/") { return false } } @@ -144,13 +153,13 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path st } if chunkCount == 1 { - return uploadFileAsOne(filerAddress, filerGrpcAddress, path, f, fi) + return uploadFileAsOne(filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi) } - return uploadFileInChunks(filerAddress, filerGrpcAddress, path, f, fi, chunkCount, chunkSize) + return uploadFileInChunks(filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi, chunkCount, chunkSize) } -func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo) bool { +func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo) bool { // upload the file content fileName := filepath.Base(f.Name()) @@ -161,7 +170,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f if fi.Size() > 0 { // assign a volume - assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(*copy.master, grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, Replication: *copy.replication, Collection: *copy.collection, @@ -195,7 +204,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) } - if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: urlFolder, Entry: &filer_pb.Entry{ @@ -228,7 +237,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f return true } -func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { +func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) @@ -238,7 +247,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, for i := int64(0); i < int64(chunkCount); i++ { // assign a volume - assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(*copy.master, grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, Replication: *copy.replication, Collection: *copy.collection, @@ -272,7 +281,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) } - if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client 
filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: urlFolder, Entry: &filer_pb.Entry{ @@ -323,9 +332,9 @@ func detectMimeType(f *os.File) string { return mimeType } -func withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error { +func withFilerClient(filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(filerAddress) + grpcConnection, err := util.GrpcDial(filerAddress, grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", filerAddress, err) } diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index c24f63bf0..c9afbdc8a 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -35,6 +35,7 @@ var cmdFilerReplicate = &Command{ func runFilerReplicate(cmd *Command, args []string) bool { + weed_server.LoadConfiguration("security", false) weed_server.LoadConfiguration("replication", true) weed_server.LoadConfiguration("notification", true) config := viper.GetViper() diff --git a/weed/command/master.go b/weed/command/master.go index 6f1373aa2..5b45c9627 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -1,6 +1,8 @@ package command import ( + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" "net/http" "os" "runtime" @@ -54,6 +56,9 @@ var ( ) func runMaster(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + if *mMaxCpu < 1 { *mMaxCpu = runtime.NumCPU() } @@ -104,7 +109,7 @@ func runMaster(cmd *Command, args []string) bool { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } // Create your protocol servers. - grpcS := util.NewGrpcServer() + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) master_pb.RegisterSeaweedServer(grpcS, ms) reflection.Register(grpcS) diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 2937b9ef1..3e4249bfc 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -4,6 +4,9 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" "os" "os/user" "runtime" @@ -19,6 +22,9 @@ import ( ) func runMount(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) if *mountOptions.dir == "" { fmt.Printf("Please specify the mount directory via \"-dir\"") @@ -91,6 +97,7 @@ func runMount(cmd *Command, args []string) bool { err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), FilerMountRootPath: mountRoot, Collection: *mountOptions.collection, Replication: *mountOptions.replication, diff --git a/weed/command/s3.go b/weed/command/s3.go index 16a9490ff..a54ddd2f7 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -1,6 +1,9 @@ package command import ( + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" "net/http" "time" @@ -46,6 +49,8 @@ var cmdS3 = &Command{ func runS3(cmd *Command, args []string) bool { + weed_server.LoadConfiguration("security", false) + filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort) if err != nil { 
glog.Fatal(err) @@ -59,6 +64,7 @@ func runS3(cmd *Command, args []string) bool { FilerGrpcAddress: filerGrpcAddress, DomainName: *s3options.domainName, BucketsPath: *s3options.filerBucketsPath, + GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 22300d3ba..e8608e9dd 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -250,8 +250,34 @@ directory = "/" # destination directory # /etc/seaweedfs/security.toml # this file is read by master, volume server, and filer +# the jwt signing key is read by master and volume server +# a jwt expires in 10 seconds [jwt.signing] key = "" +# volume server also uses grpc that should be secured. + +# all grpc tls authentications are mutual +[grpc] +ca = "" + +[grpc.volume] +cert = "" +key = "" + +[grpc.master] +cert = "" +key = "" + +[grpc.filer] +cert = "" +key = "" + +# use this for any place needs a grpc client +# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" +[grpc.client] +cert = "" +key = "" + ` ) diff --git a/weed/command/server.go b/weed/command/server.go index 2dd506772..a9415d068 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -1,6 +1,8 @@ package command import ( + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" "net/http" "os" "runtime" @@ -95,6 +97,9 @@ func init() { } func runServer(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + if *serverOptions.cpuprofile != "" { f, err := os.Create(*serverOptions.cpuprofile) if err != nil { @@ -188,7 +193,8 @@ func runServer(cmd *Command, args []string) bool { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } // Create your protocol servers. 
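	// How the new TLS wiring degrades gracefully (per security.LoadServerTLS and
	// util.NewGrpcServer elsewhere in this series): LoadServerTLS returns nil when
	// the [grpc] section of security.toml is absent or the cert/key cannot be
	// loaded, and NewGrpcServer/GrpcDial drop nil options, so gRPC stays plaintext
	// until mutual TLS is actually configured. Once certs are in place, the server
	// side enforces tls.RequireAndVerifyClientCert against the shared CA, e.g.:
	//
	//	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master"))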
- grpcS := util.NewGrpcServer() + glog.V(0).Infof("grpc config %+v", viper.Sub("grpc")) + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) master_pb.RegisterSeaweedServer(grpcS, ms) reflection.Register(grpcS) diff --git a/weed/command/upload.go b/weed/command/upload.go index df2cb9892..80fc635c1 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -3,6 +3,9 @@ package command import ( "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" "os" "path/filepath" @@ -57,6 +60,10 @@ var cmdUpload = &Command{ } func runUpload(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + if len(args) == 0 { if *upload.dir == "" { return false @@ -73,7 +80,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { return e } - results, e := operation.SubmitFiles(*upload.master, parts, + results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB) bytes, _ := json.Marshal(results) @@ -92,7 +99,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { fmt.Println(e.Error()) } - results, _ := operation.SubmitFiles(*upload.master, parts, + results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB) bytes, _ := json.Marshal(results) diff --git a/weed/command/volume.go b/weed/command/volume.go index 27a075b5b..32ec7819b 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -1,6 +1,8 @@ package command import ( + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" "net/http" "os" "runtime" @@ -78,6 +80,9 @@ var ( ) func runVolume(cmd *Command, args []string) bool { + + weed_server.LoadConfiguration("security", false) + if *v.maxCpu < 1 { *v.maxCpu = runtime.NumCPU() } @@ -185,7 +190,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer() + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume")) volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 1ee2f5ede..672c6201c 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -3,6 +3,7 @@ package filer2 import ( "context" "fmt" + "google.golang.org/grpc" "math" "os" "path/filepath" @@ -24,13 +25,15 @@ type Filer struct { directoryCache *ccache.Cache MasterClient *wdclient.MasterClient fileIdDeletionChan chan string + GrpcDialOption grpc.DialOption } -func NewFiler(masters []string) *Filer { +func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { f := &Filer{ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), "filer", masters), + MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), fileIdDeletionChan: make(chan string, 4096), + GrpcDialOption: grpcDialOption, } go f.loopProcessingDeletion() diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go index 8fe8ae04f..c7d02657d 100644 --- 
a/weed/filer2/filer_deletion.go +++ b/weed/filer2/filer_deletion.go @@ -38,13 +38,13 @@ func (f *Filer) loopProcessingDeletion() { fileIds = append(fileIds, fid) if len(fileIds) >= 4096 { glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) fileIds = fileIds[:0] } case <-ticker.C: if len(fileIds) > 0 { glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) fileIds = fileIds[:0] } } diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 5b214558f..4d600e0bf 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -8,7 +8,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -61,7 +61,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDBStore{} diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go index cf813e04b..31da8998f 100644 --- a/weed/filer2/memdb/memdb_store_test.go +++ b/weed/filer2/memdb/memdb_store_test.go @@ -6,7 +6,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) store := &MemDbStore{} store.Initialize(nil) filer.SetStore(store) @@ -43,7 +43,7 @@ func TestCreateAndFind(t *testing.T) { } func TestCreateFileAndList(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) store := &MemDbStore{} store.Initialize(nil) filer.SetStore(store) diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 0f6ca1164..3bca0e22e 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + "google.golang.org/grpc" "net/http" "strings" "sync" @@ -230,7 +231,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { }) } -func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, fileIds []string) error { +func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { var vids []string for _, fileId := range fileIds { @@ -267,7 +268,7 @@ func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, file return m, err } - _, err := operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) return err } diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 6778d7b31..f7383582d 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -19,6 +19,7 @@ import ( type Option struct { FilerGrpcAddress string + GrpcDialOption grpc.DialOption FilerMountRootPath string Collection string Replication string @@ -77,7 +78,7 @@ func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) erro return 
util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, wfs.option.FilerGrpcAddress) + }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) } diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index b96b27ca6..90058d75a 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -16,7 +16,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { } wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(context.Background(), client, fileIds) + deleteFileIds(context.Background(), wfs.option.GrpcDialOption, client, fileIds) return nil }) } diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index acadc88c8..7e7a9059d 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,6 +3,7 @@ package operation import ( "context" "fmt" + "google.golang.org/grpc" "strings" "time" @@ -30,7 +31,7 @@ type AssignResult struct { Auth security.EncodedJwt `json:"auth,omitempty"` } -func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { +func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { var requests []*VolumeAssignRequest requests = append(requests, primaryRequest) @@ -44,7 +45,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque continue } - lastError = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { + lastError = withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 9d8267dee..f3f6e7b00 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "google.golang.org/grpc" "io" "net/http" "sort" @@ -69,12 +70,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) { return json.Marshal(cm) } -func (cm *ChunkManifest) DeleteChunks(master string) error { +func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error { var fileIds []string for _, ci := range cm.Chunks { fileIds = append(fileIds, ci.Fid) } - results, err := DeleteFiles(master, fileIds) + results, err := DeleteFiles(master, grpcDialOption, fileIds) if err != nil { glog.V(0).Infof("delete %+v: %v", fileIds, err) return fmt.Errorf("chunk delete: %v", err) diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 57fc0329e..1df95211e 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "google.golang.org/grpc" "net/http" "strings" "sync" @@ -28,17 +29,17 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) { } // DeleteFiles batch deletes a list of fileIds -func DeleteFiles(master string, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { +func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { lookupFunc := func(vids []string) (map[string]LookupResult, error) { - return LookupVolumeIds(master, vids) + return 
LookupVolumeIds(master, grpcDialOption, vids) } - return DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) } -func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { +func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { var ret []*volume_server_pb.DeleteResult @@ -92,7 +93,7 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin go func(server string, fidList []string) { defer wg.Done() - if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, fidList); deleteErr != nil { + if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil { err = deleteErr } else { ret = append(ret, deleteResults...) @@ -106,9 +107,9 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin } // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc -func DeleteFilesAtOneVolumeServer(volumeServer string, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { +func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { - err = WithVolumeServerClient(volumeServer, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index d0931a8d3..a02844657 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -18,7 +18,7 @@ var ( grpcClientsLock sync.Mutex ) -func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.VolumeServerClient) error) error { +func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { @@ -28,7 +28,7 @@ func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.Volume return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) - }, grpcAddress) + }, grpcAddress, grpcDialOption) } @@ -42,7 +42,7 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil } -func withMasterServerClient(masterServer string, fn func(masterClient master_pb.SeaweedClient) error) error { +func withMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0) if parseErr != nil { @@ -52,6 +52,6 @@ func withMasterServerClient(masterServer string, fn func(masterClient master_pb. 
return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) - }, masterGrpcAddress) + }, masterGrpcAddress, grpcDialOption) } diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index 562a11580..c4040f3e7 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "google.golang.org/grpc" "math/rand" "net/url" "strings" @@ -78,7 +79,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) { } // LookupVolumeIds find volume locations by cache and actual lookup -func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, error) { +func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) { ret := make(map[string]LookupResult) var unknown_vids []string @@ -98,7 +99,7 @@ func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, err //only query unknown_vids - err := withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { + err := withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() diff --git a/weed/operation/stats.go b/weed/operation/stats.go index 364727272..9f7166864 100644 --- a/weed/operation/stats.go +++ b/weed/operation/stats.go @@ -2,14 +2,15 @@ package operation import ( "context" + "google.golang.org/grpc" "time" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) -func Statistics(server string, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { +func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - err = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { + err = withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 374927495..bdf59d966 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -2,6 +2,7 @@ package operation import ( "bytes" + "google.golang.org/grpc" "io" "mime" "net/url" @@ -36,7 +37,7 @@ type SubmitResult struct { Error string `json:"error,omitempty"` } -func SubmitFiles(master string, files []FilePart, +func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { @@ -49,7 +50,7 @@ func SubmitFiles(master string, files []FilePart, DataCenter: dataCenter, Ttl: ttl, } - ret, err := Assign(master, ar) + ret, err := Assign(master, grpcDialOption, ar) if err != nil { for index, _ := range files { results[index].Error = err.Error() @@ -65,7 +66,7 @@ func SubmitFiles(master string, files []FilePart, file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, ret.Auth) + results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption) if err != nil { results[index].Error = err.Error() } @@ -108,7 +109,7 @@ func 
newFilePart(fullPathFilename string) (ret FilePart, err error) { return ret, nil } -func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (retSize uint32, err error) { +func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) @@ -136,7 +137,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re Collection: fi.Collection, Ttl: fi.Ttl, } - ret, err = Assign(master, ar) + ret, err = Assign(master, grpcDialOption, ar) if err != nil { return } @@ -149,10 +150,10 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re Collection: fi.Collection, Ttl: fi.Ttl, } - ret, err = Assign(master, ar) + ret, err = Assign(master, grpcDialOption, ar) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, grpcDialOption) return } id = ret.Fid @@ -170,7 +171,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re ret.Auth) if e != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, grpcDialOption) return 0, e } cm.Chunks = append(cm.Chunks, @@ -185,7 +186,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re err = upload_chunked_file_manifest(fileUrl, &cm, jwt) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, grpcDialOption) } } else { ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index e40c7de41..bf81415c9 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -3,6 +3,7 @@ package operation import ( "context" "fmt" + "google.golang.org/grpc" "io" "time" @@ -11,9 +12,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { +func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { - WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error { + WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() @@ -26,9 +27,9 @@ func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.Volu return } -func GetVolumeIdxEntries(server string, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error { +func GetVolumeIdxEntries(server string, grpcDialOption grpc.DialOption, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error { - return WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error { + return WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{ VolumdId: vid, }) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index e632164a4..f1306ca4c 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ 
b/weed/replication/sink/filersink/fetch_write.go @@ -105,7 +105,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(fs.grpcAddress) + grpcConnection, err := util.GrpcDial(fs.grpcAddress, fs.grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 2e9cc86d1..2eb326b83 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -3,6 +3,9 @@ package filersink import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -13,13 +16,14 @@ import ( ) type FilerSink struct { - filerSource *source.FilerSource - grpcAddress string - dir string - replication string - collection string - ttlSec int32 - dataCenter string + filerSource *source.FilerSource + grpcAddress string + dir string + replication string + collection string + ttlSec int32 + dataCenter string + grpcDialOption grpc.DialOption } func init() { @@ -55,6 +59,7 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) + fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") return nil } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index efe71e706..92c2d203d 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -3,6 +3,9 @@ package source import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "google.golang.org/grpc" "io" "net/http" "strings" @@ -17,8 +20,9 @@ type ReplicationSource interface { } type FilerSource struct { - grpcAddress string - Dir string + grpcAddress string + grpcDialOption grpc.DialOption + Dir string } func (fs *FilerSource) Initialize(configuration util.Configuration) error { @@ -31,6 +35,7 @@ func (fs *FilerSource) Initialize(configuration util.Configuration) error { func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { fs.grpcAddress = grpcAddress fs.Dir = dir + fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") return nil } @@ -40,7 +45,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) { vid := volumeId(part) - err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ @@ -84,9 +89,9 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSource) withFilerClient(grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(fs.grpcAddress) + grpcConnection, err := util.GrpcDial(fs.grpcAddress, grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) } 
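Taken together, the client-side pattern in this patch is always the same: read security.toml once, build a grpc.DialOption with security.LoadClientTLS, and hand it to whatever makes the gRPC call. Below is a minimal standalone sketch of that pattern, assuming the package names used in this series and an example master address of localhost:9333 (error handling abbreviated); it is an illustration, not part of the patch.

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/server"
	"github.com/spf13/viper"
)

func main() {
	// read security.toml; when its [grpc] section is empty this degrades to insecure gRPC
	weed_server.LoadConfiguration("security", false)
	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")

	// master and volume-server gRPC helpers now take the dial option explicitly
	assignResult, err := operation.Assign("localhost:9333", grpcDialOption,
		&operation.VolumeAssignRequest{Count: 1})
	if err != nil {
		log.Fatalf("assign: %v", err)
	}
	fmt.Printf("assigned %s on %s, auth jwt attached: %v\n",
		assignResult.Fid, assignResult.Url, assignResult.Auth != "")
}
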
diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 286398310..5d92085cc 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -37,7 +37,7 @@ func encodeResponse(response interface{}) []byte { func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress) + grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err) } diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index db798a546..24458592d 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -8,6 +8,7 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" "github.com/gorilla/mux" + "google.golang.org/grpc" "net/http" ) @@ -16,6 +17,7 @@ type S3ApiServerOption struct { FilerGrpcAddress string DomainName string BucketsPath string + GrpcDialOption grpc.DialOption } type S3ApiServer struct { diff --git a/weed/security/tls.go b/weed/security/tls.go new file mode 100644 index 000000000..e81ba4831 --- /dev/null +++ b/weed/security/tls.go @@ -0,0 +1,66 @@ +package security + +import ( + "crypto/tls" + "crypto/x509" + "github.com/spf13/viper" + "io/ioutil" + + "github.com/chrislusf/seaweedfs/weed/glog" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { + if config == nil { + return nil + } + + // load cert/key, ca cert + cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + if err != nil { + glog.Errorf("load cert/key error: %v", err) + return nil + } + caCert, err := ioutil.ReadFile(config.GetString("ca")) + if err != nil { + glog.Errorf("read ca cert file error: %v", err) + return nil + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + ta := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + }) + + return grpc.Creds(ta) +} + +func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { + if config == nil { + return grpc.WithInsecure() + } + + // load cert/key, cacert + cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + if err != nil { + glog.Errorf("load cert/key error: %v", err) + return grpc.WithInsecure() + } + caCert, err := ioutil.ReadFile(config.GetString("ca")) + if err != nil { + glog.Errorf("read ca cert file error: %v", err) + return grpc.WithInsecure() + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + ta := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + InsecureSkipVerify: true, + }) + return grpc.WithTransportCredentials(ta) +} diff --git a/weed/server/common.go b/weed/server/common.go index c9f17aa86..1c75d44cf 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "google.golang.org/grpc" "net/http" "path/filepath" "strconv" @@ -81,7 +82,7 @@ func debug(params ...interface{}) { glog.V(4).Infoln(params...) 
} -func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string) { +func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) { m := make(map[string]interface{}) if r.Method != "POST" { writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!")) @@ -111,7 +112,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st Collection: r.FormValue("collection"), Ttl: r.FormValue("ttl"), } - assignResult, ae := operation.Assign(masterUrl, ar) + assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar) if ae != nil { writeJsonError(w, r, http.StatusInternalServerError, ae) return diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 9a83ee1a6..4f1377331 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -220,7 +220,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol DataCenter: "", } } - assignResult, err := operation.Assign(fs.filer.GetMaster(), assignRequest, altRequest) + assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { return nil, fmt.Errorf("assign volume: %v", err) } @@ -254,7 +254,7 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR Ttl: req.Ttl, } - output, err := operation.Statistics(fs.filer.GetMaster(), input) + output, err := operation.Statistics(fs.filer.GetMaster(), fs.grpcDialOption, input) if err != nil { return nil, err } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index c3c5072d0..2ace0a7ea 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -1,6 +1,7 @@ package weed_server import ( + "google.golang.org/grpc" "net/http" "os" @@ -34,22 +35,24 @@ type FilerOption struct { } type FilerServer struct { - option *FilerOption - secret security.SigningKey - filer *filer2.Filer + option *FilerOption + secret security.SigningKey + filer *filer2.Filer + grpcDialOption grpc.DialOption } func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) { fs = &FilerServer{ - option: option, + option: option, + grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), } if len(option.Masters) == 0 { glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters) + fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) go fs.filer.KeepConnectedToMaster() diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 7cdbddde2..9e231c645 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -51,7 +51,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, } } - assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar, altRequest) + assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest) if ae != nil { glog.Errorf("failing to assign a file id: %v", ae) writeJsonError(w, r, http.StatusInternalServerError, ae) diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index 6e9cd512d..13f8b37d1 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -76,7 +76,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } ms.vgLock.Lock() 
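	// The dial option used below is created once in NewMasterServer via
	// security.LoadClientTLS(v.Sub("grpc"), "master") and reused for every
	// master-to-volume-server call in this series (grow, vacuum, collection
	// delete). The volume-growth logic itself is unchanged and still runs
	// under vgLock; only the transport of the calls it issues gains mutual TLS.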
if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil { ms.vgLock.Unlock() return nil, fmt.Errorf("Cannot grow volume group! %v", err) } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 06c959b92..a44a567d6 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -2,6 +2,7 @@ package weed_server import ( "fmt" + "google.golang.org/grpc" "net/http" "net/http/httputil" "net/url" @@ -37,6 +38,8 @@ type MasterServer struct { // notifying clients clientChansLock sync.RWMutex clientChans map[string]chan *master_pb.VolumeLocation + + grpcDialOpiton grpc.DialOption } func NewMasterServer(r *mux.Router, port int, metaFolder string, @@ -48,7 +51,6 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, whiteList []string, ) *MasterServer { - LoadConfiguration("security", false) v := viper.GetViper() signingKey := v.GetString("jwt.signing.key") @@ -64,6 +66,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, defaultReplicaPlacement: defaultReplicaPlacement, garbageThreshold: garbageThreshold, clientChans: make(map[string]chan *master_pb.VolumeLocation), + grpcDialOpiton: security.LoadClientTLS(v.Sub("grpc"), "master"), } ms.bounedLeaderChan = make(chan int, 16) seq := sequence.NewMemorySequencer() @@ -89,7 +92,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler)) - ms.Topo.StartRefreshWritableVolumes(garbageThreshold, ms.preallocate) + ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOpiton, garbageThreshold, ms.preallocate) return ms } diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go index c4149e0cf..5bdb448c1 100644 --- a/weed/server/master_server_handlers.go +++ b/weed/server/master_server_handlers.go @@ -93,7 +93,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) ms.vgLock.Lock() defer ms.vgLock.Unlock() if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Cannot grow volume group! 
%v", err)) return diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 3a2662908..eccf3ee4c 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -24,7 +24,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R return } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), func(client volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() @@ -60,7 +60,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque } } glog.Infoln("garbageThreshold =", gcThreshold) - ms.Topo.Vacuum(gcThreshold, ms.preallocate) + ms.Topo.Vacuum(ms.grpcDialOpiton, gcThreshold, ms.preallocate) ms.dirStatusHandler(w, r) } @@ -76,7 +76,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request if ms.Topo.FreeSpace() < count*option.ReplicaPlacement.GetCopyCount() { err = errors.New("Only " + strconv.Itoa(ms.Topo.FreeSpace()) + " volumes left! Not enough for " + strconv.Itoa(count*option.ReplicaPlacement.GetCopyCount())) } else { - count, err = ms.vg.GrowByCountAndType(count, option, ms.Topo) + count, err = ms.vg.GrowByCountAndType(ms.grpcDialOpiton, count, option, ms.Topo) } } else { err = errors.New("parameter count is not found") @@ -126,13 +126,13 @@ func (ms *MasterServer) selfUrl(r *http.Request) string { } func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) { if ms.Topo.IsLeader() { - submitForClientHandler(w, r, ms.selfUrl(r)) + submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOpiton) } else { masterUrl, err := ms.Topo.Leader() if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else { - submitForClientHandler(w, r, masterUrl) + submitForClientHandler(w, r, masterUrl, ms.grpcDialOpiton) } } } diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 25e9b1677..38603e4b6 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -2,6 +2,9 @@ package weed_server import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "google.golang.org/grpc" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -19,6 +22,8 @@ func (vs *VolumeServer) heartbeat() { vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "volume") + var err error var newLeader string for { @@ -31,7 +36,7 @@ func (vs *VolumeServer) heartbeat() { glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress) continue } - newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, time.Duration(vs.pulseSeconds)*time.Second) + newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) @@ -40,9 +45,9 @@ func (vs *VolumeServer) heartbeat() { } } -func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepInterval time.Duration) (newLeader string, err error) { +func (vs *VolumeServer) doHeartbeat(masterNode, 
masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { - grpcConection, err := util.GrpcDial(masterGrpcAddress) + grpcConection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index d8ff01766..8e77ec570 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -1,6 +1,7 @@ package weed_server import ( + "google.golang.org/grpc" "net/http" "github.com/chrislusf/seaweedfs/weed/glog" @@ -10,13 +11,14 @@ import ( ) type VolumeServer struct { - MasterNodes []string - currentMaster string - pulseSeconds int - dataCenter string - rack string - store *storage.Store - guard *security.Guard + MasterNodes []string + currentMaster string + pulseSeconds int + dataCenter string + rack string + store *storage.Store + guard *security.Guard + grpcDialOption grpc.DialOption needleMapKind storage.NeedleMapType FixJpgOrientation bool @@ -33,7 +35,6 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, fixJpgOrientation bool, readRedirect bool) *VolumeServer { - LoadConfiguration("security", false) v := viper.GetViper() signingKey := v.GetString("jwt.signing.key") enableUiAccess := v.GetBool("access.ui") @@ -45,6 +46,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, needleMapKind: needleMapKind, FixJpgOrientation: fixJpgOrientation, ReadRedirect: readRedirect, + grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"), } vs.MasterNodes = masterNodes vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 1cfd9187e..6b78cea40 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -95,7 +95,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { return } // make sure all chunks had deleted before delete manifest - if e := chunkManifest.DeleteChunks(vs.GetMaster()); e != nil { + if e := chunkManifest.DeleteChunks(vs.GetMaster(), vs.grpcDialOption); e != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e)) return } diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go index 137a9b4ca..8d90a729d 100644 --- a/weed/storage/volume_sync.go +++ b/weed/storage/volume_sync.go @@ -3,6 +3,7 @@ package storage import ( "context" "fmt" + "google.golang.org/grpc" "io" "os" "sort" @@ -45,12 +46,12 @@ optimized more later). 
*/ -func (v *Volume) Synchronize(volumeServer string) (err error) { +func (v *Volume) Synchronize(volumeServer string, grpcDialOption grpc.DialOption) (err error) { var lastCompactRevision uint16 = 0 var compactRevision uint16 = 0 var masterMap *needle.CompactMap for i := 0; i < 3; i++ { - if masterMap, _, compactRevision, err = fetchVolumeFileEntries(volumeServer, v.Id); err != nil { + if masterMap, _, compactRevision, err = fetchVolumeFileEntries(volumeServer, grpcDialOption, v.Id); err != nil { return fmt.Errorf("Failed to sync volume %d entries with %s: %v", v.Id, volumeServer, err) } if lastCompactRevision != compactRevision && lastCompactRevision != 0 { @@ -62,7 +63,7 @@ func (v *Volume) Synchronize(volumeServer string) (err error) { } } lastCompactRevision = compactRevision - if err = v.trySynchronizing(volumeServer, masterMap, compactRevision); err == nil { + if err = v.trySynchronizing(volumeServer, grpcDialOption, masterMap, compactRevision); err == nil { return } } @@ -77,7 +78,7 @@ func (a ByOffset) Less(i, j int) bool { return a[i].Offset < a[j].Offset } // trySynchronizing sync with remote volume server incrementally by // make up the local and remote delta. -func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.CompactMap, compactRevision uint16) error { +func (v *Volume) trySynchronizing(volumeServer string, grpcDialOption grpc.DialOption, masterMap *needle.CompactMap, compactRevision uint16) error { slaveIdxFile, err := os.Open(v.nm.IndexFileName()) if err != nil { return fmt.Errorf("Open volume %d index file: %v", v.Id, err) @@ -126,7 +127,7 @@ func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.Compact continue } // add master file entry to local data file - if err := v.fetchNeedle(volumeServer, needleValue, compactRevision); err != nil { + if err := v.fetchNeedle(volumeServer, grpcDialOption, needleValue, compactRevision); err != nil { glog.V(0).Infof("Fetch needle %v from %s: %v", needleValue, volumeServer, err) return err } @@ -136,16 +137,16 @@ func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.Compact return nil } -func fetchVolumeFileEntries(volumeServer string, vid VolumeId) (m *needle.CompactMap, lastOffset uint64, compactRevision uint16, err error) { +func fetchVolumeFileEntries(volumeServer string, grpcDialOption grpc.DialOption, vid VolumeId) (m *needle.CompactMap, lastOffset uint64, compactRevision uint16, err error) { m = needle.NewCompactMap() - syncStatus, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid)) + syncStatus, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid)) if err != nil { return m, 0, 0, err } total := 0 - err = operation.GetVolumeIdxEntries(volumeServer, uint32(vid), func(key NeedleId, offset Offset, size uint32) { + err = operation.GetVolumeIdxEntries(volumeServer, grpcDialOption, uint32(vid), func(key NeedleId, offset Offset, size uint32) { // println("remote key", key, "offset", offset*NeedlePaddingSize, "size", size) if offset > 0 && size != TombstoneFileSize { m.Set(NeedleId(key), offset, size) @@ -187,9 +188,9 @@ func (v *Volume) removeNeedle(key NeedleId) { // fetchNeedle fetches a remote volume needle by vid, id, offset // The compact revision is checked first in case the remote volume // is compacted and the offset is invalid any more. 
-func (v *Volume) fetchNeedle(volumeServer string, needleValue needle.NeedleValue, compactRevision uint16) error { +func (v *Volume) fetchNeedle(volumeServer string, grpcDialOption grpc.DialOption, needleValue needle.NeedleValue, compactRevision uint16) error { - return operation.WithVolumeServerClient(volumeServer, func(client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { stream, err := client.VolumeSyncData(context.Background(), &volume_server_pb.VolumeSyncDataRequest{ VolumdId: uint32(v.Id), Revision: uint32(compactRevision), diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index 55796ab43..ff0bbce42 100644 --- a/weed/topology/allocate_volume.go +++ b/weed/topology/allocate_volume.go @@ -2,6 +2,7 @@ package topology import ( "context" + "google.golang.org/grpc" "time" "github.com/chrislusf/seaweedfs/weed/operation" @@ -13,9 +14,9 @@ type AllocateVolumeResult struct { Error string } -func AllocateVolume(dn *DataNode, vid storage.VolumeId, option *VolumeGrowOption) error { +func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid storage.VolumeId, option *VolumeGrowOption) error { - return operation.WithVolumeServerClient(dn.Url(), func(client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index a301103eb..041351492 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -1,6 +1,7 @@ package topology import ( + "google.golang.org/grpc" "math/rand" "time" @@ -8,7 +9,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage" ) -func (t *Topology) StartRefreshWritableVolumes(garbageThreshold float64, preallocate int64) { +func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) { go func() { for { if t.IsLeader() { @@ -22,7 +23,7 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold float64, preallo c := time.Tick(15 * time.Minute) for _ = range c { if t.IsLeader() { - t.Vacuum(garbageThreshold, preallocate) + t.Vacuum(grpcDialOption, garbageThreshold, preallocate) } } }(garbageThreshold) diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 48a75ba9d..71d3ead76 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -2,6 +2,7 @@ package topology import ( "context" + "google.golang.org/grpc" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -10,11 +11,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage" ) -func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool { +func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool { ch := make(chan bool, locationlist.Length()) for index, dn := range locationlist.list { go func(index int, url string, vid storage.VolumeId) { - err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(url, 
grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) defer cancel() @@ -46,13 +47,13 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist } return isCheckSuccess } -func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool { +func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool { vl.removeFromWritable(vid) ch := make(chan bool, locationlist.Length()) for index, dn := range locationlist.list { go func(index int, url string, vid storage.VolumeId) { glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) - err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{ VolumdId: uint32(vid), }) @@ -79,11 +80,11 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli } return isVacuumSuccess } -func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool { +func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool { isCommitSuccess := true for _, dn := range locationlist.list { glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ VolumdId: uint32(vid), }) @@ -101,10 +102,10 @@ func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlis } return isCommitSuccess } -func batchVacuumVolumeCleanup(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) { +func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) { for _, dn := range locationlist.list { glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{ VolumdId: uint32(vid), }) @@ -118,21 +119,21 @@ func batchVacuumVolumeCleanup(vl *VolumeLayout, vid storage.VolumeId, locationli } } -func (t *Topology) Vacuum(garbageThreshold float64, preallocate int64) int { +func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) int { glog.V(1).Infof("Start vacuum on demand with threshold: %f", garbageThreshold) for _, col := range t.collectionMap.Items() { c := col.(*Collection) for _, vl := range c.storageType2VolumeLayout.Items() { if vl != nil { volumeLayout := 
vl.(*VolumeLayout) - vacuumOneVolumeLayout(volumeLayout, c, garbageThreshold, preallocate) + vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate) } } } return 0 } -func vacuumOneVolumeLayout(volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) { +func vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) { volumeLayout.accessLock.RLock() tmpMap := make(map[storage.VolumeId]*VolumeLocationList) @@ -152,11 +153,11 @@ func vacuumOneVolumeLayout(volumeLayout *VolumeLayout, c *Collection, garbageThr } glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid) - if batchVacuumVolumeCheck(volumeLayout, vid, locationList, garbageThreshold) { - if batchVacuumVolumeCompact(volumeLayout, vid, locationList, preallocate) { - batchVacuumVolumeCommit(volumeLayout, vid, locationList) + if batchVacuumVolumeCheck(grpcDialOption, volumeLayout, vid, locationList, garbageThreshold) { + if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, locationList, preallocate) { + batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, locationList) } else { - batchVacuumVolumeCleanup(volumeLayout, vid, locationList) + batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, locationList) } } } diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 9bf013ca6..3d178b827 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -2,6 +2,7 @@ package topology import ( "fmt" + "google.golang.org/grpc" "math/rand" "sync" @@ -55,19 +56,19 @@ func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) { return } -func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, topo *Topology) (count int, err error) { - count, err = vg.GrowByCountAndType(vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()), option, topo) +func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology) (count int, err error) { + count, err = vg.GrowByCountAndType(grpcDialOption, vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()), option, topo) if count > 0 && count%option.ReplicaPlacement.GetCopyCount() == 0 { return count, nil } return count, err } -func (vg *VolumeGrowth) GrowByCountAndType(targetCount int, option *VolumeGrowOption, topo *Topology) (counter int, err error) { +func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount int, option *VolumeGrowOption, topo *Topology) (counter int, err error) { vg.accessLock.Lock() defer vg.accessLock.Unlock() for i := 0; i < targetCount; i++ { - if c, e := vg.findAndGrow(topo, option); e == nil { + if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil { counter += c } else { return counter, e @@ -76,13 +77,13 @@ func (vg *VolumeGrowth) GrowByCountAndType(targetCount int, option *VolumeGrowOp return } -func (vg *VolumeGrowth) findAndGrow(topo *Topology, option *VolumeGrowOption) (int, error) { +func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topology, option *VolumeGrowOption) (int, error) { servers, e := vg.findEmptySlotsForOneVolume(topo, option) if e != nil { return 0, e } vid := topo.NextVolumeId() - err := vg.grow(topo, vid, option, servers...) + err := vg.grow(grpcDialOption, topo, vid, option, servers...) 
return len(servers), err } @@ -189,9 +190,9 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum return } -func (vg *VolumeGrowth) grow(topo *Topology, vid storage.VolumeId, option *VolumeGrowOption, servers ...*DataNode) error { +func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid storage.VolumeId, option *VolumeGrowOption, servers ...*DataNode) error { for _, server := range servers { - if err := AllocateVolume(server, vid, option); err == nil { + if err := AllocateVolume(server, grpcDialOption, vid, option); err == nil { vi := storage.VolumeInfo{ Id: vid, Size: 0, diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index 7fa650855..b26366ae0 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -38,7 +38,7 @@ func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) var options []grpc.DialOption options = append(options, - grpc.WithInsecure(), + // grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 30 * time.Second, // client ping server if no activity for this long Timeout: 20 * time.Second, @@ -48,7 +48,7 @@ func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) options = append(options, opt) } } - return grpc.Dial(address, opts...) + return grpc.Dial(address, options...) } func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index b26853945..3600fe7c7 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -3,29 +3,32 @@ package wdclient import ( "context" "fmt" + "math/rand" "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/util" - "math/rand" + "google.golang.org/grpc" ) type MasterClient struct { - ctx context.Context - name string - currentMaster string - masters []string + ctx context.Context + name string + currentMaster string + masters []string + grpcDialOption grpc.DialOption vidMap } -func NewMasterClient(ctx context.Context, clientName string, masters []string) *MasterClient { +func NewMasterClient(ctx context.Context, grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient { return &MasterClient{ - ctx: ctx, - name: clientName, - masters: masters, - vidMap: newVidMap(), + ctx: ctx, + name: clientName, + masters: masters, + grpcDialOption: grpcDialOption, + vidMap: newVidMap(), } } @@ -50,7 +53,7 @@ func (mc *MasterClient) KeepConnectedToMaster() { func (mc *MasterClient) tryAllMasters() { for _, master := range mc.masters { glog.V(0).Infof("Connecting to master %v", master) - gprcErr := withMasterClient(master, func(client master_pb.SeaweedClient) error { + gprcErr := withMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { stream, err := client.KeepConnected(context.Background()) if err != nil { @@ -96,14 +99,14 @@ func (mc *MasterClient) tryAllMasters() { } } -func withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error { +func withMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0) if parseErr != nil { return fmt.Errorf("failed to parse master grpc 
%v", master) } - grpcConnection, err := util.GrpcDial(masterGrpcAddress) + grpcConnection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", master, err) } diff --git a/weed/wdclient/wdclient.go b/weed/wdclient/wdclient.go deleted file mode 100644 index 722f4d061..000000000 --- a/weed/wdclient/wdclient.go +++ /dev/null @@ -1,15 +0,0 @@ -package wdclient - -import ( - "context" -) - -type SeaweedClient struct { - *MasterClient -} - -func NewSeaweedClient(ctx context.Context, clientName string, masters []string) *SeaweedClient { - return &SeaweedClient{ - MasterClient: NewMasterClient(ctx, clientName, masters), - } -} From a1c7dc380683d44e59a18c2e71c9c3aa7734835f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Feb 2019 12:14:28 -0800 Subject: [PATCH 026/450] avoid changing attributes for mount directory --- weed/filesys/dir.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index fae289217..6d4917cb4 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -379,6 +379,10 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { + if dir.attributes == nil { + return nil + } + glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) if req.Valid.Mode() { dir.attributes.FileMode = uint32(req.Mode) From 2442d56671069fba125e9653e039aeae9d0caf89 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Feb 2019 18:03:16 -0800 Subject: [PATCH 027/450] adjust package name --- weed/filer2/entry_codec.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go index e50b3fa9a..cf4627b74 100644 --- a/weed/filer2/entry_codec.go +++ b/weed/filer2/entry_codec.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gogo/protobuf/proto" + "github.com/golang/protobuf/proto" ) func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { From d37c3ab7a5e26054f78f3c6734cf57cebb1f43bd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Feb 2019 18:03:27 -0800 Subject: [PATCH 028/450] adjust error message --- weed/server/volume_grpc_sync.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/weed/server/volume_grpc_sync.go b/weed/server/volume_grpc_sync.go index 5f56ec17d..0114b38a4 100644 --- a/weed/server/volume_grpc_sync.go +++ b/weed/server/volume_grpc_sync.go @@ -14,7 +14,7 @@ func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) if v == nil { - return nil, fmt.Errorf("Not Found Volume Id %d", req.VolumdId) + return nil, fmt.Errorf("not found volume id %d", req.VolumdId) } resp := v.GetVolumeSyncStatus() @@ -29,7 +29,7 @@ func (vs *VolumeServer) VolumeSyncIndex(req *volume_server_pb.VolumeSyncIndexReq v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) if v == nil { - return fmt.Errorf("Not Found Volume Id %d", req.VolumdId) + return fmt.Errorf("not found volume id %d", req.VolumdId) } content, err := v.IndexFileContent() @@ -59,11 +59,11 @@ func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataReque v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) if v == nil { - return fmt.Errorf("Not Found Volume Id %d", req.VolumdId) + return fmt.Errorf("not found volume id %d", req.VolumdId) } if 
uint32(v.SuperBlock.CompactRevision) != req.Revision { - return fmt.Errorf("Requested Volume Revision is %d, but current revision is %d", req.Revision, v.SuperBlock.CompactRevision) + return fmt.Errorf("requested volume revision is %d, but current revision is %d", req.Revision, v.SuperBlock.CompactRevision) } content, err := storage.ReadNeedleBlob(v.DataFile(), int64(req.Offset)*types.NeedlePaddingSize, req.Size, v.Version()) @@ -78,7 +78,7 @@ func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataReque n := new(storage.Needle) n.ParseNeedleHeader(content) if id != n.Id { - return fmt.Errorf("Expected file entry id %d, but found %d", id, n.Id) + return fmt.Errorf("expected file entry id %d, but found %d", id, n.Id) } if err != nil { From 448645203aa8478c83a0376a4b229ad1405839d0 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Feb 2019 20:05:55 -0800 Subject: [PATCH 029/450] remove unused variables --- weed/operation/grpc_client.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index a02844657..c842ed09f 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -2,20 +2,13 @@ package operation import ( "fmt" - "strconv" - "strings" - "sync" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" -) - -var ( - grpcClients = make(map[string]*grpc.ClientConn) - grpcClientsLock sync.Mutex + "strconv" + "strings" ) func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { From 07af52cb6fefc0ebf00a5b3c4223e2f861755560 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Feb 2019 22:38:14 -0800 Subject: [PATCH 030/450] raft change from http to grpc master grpc port is fixed to http port + 10000 --- weed/command/master.go | 29 ++++++++----------- weed/command/server.go | 27 +++++++---------- weed/server/master_ui/templates.go | 2 +- weed/server/raft_server.go | 45 +++++++++++++---------------- weed/server/raft_server_handlers.go | 21 -------------- weed/util/grpc_client_server.go | 28 ++++++++++++++---- 6 files changed, 65 insertions(+), 87 deletions(-) delete mode 100644 weed/server/raft_server_handlers.go diff --git a/weed/command/master.go b/weed/command/master.go index 5b45c9627..9a0ae7eb4 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -1,21 +1,20 @@ package command import ( + "github.com/chrislusf/raft/protobuf" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/gorilla/mux" "github.com/spf13/viper" + "google.golang.org/grpc/reflection" "net/http" "os" "runtime" "strconv" "strings" - "time" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/server" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "google.golang.org/grpc/reflection" ) func init() { @@ -36,7 +35,6 @@ var cmdMaster = &Command{ var ( mport = cmdMaster.Flag.Int("port", 9333, "http listen port") - mGrpcPort = cmdMaster.Flag.Int("port.grpc", 0, "grpc server listen port, default to http port + 10000") masterIp = 
cmdMaster.Flag.String("ip", "localhost", "master | address") masterBindIp = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") @@ -92,18 +90,14 @@ func runMaster(cmd *Command, args []string) bool { } go func() { - time.Sleep(100 * time.Millisecond) + // start raftServer myMasterAddress, peers := checkPeers(*masterIp, *mport, *masterPeers) - raftServer := weed_server.NewRaftServer(r, peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), + peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) ms.SetRaftServer(raftServer) - }() - go func() { // starting grpc server - grpcPort := *mGrpcPort - if grpcPort == 0 { - grpcPort = *mport + 10000 - } + grpcPort := *mport + 10000 grpcL, err := util.NewListener(*masterBindIp+":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) @@ -111,6 +105,7 @@ func runMaster(cmd *Command, args []string) bool { // Create your protocol servers. grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) master_pb.RegisterSeaweedServer(grpcS, ms) + protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterBindIp, grpcPort) diff --git a/weed/command/server.go b/weed/command/server.go index a9415d068..456b96435 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -1,6 +1,7 @@ package command import ( + "github.com/chrislusf/raft/protobuf" "github.com/chrislusf/seaweedfs/weed/security" "github.com/spf13/viper" "net/http" @@ -62,7 +63,6 @@ var ( serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") - masterGrpcPort = cmdServer.Flag.Int("master.port.grpc", 0, "master grpc server listen port, default to http port + 10000") masterMetaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") masterVolumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") masterVolumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.") @@ -162,10 +162,8 @@ func runServer(cmd *Command, args []string) bool { }() } - var raftWaitForMaster sync.WaitGroup var volumeWait sync.WaitGroup - raftWaitForMaster.Add(1) volumeWait.Add(1) go func() { @@ -183,11 +181,14 @@ func runServer(cmd *Command, args []string) bool { } go func() { + // start raftServer + myMasterAddress, peers := checkPeers(*masterIp, *mport, *masterPeers) + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), + peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) + ms.SetRaftServer(raftServer) + // starting grpc server - grpcPort := *masterGrpcPort - if grpcPort == 0 { - grpcPort = *masterPort + 10000 - } + grpcPort := *masterPort + 10000 grpcL, err := util.NewListener(*serverIp+":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) @@ -196,22 +197,14 @@ func runServer(cmd 
*Command, args []string) bool { glog.V(0).Infof("grpc config %+v", viper.Sub("grpc")) grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) master_pb.RegisterSeaweedServer(grpcS, ms) + protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *serverIp, grpcPort) grpcS.Serve(grpcL) }() - go func() { - raftWaitForMaster.Wait() - time.Sleep(100 * time.Millisecond) - myAddress, peers := checkPeers(*serverIp, *masterPort, *serverPeers) - raftServer := weed_server.NewRaftServer(r, peers, myAddress, *masterMetaFolder, ms.Topo, *pulseSeconds) - ms.SetRaftServer(raftServer) - volumeWait.Done() - }() - - raftWaitForMaster.Done() + volumeWait.Done() // start http server httpS := &http.Server{Handler: r} diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go index f32e8e61b..ce632b099 100644 --- a/weed/server/master_ui/templates.go +++ b/weed/server/master_ui/templates.go @@ -41,7 +41,7 @@ var StatusTpl = template.Must(template.New("status").Parse(` diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 7afef0b15..4be13810f 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -2,36 +2,35 @@ package weed_server import ( "encoding/json" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" "io/ioutil" "os" "path" "reflect" "sort" - "strings" "time" "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" - "github.com/gorilla/mux" ) type RaftServer struct { peers []string // initial peers to join with raftServer raft.Server dataDir string - httpAddr string - router *mux.Router + serverAddr string topo *topology.Topology + *raft.GrpcServer } -func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer { +func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer { s := &RaftServer{ - peers: peers, - httpAddr: httpAddr, - dataDir: dataDir, - router: r, - topo: topo, + peers: peers, + serverAddr: serverAddr, + dataDir: dataDir, + topo: topo, } if glog.V(4) { @@ -41,43 +40,39 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin raft.RegisterCommand(&topology.MaxVolumeIdCommand{}) var err error - transporter := raft.NewHTTPTransporter("/cluster", time.Second) - transporter.Transport.MaxIdleConnsPerHost = 1024 - transporter.Transport.IdleConnTimeout = time.Second - transporter.Transport.ResponseHeaderTimeout = time.Second - glog.V(0).Infof("Starting RaftServer with %v", httpAddr) + transporter := raft.NewGrpcTransporter(grpcDialOption) + glog.V(0).Infof("Starting RaftServer with %v", serverAddr) // Clear old cluster configurations if peers are changed - if oldPeers, changed := isPeersChanged(s.dataDir, httpAddr, s.peers); changed { + if oldPeers, changed := isPeersChanged(s.dataDir, serverAddr, s.peers); changed { glog.V(0).Infof("Peers Change: %v => %v", oldPeers, s.peers) os.RemoveAll(path.Join(s.dataDir, "conf")) os.RemoveAll(path.Join(s.dataDir, "log")) os.RemoveAll(path.Join(s.dataDir, "snapshot")) } - s.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, topo, "") + s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, nil, topo, "") if err != nil { glog.V(0).Infoln(err) return nil } - 
transporter.Install(s.raftServer, s) s.raftServer.SetHeartbeatInterval(500 * time.Millisecond) s.raftServer.SetElectionTimeout(time.Duration(pulseSeconds) * 500 * time.Millisecond) s.raftServer.Start() - s.router.HandleFunc("/cluster/status", s.statusHandler).Methods("GET") - for _, peer := range s.peers { - s.raftServer.AddPeer(peer, "http://"+peer) + s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer, 19333)) } - if s.raftServer.IsLogEmpty() && isTheFirstOne(httpAddr, s.peers) { + s.GrpcServer = raft.NewGrpcServer(s.raftServer) + + if s.raftServer.IsLogEmpty() && isTheFirstOne(serverAddr, s.peers) { // Initialize the server by joining itself. glog.V(0).Infoln("Initializing new cluster") _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ Name: s.raftServer.Name(), - ConnectionString: "http://" + s.httpAddr, + ConnectionString: util.ServerToGrpcAddress(s.serverAddr, 19333), }) if err != nil { @@ -95,7 +90,7 @@ func (s *RaftServer) Peers() (members []string) { peers := s.raftServer.Peers() for _, p := range peers { - members = append(members, strings.TrimPrefix(p.ConnectionString, "http://")) + members = append(members, p.Name) } return @@ -114,7 +109,7 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, } for _, p := range conf.Peers { - oldPeers = append(oldPeers, strings.TrimPrefix(p.ConnectionString, "http://")) + oldPeers = append(oldPeers, p.Name) } oldPeers = append(oldPeers, self) diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go deleted file mode 100644 index 627fe354e..000000000 --- a/weed/server/raft_server_handlers.go +++ /dev/null @@ -1,21 +0,0 @@ -package weed_server - -import ( - "github.com/chrislusf/seaweedfs/weed/operation" - "net/http" -) - -func (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) { - s.router.HandleFunc(pattern, handler) -} - -func (s *RaftServer) statusHandler(w http.ResponseWriter, r *http.Request) { - ret := operation.ClusterStatusResult{ - IsLeader: s.topo.IsLeader(), - Peers: s.Peers(), - } - if leader, e := s.topo.Leader(); e == nil { - ret.Leader = leader - } - writeJsonQuiet(w, r, http.StatusOK, ret) -} diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index b26366ae0..b989a35d1 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -83,18 +83,34 @@ func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts func ParseServerToGrpcAddress(server string, optionalGrpcPort int) (serverGrpcAddress string, err error) { hostnameAndPort := strings.Split(server, ":") if len(hostnameAndPort) != 2 { - return "", fmt.Errorf("The server should have hostname:port format: %v", hostnameAndPort) + return "", fmt.Errorf("server should have hostname:port format: %v", hostnameAndPort) } - filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) if parseErr != nil { - return "", fmt.Errorf("The server port parse error: %v", parseErr) + return "", fmt.Errorf("server port parse error: %v", parseErr) } - filerGrpcPort := int(filerPort) + 10000 + grpcPort := int(port) + 10000 if optionalGrpcPort != 0 { - filerGrpcPort = optionalGrpcPort + grpcPort = optionalGrpcPort } - return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil + return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort), nil +} + +func ServerToGrpcAddress(server string, defaultGrpcPort int) (serverGrpcAddress string) { + 
hostnameAndPort := strings.Split(server, ":") + if len(hostnameAndPort) != 2 { + return fmt.Sprintf("%s:%d", server, defaultGrpcPort) + } + + port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + if parseErr != nil { + return fmt.Sprintf("%s:%d", hostnameAndPort[0], defaultGrpcPort) + } + + grpcPort := int(port) + 10000 + + return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort) } From 58d4088db4bb5a5d56f695d67259ad02755f1fe5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Feb 2019 11:57:25 -0800 Subject: [PATCH 031/450] HDFS: add tls secured grpc --- other/java/client/pom.xml | 2 +- .../seaweedfs/client/FilerGrpcClient.java | 31 +++ other/java/hdfs/pom.xml | 2 +- .../java/seaweed/hdfs/SeaweedFileSystem.java | 185 ++++++++++-------- .../seaweed/hdfs/SeaweedFileSystemStore.java | 8 + 5 files changed, 142 insertions(+), 86 deletions(-) diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 1ea39863f..540d73f4b 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -4,7 +4,7 @@ com.github.chrislusf seaweedfs-client - 1.0.5 + 1.0.7 org.sonatype.oss diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java index 16b7c3249..c28c1dcf2 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java @@ -2,7 +2,14 @@ package seaweedfs.client; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; +import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.shaded.io.grpc.netty.NegotiationType; +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder; +import javax.net.ssl.SSLException; +import java.io.File; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; @@ -20,6 +27,16 @@ public class FilerGrpcClient { this(ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()); } + public FilerGrpcClient(String host, int grpcPort, + String caFilePath, + String clientCertFilePath, + String clientPrivateKeyFilePath) throws SSLException { + + this(NettyChannelBuilder.forAddress(host, grpcPort) + .negotiationType(NegotiationType.TLS) + .sslContext(buildSslContext(caFilePath,clientCertFilePath,clientPrivateKeyFilePath))); + } + public FilerGrpcClient(ManagedChannelBuilder channelBuilder) { channel = channelBuilder.build(); blockingStub = SeaweedFilerGrpc.newBlockingStub(channel); @@ -42,4 +59,18 @@ public class FilerGrpcClient { public SeaweedFilerGrpc.SeaweedFilerFutureStub getFutureStub() { return futureStub; } + + private static SslContext buildSslContext(String trustCertCollectionFilePath, + String clientCertChainFilePath, + String clientPrivateKeyFilePath) throws SSLException { + SslContextBuilder builder = GrpcSslContexts.forClient(); + if (trustCertCollectionFilePath != null) { + builder.trustManager(new File(trustCertCollectionFilePath)); + } + if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) { + builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath)); + } + return builder.build(); + } + } diff --git a/other/java/hdfs/pom.xml b/other/java/hdfs/pom.xml index a0cab8752..fb4ef3bac 100644 --- a/other/java/hdfs/pom.xml +++ b/other/java/hdfs/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.0.5 + 1.0.7 3.1.1 diff --git 
a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java index 2a0ef78af..453924cf7 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java +++ b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -34,6 +34,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { public static final int FS_SEAWEED_DEFAULT_PORT = 8888; public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host"; public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port"; + public static final String FS_SEAWEED_GRPC_CA = "fs.seaweed.ca"; + public static final String FS_SEAWEED_GRPC_CLIENT_KEY = "fs.seaweed.client.key"; + public static final String FS_SEAWEED_GRPC_CLIENT_CERT = "fs.seaweed.client.cert"; private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class); private static int BUFFER_SIZE = 16 * 1024 * 1024; @@ -72,7 +75,17 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { setConf(conf); this.uri = uri; - seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + if (conf.get(FS_SEAWEED_GRPC_CA) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CA).length() != 0 + && conf.get(FS_SEAWEED_GRPC_CLIENT_CERT) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_CERT).length() != 0 + && conf.get(FS_SEAWEED_GRPC_CLIENT_KEY) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_KEY).length() != 0) { + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, + conf.get(FS_SEAWEED_GRPC_CA), + conf.get(FS_SEAWEED_GRPC_CLIENT_CERT), + conf.get(FS_SEAWEED_GRPC_CLIENT_KEY)); + } else { + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + } + } @Override @@ -206,8 +219,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); return seaweedFileSystemStore.createDirectory(path, currentUser, - fsPermission == null ? FsPermission.getDirDefault() : fsPermission, - FsPermission.getUMask(getConf())); + fsPermission == null ? FsPermission.getDirDefault() : fsPermission, + FsPermission.getUMask(getConf())); } @@ -238,7 +251,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { */ @Override public void setOwner(Path path, final String owner, final String group) - throws IOException { + throws IOException { LOG.debug("setOwner path: {}", path); path = qualify(path); @@ -271,54 +284,55 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { /** * Concat existing files together. - * @param trg the path to the target destination. + * + * @param trg the path to the target destination. * @param psrcs the paths to the sources to use for the concatenation. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default). + * (default). */ @Override - public void concat(final Path trg, final Path [] psrcs) throws IOException { + public void concat(final Path trg, final Path[] psrcs) throws IOException { throw new UnsupportedOperationException("Not implemented by the " + - getClass().getSimpleName() + " FileSystem implementation"); + getClass().getSimpleName() + " FileSystem implementation"); } /** * Truncate the file in the indicated path to the indicated size. *
<ul> - * <li>Fails if path is a directory.</li> - * <li>Fails if path does not exist.</li> - * <li>Fails if path is not closed.</li> - * <li>Fails if new size is greater than current size.</li> + * <li>Fails if path is a directory.</li> + * <li>Fails if path does not exist.</li> + * <li>Fails if path is not closed.</li> + * <li>Fails if new size is greater than current size.</li> * </ul> *
- * @param f The path to the file to be truncated - * @param newLength The size the file is to be truncated to * + * @param f The path to the file to be truncated + * @param newLength The size the file is to be truncated to * @return true if the file has been truncated to the desired * newLength and is immediately available to be reused for * write operations such as append, or * false if a background process of adjusting the length of * the last block has been started, and clients should wait for it to * complete before proceeding with further file updates. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default). + * (default). */ @Override public boolean truncate(Path f, long newLength) throws IOException { throw new UnsupportedOperationException("Not implemented by the " + - getClass().getSimpleName() + " FileSystem implementation"); + getClass().getSimpleName() + " FileSystem implementation"); } @Override public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException, - FileAlreadyExistsException, FileNotFoundException, - ParentNotDirectoryException, UnsupportedFileSystemException, - IOException { + FileAlreadyExistsException, FileNotFoundException, + ParentNotDirectoryException, UnsupportedFileSystemException, + IOException { // Supporting filesystems should override this method throw new UnsupportedOperationException( - "Filesystem does not support symlinks!"); + "Filesystem does not support symlinks!"); } public boolean supportsSymlinks() { @@ -327,48 +341,51 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { /** * Create a snapshot. - * @param path The directory where snapshots will be taken. + * + * @param path The directory where snapshots will be taken. * @param snapshotName The name of the snapshot * @return the snapshot path. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported */ @Override public Path createSnapshot(Path path, String snapshotName) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support createSnapshot"); + + " doesn't support createSnapshot"); } /** * Rename a snapshot. - * @param path The directory path where the snapshot was taken + * + * @param path The directory path where the snapshot was taken * @param snapshotOldName Old name of the snapshot * @param snapshotNewName New name of the snapshot - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support renameSnapshot"); + + " doesn't support renameSnapshot"); } /** * Delete a snapshot of a directory. - * @param path The directory that the to-be-deleted snapshot belongs to + * + * @param path The directory that the to-be-deleted snapshot belongs to * @param snapshotName The name of the snapshot - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). 
*/ @Override public void deleteSnapshot(Path path, String snapshotName) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support deleteSnapshot"); + + " doesn't support deleteSnapshot"); } /** @@ -377,49 +394,49 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * ACL entries that are not specified in this call are retained without * changes. (Modifications are merged into the current ACL.) * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List<AclEntry> describing modifications - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void modifyAclEntries(Path path, List aclSpec) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support modifyAclEntries"); + + " doesn't support modifyAclEntries"); } /** * Removes ACL entries from files and directories. Other ACL entries are * retained. * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List describing entries to remove - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeAclEntries(Path path, List aclSpec) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeAclEntries"); + + " doesn't support removeAclEntries"); } /** * Removes all default ACL entries from files and directories. * * @param path Path to modify - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeDefaultAcl(Path path) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeDefaultAcl"); + + " doesn't support removeDefaultAcl"); } /** @@ -428,32 +445,32 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * bits. * * @param path Path to modify - * @throws IOException if an ACL could not be removed + * @throws IOException if an ACL could not be removed * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeAcl(Path path) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeAcl"); + + " doesn't support removeAcl"); } /** * Fully replaces ACL of files and directories, discarding all existing * entries. * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List describing modifications, which must include entries - * for user, group, and others for compatibility with permission bits. - * @throws IOException if an ACL could not be modified + * for user, group, and others for compatibility with permission bits. 
+ * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void setAcl(Path path, List aclSpec) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support setAcl"); + + " doesn't support setAcl"); } /** @@ -461,14 +478,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get * @return AclStatus describing the ACL of the file or directory - * @throws IOException if an ACL could not be read + * @throws IOException if an ACL could not be read * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public AclStatus getAclStatus(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getAclStatus"); + + " doesn't support getAclStatus"); } /** @@ -478,19 +495,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { *

* Refer to the HDFS extended attributes user documentation for details. * - * @param path Path to modify - * @param name xattr name. + * @param path Path to modify + * @param name xattr name. * @param value xattr value. - * @param flag xattr set flag - * @throws IOException IO failure + * @param flag xattr set flag + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void setXAttr(Path path, String name, byte[] value, EnumSet flag) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support setXAttr"); + + " doesn't support setXAttr"); } /** @@ -503,14 +520,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * @param path Path to get extended attribute * @param name xattr name. * @return byte[] xattr value. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public byte[] getXAttr(Path path, String name) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttr"); + + " doesn't support getXAttr"); } /** @@ -522,14 +539,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get extended attributes * @return Map describing the XAttrs of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public Map getXAttrs(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttrs"); + + " doesn't support getXAttrs"); } /** @@ -539,18 +556,18 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { *

* Refer to the HDFS extended attributes user documentation for details. * - * @param path Path to get extended attributes + * @param path Path to get extended attributes * @param names XAttr names. * @return Map describing the XAttrs of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public Map getXAttrs(Path path, List names) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttrs"); + + " doesn't support getXAttrs"); } /** @@ -562,14 +579,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get extended attributes * @return List{@literal } of the XAttr names of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public List listXAttrs(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support listXAttrs"); + + " doesn't support listXAttrs"); } /** @@ -581,14 +598,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to remove extended attribute * @param name xattr name - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeXAttr(Path path, String name) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeXAttr"); + + " doesn't support removeXAttr"); } } diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index 27678e615..c93a28abc 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -12,6 +12,7 @@ import seaweedfs.client.FilerGrpcClient; import seaweedfs.client.FilerProto; import seaweedfs.client.SeaweedRead; +import javax.net.ssl.SSLException; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; @@ -33,6 +34,13 @@ public class SeaweedFileSystemStore { filerClient = new FilerClient(filerGrpcClient); } + public SeaweedFileSystemStore(String host, int port, + String caFile, String clientCertFile, String clientKeyFile) throws SSLException { + int grpcPort = 10000 + port; + filerGrpcClient = new FilerGrpcClient(host, grpcPort, caFile, clientCertFile, clientKeyFile); + filerClient = new FilerClient(filerGrpcClient); + } + public static String getParentDirectory(Path path) { return path.isRoot() ? 
"/" : path.getParent().toUri().getPath(); } From 72c2e6bb8fdb4da6680d4dfe78e74783948ebe42 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Feb 2019 14:16:19 -0800 Subject: [PATCH 032/450] remove unused file --- weed/operation/list_masters.go | 32 -------------------------------- 1 file changed, 32 deletions(-) delete mode 100644 weed/operation/list_masters.go diff --git a/weed/operation/list_masters.go b/weed/operation/list_masters.go deleted file mode 100644 index 75838de4d..000000000 --- a/weed/operation/list_masters.go +++ /dev/null @@ -1,32 +0,0 @@ -package operation - -import ( - "encoding/json" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" -) - -type ClusterStatusResult struct { - IsLeader bool `json:"IsLeader,omitempty"` - Leader string `json:"Leader,omitempty"` - Peers []string `json:"Peers,omitempty"` -} - -func ListMasters(server string) (leader string, peers []string, err error) { - jsonBlob, err := util.Get("http://" + server + "/cluster/status") - glog.V(2).Info("list masters result :", string(jsonBlob)) - if err != nil { - return "", nil, err - } - var ret ClusterStatusResult - err = json.Unmarshal(jsonBlob, &ret) - if err != nil { - return "", nil, err - } - peers = ret.Peers - if ret.IsLeader { - peers = append(peers, ret.Leader) - } - return ret.Leader, peers, nil -} From 6ed69de6bd5dcabc6fa70185bfcb772786b27517 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Feb 2019 14:26:59 -0800 Subject: [PATCH 033/450] HDFS: add jwt auth for uploading --- .../java/seaweedfs/client/SeaweedWrite.java | 45 ++++++++++--------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java index a7cede09f..15db87195 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java @@ -20,25 +20,26 @@ public class SeaweedWrite { final byte[] bytes, final long bytesOffset, final long bytesLength) throws IOException { FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume( - FilerProto.AssignVolumeRequest.newBuilder() - .setCollection("") - .setReplication(replication) - .setDataCenter("") - .setReplication("") - .setTtlSec(0) - .build()); + FilerProto.AssignVolumeRequest.newBuilder() + .setCollection("") + .setReplication(replication) + .setDataCenter("") + .setReplication("") + .setTtlSec(0) + .build()); String fileId = response.getFileId(); String url = response.getUrl(); + String auth = response.getAuth(); String targetUrl = String.format("http://%s/%s", url, fileId); - String etag = multipartUpload(targetUrl, bytes, bytesOffset, bytesLength); + String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength); entry.addChunks(FilerProto.FileChunk.newBuilder() - .setFileId(fileId) - .setOffset(offset) - .setSize(bytesLength) - .setMtime(System.currentTimeMillis() / 10000L) - .setETag(etag) + .setFileId(fileId) + .setOffset(offset) + .setSize(bytesLength) + .setMtime(System.currentTimeMillis() / 10000L) + .setETag(etag) ); } @@ -46,14 +47,15 @@ public class SeaweedWrite { public static void writeMeta(final FilerGrpcClient filerGrpcClient, final String parentDirectory, final FilerProto.Entry.Builder entry) { filerGrpcClient.getBlockingStub().createEntry( - FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parentDirectory) - .setEntry(entry) - .build() + 
FilerProto.CreateEntryRequest.newBuilder() + .setDirectory(parentDirectory) + .setEntry(entry) + .build() ); } private static String multipartUpload(String targetUrl, + String auth, final byte[] bytes, final long bytesOffset, final long bytesLength) throws IOException { @@ -62,11 +64,14 @@ public class SeaweedWrite { InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); HttpPost post = new HttpPost(targetUrl); + if (auth != null && auth.length() != 0) { + post.addHeader("Authorization", "BEARER " + auth); + } post.setEntity(MultipartEntityBuilder.create() - .setMode(HttpMultipartMode.BROWSER_COMPATIBLE) - .addBinaryBody("upload", inputStream) - .build()); + .setMode(HttpMultipartMode.BROWSER_COMPATIBLE) + .addBinaryBody("upload", inputStream) + .build()); try { HttpResponse response = client.execute(post); From 0fa1be8c4b5485ff2f0491ea8983ff88eed4a186 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Feb 2019 21:10:10 -0800 Subject: [PATCH 034/450] adjust formatting and comments --- weed/command/scaffold.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index e8608e9dd..cb0a726ce 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -257,27 +257,28 @@ key = "" # volume server also uses grpc that should be secured. -# all grpc tls authentications are mutual +# all grpc tls authentications are mutual +# the values for the following ca, cert, and key are paths to the PERM files. [grpc] ca = "" [grpc.volume] cert = "" -key = "" +key = "" [grpc.master] cert = "" -key = "" +key = "" [grpc.filer] cert = "" -key = "" +key = "" # use this for any place needs a grpc client # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" [grpc.client] cert = "" -key = "" +key = "" ` ) From 097b7a321eae4a70d375ca6d3dea1b118fe77795 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 20 Feb 2019 00:57:31 -0800 Subject: [PATCH 035/450] HDFS: move to 1.0.8 --- other/java/client/pom.xml | 2 +- other/java/hdfs/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 540d73f4b..30b9d4d55 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -4,7 +4,7 @@ com.github.chrislusf seaweedfs-client - 1.0.7 + 1.0.8 org.sonatype.oss diff --git a/other/java/hdfs/pom.xml b/other/java/hdfs/pom.xml index fb4ef3bac..18ae192d0 100644 --- a/other/java/hdfs/pom.xml +++ b/other/java/hdfs/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.0.7 + 1.0.8 3.1.1 From e10868899067ad72f822e0d9571b33fa9d1fc7f6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 20 Feb 2019 01:01:01 -0800 Subject: [PATCH 036/450] avoid grpc 5 seconds timeout some operations may take longer than 5 seconds. 
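
The effect of a blanket five-second deadline can be reproduced with a small, self-contained Go sketch. callRPC below is a stand-in for any context-aware gRPC call (for example a large batch delete), not a SeaweedFS API, and the durations are shortened so the program finishes quickly.

package main

import (
	"context"
	"fmt"
	"time"
)

// callRPC stands in for any context-aware gRPC call; serverTime simulates a
// server that legitimately needs longer than the caller's deadline.
func callRPC(ctx context.Context, serverTime time.Duration) error {
	select {
	case <-time.After(serverTime):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Old pattern: a hard deadline on every call (5s in the real code,
	// shortened here); slow-but-healthy operations fail spuriously.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println("with deadline:   ", callRPC(ctx, 200*time.Millisecond)) // context deadline exceeded

	// New pattern: data operations use context.Background() with no
	// artificial deadline, so they run to completion.
	fmt.Println("without deadline:", callRPC(context.Background(), 200*time.Millisecond)) // <nil>
}
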
only keep the timeout for raft operations --- weed/operation/assign_file_id.go | 10 +++------- weed/operation/delete_content.go | 8 ++------ weed/operation/lookup.go | 4 +--- weed/operation/stats.go | 5 +---- weed/operation/sync_volume.go | 10 +++------- weed/server/master_server_handlers_admin.go | 13 ++++--------- weed/topology/allocate_volume.go | 8 ++------ weed/topology/topology_vacuum.go | 5 +---- 8 files changed, 17 insertions(+), 46 deletions(-) diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 7e7a9059d..dc5c0c5d7 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,13 +3,11 @@ package operation import ( "context" "fmt" - "google.golang.org/grpc" - "strings" - "time" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + "strings" ) type VolumeAssignRequest struct { @@ -46,8 +44,6 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum } lastError = withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() req := &master_pb.AssignRequest{ Count: primaryRequest.Count, @@ -58,7 +54,7 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum Rack: primaryRequest.Rack, DataNode: primaryRequest.DataNode, } - resp, grpcErr := masterClient.Assign(ctx, req) + resp, grpcErr := masterClient.Assign(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 1df95211e..cfe2feae8 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,13 +4,11 @@ import ( "context" "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "google.golang.org/grpc" "net/http" "strings" "sync" - "time" - - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -110,14 +108,12 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() req := &volume_server_pb.BatchDeleteRequest{ FileIds: fileIds, } - resp, err := volumeServerClient.BatchDelete(ctx, req) + resp, err := volumeServerClient.BatchDelete(context.Background(), req) // fmt.Printf("deleted %v %v: %v\n", fileIds, err, resp) diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index c4040f3e7..ca9056dbb 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -100,13 +100,11 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin //only query unknown_vids err := withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, } - resp, grpcErr := masterClient.LookupVolume(ctx, req) + resp, grpcErr := 
masterClient.LookupVolume(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/stats.go b/weed/operation/stats.go index 9f7166864..08719cdb5 100644 --- a/weed/operation/stats.go +++ b/weed/operation/stats.go @@ -3,7 +3,6 @@ package operation import ( "context" "google.golang.org/grpc" - "time" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) @@ -11,10 +10,8 @@ import ( func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { err = withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - grpcResponse, grpcErr := masterClient.Statistics(ctx, req) + grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index bf81415c9..c979254f4 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -3,22 +3,18 @@ package operation import ( "context" "fmt" - "google.golang.org/grpc" - "io" - "time" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" . "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + "io" ) func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{ + resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{ VolumdId: vid, }) return nil diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index eccf3ee4c..95e55a497 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -4,17 +4,15 @@ import ( "context" "errors" "fmt" - "math/rand" - "net/http" - "strconv" - "time" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" + "math/rand" + "net/http" + "strconv" ) func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) { @@ -25,10 +23,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R } for _, server := range collection.ListVolumeServers() { err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - - _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ Collection: collection.Name, }) return deleteErr diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index ff0bbce42..66b1b3af5 100644 --- a/weed/topology/allocate_volume.go 
+++ b/weed/topology/allocate_volume.go @@ -2,12 +2,10 @@ package topology import ( "context" - "google.golang.org/grpc" - "time" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" + "google.golang.org/grpc" ) type AllocateVolumeResult struct { @@ -17,10 +15,8 @@ type AllocateVolumeResult struct { func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid storage.VolumeId, option *VolumeGrowOption) error { return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - _, deleteErr := client.AssignVolume(ctx, &volume_server_pb.AssignVolumeRequest{ + _, deleteErr := client.AssignVolume(context.Background(), &volume_server_pb.AssignVolumeRequest{ VolumdId: uint32(vid), Collection: option.Collection, Replication: option.ReplicaPlacement.String(), diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 71d3ead76..840821efa 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -16,10 +16,7 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi for index, dn := range locationlist.list { go func(index int, url string, vid storage.VolumeId) { err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - - resp, err := volumeServerClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{ + resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{ VolumdId: uint32(vid), }) if err != nil { From 0a106c1757c207d17e41f47b381a69eaab9ee1e6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 20 Feb 2019 09:09:47 -0800 Subject: [PATCH 037/450] updated glide --- weed/glide.lock | 270 ++++++++++++++++++++++++++++++++++++++---------- weed/glide.yaml | 55 +++++++--- 2 files changed, 257 insertions(+), 68 deletions(-) diff --git a/weed/glide.lock b/weed/glide.lock index fee78be42..ec0c0686e 100644 --- a/weed/glide.lock +++ b/weed/glide.lock @@ -1,66 +1,139 @@ -hash: 2e3a065472829938d25e879451b6d1aa43e55270e1166a9c044803ef8a3b9eb1 -updated: 2018-06-28T22:01:35.910567-07:00 +hash: 306ab43df769fe8072733ba28c3d2b6308288f248ee65806ac7d0bfa3349ab73 +updated: 2019-02-20T09:08:30.870858-08:00 imports: -- name: github.com/seaweedfs/fuse - version: 65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e +- name: cloud.google.com/go + version: f775b69d59df4650962b3adf2bbdb7677445e381 subpackages: - - fs - - fuseutil + - compute/metadata + - iam + - internal + - internal/optional + - internal/trace + - internal/version + - pubsub + - pubsub/apiv1 + - pubsub/internal/distribution + - storage +- name: github.com/aws/aws-sdk-go + version: 112e8a372b3f46d410ed416decd09cc06a05e3b0 + subpackages: + - aws + - aws/awserr + - aws/awsutil + - aws/client + - aws/client/metadata + - aws/corehandlers + - aws/credentials + - aws/credentials/ec2rolecreds + - aws/credentials/endpointcreds + - aws/credentials/processcreds + - aws/credentials/stscreds + - aws/csm + - aws/defaults + - aws/ec2metadata + - aws/endpoints + - aws/request + - aws/session + - aws/signer/v4 + - internal/ini + - internal/s3err + - internal/sdkio + - internal/sdkrand + - 
internal/sdkuri + - internal/shareddefaults + - private/protocol + - private/protocol/eventstream + - private/protocol/eventstream/eventstreamapi + - private/protocol/query + - private/protocol/query/queryutil + - private/protocol/rest + - private/protocol/restxml + - private/protocol/xml/xmlutil + - service/s3 + - service/s3/s3iface + - service/sqs + - service/sts +- name: github.com/Azure/azure-pipeline-go + version: 76b57228f36adfbb7e6990ba1347a7fbbf3043da + subpackages: + - pipeline +- name: github.com/Azure/azure-storage-blob-go + version: 457680cc0804810f6d02958481e0ffdda51d5c60 + subpackages: + - azblob - name: github.com/boltdb/bolt - version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8 + version: fd01fc79c553a8e99d512a07e8e0c63d4a3ccfc5 - name: github.com/chrislusf/raft - version: 5f7ddd8f479583daf05879d3d3b174aa202c8fb7 + version: cc54ba4a4ef32c5ac0fcc61fc7cbbf901410d127 subpackages: - protobuf +- name: github.com/DataDog/zstd + version: 1e382f59b41eebd6f592c5db4fd1958ec38a0eba +- name: github.com/davecgh/go-spew + version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d + subpackages: + - spew - name: github.com/dgrijalva/jwt-go - version: 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e + version: 3af4c746e1c248ee8491a3e0c6f7a9cd831e95f8 - name: github.com/disintegration/imaging - version: bbcee2f5c9d5e94ca42c8b50ec847fec64a6c134 + version: 5362c131d56305ce787e79a5b94ffc956df00d62 +- name: github.com/dustin/go-humanize + version: 9f541cc9db5d55bce703bd99987c9d5cb8eea45e +- name: github.com/eapache/go-resiliency + version: 842e16ec2c98ef0c59eebfe60d2d3500a793ba19 + subpackages: + - breaker +- name: github.com/eapache/go-xerial-snappy + version: 776d5712da21bc4762676d614db1d8a64f4238b0 +- name: github.com/eapache/queue + version: 093482f3f8ce946c05bcba64badd2c82369e084d - name: github.com/fsnotify/fsnotify - version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 + version: ccc981bf80385c528a65fbfdd49bf2d8da22aa23 - name: github.com/go-redis/redis - version: 83fb42932f6145ce52df09860384a4653d2d332a + version: 35e2db2f14bb01d2a53b9e627b63239281c87927 subpackages: - internal - internal/consistenthash - internal/hashtag - internal/pool - internal/proto - - internal/singleflight - internal/util - name: github.com/go-sql-driver/mysql - version: d523deb1b23d913de5bdada721a6071e71283618 + version: 972a708cf97995463843c08c8585b26997daf0e1 - name: github.com/gocql/gocql - version: e06f8c1bcd787e6bf0608288b314522f08cc7848 + version: ec4793573d1447b6f92a1b359a0594566fad9d0e subpackages: - internal/lru - internal/murmur - internal/streams -- name: github.com/gogo/protobuf - version: 30cf7ac33676b5786e78c746683f0d4cd64fa75b - subpackages: - - proto - name: github.com/golang/protobuf - version: b4deda0973fb4c70b50d226b1af49f3da59f5265 + version: c823c79ea1570fb5ff454033735a8e68575d1d0f subpackages: - proto - protoc-gen-go/descriptor - ptypes - ptypes/any - ptypes/duration + - ptypes/empty - ptypes/timestamp - name: github.com/golang/snappy - version: 2e65f85255dbc3072edf28d6b5b8efc472979f5a + version: 2a8bb927dd31d8daada140a5d09578521ce5c36a - name: github.com/google/btree - version: e89373fe6b4a7413d7acd6da1725b83ef713e6e4 -- name: github.com/gorilla/context - version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 + version: 4030bb1f1f0c35b30ca7009e9ebd06849dd45306 +- name: github.com/googleapis/gax-go + version: ddfab93c3faef4935403ac75a7c11f0e731dc181 + subpackages: + - v2 - name: github.com/gorilla/mux - version: e3702bed27f0d39777b0b37b664b6280e8ef8fbf + version: 8559a4f775fc329165fe32bd4c2543de8ada8fce - name: 
github.com/hailocab/go-hostpool version: e80d13ce29ede4452c43dea11e79b9bc8a15b478 +- name: github.com/hashicorp/golang-lru + version: 20f1fb78b0740ba8c3cb143a61e86ba5c8669768 + subpackages: + - simplelru - name: github.com/hashicorp/hcl - version: ef8a98b0bbce4a65b5aa4c368430a80ddc533168 + version: 65a6292f0157eff210d03ed1bf6c59b190b8b906 subpackages: - hcl/ast - hcl/parser @@ -71,41 +144,71 @@ imports: - json/parser - json/scanner - json/token +- name: github.com/jmespath/go-jmespath + version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5 - name: github.com/karlseguin/ccache - version: b425c9ca005a2050ebe723f6a0cddcb907354ab7 + version: 3385784411ac24a8be403f7938890ec67ef6e0d6 - name: github.com/klauspost/crc32 - version: cb6bfca970f6908083f26f39a79009d608efd5cd + version: bab58d77464aa9cf4e84200c3276da0831fe0c03 +- name: github.com/kurin/blazer + version: f20ef4f2aa8ccc2e94a1981dc37f199e90fa4ba5 + subpackages: + - b2 + - base + - internal/b2assets + - internal/b2types + - internal/blog + - x/window - name: github.com/lib/pq - version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8 + version: 9eb73efc1fcc404148b56765b0d3f61d9a5ef8ee subpackages: - oid - name: github.com/magiconair/properties - version: c2353362d570a7bfa228149c62842019201cfb71 + version: 7757cc9fdb852f7579b24170bcacda2c7471bb6a - name: github.com/mitchellh/mapstructure - version: bb74f1db0675b241733089d5a1faa5dd8b0ef57b + version: 3536a929edddb9a5b34bd6861dc4a9647cb459fe - name: github.com/pelletier/go-toml - version: c01d1270ff3e442a8a57cddc1c92dc1138598194 + version: 27c6b39a135b7dc87a14afb068809132fb7a9a8f +- name: github.com/pierrec/lz4 + version: 7f42fed96396aa858623e7e51628123cda496462 + subpackages: + - internal/xxh32 +- name: github.com/rakyll/statik + version: 79258177a57a85a8ab2eca7ce0936aad80307f4e + subpackages: + - fs +- name: github.com/rcrowley/go-metrics + version: 3113b8401b8a98917cde58f8bbd42a1b1c03b1fd - name: github.com/rwcarlsen/goexif - version: 8d986c03457a2057c7b0fb0a48113f7dd48f9619 + version: b1fd11e07dc5bc0d2ca3b79d28cbdf3c6d186247 subpackages: - exif - tiff -- name: github.com/soheilhy/cmux - version: e09e9389d85d8492d313d73d1469c029e710623f +- name: github.com/satori/go.uuid + version: b2ce2384e17bbe0c6d34077efa39dbab3e09123b +- name: github.com/seaweedfs/fuse + version: a476a0037a0b95d71f30356a7d047af484f37e5b + subpackages: + - fs + - fuseutil +- name: github.com/Shopify/sarama + version: 2dcf36cdd2017e60a9fcd9d530ab517cc1b6d854 +- name: github.com/spaolacci/murmur3 + version: f09979ecbc725b9e6d41a297405f65e7e8804acc - name: github.com/spf13/afero - version: 787d034dfe70e44075ccc060d346146ef53270ad + version: f4711e4db9e9a1d3887343acb72b2bbfc2f686f5 subpackages: - mem - name: github.com/spf13/cast - version: 8965335b8c7107321228e3e3702cab9832751bac + version: 8c9545af88b134710ab1cd196795e7f2388358d7 - name: github.com/spf13/jwalterweatherman - version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 + version: 94f6ae3ed3bceceafa716478c5fbf8d29ca601a1 - name: github.com/spf13/pflag - version: 3ebe029320b2676d667ae88da602a5f854788a8a + version: 24fa6976df40757dce6aea913e7b81ade90530e1 - name: github.com/spf13/viper - version: 15738813a09db5c8e5b60a19d67d3f9bd38da3a4 + version: d104d259b3380cb653bb793756823c3c41b37b53 - name: github.com/syndtr/goleveldb - version: 0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697 + version: 2f17a3356c6616cbfc4ae4c38147dc062a68fb0e subpackages: - leveldb - leveldb/cache @@ -119,57 +222,117 @@ imports: - leveldb/storage - leveldb/table - leveldb/util +- name: github.com/willf/bitset + 
version: 20ad246f50b49590afcb1ed8ad143da7163869cb +- name: github.com/willf/bloom + version: 54e3b963ee1652b06c4562cb9b6020ebc6e36e59 +- name: go.opencensus.io + version: 57c09932883846047fd542903575671cb6b75070 + subpackages: + - exemplar + - internal + - internal/tagencoding + - plugin/ocgrpc + - plugin/ochttp + - plugin/ochttp/propagation/b3 + - stats + - stats/internal + - stats/view + - tag + - trace + - trace/internal + - trace/propagation + - trace/tracestate - name: golang.org/x/image - version: cc896f830cedae125428bc9fe1b0362aa91b3fb1 + version: ef4a1470e0dc5915f2f5fa04a28eeab72c6936a4 subpackages: - bmp - tiff - tiff/lzw - name: golang.org/x/net - version: 4cb1c02c05b0e749b0365f61ae859a8e0cfceed9 + version: 3a22650c66bd7f4fb6d1e8072ffd7b75c8a27898 subpackages: - context + - context/ctxhttp - http/httpguts - http2 - http2/hpack - idna - internal/timeseries - trace -- name: golang.org/x/sys - version: 7138fd3d9dc8335c567ca206f4333fb75eb05d56 +- name: golang.org/x/oauth2 + version: 9b3c75971fc92dd27c6436a37c05c831498658f1 subpackages: - - unix + - google + - internal + - jws + - jwt +- name: golang.org/x/sync + version: 37e7f081c4d4c64e13b10787722085407fe5d15f + subpackages: + - errgroup + - semaphore - name: golang.org/x/text - version: 5cec4b58c438bd98288aeb248bab2c1840713d21 + version: 6c92c7dc7f53607809182301b96e4cc1975143f1 subpackages: - secure/bidirule - transform - unicode/bidi - unicode/norm -- name: google.golang.org/appengine - version: b1f26356af11148e710935ed1ac8a7f5702c7612 +- name: golang.org/x/tools + version: e8c45e0433280218fb679d7bbad0f80fc9677353 subpackages: - - cloudsql + - godoc/util + - godoc/vfs +- name: google.golang.org/api + version: 2eba4b8eddaf7e8873ed5ba4d2b7c51b3c443bf7 + subpackages: + - gensupport + - googleapi + - googleapi/internal/uritemplates + - googleapi/transport + - internal + - iterator + - option + - storage/v1 + - support/bundler + - transport + - transport/grpc + - transport/http + - transport/http/internal/propagation - name: google.golang.org/genproto - version: ff3583edef7de132f219f0efc00e097cabcc0ec0 + version: 082222b4a5c572e33e82ee9162d1352c7cf38682 subpackages: + - googleapis/api/annotations + - googleapis/iam/v1 + - googleapis/pubsub/v1 + - googleapis/rpc/code - googleapis/rpc/status + - protobuf/field_mask - name: google.golang.org/grpc - version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8 + version: 29c406a5bd0ed61c5752d00c6aabd9fa036ed328 subpackages: - balancer - balancer/base - balancer/roundrobin + - binarylog/grpc_binarylog_v1 - codes - connectivity - credentials + - credentials/internal + - credentials/oauth - encoding - encoding/proto - grpclog - internal - internal/backoff + - internal/binarylog - internal/channelz + - internal/envconfig - internal/grpcrand + - internal/grpcsync + - internal/syscall + - internal/transport - keepalive - metadata - naming @@ -182,9 +345,8 @@ imports: - stats - status - tap - - transport - name: gopkg.in/inf.v0 version: d2d2541c53f18d2a059457998ce2876cc8e67cbf - name: gopkg.in/yaml.v2 - version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 + version: 51d6538a90f86fe93ac480b35f37b2be17fef232 testImports: [] diff --git a/weed/glide.yaml b/weed/glide.yaml index 740d2ad3d..dd00f8522 100644 --- a/weed/glide.yaml +++ b/weed/glide.yaml @@ -1,44 +1,71 @@ package: github.com/chrislusf/seaweedfs/weed import: -- package: github.com/seaweedfs/fuse +- package: cloud.google.com/go subpackages: - - fs + - pubsub + - storage +- package: github.com/Azure/azure-storage-blob-go + subpackages: + - azblob +- 
package: github.com/Shopify/sarama +- package: github.com/aws/aws-sdk-go + subpackages: + - aws + - aws/awserr + - aws/credentials + - aws/session + - service/s3 + - service/s3/s3iface + - service/sqs - package: github.com/boltdb/bolt - version: ^1.3.1 - package: github.com/chrislusf/raft + subpackages: + - protobuf - package: github.com/dgrijalva/jwt-go - version: ^3.2.0 - package: github.com/disintegration/imaging - version: ^1.4.1 +- package: github.com/dustin/go-humanize - package: github.com/go-redis/redis - version: ^6.10.2 - package: github.com/go-sql-driver/mysql - version: ^1.3.0 - package: github.com/gocql/gocql - package: github.com/golang/protobuf - version: ^1.0.0 subpackages: - proto - package: github.com/google/btree - package: github.com/gorilla/mux - version: ^1.6.1 +- package: github.com/karlseguin/ccache - package: github.com/klauspost/crc32 - version: ^1.1.0 +- package: github.com/kurin/blazer + subpackages: + - b2 - package: github.com/lib/pq +- package: github.com/rakyll/statik + subpackages: + - fs - package: github.com/rwcarlsen/goexif subpackages: - exif -- package: github.com/soheilhy/cmux - version: ^0.1.4 +- package: github.com/satori/go.uuid +- package: github.com/seaweedfs/fuse + subpackages: + - fs +- package: github.com/spf13/viper - package: github.com/syndtr/goleveldb subpackages: - leveldb - leveldb/util +- package: github.com/willf/bloom - package: golang.org/x/net subpackages: - context -- package: google.golang.org/grpc - version: ^1.11.3 +- package: golang.org/x/tools subpackages: + - godoc/util +- package: google.golang.org/api + subpackages: + - option +- package: google.golang.org/grpc + subpackages: + - credentials + - keepalive - peer - reflection From 0bc3cdd26ae96de640b8b06896dff2baf7605abf Mon Sep 17 00:00:00 2001 From: chenwanli Date: Mon, 25 Feb 2019 10:47:41 +0800 Subject: [PATCH 038/450] Fix https://github.com/chrislusf/seaweedfs/issues/825 --- weed/topology/topology.go | 8 +++++--- weed/topology/volume_growth.go | 5 ++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/weed/topology/topology.go b/weed/topology/topology.go index 619cc9696..77716605a 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -88,11 +88,13 @@ func (t *Topology) Lookup(collection string, vid storage.VolumeId) []*DataNode { return nil } -func (t *Topology) NextVolumeId() storage.VolumeId { +func (t *Topology) NextVolumeId() (storage.VolumeId, error) { vid := t.GetMaxVolumeId() next := vid.Next() - go t.RaftServer.Do(NewMaxVolumeIdCommand(next)) - return next + if _, err := t.RaftServer.Do(NewMaxVolumeIdCommand(next)); err != nil { + return 0, err + } + return next, nil } func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool { diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 3d178b827..ef39a1c01 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -82,7 +82,10 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo if e != nil { return 0, e } - vid := topo.NextVolumeId() + vid, raftErr := topo.NextVolumeId() + if raftErr != nil { + return 0, raftErr + } err := vg.grow(grpcDialOption, topo, vid, option, servers...) 
return len(servers), err } From 0d84e32f56876b4ea99ed2aa767347c125bde428 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Feb 2019 00:34:21 -0800 Subject: [PATCH 039/450] update library versions --- weed/glide.lock | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/weed/glide.lock b/weed/glide.lock index ec0c0686e..f5039bb71 100644 --- a/weed/glide.lock +++ b/weed/glide.lock @@ -1,8 +1,8 @@ hash: 306ab43df769fe8072733ba28c3d2b6308288f248ee65806ac7d0bfa3349ab73 -updated: 2019-02-20T09:08:30.870858-08:00 +updated: 2019-02-25T00:25:56.429024-08:00 imports: - name: cloud.google.com/go - version: f775b69d59df4650962b3adf2bbdb7677445e381 + version: e2c125ceac8b663cfcf4610477d4d67827377cb7 subpackages: - compute/metadata - iam @@ -15,7 +15,7 @@ imports: - pubsub/internal/distribution - storage - name: github.com/aws/aws-sdk-go - version: 112e8a372b3f46d410ed416decd09cc06a05e3b0 + version: 5604f1add1ce6b18465fd50f7fe8de7561cc8a62 subpackages: - aws - aws/awserr @@ -64,7 +64,7 @@ imports: - name: github.com/boltdb/bolt version: fd01fc79c553a8e99d512a07e8e0c63d4a3ccfc5 - name: github.com/chrislusf/raft - version: cc54ba4a4ef32c5ac0fcc61fc7cbbf901410d127 + version: 10d6e2182d923e93ec0cc1aa1d556e5b1f8a39e7 subpackages: - protobuf - name: github.com/DataDog/zstd @@ -90,7 +90,7 @@ imports: - name: github.com/fsnotify/fsnotify version: ccc981bf80385c528a65fbfdd49bf2d8da22aa23 - name: github.com/go-redis/redis - version: 35e2db2f14bb01d2a53b9e627b63239281c87927 + version: bd542089bb6e776e6fced5038edac8a0f526aa53 subpackages: - internal - internal/consistenthash @@ -147,7 +147,7 @@ imports: - name: github.com/jmespath/go-jmespath version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5 - name: github.com/karlseguin/ccache - version: 3385784411ac24a8be403f7938890ec67ef6e0d6 + version: ec06cd93a07565b373789b0078ba88fe697fddd9 - name: github.com/klauspost/crc32 version: bab58d77464aa9cf4e84200c3276da0831fe0c03 - name: github.com/kurin/blazer @@ -170,7 +170,7 @@ imports: - name: github.com/pelletier/go-toml version: 27c6b39a135b7dc87a14afb068809132fb7a9a8f - name: github.com/pierrec/lz4 - version: 7f42fed96396aa858623e7e51628123cda496462 + version: 062282ea0dcff40c9fb8525789eef9644b1fbd6e subpackages: - internal/xxh32 - name: github.com/rakyll/statik @@ -187,12 +187,12 @@ imports: - name: github.com/satori/go.uuid version: b2ce2384e17bbe0c6d34077efa39dbab3e09123b - name: github.com/seaweedfs/fuse - version: a476a0037a0b95d71f30356a7d047af484f37e5b + version: 1aae43e32cadcfa182fc60777f20fb02673e8f82 subpackages: - fs - fuseutil - name: github.com/Shopify/sarama - version: 2dcf36cdd2017e60a9fcd9d530ab517cc1b6d854 + version: 4602b5a8c6e826f9e0737865818dd43b2339a092 - name: github.com/spaolacci/murmur3 version: f09979ecbc725b9e6d41a297405f65e7e8804acc - name: github.com/spf13/afero @@ -208,7 +208,7 @@ imports: - name: github.com/spf13/viper version: d104d259b3380cb653bb793756823c3c41b37b53 - name: github.com/syndtr/goleveldb - version: 2f17a3356c6616cbfc4ae4c38147dc062a68fb0e + version: 9d007e481048296f09f59bd19bb7ae584563cd95 subpackages: - leveldb - leveldb/cache @@ -227,7 +227,7 @@ imports: - name: github.com/willf/bloom version: 54e3b963ee1652b06c4562cb9b6020ebc6e36e59 - name: go.opencensus.io - version: 57c09932883846047fd542903575671cb6b75070 + version: beafb2a85a579a4918ba259877a1625e9213a263 subpackages: - exemplar - internal @@ -244,7 +244,7 @@ imports: - trace/propagation - trace/tracestate - name: golang.org/x/image - version: 
ef4a1470e0dc5915f2f5fa04a28eeab72c6936a4 + version: 31aff87c08e9a5e5d524279a564f96968336f886 subpackages: - bmp - tiff @@ -280,12 +280,12 @@ imports: - unicode/bidi - unicode/norm - name: golang.org/x/tools - version: e8c45e0433280218fb679d7bbad0f80fc9677353 + version: 83362c3779f5f48611068d488a03ea7bbaddc81e subpackages: - godoc/util - godoc/vfs - name: google.golang.org/api - version: 2eba4b8eddaf7e8873ed5ba4d2b7c51b3c443bf7 + version: 8a550ba84cafabe9b2262c41303f31e5a4626318 subpackages: - gensupport - googleapi @@ -310,7 +310,7 @@ imports: - googleapis/rpc/status - protobuf/field_mask - name: google.golang.org/grpc - version: 29c406a5bd0ed61c5752d00c6aabd9fa036ed328 + version: 2773c7bbcf81cf358d3f0038b1469b2d44062acb subpackages: - balancer - balancer/base From 7bbe24dd2857e345df84de76a844936f29a8fdc1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Feb 2019 00:43:36 -0800 Subject: [PATCH 040/450] volume server directly support https --- weed/command/scaffold.go | 13 +++++++++++-- weed/command/volume.go | 11 +++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index cb0a726ce..9e45d7381 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -255,8 +255,6 @@ directory = "/" # destination directory [jwt.signing] key = "" -# volume server also uses grpc that should be secured. - # all grpc tls authentications are mutual # the values for the following ca, cert, and key are paths to the PERM files. [grpc] @@ -280,5 +278,16 @@ key = "" cert = "" key = "" + +# volume server https options +# Note: work in progress! +# this does not work with other clients, e.g., "weed filer|mount" etc, yet. +[https.client] +enabled = true +[https.volume] +cert = "" +key = "" + + ` ) diff --git a/weed/command/volume.go b/weed/command/volume.go index 32ec7819b..2ee6bb11a 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -195,8 +195,15 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v reflection.Register(grpcS) go grpcS.Serve(grpcL) - if e := http.Serve(listener, volumeMux); e != nil { - glog.Fatalf("Volume server fail to serve: %v", e) + if viper.GetString("https.volume.key") != "" { + if e := http.ServeTLS(listener, volumeMux, + viper.GetString("https.volume.cert"), viper.GetString("https.volume.key")); e != nil { + glog.Fatalf("Volume server fail to serve: %v", e) + } + } else { + if e := http.Serve(listener, volumeMux); e != nil { + glog.Fatalf("Volume server fail to serve: %v", e) + } } } From 344caf3cd7d2de00469a61ae3bf597b8e9bab726 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Feb 2019 00:44:48 -0800 Subject: [PATCH 041/450] 1.25 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 0f0efad86..5587d5b5b 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,5 @@ package util const ( - VERSION = "1.24" + VERSION = "1.25" ) From fd27ed775591134e8cd8c31dbbec86ee3afec0f6 Mon Sep 17 00:00:00 2001 From: chenwanli Date: Tue, 26 Feb 2019 17:12:39 +0800 Subject: [PATCH 042/450] Fix https://github.com/chrislusf/seaweedfs/issues/861 --- weed/storage/volume_ttl.go | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/weed/storage/volume_ttl.go b/weed/storage/volume_ttl.go index 4318bb048..0989e7a49 100644 --- a/weed/storage/volume_ttl.go +++ b/weed/storage/volume_ttl.go @@ -16,8 +16,8 @@ const ( ) 
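
For context on the renames in the hunk below: Go only exposes identifiers that begin with an upper-case letter outside their package, and reflection-based encoders such as encoding/json skip unexported fields entirely. The following sketch uses a hypothetical struct, not the real storage TTL type, to show the difference.

package main

import (
	"encoding/json"
	"fmt"
)

// ttlExample is a hypothetical struct: one field exported, one not.
type ttlExample struct {
	Count byte // visible to other packages and to reflection-based encoders
	unit  byte // invisible outside this package; encoders silently skip it
}

func main() {
	b, _ := json.Marshal(ttlExample{Count: 3, unit: 'd'})
	fmt.Println(string(b)) // prints {"Count":3}; the unexported field is dropped
}
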
type TTL struct { - count byte - unit byte + Count byte + Unit byte } var EMPTY_TTL = &TTL{} @@ -43,12 +43,12 @@ func ReadTTL(ttlString string) (*TTL, error) { } count, err := strconv.Atoi(string(countBytes)) unit := toStoredByte(unitByte) - return &TTL{count: byte(count), unit: unit}, err + return &TTL{Count: byte(count), Unit: unit}, err } // read stored bytes to a ttl func LoadTTLFromBytes(input []byte) (t *TTL) { - return &TTL{count: input[0], unit: input[1]} + return &TTL{Count: input[0], Unit: input[1]} } // read stored bytes to a ttl @@ -61,25 +61,25 @@ func LoadTTLFromUint32(ttl uint32) (t *TTL) { // save stored bytes to an output with 2 bytes func (t *TTL) ToBytes(output []byte) { - output[0] = t.count - output[1] = t.unit + output[0] = t.Count + output[1] = t.Unit } func (t *TTL) ToUint32() (output uint32) { - output = uint32(t.count) << 8 - output += uint32(t.unit) + output = uint32(t.Count) << 8 + output += uint32(t.Unit) return output } func (t *TTL) String() string { - if t == nil || t.count == 0 { + if t == nil || t.Count == 0 { return "" } - if t.unit == Empty { + if t.Unit == Empty { return "" } - countString := strconv.Itoa(int(t.count)) - switch t.unit { + countString := strconv.Itoa(int(t.Count)) + switch t.Unit { case Minute: return countString + "m" case Hour: @@ -115,21 +115,21 @@ func toStoredByte(readableUnitByte byte) byte { } func (t TTL) Minutes() uint32 { - switch t.unit { + switch t.Unit { case Empty: return 0 case Minute: - return uint32(t.count) + return uint32(t.Count) case Hour: - return uint32(t.count) * 60 + return uint32(t.Count) * 60 case Day: - return uint32(t.count) * 60 * 24 + return uint32(t.Count) * 60 * 24 case Week: - return uint32(t.count) * 60 * 24 * 7 + return uint32(t.Count) * 60 * 24 * 7 case Month: - return uint32(t.count) * 60 * 24 * 31 + return uint32(t.Count) * 60 * 24 * 31 case Year: - return uint32(t.count) * 60 * 24 * 365 + return uint32(t.Count) * 60 * 24 * 365 } return 0 } From 70ac2f6ea85d9913ac0c68289074463af0d897bd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 26 Feb 2019 23:23:04 -0800 Subject: [PATCH 043/450] add namespace for ListAllMyBucketsResult --- weed/s3api/s3api_bucket_handlers.go | 4 ++- weed/s3api/s3api_bucket_handlers_test.go | 40 ++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 weed/s3api/s3api_bucket_handlers_test.go diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 1d319e354..d9508ae9c 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -2,6 +2,7 @@ package s3api import ( "context" + "encoding/xml" "fmt" "math" "net/http" @@ -21,8 +22,9 @@ var ( ) type ListAllMyBucketsResult struct { - Buckets []*s3.Bucket `xml:"Buckets>Bucket"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` Owner *s3.Owner + Buckets []*s3.Bucket `xml:"Buckets>Bucket"` } func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { diff --git a/weed/s3api/s3api_bucket_handlers_test.go b/weed/s3api/s3api_bucket_handlers_test.go new file mode 100644 index 000000000..188ccbcbd --- /dev/null +++ b/weed/s3api/s3api_bucket_handlers_test.go @@ -0,0 +1,40 @@ +package s3api + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestListBucketsHandler(t *testing.T) { + + expected := ` +2011-04-09T12:34:49Ztest12011-02-09T12:34:49Ztest2` + var response ListAllMyBucketsResult + + var buckets 
[]*s3.Bucket + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test1"), + CreationDate: aws.Time(time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC)), + }) + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test2"), + CreationDate: aws.Time(time.Date(2011, 2, 9, 12, 34, 49, 0, time.UTC)), + }) + + response = ListAllMyBucketsResult{ + Owner: &s3.Owner{ + ID: aws.String(""), + DisplayName: aws.String(""), + }, + Buckets: buckets, + } + + encoded := string(encodeResponse(response)) + println(encoded) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } +} From cb07d15254a0ce87486b859ed693bfea1a7263a5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 27 Feb 2019 00:21:37 -0800 Subject: [PATCH 044/450] add namespace for s3 --- weed/s3api/filer_multipart.go | 13 +++-- weed/s3api/filer_multipart_test.go | 26 ++++++++++ weed/s3api/s3api_bucket_handlers_test.go | 1 - weed/s3api/s3api_objects_list_handlers.go | 46 ++++++++--------- .../s3api/s3api_objects_list_handlers_test.go | 38 ++++++++++++++ weed/s3api/s3api_xsd_generated.go | 51 ++++++++++--------- 6 files changed, 121 insertions(+), 54 deletions(-) create mode 100644 weed/s3api/filer_multipart_test.go create mode 100644 weed/s3api/s3api_objects_list_handlers_test.go diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 73be496d9..d39e821d0 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,6 +1,7 @@ package s3api import ( + "encoding/xml" "fmt" "path/filepath" "strconv" @@ -16,6 +17,7 @@ import ( ) type InitiateMultipartUploadResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"` s3.CreateMultipartUploadOutput } @@ -34,7 +36,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp } output = &InitiateMultipartUploadResult{ - s3.CreateMultipartUploadOutput{ + CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{ Bucket: input.Bucket, Key: input.Key, UploadId: aws.String(uploadIdString), @@ -45,6 +47,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp } type CompleteMultipartUploadResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"` s3.CompleteMultipartUploadOutput } @@ -95,7 +98,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa } output = &CompleteMultipartUploadResult{ - s3.CompleteMultipartUploadOutput{ + CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{ Bucket: input.Bucket, ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""), Key: input.Key, @@ -128,13 +131,14 @@ func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput } type ListMultipartUploadsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"` s3.ListMultipartUploadsOutput } func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { output = &ListMultipartUploadsResult{ - s3.ListMultipartUploadsOutput{ + ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ Bucket: input.Bucket, Delimiter: input.Delimiter, EncodingType: input.EncodingType, @@ -164,12 +168,13 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput } type ListPartsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"` 
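
The XMLName fields being added in this patch are what make encoding/xml stamp the S3 namespace onto the root element of each marshaled response. A minimal sketch of that mechanism follows, using a toy struct rather than the types in this package.

package main

import (
	"encoding/xml"
	"fmt"
)

// toyResult is illustrative only; its XMLName tag carries "namespace name",
// the same pattern used by the response structs in this patch.
type toyResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"`
	Bucket  string   `xml:"Buckets>Bucket>Name"`
}

func main() {
	out, _ := xml.Marshal(toyResult{Bucket: "test1"})
	fmt.Println(string(out))
	// prints the root element with the namespace attached, roughly:
	// <ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Buckets><Bucket><Name>test1</Name></Bucket></Buckets></ListAllMyBucketsResult>
}
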
s3.ListPartsOutput } func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { output = &ListPartsResult{ - s3.ListPartsOutput{ + ListPartsOutput: s3.ListPartsOutput{ Bucket: input.Bucket, Key: input.Key, UploadId: input.UploadId, diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go new file mode 100644 index 000000000..835665dd6 --- /dev/null +++ b/weed/s3api/filer_multipart_test.go @@ -0,0 +1,26 @@ +package s3api + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "testing" +) + +func TestInitiateMultipartUploadResult(t *testing.T) { + + expected := ` +example-bucketexample-objectVXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA` + response := &InitiateMultipartUploadResult{ + CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{ + Bucket: aws.String("example-bucket"), + Key: aws.String("example-object"), + UploadId: aws.String("VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"), + }, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } + +} diff --git a/weed/s3api/s3api_bucket_handlers_test.go b/weed/s3api/s3api_bucket_handlers_test.go index 188ccbcbd..7ab04830b 100644 --- a/weed/s3api/s3api_bucket_handlers_test.go +++ b/weed/s3api/s3api_bucket_handlers_test.go @@ -33,7 +33,6 @@ func TestListBucketsHandler(t *testing.T) { } encoded := string(encodeResponse(response)) - println(encoded) if encoded != expected { t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) } diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index d751a3b1d..927416e0f 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -9,8 +9,6 @@ import ( "strconv" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -85,7 +83,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response *s3.ListObjectsOutput, err error) { +func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name dir, prefix := filepath.Split(originalPrefix) @@ -106,8 +104,8 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys return fmt.Errorf("list buckets: %v", err) } - var contents []*s3.Object - var commonPrefixes []*s3.CommonPrefix + var contents []ListEntry + var commonPrefixes []PrefixEntry var counter int var lastEntryName string var isTruncated bool @@ -119,32 +117,32 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys } lastEntryName = entry.Name if entry.IsDirectory { - commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{ - Prefix: aws.String(fmt.Sprintf("%s%s/", dir, entry.Name)), + commonPrefixes = append(commonPrefixes, PrefixEntry{ + Prefix: fmt.Sprintf("%s%s/", dir, entry.Name), }) } else { - contents = append(contents, &s3.Object{ - Key: aws.String(fmt.Sprintf("%s%s", dir, entry.Name)), - LastModified: 
aws.Time(time.Unix(entry.Attributes.Mtime, 0)), - ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""), - Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))), - Owner: &s3.Owner{ - ID: aws.String("bcaf161ca5fb16fd081034f"), - DisplayName: aws.String("webfile"), + contents = append(contents, ListEntry{ + Key: fmt.Sprintf("%s%s", dir, entry.Name), + LastModified: time.Unix(entry.Attributes.Mtime, 0), + ETag: "\"" + filer2.ETag(entry.Chunks) + "\"", + Size: int64(filer2.TotalSize(entry.Chunks)), + Owner: CanonicalUser{ + ID: "bcaf161ca5fb16fd081034f", + DisplayName: "webfile", }, - StorageClass: aws.String("STANDARD"), + StorageClass: "STANDARD", }) } } - response = &s3.ListObjectsOutput{ - Name: aws.String(bucket), - Prefix: aws.String(originalPrefix), - Marker: aws.String(marker), - NextMarker: aws.String(lastEntryName), - MaxKeys: aws.Int64(int64(maxKeys)), - Delimiter: aws.String("/"), - IsTruncated: aws.Bool(isTruncated), + response = ListBucketResult{ + Name: bucket, + Prefix: originalPrefix, + Marker: marker, + NextMarker: lastEntryName, + MaxKeys: maxKeys, + Delimiter: "/", + IsTruncated: isTruncated, Contents: contents, CommonPrefixes: commonPrefixes, } diff --git a/weed/s3api/s3api_objects_list_handlers_test.go b/weed/s3api/s3api_objects_list_handlers_test.go new file mode 100644 index 000000000..9feb25920 --- /dev/null +++ b/weed/s3api/s3api_objects_list_handlers_test.go @@ -0,0 +1,38 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestListObjectsHandler(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html + + expected := ` +test_container1000false1.zip"4397da7a7649e8085de9916c240e8166"123456765a011niqo39cdf8ec533ec3d1ccaafsa932STANDARD2011-04-09T12:34:49` + + response := ListBucketResult{ + Name: "test_container", + Prefix: "", + Marker: "", + NextMarker: "", + MaxKeys: 1000, + IsTruncated: false, + Contents: []ListEntry{{ + Key: "1.zip", + LastModified: time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC), + ETag: "\"4397da7a7649e8085de9916c240e8166\"", + Size: 1234567, + Owner: CanonicalUser{ + ID: "65a011niqo39cdf8ec533ec3d1ccaafsa932", + }, + StorageClass: "STANDARD", + }}, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } +} diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go index df07f3fea..e678ecf0d 100644 --- a/weed/s3api/s3api_xsd_generated.go +++ b/weed/s3api/s3api_xsd_generated.go @@ -25,8 +25,8 @@ type BucketLoggingStatus struct { } type CanonicalUser struct { - ID string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ID"` - DisplayName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DisplayName,omitempty"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` } type CopyObject struct { @@ -506,15 +506,15 @@ func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement) } type ListAllMyBucketsEntry struct { - Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` - CreationDate time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + Name string `xml:"Name"` + CreationDate time.Time `xml:"CreationDate"` } func (t *ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListAllMyBucketsEntry var layout struct { *T - CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + CreationDate *xsdDateTime `xml:"CreationDate"` } layout.T = (*T)(t) 
layout.CreationDate = (*xsdDateTime)(&layout.T.CreationDate) @@ -524,7 +524,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem type T ListAllMyBucketsEntry var overlay struct { *T - CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + CreationDate *xsdDateTime `xml:"CreationDate"` } overlay.T = (*T)(t) overlay.CreationDate = (*xsdDateTime)(&overlay.T.CreationDate) @@ -532,7 +532,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem } type ListAllMyBucketsList struct { - Bucket []ListAllMyBucketsEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket,omitempty"` + Bucket []ListAllMyBucketsEntry `xml:"Bucket,omitempty"` } type ListAllMyBucketsResponse struct { @@ -577,32 +577,33 @@ type ListBucketResponse struct { } type ListBucketResult struct { - Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` - Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` - Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"` - Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker"` - NextMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextMarker,omitempty"` - MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"` - Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"` - IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"` - Contents []ListEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Contents,omitempty"` - CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` + Metadata []MetadataEntry `xml:"Metadata,omitempty"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker,omitempty"` + MaxKeys int `xml:"MaxKeys"` + Delimiter string `xml:"Delimiter,omitempty"` + IsTruncated bool `xml:"IsTruncated"` + Contents []ListEntry `xml:"Contents,omitempty"` + CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` } type ListEntry struct { - Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` - LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` - ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` - Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"` - Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"` - StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"` + Key string `xml:"Key"` + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` + Size int64 `xml:"Size"` + Owner CanonicalUser `xml:"Owner,omitempty"` + StorageClass StorageClass `xml:"StorageClass"` } func (t *ListEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListEntry var layout struct { *T - LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + LastModified *xsdDateTime `xml:"LastModified"` } layout.T = (*T)(t) layout.LastModified = (*xsdDateTime)(&layout.T.LastModified) @@ -612,7 +613,7 @@ func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { type T ListEntry var overlay struct { *T - LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + LastModified *xsdDateTime `xml:"LastModified"` } overlay.T = (*T)(t) 
overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified) From 900aad4935a508fc63d2a70f6d75429431e618d6 Mon Sep 17 00:00:00 2001 From: chenwanli Date: Tue, 26 Feb 2019 18:12:58 +0800 Subject: [PATCH 045/450] Fix https://github.com/chrislusf/seaweedfs/issues/864 --- weed/topology/store_replicate.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index 3967bb3e3..4273e6d68 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -47,6 +47,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, } q := url.Values{ "type": {"replicate"}, + "ttl": {needle.Ttl.String()}, } if needle.LastModified > 0 { q.Set("ts", strconv.FormatUint(needle.LastModified, 10)) From 2812c14520b75b0f89195108c8c54424587f0d6d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 2 Mar 2019 05:10:05 -0800 Subject: [PATCH 046/450] master: add option to disable http operations --- weed/command/master.go | 2 ++ weed/command/server.go | 3 ++- weed/server/master_server.go | 19 +++++++++++-------- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/weed/command/master.go b/weed/command/master.go index 9a0ae7eb4..cc6818967 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -47,6 +47,7 @@ var ( mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") + httpReadOnly = cmdMaster.Flag.Bool("httpReadOnly", false, "disable http operations, only gRPC operations are allowed.") masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file") @@ -78,6 +79,7 @@ func runMaster(cmd *Command, args []string) bool { *volumeSizeLimitMB, *volumePreallocate, *mpulse, *defaultReplicaPlacement, *garbageThreshold, masterWhiteList, + *httpReadOnly, ) listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport) diff --git a/weed/command/server.go b/weed/command/server.go index 456b96435..1594d20cc 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -60,6 +60,7 @@ var ( serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. 
No limit if empty.") + masterHttpReadOnly = cmdServer.Flag.Bool("master.httpReadOnly", false, "disable http operations, only gRPC operations are allowed.") serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") @@ -171,7 +172,7 @@ func runServer(cmd *Command, args []string) bool { ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder, *masterVolumeSizeLimitMB, *masterVolumePreallocate, *pulseSeconds, *masterDefaultReplicaPlacement, *serverGarbageThreshold, - serverWhiteList, + serverWhiteList, *masterHttpReadOnly, ) glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterPort) diff --git a/weed/server/master_server.go b/weed/server/master_server.go index a44a567d6..7572e9b0e 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -49,6 +49,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, defaultReplicaPlacement string, garbageThreshold float64, whiteList []string, + httpReadOnly bool, ) *MasterServer { v := viper.GetViper() @@ -79,14 +80,16 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, handleStaticResources2(r) r.HandleFunc("/", ms.uiStatusHandler) r.HandleFunc("/ui/index.html", ms.uiStatusHandler) - r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) - r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) - r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) - r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler))) - r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler))) - r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) - r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) - r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) + if (!httpReadOnly) { + r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) + r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) + r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) + r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler))) + r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler))) + r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) + r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) + r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) + } r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) From 219b651bc3642b74e5ffd824a0fedc904ec9796c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 3 Mar 2019 10:17:44 -0800 Subject: [PATCH 047/450] jwt check the base file id fix https://github.com/chrislusf/seaweedfs/issues/867 --- weed/server/common_test.go | 31 +++++++++++++++++++++++++++ weed/server/volume_server_handlers.go | 4 ++++ 2 files changed, 35 insertions(+) create mode 100644 
weed/server/common_test.go diff --git a/weed/server/common_test.go b/weed/server/common_test.go new file mode 100644 index 000000000..2e6c70bfe --- /dev/null +++ b/weed/server/common_test.go @@ -0,0 +1,31 @@ +package weed_server + +import ( + "strings" + "testing" +) + +func TestParseURL(t *testing.T) { + if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684"); true { + if vid != "1" { + t.Errorf("fail to parse vid: %s", vid) + } + if fid != "06dfa8a684" { + t.Errorf("fail to parse fid: %s", fid) + } + } + if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684_1"); true { + if vid != "1" { + t.Errorf("fail to parse vid: %s", vid) + } + if fid != "06dfa8a684_1" { + t.Errorf("fail to parse fid: %s", fid) + } + if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 { + fid = fid[:sepIndex] + } + if fid != "06dfa8a684" { + t.Errorf("fail to parse fid: %s", fid) + } + } +} diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go index 0e9aaeb3b..d23c08290 100644 --- a/weed/server/volume_server_handlers.go +++ b/weed/server/volume_server_handlers.go @@ -2,6 +2,7 @@ package weed_server import ( "net/http" + "strings" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -71,6 +72,9 @@ func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid str } if sc, ok := token.Claims.(*security.SeaweedFileIdClaims); ok { + if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 { + fid = fid[:sepIndex] + } return sc.Fid == vid+","+fid } glog.V(1).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr) From f233bbe9c01c9b721de914543f38dc9fd0e8bf31 Mon Sep 17 00:00:00 2001 From: chenwanli Date: Mon, 4 Mar 2019 12:43:43 +0800 Subject: [PATCH 048/450] Fix https://github.com/chrislusf/seaweedfs/issues/825#issuecomment-469097375 --- weed/server/master_grpc_server.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 043a6ff51..4c8ff5700 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -102,12 +102,13 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ // tell the volume servers about the leader newLeader, err := t.Leader() - if err == nil { - if err := stream.Send(&master_pb.HeartbeatResponse{ - Leader: newLeader, - }); err != nil { - return err - } + if err != nil { + return err + } + if err := stream.Send(&master_pb.HeartbeatResponse{ + Leader: newLeader, + }); err != nil { + return err } } } From f9dcf56465837b210e0b60a6b3a3c929c66c1f6b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 4 Mar 2019 09:02:58 -0800 Subject: [PATCH 049/450] adjust log level for loading configurations --- weed/server/filer_server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 2ace0a7ea..83998a009 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -92,10 +92,10 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in - glog.V(0).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) + glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config 
file - glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) + glog.V(1).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) if required { glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ "\n\nPlease follow this example and add a filer.toml file to "+ From d312c55bbed8ef69dc11bf024024c7b50f56f5b2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 4 Mar 2019 13:00:08 -0800 Subject: [PATCH 050/450] file path supports windows, avoiding back slashes fix https://github.com/chrislusf/seaweedfs/issues/868 --- weed/filer2/filer.go | 2 +- weed/filesys/dir_rename.go | 4 ++-- weed/replication/replicator.go | 2 +- weed/server/filer_grpc_server.go | 10 +++++----- weed/server/filer_ui/breadcrumb.go | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 672c6201c..eae1107ca 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -70,7 +70,7 @@ func (f *Filer) CreateEntry(entry *Entry) error { var lastDirectoryEntry *Entry for i := 1; i < len(dirParts); i++ { - dirPath := "/" + filepath.Join(dirParts[:i]...) + dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...)) // fmt.Printf("%d directory: %+v\n", i, dirPath) // first check local cache diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index d29281f35..e18f67edc 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -41,7 +41,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector func moveEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, oldParent string, entry *filer_pb.Entry, newParent, newName string) error { if entry.IsDirectory { - currentDirPath := filepath.Join(oldParent, entry.Name) + currentDirPath := filepath.ToSlash(filepath.Join(oldParent, entry.Name)) lastFileName := "" includeLastFile := false @@ -65,7 +65,7 @@ func moveEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, oldParen for _, item := range resp.Entries { lastFileName = item.Name - err := moveEntry(ctx, client, currentDirPath, item, filepath.Join(newParent, newName), item.Name) + err := moveEntry(ctx, client, currentDirPath, item, filepath.ToSlash(filepath.Join(newParent, newName)), item.Name) if err != nil { return err } diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index ac8235fd5..48a81a093 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -34,7 +34,7 @@ func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) return nil } - newKey := filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):]) + newKey := filepath.ToSlash(filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])) glog.V(3).Infof("replicate %s => %s", key, newKey) key = newKey if message.OldEntry != nil && message.NewEntry == nil { diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 4f1377331..d593a425e 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -19,7 +19,7 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name))) + entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) 
if err != nil { return nil, fmt.Errorf("%s not found under %s: %v", req.Name, req.Directory, err) } @@ -112,7 +112,7 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { - fullpath := filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)) + fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) fs.filer.DeleteChunks(garbages) @@ -135,7 +135,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { - fullpath := filepath.Join(req.Directory, req.Entry.Name) + fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)) entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath)) if err != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) @@ -147,7 +147,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) newEntry := &filer2.Entry{ - FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)), + FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), Attr: entry.Attr, Chunks: chunks, } @@ -186,7 +186,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsRecursive, req.IsDeleteData) + err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IsDeleteData) return &filer_pb.DeleteEntryResponse{}, err } diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index d056a4b25..55a1909a8 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -16,7 +16,7 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { for i := 0; i < len(parts); i++ { crumbs = append(crumbs, Breadcrumb{ Name: parts[i] + "/", - Link: "/" + filepath.Join(parts[0:i+1]...), + Link: "/" + filepath.ToSlash(filepath.Join(parts[0:i+1]...)), }) } From ab32556a51dc12b25b5b107f3ebc5aea6c54ec47 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 6 Mar 2019 10:21:49 -0800 Subject: [PATCH 051/450] weed server: fix error coming from moving raft to grpc fix https://github.com/chrislusf/seaweedfs/issues/872 --- weed/command/server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/command/server.go b/weed/command/server.go index 1594d20cc..d625dd732 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -183,9 +183,9 @@ func runServer(cmd *Command, args []string) bool { go func() { // start raftServer - myMasterAddress, peers := checkPeers(*masterIp, *mport, *masterPeers) + myMasterAddress, peers := checkPeers(*serverIp, *masterPort, *serverPeers) raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), - peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) + peers, myMasterAddress, *masterMetaFolder, ms.Topo, *pulseSeconds) 
ms.SetRaftServer(raftServer) // starting grpc server From e71e0db5b7bb3001caeb8ec1da31028a3a5b9b42 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 6 Mar 2019 10:51:52 -0800 Subject: [PATCH 052/450] adding go 1.12 --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index f445bff68..e1a1fa31c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,7 @@ language: go go: - 1.10.x - 1.11.x +- 1.12.x - tip before_install: From 4773497d2c2a96990cd68e841675405db26353cb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 6 Mar 2019 12:10:45 -0800 Subject: [PATCH 053/450] add back "/cluster/status" fix https://github.com/chrislusf/seaweedfs/issues/870 --- weed/command/master.go | 1 + weed/command/server.go | 1 + weed/server/raft_server_handlers.go | 22 ++++++++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 weed/server/raft_server_handlers.go diff --git a/weed/command/master.go b/weed/command/master.go index cc6818967..4207a331c 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -97,6 +97,7 @@ func runMaster(cmd *Command, args []string) bool { raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) ms.SetRaftServer(raftServer) + r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") // starting grpc server grpcPort := *mport + 10000 diff --git a/weed/command/server.go b/weed/command/server.go index d625dd732..32b9f6987 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -187,6 +187,7 @@ func runServer(cmd *Command, args []string) bool { raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), peers, myMasterAddress, *masterMetaFolder, ms.Topo, *pulseSeconds) ms.SetRaftServer(raftServer) + r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") // starting grpc server grpcPort := *masterPort + 10000 diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go new file mode 100644 index 000000000..fd38cb977 --- /dev/null +++ b/weed/server/raft_server_handlers.go @@ -0,0 +1,22 @@ +package weed_server + +import ( + "net/http" +) + +type ClusterStatusResult struct { + IsLeader bool `json:"IsLeader,omitempty"` + Leader string `json:"Leader,omitempty"` + Peers []string `json:"Peers,omitempty"` +} + +func (s *RaftServer) StatusHandler(w http.ResponseWriter, r *http.Request) { + ret := ClusterStatusResult{ + IsLeader: s.topo.IsLeader(), + Peers: s.Peers(), + } + if leader, e := s.topo.Leader(); e == nil { + ret.Leader = leader + } + writeJsonQuiet(w, r, http.StatusOK, ret) +} From ad08a52ab65855ccc1b1470be3cdea94b012608c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 9 Mar 2019 13:24:22 -0800 Subject: [PATCH 054/450] synchronously upload files fix https://github.com/chrislusf/seaweedfs/issues/807 --- weed/server/filer_ui/templates.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index e31685ea0..884798936 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -162,7 +162,7 @@ function uploadFile(file, i) { var url = window.location.href var xhr = new XMLHttpRequest() var formData = new FormData() - xhr.open('POST', url, true) + xhr.open('POST', url, false) formData.append('file', file) xhr.send(formData) From 6fe071175dec7bb7db482dfb8506e9471d4a9e6f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 10 
Mar 2019 19:45:40 -0700 Subject: [PATCH 055/450] mount: add option to disable allow others fix https://github.com/chrislusf/seaweedfs/issues/877 --- weed/command/mount.go | 2 ++ weed/command/mount_std.go | 13 ++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/weed/command/mount.go b/weed/command/mount.go index e61f16783..760c68e40 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -17,6 +17,7 @@ type MountOptions struct { ttlSec *int chunkSizeLimitMB *int dataCenter *string + allowOthers *bool } var ( @@ -37,6 +38,7 @@ func init() { mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") + mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") } diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 3e4249bfc..2b274e200 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -56,8 +56,7 @@ func runMount(cmd *Command, args []string) bool { util.SetupProfiling(*mountCpuProfile, *mountMemProfile) - c, err := fuse.Mount( - *mountOptions.dir, + options := []fuse.MountOption{ fuse.VolumeName("SeaweedFS"), fuse.FSName("SeaweedFS"), fuse.Subtype("SeaweedFS"), @@ -67,13 +66,17 @@ func runMount(cmd *Command, args []string) bool { fuse.AutoXattr(), fuse.ExclCreate(), fuse.DaemonTimeout("3600"), - fuse.AllowOther(), fuse.AllowSUID(), fuse.DefaultPermissions(), - fuse.MaxReadahead(1024*128), + fuse.MaxReadahead(1024 * 128), fuse.AsyncRead(), fuse.WritebackCache(), - ) + } + if *mountOptions.allowOthers { + options = append(options, fuse.AllowOther()) + } + + c, err := fuse.Mount(*mountOptions.dir, options...) if err != nil { glog.Fatal(err) return false From c668e55e07bbaac85ab1d3f0778e9916678756e5 Mon Sep 17 00:00:00 2001 From: chenwanli Date: Mon, 11 Mar 2019 11:48:11 +0800 Subject: [PATCH 056/450] Revert "fix https://github.com/chrislusf/seaweedfs/issues/780" This reverts commit 31038acccf73541204b320fc334d0acad0b9923a. 
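For context on what this revert changes at runtime: the guard being removed in the hunk below rejected any upload whose HTTP status fell outside the 2xx range (http.StatusOK is 200 and http.StatusIMUsed is 226, so the pair brackets that range); with it gone, upload_content goes back to reading and parsing the response body regardless of status. A minimal, self-contained Go sketch of that kind of status guard, using illustrative names that are not part of this patch:

    package main

    import (
        "errors"
        "fmt"
        "net/http"
    )

    // checkUploadStatus mirrors the guard removed below: any status outside
    // the 2xx range (200..226) is reported as an upload failure.
    func checkUploadStatus(resp *http.Response) error {
        if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusIMUsed {
            return errors.New(http.StatusText(resp.StatusCode))
        }
        return nil
    }

    func main() {
        fmt.Println(checkUploadStatus(&http.Response{StatusCode: http.StatusInternalServerError})) // Internal Server Error
        fmt.Println(checkUploadStatus(&http.Response{StatusCode: http.StatusCreated}))             // <nil>
    }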
--- weed/operation/upload_content.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index be7b8e69c..2276c67b7 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -92,12 +92,6 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error return nil, post_err } defer resp.Body.Close() - - if resp.StatusCode < http.StatusOK || - resp.StatusCode > http.StatusIMUsed { - return nil, errors.New(http.StatusText(resp.StatusCode)) - } - etag := getEtag(resp) resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { From 2ce4dae79519a8f844bcb2ca9a27e9729eaa1718 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 11 Mar 2019 00:58:12 -0700 Subject: [PATCH 057/450] move to 1.26 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 5587d5b5b..7fbb6971a 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,5 @@ package util const ( - VERSION = "1.25" + VERSION = "1.26" ) From 346541a101a06b24b7630e58cdb07353c1c0904c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 13 Mar 2019 23:07:24 -0700 Subject: [PATCH 058/450] print out per entry memory usage --- weed/storage/needle/compact_map_perf_test.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/weed/storage/needle/compact_map_perf_test.go b/weed/storage/needle/compact_map_perf_test.go index cd21cc184..bb538bf7f 100644 --- a/weed/storage/needle/compact_map_perf_test.go +++ b/weed/storage/needle/compact_map_perf_test.go @@ -28,6 +28,7 @@ go tool pprof --alloc_space needle.test mem.out func TestMemoryUsage(t *testing.T) { var maps []*CompactMap + totalRowCount := uint64(0) startTime := time.Now() for i := 0; i < 10; i++ { @@ -35,11 +36,13 @@ func TestMemoryUsage(t *testing.T) { if ie != nil { log.Fatalln(ie) } - maps = append(maps, loadNewNeedleMap(indexFile)) + m, rowCount := loadNewNeedleMap(indexFile) + maps = append(maps, m) + totalRowCount += rowCount indexFile.Close() - PrintMemUsage() + PrintMemUsage(totalRowCount) now := time.Now() fmt.Printf("\tTaken = %v\n", now.Sub(startTime)) startTime = now @@ -47,12 +50,14 @@ func TestMemoryUsage(t *testing.T) { } -func loadNewNeedleMap(file *os.File) *CompactMap { +func loadNewNeedleMap(file *os.File) (*CompactMap, uint64) { m := NewCompactMap() bytes := make([]byte, NeedleEntrySize) + rowCount := uint64(0) count, e := file.Read(bytes) for count > 0 && e == nil { for i := 0; i < count; i += NeedleEntrySize { + rowCount++ key := BytesToNeedleId(bytes[i : i+NeedleIdSize]) offset := BytesToOffset(bytes[i+NeedleIdSize : i+NeedleIdSize+OffsetSize]) size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize]) @@ -67,17 +72,18 @@ func loadNewNeedleMap(file *os.File) *CompactMap { count, e = file.Read(bytes) } - return m + return m, rowCount } -func PrintMemUsage() { +func PrintMemUsage(totalRowCount uint64) { runtime.GC() var m runtime.MemStats runtime.ReadMemStats(&m) // For info on each, see: https://golang.org/pkg/runtime/#MemStats - fmt.Printf("Alloc = %v MiB", bToMb(m.Alloc)) + fmt.Printf("Each %v Bytes", m.Alloc/totalRowCount) + fmt.Printf("\tAlloc = %v MiB", bToMb(m.Alloc)) fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc)) fmt.Printf("\tSys = %v MiB", bToMb(m.Sys)) fmt.Printf("\tNumGC = %v", m.NumGC) From 95ef4513c88ee2399f5edb89db9080e18830ef4e Mon Sep 17 00:00:00 2001 From: 
Chris Lu Date: Wed, 13 Mar 2019 23:14:40 -0700 Subject: [PATCH 059/450] print out memory size --- weed/storage/needle/compact_map_perf_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/storage/needle/compact_map_perf_test.go b/weed/storage/needle/compact_map_perf_test.go index bb538bf7f..908da968f 100644 --- a/weed/storage/needle/compact_map_perf_test.go +++ b/weed/storage/needle/compact_map_perf_test.go @@ -82,7 +82,7 @@ func PrintMemUsage(totalRowCount uint64) { var m runtime.MemStats runtime.ReadMemStats(&m) // For info on each, see: https://golang.org/pkg/runtime/#MemStats - fmt.Printf("Each %v Bytes", m.Alloc/totalRowCount) + fmt.Printf("Each %.2f Bytes", float64(m.TotalAlloc)/float64(totalRowCount)) fmt.Printf("\tAlloc = %v MiB", bToMb(m.Alloc)) fmt.Printf("\tTotalAlloc = %v MiB", bToMb(m.TotalAlloc)) fmt.Printf("\tSys = %v MiB", bToMb(m.Sys)) From 023f447d56fd17a5ebdd3550d926fd1e35fe8248 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 14 Mar 2019 23:48:53 -0700 Subject: [PATCH 060/450] add directory /data/filerdb fix https://github.com/chrislusf/seaweedfs/issues/885 --- docker/Dockerfile | 1 + docker/Dockerfile.go_build | 1 + 2 files changed, 2 insertions(+) diff --git a/docker/Dockerfile b/docker/Dockerfile index c7a343111..b78f1677a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -25,6 +25,7 @@ EXPOSE 8333 VOLUME /data COPY filer.toml /etc/seaweedfs/filer.toml +RUN mkdir /data/filerdb COPY entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index d0a214476..3581d2fa3 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -20,6 +20,7 @@ VOLUME /data RUN mkdir -p /etc/seaweedfs RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +RUN mkdir -p /data/filerdb RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh RUN cp /go/bin/weed /usr/bin/ From a4e8aa98a0cf0581140a93b6233bc11d4a3f123e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 00:32:42 -0700 Subject: [PATCH 061/450] weed server: mkdir first, and set the master ip address fix https://github.com/chrislusf/seaweedfs/issues/885 --- docker/Dockerfile | 3 ++- weed/command/server.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index b78f1677a..75efc7ec0 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -22,10 +22,11 @@ EXPOSE 9333 # s3 server http port EXPOSE 8333 +RUN mkdir -p /data/filerdb + VOLUME /data COPY filer.toml /etc/seaweedfs/filer.toml -RUN mkdir /data/filerdb COPY entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh diff --git a/weed/command/server.go b/weed/command/server.go index 32b9f6987..cc63c96e1 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -115,6 +115,7 @@ func runServer(cmd *Command, args []string) bool { } master := *serverIp + ":" + strconv.Itoa(*masterPort) + filerOptions.masters = &master filerOptions.ip = serverIp serverOptions.v.ip = serverIp serverOptions.v.bindIp = serverBindIp From 5cbe7392a381c01ae18c11e42f63563e637c7c49 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 00:33:51 -0700 Subject: [PATCH 062/450] weed server: mkdir first --- docker/Dockerfile.go_build | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index 3581d2fa3..2f1948070 100644 --- a/docker/Dockerfile.go_build +++ 
b/docker/Dockerfile.go_build @@ -16,11 +16,12 @@ EXPOSE 9333 # s3 server http port EXPOSE 8333 +RUN mkdir -p /data/filerdb + VOLUME /data RUN mkdir -p /etc/seaweedfs RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -RUN mkdir -p /data/filerdb RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh RUN cp /go/bin/weed /usr/bin/ From 43e9dc3cf229ed765fd1bc330cbff8b320387c12 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 00:54:01 -0700 Subject: [PATCH 063/450] docker: add the /data/filerdb folder --- docker/entrypoint.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 105087dbe..660e51766 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -33,7 +33,6 @@ case "$1" in if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" fi - mkdir -p /data/filerdb exec /usr/bin/weed $@ $ARGS ;; From d30600e6d6b44101525d208b213a6c37bdfdba9e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 09:57:33 -0700 Subject: [PATCH 064/450] fix filer starting under "weed server" when ip is not localhost fix https://github.com/chrislusf/seaweedfs/issues/886 --- weed/command/filer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/filer.go b/weed/command/filer.go index 478b7d6bf..d12d661a8 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -99,7 +99,7 @@ func (fo *FilerOptions) startFiler() { } fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{ - Masters: strings.Split(*f.masters, ","), + Masters: strings.Split(*fo.masters, ","), Collection: *fo.collection, DefaultReplication: *fo.defaultReplicaPlacement, RedirectOnRead: *fo.redirectOnRead, From 55c85f3a66fce8ba2d8613fc3fd2b0de1ca46bde Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 15:26:09 -0700 Subject: [PATCH 065/450] adding context --- weed/filer2/filer.go | 4 ++-- weed/filer2/memdb/memdb_store_test.go | 3 ++- weed/server/filer_grpc_server.go | 2 +- weed/server/filer_server_handlers_write.go | 3 ++- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index eae1107ca..50df3fc0b 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -187,7 +187,7 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { return f.store.FindEntry(p) } -func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) { +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) { entry, err := f.FindEntry(p) if err != nil { return err @@ -211,7 +211,7 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDelet if isRecursive { for _, sub := range entries { lastFileName = sub.Name() - f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks) + f.DeleteEntryMetaAndData(ctx, sub.FullPath, isRecursive, shouldDeleteChunks) limit-- if limit <= 0 { break diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go index 31da8998f..53a5ab94d 100644 --- a/weed/filer2/memdb/memdb_store_test.go +++ b/weed/filer2/memdb/memdb_store_test.go @@ -1,6 +1,7 @@ package memdb import ( + "context" "github.com/chrislusf/seaweedfs/weed/filer2" "testing" ) @@ -134,7 +135,7 @@ func TestCreateFileAndList(t 
*testing.T) { } // delete file and count - filer.DeleteEntryMetaAndData(file3Path, false, false) + filer.DeleteEntryMetaAndData(context.Background(), file3Path, false, false) entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index d593a425e..38c9135be 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -186,7 +186,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IsDeleteData) + err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IsDeleteData) return &filer_pb.DeleteEntryResponse{}, err } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 9e231c645..737798a7e 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -1,6 +1,7 @@ package weed_server import ( + "context" "encoding/json" "errors" "io/ioutil" @@ -218,7 +219,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { isRecursive := r.FormValue("recursive") == "true" - err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), isRecursive, true) + err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, true) if err != nil { glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) writeJsonError(w, r, http.StatusInternalServerError, err) From cece860bfde443d4f8cddb04b10fb98a998995ed Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 15:55:34 -0700 Subject: [PATCH 066/450] add context to all filer APIs --- weed/command/filer_export.go | 13 +++++--- .../filer2/abstract_sql/abstract_sql_store.go | 11 ++++--- weed/filer2/cassandra/cassandra_store.go | 13 ++++---- weed/filer2/filer.go | 32 +++++++++---------- weed/filer2/filerstore.go | 11 ++++--- weed/filer2/leveldb/leveldb_store.go | 13 ++++---- weed/filer2/memdb/memdb_store.go | 13 ++++---- weed/filer2/redis/universal_redis_store.go | 15 +++++---- weed/s3api/s3api_bucket_handlers.go | 2 +- weed/server/filer_grpc_server.go | 10 +++--- weed/server/filer_server_handlers_read.go | 3 +- weed/server/filer_server_handlers_read_dir.go | 3 +- weed/server/filer_server_handlers_write.go | 8 +++-- .../filer_server_handlers_write_autochunk.go | 9 +++--- weed/server/master_server.go | 2 +- 15 files changed, 86 insertions(+), 72 deletions(-) diff --git a/weed/command/filer_export.go b/weed/command/filer_export.go index 7a2e7920a..396d0d71f 100644 --- a/weed/command/filer_export.go +++ b/weed/command/filer_export.go @@ -1,6 +1,7 @@ package command import ( + "context" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" @@ -96,6 +97,8 @@ func runFilerExport(cmd *Command, args []string) bool { return false } + ctx := context.Background() + stat := statistics{} var fn func(level int, entry *filer2.Entry) error @@ -125,23 +128,23 @@ func runFilerExport(cmd *Command, args []string) bool { if 
*dryRun { return nil } - return targetStore.InsertEntry(entry) + return targetStore.InsertEntry(ctx, entry) } } - doTraverse(&stat, sourceStore, filer2.FullPath(*dir), 0, fn) + doTraverse(ctx, &stat, sourceStore, filer2.FullPath(*dir), 0, fn) glog.Infof("processed %d directories, %d files", stat.directoryCount, stat.fileCount) return true } -func doTraverse(stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) { +func doTraverse(ctx context.Context, stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) { limit := *dirListLimit lastEntryName := "" for { - entries, err := filerStore.ListDirectoryEntries(parentPath, lastEntryName, false, limit) + entries, err := filerStore.ListDirectoryEntries(ctx, parentPath, lastEntryName, false, limit) if err != nil { break } @@ -151,7 +154,7 @@ func doTraverse(stat *statistics, filerStore filer2.FilerStore, parentPath filer } if entry.IsDirectory() { stat.directoryCount++ - doTraverse(stat, filerStore, entry.FullPath, level+1, fn) + doTraverse(ctx, stat, filerStore, entry.FullPath, level+1, fn) } else { stat.fileCount++ } diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 5f2990475..95ce9cb9f 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -1,6 +1,7 @@ package abstract_sql import ( + "context" "database/sql" "fmt" @@ -18,7 +19,7 @@ type AbstractSqlStore struct { SqlListInclusive string } -func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -38,7 +39,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -58,7 +59,7 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { return nil } -func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) { +func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir) @@ -77,7 +78,7 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr return entry, nil } -func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error { +func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { dir, name := fullpath.DirAndName() @@ -94,7 +95,7 @@ func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error { return nil } -func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { +func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { sqlText := store.SqlListExclusive if inclusive { diff --git 
a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index 2c1f03182..e14a9e023 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -1,6 +1,7 @@ package cassandra import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -39,7 +40,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er return } -func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -56,12 +57,12 @@ func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { dir, name := fullpath.DirAndName() var data []byte @@ -88,7 +89,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2. return entry, nil } -func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { +func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { dir, name := fullpath.DirAndName() @@ -101,7 +102,7 @@ func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { return nil } -func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" 
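// Note on the pattern repeated in the remaining files of this patch: every
// filer2.FilerStore method gains a leading context.Context parameter, so a
// request deadline or cancellation set in the HTTP/gRPC handlers can reach
// the backing store. A minimal, self-contained sketch of that pattern is
// below; the Store interface and names here are cut-down stand-ins for
// illustration, not the real filer2.FilerStore API.
package main

import (
	"context"
	"fmt"
	"time"
)

// Store is a cut-down stand-in for a context-aware entry store.
type Store interface {
	FindEntry(ctx context.Context, path string) (string, error)
}

// slowStore simulates a backend that takes a while to answer.
type slowStore struct{}

func (slowStore) FindEntry(ctx context.Context, path string) (string, error) {
	select {
	case <-time.After(100 * time.Millisecond): // simulated backend latency
		return "entry for " + path, nil
	case <-ctx.Done(): // honor the caller's deadline or cancellation
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	var s Store = slowStore{}
	if _, err := s.FindEntry(ctx, "/home/chris/file1.jpg"); err == context.DeadlineExceeded {
		fmt.Println("lookup aborted by request deadline")
	}
}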
diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 50df3fc0b..4220e24d3 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -57,7 +57,7 @@ func (fs *Filer) KeepConnectedToMaster() { fs.MasterClient.KeepConnectedToMaster() } -func (f *Filer) CreateEntry(entry *Entry) error { +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { if string(entry.FullPath) == "/" { return nil @@ -79,7 +79,7 @@ func (f *Filer) CreateEntry(entry *Entry) error { // not found, check the store directly if dirEntry == nil { glog.V(4).Infof("find uncached directory: %s", dirPath) - dirEntry, _ = f.FindEntry(FullPath(dirPath)) + dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) } else { glog.V(4).Infof("found cached directory: %s", dirPath) } @@ -102,9 +102,9 @@ func (f *Filer) CreateEntry(entry *Entry) error { } glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) - mkdirErr := f.store.InsertEntry(dirEntry) + mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { - if _, err := f.FindEntry(FullPath(dirPath)); err == ErrNotFound { + if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { @@ -137,14 +137,14 @@ func (f *Filer) CreateEntry(entry *Entry) error { } */ - oldEntry, _ := f.FindEntry(entry.FullPath) + oldEntry, _ := f.FindEntry(ctx, entry.FullPath) if oldEntry == nil { - if err := f.store.InsertEntry(entry); err != nil { + if err := f.store.InsertEntry(ctx, entry); err != nil { return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { - if err := f.UpdateEntry(oldEntry, entry); err != nil { + if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { return fmt.Errorf("update entry %s: %v", entry.FullPath, err) } } @@ -156,7 +156,7 @@ func (f *Filer) CreateEntry(entry *Entry) error { return nil } -func (f *Filer) UpdateEntry(oldEntry, entry *Entry) (err error) { +func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) { if oldEntry != nil { if oldEntry.IsDirectory() && !entry.IsDirectory() { return fmt.Errorf("existing %s is a directory", entry.FullPath) @@ -165,10 +165,10 @@ func (f *Filer) UpdateEntry(oldEntry, entry *Entry) (err error) { return fmt.Errorf("existing %s is a file", entry.FullPath) } } - return f.store.UpdateEntry(entry) + return f.store.UpdateEntry(ctx, entry) } -func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { +func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) { now := time.Now() @@ -184,11 +184,11 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { }, }, nil } - return f.store.FindEntry(p) + return f.store.FindEntry(ctx, p) } func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) { - entry, err := f.FindEntry(p) + entry, err := f.FindEntry(ctx, p) if err != nil { return err } @@ -201,7 +201,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecurs lastFileName := "" includeLastFile := false for limit > 0 { - entries, err := f.ListDirectoryEntries(p, lastFileName, includeLastFile, 1024) + entries, err := f.ListDirectoryEntries(ctx, p, lastFileName, includeLastFile, 1024) if err != nil { return fmt.Errorf("list folder %s: %v", p, err) } @@ -241,14 +241,14 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecurs f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) - return f.store.DeleteEntry(p) + return 
f.store.DeleteEntry(ctx, p) } -func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { +func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { if strings.HasSuffix(string(p), "/") && len(p) > 1 { p = p[0 : len(p)-1] } - return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit) + return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) } func (f *Filer) cacheDelDirectory(dirpath string) { diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index 9ef1d9d48..c10074eb2 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -1,6 +1,7 @@ package filer2 import ( + "context" "errors" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -10,12 +11,12 @@ type FilerStore interface { GetName() string // Initialize initializes the file store Initialize(configuration util.Configuration) error - InsertEntry(*Entry) error - UpdateEntry(*Entry) (err error) + InsertEntry(context.Context, *Entry) error + UpdateEntry(context.Context, *Entry) (err error) // err == filer2.ErrNotFound if not found - FindEntry(FullPath) (entry *Entry, err error) - DeleteEntry(FullPath) (err error) - ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) + FindEntry(context.Context, FullPath) (entry *Entry, err error) + DeleteEntry(context.Context, FullPath) (err error) + ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) } var ErrNotFound = errors.New("filer: no entry is found in filer store") diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 179107e2c..60de11565 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -2,6 +2,7 @@ package leveldb import ( "bytes" + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" @@ -45,7 +46,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) { return } -func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { key := genKey(entry.DirAndName()) value, err := entry.EncodeAttributesAndChunks() @@ -64,12 +65,12 @@ func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { key := genKey(fullpath.DirAndName()) data, err := store.db.Get(key, nil) @@ -94,7 +95,7 @@ func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.En return entry, nil } -func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { key := genKey(fullpath.DirAndName()) err = store.db.Delete(key, nil) @@ -105,7 +106,7 @@ func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { return nil } -func (store *LevelDBStore) 
ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go index 062f1cd1c..d4c906f2d 100644 --- a/weed/filer2/memdb/memdb_store.go +++ b/weed/filer2/memdb/memdb_store.go @@ -1,6 +1,7 @@ package memdb import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/util" @@ -33,21 +34,21 @@ func (store *MemDbStore) Initialize(configuration util.Configuration) (err error return nil } -func (store *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *MemDbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { // println("inserting", entry.FullPath) store.tree.ReplaceOrInsert(entryItem{entry}) return nil } -func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) { - if _, err = store.FindEntry(entry.FullPath); err != nil { +func (store *MemDbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + if _, err = store.FindEntry(ctx, entry.FullPath); err != nil { return fmt.Errorf("no such file %s : %v", entry.FullPath, err) } store.tree.ReplaceOrInsert(entryItem{entry}) return nil } -func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *MemDbStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}}) if item == nil { return nil, filer2.ErrNotFound @@ -56,12 +57,12 @@ func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entr return entry, nil } -func (store *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *MemDbStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}}) return nil } -func (store *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { +func (store *MemDbStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { startFrom := string(fullpath) if startFileName != "" { diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index 7fd7e1180..ec78f70e7 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -1,6 +1,7 @@ package redis import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -18,7 +19,7 @@ type UniversalRedisStore struct { Client redis.UniversalClient } -func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { value, err := entry.EncodeAttributesAndChunks() if err != nil { @@ -42,12 +43,12 @@ func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *UniversalRedisStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry 
*filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { data, err := store.Client.Get(string(fullpath)).Result() if err == redis.Nil { @@ -69,7 +70,7 @@ func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *fi return entry, nil } -func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { _, err = store.Client.Del(string(fullpath)).Result() @@ -88,7 +89,7 @@ func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err err return nil } -func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() @@ -126,7 +127,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, // fetch entry meta for _, fileName := range members { path := filer2.NewFullPath(string(fullpath), fileName) - entry, err := store.FindEntry(path) + entry, err := store.FindEntry(ctx, path) if err != nil { glog.V(0).Infof("list %s : %v", path, err) } else { diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index d9508ae9c..35aa85493 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -22,7 +22,7 @@ var ( ) type ListAllMyBucketsResult struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` Owner *s3.Owner Buckets []*s3.Bucket `xml:"Buckets>Bucket"` } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 38c9135be..4234af5f5 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -19,7 +19,7 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) + entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) if err != nil { return nil, fmt.Errorf("%s not found under %s: %v", req.Name, req.Directory, err) } @@ -45,7 +45,7 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(req.Directory), lastFileName, includeLastFile, 1024) + entries, err := fs.filer.ListDirectoryEntries(ctx, filer2.FullPath(req.Directory), lastFileName, includeLastFile, 1024) if err != nil { return nil, err } @@ -121,7 +121,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr return nil, fmt.Errorf("can not create entry with empty attributes") } - err = fs.filer.CreateEntry(&filer2.Entry{ + err 
= fs.filer.CreateEntry(ctx, &filer2.Entry{ FullPath: fullpath, Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), Chunks: chunks, @@ -136,7 +136,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)) - entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath)) + entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(fullpath)) if err != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) } @@ -175,7 +175,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr return &filer_pb.UpdateEntryResponse{}, err } - if err = fs.filer.UpdateEntry(entry, newEntry); err == nil { + if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { fs.filer.DeleteChunks(unusedChunks) fs.filer.DeleteChunks(garbages) } diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 226de640c..4d1f41fd4 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -1,6 +1,7 @@ package weed_server import ( + "context" "io" "mime" "mime/multipart" @@ -21,7 +22,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, path = path[:len(path)-1] } - entry, err := fs.filer.FindEntry(filer2.FullPath(path)) + entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path)) if err != nil { if path == "/" { fs.listDirectoryHandler(w, r) diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index bcf7f0eb5..94c894baa 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -1,6 +1,7 @@ package weed_server import ( + "context" "net/http" "strconv" "strings" @@ -27,7 +28,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName := r.FormValue("lastFileName") - entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(path), lastFileName, false, limit) + entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 737798a7e..f20212cc2 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -67,6 +67,8 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + query := r.URL.Query() replication := query.Get("replication") if replication == "" { @@ -81,7 +83,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { dataCenter = fs.option.DataCenter } - if autoChunked := fs.autoChunk(w, r, replication, collection, dataCenter); autoChunked { + if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter); autoChunked { return } @@ -164,7 +166,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { } // update metadata in filer store - existingEntry, err := fs.filer.FindEntry(filer2.FullPath(path)) + existingEntry, err := fs.filer.FindEntry(ctx, 
filer2.FullPath(path)) crTime := time.Now() if err == nil && existingEntry != nil { // glog.V(4).Infof("existing %s => %+v", path, existingEntry) @@ -194,7 +196,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { }}, } // glog.V(4).Infof("saving %s => %+v", path, entry) - if db_err := fs.filer.CreateEntry(entry); db_err != nil { + if db_err := fs.filer.CreateEntry(ctx, entry); db_err != nil { fs.filer.DeleteFileByFileId(fileId) glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err) writeJsonError(w, r, http.StatusInternalServerError, db_err) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index b9c0691c7..d1e1e7a09 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -2,6 +2,7 @@ package weed_server import ( "bytes" + "context" "io" "io/ioutil" "net/http" @@ -18,7 +19,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string) bool { +func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string) bool { if r.Method != "POST" { glog.V(4).Infoln("AutoChunking not supported for method", r.Method) return false @@ -54,7 +55,7 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica return false } - reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection, dataCenter) + reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else if reply != nil { @@ -63,7 +64,7 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica return true } -func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { +func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { @@ -166,7 +167,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte }, Chunks: fileChunks, } - if db_err := fs.filer.CreateEntry(entry); db_err != nil { + if db_err := fs.filer.CreateEntry(ctx, entry); db_err != nil { replyerr = db_err filerResult.Error = db_err.Error() glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err) diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 7572e9b0e..a77c8fa19 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -80,7 +80,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, handleStaticResources2(r) r.HandleFunc("/", ms.uiStatusHandler) r.HandleFunc("/ui/index.html", ms.uiStatusHandler) - if (!httpReadOnly) { + if !httpReadOnly { r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) r.HandleFunc("/dir/status", 
ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) From 55bab1b456c3c13a8009a11730e678ca0c48dfb0 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 17:20:24 -0700 Subject: [PATCH 067/450] add context.Context --- weed/command/filer_copy.go | 26 ++++++++-------- weed/command/filer_replication.go | 3 +- weed/filer2/memdb/memdb_store_test.go | 22 +++++++------- weed/filesys/dir.go | 24 +++++++-------- weed/filesys/dir_link.go | 2 +- weed/filesys/dir_rename.go | 2 +- weed/filesys/dirty_page.go | 2 +- weed/filesys/file.go | 4 +-- weed/filesys/filehandle.go | 6 ++-- weed/filesys/wfs.go | 6 ++-- weed/filesys/wfs_deletion.go | 6 ++-- weed/operation/grpc_client.go | 9 ++++-- weed/replication/replicator.go | 11 +++---- weed/replication/sink/azuresink/azure_sink.go | 12 +++----- weed/replication/sink/b2sink/b2_sink.go | 12 +++----- .../replication/sink/filersink/fetch_write.go | 20 ++++++------- weed/replication/sink/filersink/filer_sink.go | 23 +++++++------- weed/replication/sink/gcssink/gcs_sink.go | 12 ++++---- weed/replication/sink/replication_sink.go | 7 +++-- weed/replication/sink/s3sink/s3_sink.go | 11 +++---- weed/replication/sink/s3sink/s3_write.go | 11 +++---- weed/replication/source/filer_source.go | 14 ++++----- weed/s3api/filer_multipart.go | 27 +++++++++-------- weed/s3api/filer_util.go | 30 ++++++++----------- weed/s3api/s3api_bucket_handlers.go | 17 ++++++----- weed/s3api/s3api_handlers.go | 5 ++-- weed/s3api/s3api_object_multipart_handlers.go | 15 ++++++---- weed/s3api/s3api_objects_list_handlers.go | 14 +++++---- weed/server/volume_grpc_client_to_master.go | 8 ++--- weed/util/grpc_client_server.go | 9 +++--- weed/wdclient/masterclient.go | 6 ++-- 31 files changed, 191 insertions(+), 185 deletions(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 650757442..a852ca773 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -106,14 +106,14 @@ func runCopy(cmd *Command, args []string) bool { copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") for _, fileOrDir := range fileOrDirs { - if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) { + if !doEachCopy(context.Background(), fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) { return false } } return true } -func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, path string) bool { +func doEachCopy(ctx context.Context, fileOrDir string, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, path string) bool { f, err := os.Open(fileOrDir) if err != nil { fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err) @@ -131,7 +131,7 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDia if mode.IsDir() { files, _ := ioutil.ReadDir(fileOrDir) for _, subFileOrDir := range files { - if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, grpcDialOption, path+fi.Name()+"/") { + if !doEachCopy(ctx, fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, grpcDialOption, path+fi.Name()+"/") { return false } } @@ -153,13 +153,13 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDia } if chunkCount == 1 { - return uploadFileAsOne(filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi) + return uploadFileAsOne(ctx, filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi) } - return uploadFileInChunks(filerAddress, filerGrpcAddress, 
grpcDialOption, path, f, fi, chunkCount, chunkSize) + return uploadFileInChunks(ctx, filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi, chunkCount, chunkSize) } -func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo) bool { +func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo) bool { // upload the file content fileName := filepath.Base(f.Name()) @@ -204,7 +204,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc. fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) } - if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: urlFolder, Entry: &filer_pb.Entry{ @@ -225,7 +225,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc. }, } - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -237,7 +237,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc. return true } -func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { +func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) @@ -281,7 +281,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption gr fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) } - if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: urlFolder, Entry: &filer_pb.Entry{ @@ -302,7 +302,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption gr }, } - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -332,9 +332,9 @@ func detectMimeType(f *os.File) string { return mimeType } -func withFilerClient(filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(filerAddress, grpcDialOption) + grpcConnection, err := util.GrpcDial(ctx, filerAddress, grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", filerAddress, err) } diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index c9afbdc8a..82576afe6 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -1,6 +1,7 @@ package 
command import ( + "context" "strings" "github.com/chrislusf/seaweedfs/weed/glog" @@ -116,7 +117,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { } else { glog.V(1).Infof("modify: %s", key) } - if err = replicator.Replicate(key, m); err != nil { + if err = replicator.Replicate(context.Background(), key, m); err != nil { glog.Errorf("replicate %s: %+v", key, err) } else { glog.V(1).Infof("replicated %s", key) diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go index 53a5ab94d..c5cc32e68 100644 --- a/weed/filer2/memdb/memdb_store_test.go +++ b/weed/filer2/memdb/memdb_store_test.go @@ -50,6 +50,8 @@ func TestCreateFileAndList(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() + ctx := context.Background() + entry1 := &filer2.Entry{ FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"), Attr: filer2.Attr{ @@ -68,11 +70,11 @@ func TestCreateFileAndList(t *testing.T) { }, } - filer.CreateEntry(entry1) - filer.CreateEntry(entry2) + filer.CreateEntry(ctx, entry1) + filer.CreateEntry(ctx, entry2) // checking the 2 files - entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) @@ -95,21 +97,21 @@ func TestCreateFileAndList(t *testing.T) { } // checking the offset - entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) + entries, err = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking root directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -125,18 +127,18 @@ func TestCreateFileAndList(t *testing.T) { Gid: 5678, }, } - filer.CreateEntry(entry3) + filer.CreateEntry(ctx, entry3) // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) if len(entries) != 2 { t.Errorf("list entries count: %v", len(entries)) return } // delete file and count - filer.DeleteEntryMetaAndData(context.Background(), file3Path, false, false) - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) + filer.DeleteEntryMetaAndData(ctx, file3Path, false, false) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 6d4917cb4..761fdc422 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -29,7 +29,7 @@ var _ = fs.NodeRemover(&Dir{}) var _ = fs.NodeRenamer(&Dir{}) var _ = fs.NodeSetattrer(&Dir{}) -func (dir *Dir) Attr(context 
context.Context, attr *fuse.Attr) error { +func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second @@ -56,7 +56,7 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error { parent, name := filepath.Split(dir.Path) - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: parent, @@ -64,7 +64,7 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error { } glog.V(1).Infof("read dir %s attr: %v", dir.Path, request) - resp, err := client.LookupDirectoryEntry(context, request) + resp, err := client.LookupDirectoryEntry(ctx, request) if err != nil { if err == filer2.ErrNotFound { return nil @@ -132,7 +132,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, glog.V(1).Infof("create: %v", request) if request.Entry.IsDirectory { - if err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { if _, err := client.CreateEntry(ctx, request); err != nil { glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) return fuse.EIO @@ -155,7 +155,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: dir.Path, @@ -199,7 +199,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. } if entry == nil { - err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir.Path, @@ -243,7 +243,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { paginationLimit := 1024 remaining := dir.wfs.option.DirListingLimit @@ -306,7 +306,7 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { var entry *filer_pb.Entry - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir.Path, @@ -329,9 +329,9 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro return err } - dir.wfs.deleteFileChunks(entry.Chunks) + dir.wfs.deleteFileChunks(ctx, entry.Chunks) - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -355,7 +355,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -401,7 +401,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus } parentDir, name := filer2.FullPath(dir.Path).DirAndName() - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 3b3735369..4f631bc88 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -35,7 +35,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, }, } - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { if _, err := client.CreateEntry(ctx, request); err != nil { glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) return fuse.EIO diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index e18f67edc..8c586eb73 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -15,7 +15,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDir := newDirectory.(*Dir) - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { // find existing entry request := &filer_pb.LookupDirectoryEntryRequest{ diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 69f652ead..0044cfd87 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -167,7 +167,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte var fileId, host string var auth security.EncodedJwt - if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := 
pages.f.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 812137fe2..eb4b03f64 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -109,7 +109,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f return nil } - return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return file.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: file.dir.Path, @@ -144,7 +144,7 @@ func (file *File) maybeLoadAttributes(ctx context.Context) error { file.setEntry(entry) // glog.V(1).Infof("file attr read cached %v attributes", file.Name) } else { - err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := file.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: file.Name, diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 3bca0e22e..2c2e041e7 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -73,7 +73,7 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus vid2Locations := make(map[string]*filer_pb.Locations) - err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := fh.f.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read fh lookup volume id locations: %v", vids) resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ @@ -197,7 +197,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { return nil } - return fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return fh.f.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType @@ -221,7 +221,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) fh.f.entry.Chunks = chunks // fh.f.entryViewCache = nil - fh.f.wfs.deleteFileChunks(garbages) + fh.f.wfs.deleteFileChunks(ctx, garbages) if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("update fh: %v", err) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index f7383582d..f8be24e5e 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -73,9 +73,9 @@ func (wfs *WFS) Root() (fs.Node, error) { return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil } -func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (wfs *WFS) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) @@ -133,7 +133,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index 90058d75a..16f8af594 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -5,7 +5,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { +func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { if len(chunks) == 0 { return } @@ -15,8 +15,8 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { fileIds = append(fileIds, chunk.FileId) } - wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(context.Background(), wfs.option.GrpcDialOption, client, fileIds) + wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) return nil }) } diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index c842ed09f..eb97f5ce1 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -1,6 +1,7 @@ package operation import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" @@ -13,12 +14,14 @@ import ( func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { + ctx := context.Background() + grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { return err } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) }, grpcAddress, grpcDialOption) @@ -37,12 +40,14 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err func withMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { + ctx := context.Background() + masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v", masterServer) } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) }, masterGrpcAddress, grpcDialOption) diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 48a81a093..20c1d08b5 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -1,6 +1,7 @@ package replication import ( + "context" "path/filepath" "strings" @@ -29,7 +30,7 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin } } -func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) error { +func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error { if !strings.HasPrefix(key, r.source.Dir) { glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) return nil @@ -39,23 +40,23 @@ func (r *Replicator) Replicate(key string, message 
*filer_pb.EventNotification) key = newKey if message.OldEntry != nil && message.NewEntry == nil { glog.V(4).Infof("deleting %v", key) - return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks) + return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks) } if message.OldEntry == nil && message.NewEntry != nil { glog.V(4).Infof("creating %v", key) - return r.sink.CreateEntry(key, message.NewEntry) + return r.sink.CreateEntry(ctx, key, message.NewEntry) } if message.OldEntry == nil && message.NewEntry == nil { glog.V(0).Infof("weird message %+v", message) return nil } - foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewEntry, message.DeleteChunks) + foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewEntry, message.DeleteChunks) if foundExisting { glog.V(4).Infof("updated %v", key) return err } glog.V(4).Infof("creating missing %v", key) - return r.sink.CreateEntry(key, message.NewEntry) + return r.sink.CreateEntry(ctx, key, message.NewEntry) } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 7acf37fa5..760fbdbb5 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e return nil } -func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -78,8 +78,6 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo key = key + "/" } - ctx := context.Background() - if _, err := g.containerURL.NewBlobURL(key).Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) @@ -89,7 +87,7 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo } -func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -100,8 +98,6 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - ctx := context.Background() - // Create a URL that references a to-be-created blob in your // Azure Storage account's container. 
appendBlobURL := g.containerURL.NewAppendBlobURL(key) @@ -113,7 +109,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return err } @@ -136,7 +132,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *AzureSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 17f5e39b2..c80bfcc49 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -58,7 +58,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { return nil } -func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -66,8 +66,6 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) key = key + "/" } - ctx := context.Background() - bucket, err := g.client.Bucket(ctx, g.bucket) if err != nil { return err @@ -79,7 +77,7 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) } -func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -90,8 +88,6 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - ctx := context.Background() - bucket, err := g.client.Bucket(ctx, g.bucket) if err != nil { return err @@ -102,7 +98,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return err } @@ -128,7 +124,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *B2Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index f1306ca4c..0f3473ff2 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -13,7 +13,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { +func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { if len(sourceChunks) == 0 { return } @@ -22,7 +22,7 @@ func (fs *FilerSink) 
replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic wg.Add(1) go func(chunk *filer_pb.FileChunk) { defer wg.Done() - replicatedChunk, e := fs.replicateOneChunk(chunk) + replicatedChunk, e := fs.replicateOneChunk(ctx, chunk) if e != nil { err = e } @@ -34,9 +34,9 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic return } -func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { +func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { - fileId, err := fs.fetchAndWrite(sourceChunk) + fileId, err := fs.fetchAndWrite(ctx, sourceChunk) if err != nil { return nil, fmt.Errorf("copy %s: %v", sourceChunk.FileId, err) } @@ -51,9 +51,9 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_ }, nil } -func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId string, err error) { +func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) { - filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.FileId) + filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.FileId) if err != nil { return "", fmt.Errorf("read part %s: %v", sourceChunk.FileId, err) } @@ -62,7 +62,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri var host string var auth security.EncodedJwt - if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -72,7 +72,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri DataCenter: fs.dataCenter, } - resp, err := client.AssignVolume(context.Background(), request) + resp, err := client.AssignVolume(ctx, request) if err != nil { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err @@ -103,9 +103,9 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri return } -func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(fs.grpcAddress, fs.grpcDialOption) + grpcConnection, err := util.GrpcDial(ctx, fs.grpcAddress, fs.grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 2eb326b83..777c28620 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -63,8 +63,8 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, return nil } -func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { + return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -75,7 +75,7 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo } glog.V(1).Infof("delete entry: %v", request) - _, err := client.DeleteEntry(context.Background(), request) + 
_, err := client.DeleteEntry(ctx, request) if err != nil { glog.V(0).Infof("delete entry %s: %v", key, err) return fmt.Errorf("delete entry %s: %v", key, err) @@ -85,12 +85,11 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo }) } -func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() - ctx := context.Background() // look up existing entry lookupRequest := &filer_pb.LookupDirectoryEntryRequest{ @@ -105,7 +104,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { } } - replicatedChunks, err := fs.replicateChunks(entry.Chunks) + replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks) if err != nil { glog.V(0).Infof("replicate entry chunks %s: %v", key, err) @@ -134,15 +133,13 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { }) } -func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { - - ctx := context.Background() +func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { dir, name := filer2.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -186,7 +183,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(newChunks) + replicatedChunks, err := fs.replicateChunks(ctx, newChunks) if err != nil { return true, fmt.Errorf("replicte %s chunks error: %v", key, err) } @@ -194,7 +191,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, } // save updated meta data - return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: dir, diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index c1beefc33..6b710a12a 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str return nil } -func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { if isDirectory { key = key + "/" } - if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil { + if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil { return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err) } @@ -83,7 +83,7 @@ func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) } -func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (g *GcsSink) 
CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { if entry.IsDirectory { return nil @@ -92,13 +92,11 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - ctx := context.Background() - wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return err } @@ -121,7 +119,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *GcsSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index 0a86139d3..984aebc58 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -1,6 +1,7 @@ package sink import ( + "context" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" @@ -9,9 +10,9 @@ import ( type ReplicationSink interface { GetName() string Initialize(configuration util.Configuration) error - DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error - CreateEntry(key string, entry *filer_pb.Entry) error - UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error + CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error + UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) } diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 0a4e78318..a5b52095c 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -1,6 +1,7 @@ package S3Sink import ( + "context" "fmt" "strings" "sync" @@ -76,7 +77,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, buc return nil } -func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -88,7 +89,7 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b } -func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -111,7 +112,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { wg.Add(1) go func(chunk *filer2.ChunkView) { defer wg.Done() - if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { + if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr } else { parts = 
append(parts, part) @@ -125,11 +126,11 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { return err } - return s3sink.completeMultipartUpload(key, uploadId, parts) + return s3sink.completeMultipartUpload(ctx, key, uploadId, parts) } -func (s3sink *S3Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 5c4be7aee..0a190b27d 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -2,6 +2,7 @@ package S3Sink import ( "bytes" + "context" "fmt" "io" @@ -81,7 +82,7 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error { } // To complete multipart upload -func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.CompletedPart) error { +func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId string, parts []*s3.CompletedPart) error { input := &s3.CompleteMultipartUploadInput{ Bucket: aws.String(s3sink.bucket), Key: aws.String(key), @@ -102,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3. } // To upload a part -func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { +func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { var readSeeker io.ReadSeeker - readSeeker, err := s3sink.buildReadSeeker(chunk) + readSeeker, err := s3sink.buildReadSeeker(ctx, chunk) if err != nil { glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) @@ -155,8 +156,8 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou return err } -func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) { - fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId) +func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) { + fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return nil, err } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index 92c2d203d..3ab6c7261 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -39,16 +39,16 @@ func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { return nil } -func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) { +func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) { vid2Locations := make(map[string]*filer_pb.Locations) vid := volumeId(part) - err = fs.withFilerClient(fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) - resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + resp, err := 
client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -77,9 +77,9 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) { return } -func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { +func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { - fileUrl, err := fs.LookupFileId(part) + fileUrl, err := fs.LookupFileId(ctx, part) if err != nil { return "", nil, nil, err } @@ -89,9 +89,9 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(fs.grpcAddress, grpcDialOption) + grpcConnection, err := util.GrpcDial(ctx, fs.grpcAddress, grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) } diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index d39e821d0..e6af085fd 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,6 +1,7 @@ package s3api import ( + "context" "encoding/xml" "fmt" "path/filepath" @@ -21,11 +22,11 @@ type InitiateMultipartUploadResult struct { s3.CreateMultipartUploadOutput } -func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { uploadId, _ := uuid.NewV4() uploadIdString := uploadId.String() - if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { + if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { if entry.Extended == nil { entry.Extended = make(map[string][]byte) } @@ -51,11 +52,11 @@ type CompleteMultipartUploadResult struct { s3.CompleteMultipartUploadOutput } -func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId - entries, err := s3a.list(uploadDirectory, "", "", false, 0) + entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0) if err != nil { glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload @@ -90,7 +91,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa } dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName) - err = s3a.mkFile(dirName, entryName, finalParts) + err = s3a.mkFile(ctx, dirName, entryName, finalParts) if err != nil { glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) @@ -105,22 +106,22 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa }, } - if err = 
s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { + if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) } return } -func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { +func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { - exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) + exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) if err != nil { glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload } if exists { - err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) @@ -135,7 +136,7 @@ type ListMultipartUploadsResult struct { s3.ListMultipartUploadsOutput } -func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { +func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { output = &ListMultipartUploadsResult{ ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ @@ -148,7 +149,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput }, } - entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return @@ -172,7 +173,7 @@ type ListPartsResult struct { s3.ListPartsOutput } -func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { +func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { output = &ListPartsResult{ ListPartsOutput: s3.ListPartsOutput{ Bucket: input.Bucket, @@ -183,7 +184,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP }, } - entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, + entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 40c5a3e26..4f2c1578b 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -10,8 +10,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, 
dirName string, fn func(entry *filer_pb.Entry)) error { + return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: dirName, @@ -35,7 +35,7 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } @@ -43,8 +43,8 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun }) } -func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { + return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: fileName, @@ -65,7 +65,7 @@ func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chun } glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } @@ -73,9 +73,9 @@ func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chun }) } -func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { +func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: parentDirectoryPath, @@ -86,7 +86,7 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl } glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(context.Background(), request) + resp, err := client.ListEntries(ctx, request) if err != nil { return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) } @@ -100,11 +100,9 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl } -func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { +func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() + return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: parentDirectoryPath, @@ -123,11 +121,9 @@ func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirec } -func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { +func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.withFilerClient(func(client 
filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() + err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 35aa85493..492d94616 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -31,7 +31,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques var response ListAllMyBucketsResult - entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32) + entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -65,7 +65,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) bucket := vars["bucket"] // create the folder for bucket, but lazily create actual collection - if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil { + if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil { writeErrorResponse(w, ErrInternalError, r.URL) return } @@ -78,9 +78,8 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() + ctx := context.Background() + err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -95,7 +94,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque return nil }) - err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -110,7 +109,9 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx := context.Background() + + err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: s3a.option.BucketsPath, @@ -118,7 +119,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request } glog.V(1).Infof("lookup bucket: %v", request) - if _, err := client.LookupDirectoryEntry(context.Background(), request); err != nil { + if _, err := client.LookupDirectoryEntry(ctx, request); err != nil { return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 5d92085cc..5a63648ca 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -2,6 +2,7 @@ package s3api import ( "bytes" + "context" "encoding/base64" "encoding/xml" "fmt" @@ -35,9 +36,9 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) + grpcConnection, err := util.GrpcDial(ctx, 
s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err) } diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 267d126c5..6643cb105 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -1,6 +1,7 @@ package s3api import ( + "context" "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -25,7 +26,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http bucket = vars["bucket"] object = vars["object"] - response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{ + response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{ Bucket: aws.String(bucket), Key: aws.String(object), }) @@ -50,7 +51,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ + response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{ Bucket: aws.String(bucket), Key: aws.String(object), UploadId: aws.String(uploadID), @@ -76,7 +77,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ + response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ Bucket: aws.String(bucket), Key: aws.String(object), UploadId: aws.String(uploadID), @@ -111,7 +112,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht } } - response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ + response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{ Bucket: aws.String(bucket), Delimiter: aws.String(delimiter), EncodingType: aws.String(encodingType), @@ -148,7 +149,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re return } - response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ + response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{ Bucket: aws.String(bucket), Key: aws.String(object), MaxParts: aws.Int64(int64(maxParts)), @@ -174,8 +175,10 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ rAuthType := getRequestAuthType(r) + ctx := context.Background() + uploadID := r.URL.Query().Get("uploadId") - exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true) + exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true) if !exists { writeErrorResponse(w, ErrNoSuchUpload, r.URL) return diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index 927416e0f..a685802d2 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -44,7 +44,9 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ marker = startAfter } - response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) + ctx := context.Background() + + response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) if err != nil { writeErrorResponse(w, 
ErrInternalError, r.URL) @@ -62,6 +64,8 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ vars := mux.Vars(r) bucket := vars["bucket"] + ctx := context.Background() + originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query()) if maxKeys < 0 { @@ -73,7 +77,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ return } - response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -83,13 +87,13 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { +func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name dir, prefix := filepath.Split(originalPrefix) // check filer - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), @@ -99,7 +103,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys InclusiveStartFrom: false, } - resp, err := client.ListEntries(context.Background(), request) + resp, err := client.ListEntries(ctx, request) if err != nil { return fmt.Errorf("list buckets: %v", err) } diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 38603e4b6..f6ed8ee23 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -36,7 +36,7 @@ func (vs *VolumeServer) heartbeat() { glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress) continue } - newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) + newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) @@ -45,16 +45,16 @@ func (vs *VolumeServer) heartbeat() { } } -func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { +func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { - grpcConection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption) + grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } defer grpcConection.Close() client := master_pb.NewSeaweedClient(grpcConection) - stream, err := client.SendHeartbeat(context.Background()) + stream, err := client.SendHeartbeat(ctx) if err != nil { glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err) return "", err diff --git a/weed/util/grpc_client_server.go 
b/weed/util/grpc_client_server.go index b989a35d1..361d245b8 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -1,6 +1,7 @@ package util import ( + "context" "fmt" "strconv" "strings" @@ -33,7 +34,7 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { return grpc.NewServer(options...) } -func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { +func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { // opts = append(opts, grpc.WithBlock()) // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) var options []grpc.DialOption @@ -48,10 +49,10 @@ func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) options = append(options, opt) } } - return grpc.Dial(address, options...) + return grpc.DialContext(ctx, address, options...) } -func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { +func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { grpcClientsLock.Lock() @@ -61,7 +62,7 @@ func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts return fn(existingConnection) } - grpcConnection, err := GrpcDial(address, opts...) + grpcConnection, err := GrpcDial(ctx, address, opts...) if err != nil { grpcClientsLock.Unlock() return fmt.Errorf("fail to dial %s: %v", address, err) diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 3600fe7c7..34069b3c3 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -53,7 +53,7 @@ func (mc *MasterClient) KeepConnectedToMaster() { func (mc *MasterClient) tryAllMasters() { for _, master := range mc.masters { glog.V(0).Infof("Connecting to master %v", master) - gprcErr := withMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { + gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { stream, err := client.KeepConnected(context.Background()) if err != nil { @@ -99,14 +99,14 @@ func (mc *MasterClient) tryAllMasters() { } } -func withMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { +func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v", master) } - grpcConnection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption) + grpcConnection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption) if err != nil { return fmt.Errorf("fail to dial %s: %v", master, err) } From 42cb05c088cea142d1e0019c7a282915df66ecaa Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 15 Mar 2019 17:23:02 -0700 Subject: [PATCH 068/450] add context.Context --- weed/wdclient/masterclient.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 34069b3c3..1686ad5ff 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -53,9 +53,9 @@ func (mc *MasterClient) KeepConnectedToMaster() { func (mc *MasterClient) tryAllMasters() { for _, master := range mc.masters { glog.V(0).Infof("Connecting to master %v", master) - gprcErr := 
withMasterClient(context.Background(), master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { + gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { - stream, err := client.KeepConnected(context.Background()) + stream, err := client.KeepConnected(ctx) if err != nil { glog.V(0).Infof("failed to keep connected to %s: %v", master, err) return err @@ -99,7 +99,7 @@ func (mc *MasterClient) tryAllMasters() { } } -func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { +func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(ctx context.Context, client master_pb.SeaweedClient) error) error { masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0) if parseErr != nil { @@ -114,5 +114,5 @@ func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.Di client := master_pb.NewSeaweedClient(grpcConnection) - return fn(client) + return fn(ctx, client) } From 36b632ebcbf1f1e5fbacddbc4ea0b8c94f6acc57 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 16 Mar 2019 09:50:21 -0700 Subject: [PATCH 069/450] fix tests --- weed/filer2/leveldb/leveldb_store_test.go | 13 ++++++++----- weed/filer2/memdb/memdb_store_test.go | 6 ++++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 4d600e0bf..3e60bb28c 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -1,6 +1,7 @@ package leveldb import ( + "context" "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" @@ -18,6 +19,8 @@ func TestCreateAndFind(t *testing.T) { fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + ctx := context.Background() + entry1 := &filer2.Entry{ FullPath: fullpath, Attr: filer2.Attr{ @@ -27,12 +30,12 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(fullpath) + entry, err := filer.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -45,14 +48,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -70,7 +73,7 @@ func TestEmptyRoot(t *testing.T) { filer.DisableDirectoryCache() // checking one upper directory - entries, err := filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go index c5cc32e68..d823c5177 100644 --- a/weed/filer2/memdb/memdb_store_test.go 
+++ b/weed/filer2/memdb/memdb_store_test.go @@ -13,6 +13,8 @@ func TestCreateAndFind(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() + ctx := context.Background() + fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") entry1 := &filer2.Entry{ @@ -24,12 +26,12 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(fullpath) + entry, err := filer.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) From b92122b885c8fba189f3c503c17478008806fda7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 16 Mar 2019 09:50:57 -0700 Subject: [PATCH 070/450] fix test --- weed/filer2/leveldb/leveldb_store_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 3e60bb28c..904de8c97 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -72,6 +72,8 @@ func TestEmptyRoot(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() + ctx := context.Background() + // checking one upper directory entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if err != nil { From 657dd2e6c93c02f46b10dfd43fb6e9b38c025ece Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 16 Mar 2019 13:43:16 -0700 Subject: [PATCH 071/450] add shell command to list all collections --- weed/command/shell.go | 57 +--- weed/pb/master.proto | 27 ++ weed/pb/master_pb/master.pb.go | 313 +++++++++++++++---- weed/server/filer_grpc_server.go | 5 +- weed/server/master_grpc_server_collection.go | 56 ++++ weed/shell/command_collection_list.go | 39 +++ weed/shell/commands.go | 27 ++ weed/shell/shell_liner.go | 143 +++++++++ weed/topology/topology.go | 7 + weed/wdclient/masterclient.go | 6 +- weed/wdclient/masterclient_collection.go | 23 ++ 11 files changed, 589 insertions(+), 114 deletions(-) create mode 100644 weed/server/master_grpc_server_collection.go create mode 100644 weed/shell/command_collection_list.go create mode 100644 weed/shell/commands.go create mode 100644 weed/shell/shell_liner.go create mode 100644 weed/wdclient/masterclient_collection.go diff --git a/weed/command/shell.go b/weed/command/shell.go index 19c5049c5..1c3ce5f10 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -1,21 +1,25 @@ package command import ( - "bufio" - "fmt" - "os" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/shell" + "github.com/spf13/viper" +) - "github.com/chrislusf/seaweedfs/weed/glog" +var ( + shellOptions shell.ShellOptions ) func init() { cmdShell.Run = runShell // break init cycle + shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers") } var cmdShell = &Command{ UsageLine: "shell", - Short: "run interactive commands, now just echo", - Long: `run interactive commands. + Short: "run interactive administrative commands", + Long: `run interactive administrative commands. 
`, } @@ -23,39 +27,12 @@ var cmdShell = &Command{ var () func runShell(command *Command, args []string) bool { - r := bufio.NewReader(os.Stdin) - o := bufio.NewWriter(os.Stdout) - e := bufio.NewWriter(os.Stderr) - prompt := func() { - var err error - if _, err = o.WriteString("> "); err != nil { - glog.V(0).Infoln("error writing to stdout:", err) - } - if err = o.Flush(); err != nil { - glog.V(0).Infoln("error flushing stdout:", err) - } - } - readLine := func() string { - ret, err := r.ReadString('\n') - if err != nil { - fmt.Fprint(e, err) - os.Exit(1) - } - return ret - } - execCmd := func(cmd string) int { - if cmd != "" { - if _, err := o.WriteString(cmd); err != nil { - glog.V(0).Infoln("error writing to stdout:", err) - } - } - return 0 - } - cmd := "" - for { - prompt() - cmd = readLine() - execCmd(cmd) - } + weed_server.LoadConfiguration("security", false) + shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + + shell.RunShell(shellOptions) + + return true + } diff --git a/weed/pb/master.proto b/weed/pb/master.proto index f03d1e3de..d4b02cdc5 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -15,6 +15,10 @@ service Seaweed { } rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } + rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { + } + rpc CollectionDelete (CollectionDeleteRequest) returns (CollectionDeleteResponse) { + } } ////////////////////////////////////////////////// @@ -124,3 +128,26 @@ message StatisticsResponse { uint64 used_size = 5; uint64 file_count = 6; } + +// +// collection related +// + +message StorageType { + string replication = 1; + string ttl = 2; +} +message Collection { + string name = 1; +} +message CollectionListRequest { +} +message CollectionListResponse { + repeated Collection collections = 1; +} + +message CollectionDeleteRequest { + string name = 1; +} +message CollectionDeleteResponse { +} diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 0c73ff2c8..a046a887b 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -23,6 +23,11 @@ It has these top-level messages: AssignResponse StatisticsRequest StatisticsResponse + Collection + CollectionListRequest + CollectionListResponse + CollectionDeleteRequest + CollectionDeleteResponse */ package master_pb @@ -675,6 +680,102 @@ func (m *StatisticsResponse) GetFileCount() uint64 { return 0 } +type Collection struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` +} + +func (m *Collection) Reset() { *m = Collection{} } +func (m *Collection) String() string { return proto.CompactTextString(m) } +func (*Collection) ProtoMessage() {} +func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *Collection) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Collection) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *Collection) GetTtl() string { + if m != nil { + return m.Ttl + } + return "" +} + +type CollectionListRequest struct { + Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,2,opt,name=ttl" json:"ttl,omitempty"` +} + +func (m *CollectionListRequest) Reset() { *m = CollectionListRequest{} } +func 
(m *CollectionListRequest) String() string { return proto.CompactTextString(m) } +func (*CollectionListRequest) ProtoMessage() {} +func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *CollectionListRequest) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *CollectionListRequest) GetTtl() string { + if m != nil { + return m.Ttl + } + return "" +} + +type CollectionListResponse struct { + Collections []*Collection `protobuf:"bytes,1,rep,name=collections" json:"collections,omitempty"` +} + +func (m *CollectionListResponse) Reset() { *m = CollectionListResponse{} } +func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) } +func (*CollectionListResponse) ProtoMessage() {} +func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *CollectionListResponse) GetCollections() []*Collection { + if m != nil { + return m.Collections + } + return nil +} + +type CollectionDeleteRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *CollectionDeleteRequest) Reset() { *m = CollectionDeleteRequest{} } +func (m *CollectionDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*CollectionDeleteRequest) ProtoMessage() {} +func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *CollectionDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type CollectionDeleteResponse struct { +} + +func (m *CollectionDeleteResponse) Reset() { *m = CollectionDeleteResponse{} } +func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*CollectionDeleteResponse) ProtoMessage() {} +func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + func init() { proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse") @@ -692,6 +793,11 @@ func init() { proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse") proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest") proto.RegisterType((*StatisticsResponse)(nil), "master_pb.StatisticsResponse") + proto.RegisterType((*Collection)(nil), "master_pb.Collection") + proto.RegisterType((*CollectionListRequest)(nil), "master_pb.CollectionListRequest") + proto.RegisterType((*CollectionListResponse)(nil), "master_pb.CollectionListResponse") + proto.RegisterType((*CollectionDeleteRequest)(nil), "master_pb.CollectionDeleteRequest") + proto.RegisterType((*CollectionDeleteResponse)(nil), "master_pb.CollectionDeleteResponse") } // Reference imports to suppress errors if they are not otherwise used. 
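For context, a minimal sketch (not part of the commit above) of how a caller could exercise the new CollectionList RPC through the regenerated SeaweedClient, using the context-aware util.GrpcDial introduced earlier in this series. The master gRPC address "localhost:19333" and the insecure dial option are assumed placeholder values, not taken from the patch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()

	// Dial the master's gRPC port; address and credentials here are placeholders.
	conn, err := util.GrpcDial(ctx, "localhost:19333", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial master: %v", err)
	}
	defer conn.Close()

	// Call the new CollectionList RPC and print each collection name.
	client := master_pb.NewSeaweedClient(conn)
	resp, err := client.CollectionList(ctx, &master_pb.CollectionListRequest{})
	if err != nil {
		log.Fatalf("collection list: %v", err)
	}
	for _, c := range resp.Collections {
		fmt.Println(c.GetName())
	}
}

The shell's collection.list command added later in this same patch reaches the same RPC indirectly through wdclient.MasterClient rather than dialing the master itself.
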
@@ -710,6 +816,8 @@ type SeaweedClient interface { LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) + CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) + CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) } type seaweedClient struct { @@ -809,6 +917,24 @@ func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, o return out, nil } +func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { + out := new(CollectionListResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) { + out := new(CollectionDeleteResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for Seaweed service type SeaweedServer interface { @@ -817,6 +943,8 @@ type SeaweedServer interface { LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) Assign(context.Context, *AssignRequest) (*AssignResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) + CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) + CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -929,6 +1057,42 @@ func _Seaweed_Statistics_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Seaweed_CollectionList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).CollectionList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/CollectionList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).CollectionList(ctx, req.(*CollectionListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_CollectionDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).CollectionDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/CollectionDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).CollectionDelete(ctx, req.(*CollectionDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = 
grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -945,6 +1109,14 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ MethodName: "Statistics", Handler: _Seaweed_Statistics_Handler, }, + { + MethodName: "CollectionList", + Handler: _Seaweed_CollectionList_Handler, + }, + { + MethodName: "CollectionDelete", + Handler: _Seaweed_CollectionDelete_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -966,71 +1138,78 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1056 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0xe4, 0x44, - 0x10, 0x8e, 0x3d, 0xbf, 0xae, 0xc9, 0x64, 0x27, 0x9d, 0x08, 0x79, 0x67, 0xd9, 0xdd, 0xc1, 0x5c, - 0x06, 0x81, 0xa2, 0x25, 0x1c, 0x11, 0x42, 0x6c, 0x14, 0x44, 0x94, 0xc0, 0x06, 0x87, 0xdd, 0x03, - 0x17, 0xd3, 0xb1, 0x2b, 0xa1, 0x15, 0xff, 0xe1, 0x6e, 0x27, 0x33, 0x7b, 0xe1, 0xc8, 0x03, 0xf0, - 0x3e, 0x5c, 0xe0, 0xc6, 0xa3, 0x70, 0xe3, 0x09, 0x50, 0xff, 0xd8, 0xf1, 0x38, 0x19, 0x22, 0x21, - 0x71, 0xeb, 0xfe, 0xba, 0xba, 0xab, 0xfa, 0xfb, 0xaa, 0xaa, 0x1b, 0x36, 0x13, 0xca, 0x05, 0x16, - 0x7b, 0x79, 0x91, 0x89, 0x8c, 0x38, 0x7a, 0x16, 0xe4, 0xe7, 0xde, 0x5f, 0x36, 0x38, 0x5f, 0x21, - 0x2d, 0xc4, 0x39, 0x52, 0x41, 0xb6, 0xc0, 0x66, 0xb9, 0x6b, 0xcd, 0xac, 0xb9, 0xe3, 0xdb, 0x2c, - 0x27, 0x04, 0xba, 0x79, 0x56, 0x08, 0xd7, 0x9e, 0x59, 0xf3, 0xb1, 0xaf, 0xc6, 0xe4, 0x29, 0x40, - 0x5e, 0x9e, 0xc7, 0x2c, 0x0c, 0xca, 0x22, 0x76, 0x3b, 0xca, 0xd6, 0xd1, 0xc8, 0xeb, 0x22, 0x26, - 0x73, 0x98, 0x24, 0x74, 0x11, 0x5c, 0x67, 0x71, 0x99, 0x60, 0x10, 0x66, 0x65, 0x2a, 0xdc, 0xae, - 0xda, 0xbe, 0x95, 0xd0, 0xc5, 0x1b, 0x05, 0x1f, 0x48, 0x94, 0xcc, 0x64, 0x54, 0x8b, 0xe0, 0x82, - 0xc5, 0x18, 0x5c, 0xe1, 0xd2, 0xed, 0xcd, 0xac, 0x79, 0xd7, 0x87, 0x84, 0x2e, 0xbe, 0x64, 0x31, - 0x1e, 0xe3, 0x92, 0x3c, 0x87, 0x51, 0x44, 0x05, 0x0d, 0x42, 0x4c, 0x05, 0x16, 0x6e, 0x5f, 0xf9, - 0x02, 0x09, 0x1d, 0x28, 0x44, 0xc6, 0x57, 0xd0, 0xf0, 0xca, 0x1d, 0xa8, 0x15, 0x35, 0x96, 0xf1, - 0xd1, 0x28, 0x61, 0x69, 0xa0, 0x22, 0x1f, 0x2a, 0xd7, 0x8e, 0x42, 0x4e, 0x65, 0xf8, 0x9f, 0xc1, - 0x40, 0xc7, 0xc6, 0x5d, 0x67, 0xd6, 0x99, 0x8f, 0xf6, 0xdf, 0xdf, 0xab, 0xd9, 0xd8, 0xd3, 0xe1, - 0x1d, 0xa5, 0x17, 0x59, 0x91, 0x50, 0xc1, 0xb2, 0xf4, 0x6b, 0xe4, 0x9c, 0x5e, 0xa2, 0x5f, 0xed, - 0x21, 0x8f, 0x61, 0x98, 0xe2, 0x4d, 0x70, 0xcd, 0x22, 0xee, 0xc2, 0xac, 0x33, 0x1f, 0xfb, 0x83, - 0x14, 0x6f, 0xde, 0xb0, 0x88, 0x93, 0xf7, 0x60, 0x33, 0xc2, 0x18, 0x05, 0x46, 0x7a, 0x79, 0xa4, - 0x96, 0x47, 0x06, 0x93, 0x26, 0xde, 0x6b, 0xd8, 0xae, 0xc9, 0xf6, 0x91, 0xe7, 0x59, 0xca, 0x91, - 0xcc, 0xe1, 0x91, 0x3e, 0xfd, 0x8c, 0xbd, 0xc5, 0x13, 0x96, 0x30, 0xa1, 0x14, 0xe8, 0xfa, 0x6d, - 0x98, 0xbc, 0x03, 0xfd, 0x18, 0x69, 0x84, 0x85, 0xa1, 0xdd, 0xcc, 0xbc, 0x3f, 0x6c, 0x70, 0xd7, - 0x85, 0xae, 0x34, 0x8d, 0xd4, 0x89, 0x63, 0xdf, 0x66, 0x91, 0xe4, 0x8c, 0xb3, 0xb7, 0xa8, 0x34, - 0xed, 0xfa, 0x6a, 0x4c, 0x9e, 0x01, 0x84, 0x59, 0x1c, 0x63, 0x28, 0x37, 0x9a, 0xc3, 0x1b, 0x88, - 0xe4, 0x54, 0xc9, 0x74, 0x2b, 0x67, 0xd7, 0x77, 0x24, 0xa2, 0x95, 0xac, 0x6f, 0x6e, 0x0c, 0xb4, - 0x92, 0xe6, 0xe6, 0xda, 0xe4, 0x23, 0x20, 0x15, 0x39, 0xe7, 0xcb, 0xda, 0xb0, 0xaf, 0x0c, 0x27, - 0x66, 0xe5, 0xe5, 0xb2, 0xb2, 0x7e, 0x02, 0x4e, 0x81, 0x34, 0x0a, 0xb2, 0x34, 0x5e, 0x2a, 0x71, - 0x87, 0xfe, 0x50, 0x02, 0xaf, 0xd2, 0x78, 0x49, 0x3e, 0x84, 0xed, 0x02, 0xf3, 0x98, 0x85, 0x34, - 0xc8, 0x63, 0x1a, 0x62, 0x82, 0x69, 0xa5, 0xf3, 0xc4, 0x2c, 0x9c, 
0x56, 0x38, 0x71, 0x61, 0x70, - 0x8d, 0x05, 0x97, 0xd7, 0x72, 0x94, 0x49, 0x35, 0x25, 0x13, 0xe8, 0x08, 0x11, 0xbb, 0xa0, 0x50, - 0x39, 0xf4, 0x06, 0xd0, 0x3b, 0x4c, 0x72, 0xb1, 0xf4, 0x7e, 0xb3, 0xe0, 0xd1, 0x59, 0x99, 0x63, - 0xf1, 0x32, 0xce, 0xc2, 0xab, 0xc3, 0x85, 0x28, 0x28, 0x79, 0x05, 0x5b, 0x58, 0x50, 0x5e, 0x16, - 0x32, 0xf6, 0x88, 0xa5, 0x97, 0x8a, 0xd2, 0xd1, 0xfe, 0xbc, 0x91, 0x3e, 0xad, 0x3d, 0x7b, 0x87, - 0x7a, 0xc3, 0x81, 0xb2, 0xf7, 0xc7, 0xd8, 0x9c, 0x4e, 0xbf, 0x87, 0xf1, 0xca, 0xba, 0x14, 0x46, - 0xa6, 0xb6, 0x91, 0x4a, 0x8d, 0xa5, 0xe2, 0x39, 0x2d, 0x98, 0x58, 0x9a, 0x12, 0x34, 0x33, 0x29, - 0x88, 0xa9, 0x30, 0x99, 0x69, 0x1d, 0x95, 0x69, 0x8e, 0x46, 0x8e, 0x22, 0xee, 0x7d, 0x00, 0x3b, - 0x07, 0x31, 0xc3, 0x54, 0x9c, 0x30, 0x2e, 0x30, 0xf5, 0xf1, 0xa7, 0x12, 0xb9, 0x90, 0x1e, 0x52, - 0x9a, 0xa0, 0x29, 0x70, 0x35, 0xf6, 0x7e, 0x86, 0x2d, 0x9d, 0x3a, 0x27, 0x59, 0xa8, 0xf2, 0x46, - 0x12, 0x23, 0x2b, 0x5b, 0x1b, 0xc9, 0x61, 0xab, 0xe4, 0xed, 0x76, 0xc9, 0x37, 0x6b, 0xa2, 0xf3, - 0xef, 0x35, 0xd1, 0xbd, 0x5b, 0x13, 0xdf, 0xc1, 0xce, 0x49, 0x96, 0x5d, 0x95, 0xb9, 0x0e, 0xa3, - 0x8a, 0x75, 0xf5, 0x86, 0xd6, 0xac, 0x23, 0x7d, 0xd6, 0x37, 0x6c, 0x65, 0xac, 0xdd, 0xce, 0x58, - 0xef, 0x6f, 0x0b, 0x76, 0x57, 0x8f, 0x35, 0xd5, 0xf6, 0x03, 0xec, 0xd4, 0xe7, 0x06, 0xb1, 0xb9, - 0xb3, 0x76, 0x30, 0xda, 0x7f, 0xd1, 0x10, 0xf3, 0xbe, 0xdd, 0x55, 0x83, 0x88, 0x2a, 0xb2, 0xfc, - 0xed, 0xeb, 0x16, 0xc2, 0xa7, 0x0b, 0x98, 0xb4, 0xcd, 0x64, 0x42, 0xd7, 0x5e, 0x0d, 0xb3, 0xc3, - 0x6a, 0x27, 0xf9, 0x18, 0x9c, 0xdb, 0x40, 0x6c, 0x15, 0xc8, 0xce, 0x4a, 0x20, 0xc6, 0xd7, 0xad, - 0x15, 0xd9, 0x85, 0x1e, 0x16, 0x45, 0x56, 0x35, 0x02, 0x3d, 0xf1, 0x3e, 0x85, 0xe1, 0x7f, 0x56, - 0xd1, 0xfb, 0xd3, 0x82, 0xf1, 0x17, 0x9c, 0xb3, 0xcb, 0x3a, 0x5d, 0x76, 0xa1, 0xa7, 0xcb, 0x54, - 0xb7, 0x23, 0x3d, 0x21, 0x33, 0x18, 0x99, 0x2a, 0x6b, 0x50, 0xdf, 0x84, 0x1e, 0xec, 0x26, 0xa6, - 0xf2, 0xba, 0x3a, 0x34, 0x21, 0xe2, 0x76, 0xa3, 0xef, 0xad, 0x6d, 0xf4, 0xfd, 0x46, 0xa3, 0x7f, - 0x02, 0x8e, 0xda, 0x94, 0x66, 0x11, 0x9a, 0x17, 0x60, 0x28, 0x81, 0x6f, 0xb2, 0x08, 0xbd, 0x5f, - 0x2d, 0xd8, 0xaa, 0x6e, 0x63, 0x94, 0x9f, 0x40, 0xe7, 0xa2, 0x66, 0x5f, 0x0e, 0x2b, 0x8e, 0xec, - 0x75, 0x1c, 0xdd, 0x79, 0xdc, 0x6a, 0x46, 0xba, 0x4d, 0x46, 0x6a, 0x31, 0x7a, 0x0d, 0x31, 0x64, - 0xc8, 0xb4, 0x14, 0x3f, 0x56, 0x21, 0xcb, 0xb1, 0x77, 0x09, 0xdb, 0x67, 0x82, 0x0a, 0xc6, 0x05, - 0x0b, 0x79, 0x45, 0x73, 0x8b, 0x50, 0xeb, 0x21, 0x42, 0xed, 0x75, 0x84, 0x76, 0x6a, 0x42, 0xbd, - 0xdf, 0x2d, 0x20, 0x4d, 0x4f, 0x86, 0x82, 0xff, 0xc1, 0x95, 0xa4, 0x4c, 0x64, 0x82, 0xc6, 0x81, - 0x7a, 0x55, 0xcc, 0xdb, 0xa0, 0x10, 0xf9, 0x70, 0x49, 0x95, 0x4a, 0x8e, 0x91, 0x5e, 0xd5, 0x0f, - 0xc3, 0x50, 0x02, 0x6a, 0x71, 0xf5, 0x5d, 0xe9, 0xb7, 0xde, 0x95, 0xfd, 0x5f, 0x3a, 0x30, 0x38, - 0x43, 0x7a, 0x83, 0x18, 0x91, 0x23, 0x18, 0x9f, 0x61, 0x1a, 0xdd, 0xfe, 0x55, 0x76, 0x1b, 0x25, - 0x52, 0xa3, 0xd3, 0x77, 0xef, 0x43, 0xab, 0xfb, 0x7b, 0x1b, 0x73, 0xeb, 0x85, 0x45, 0x4e, 0x61, - 0x7c, 0x8c, 0x98, 0x1f, 0x64, 0x69, 0x8a, 0xa1, 0xc0, 0x88, 0x3c, 0x6b, 0x6c, 0xba, 0xa7, 0x6f, - 0x4e, 0x1f, 0xdf, 0xf9, 0x22, 0x54, 0x65, 0x66, 0x4e, 0xfc, 0x16, 0x36, 0x9b, 0xed, 0x62, 0xe5, - 0xc0, 0x7b, 0x9a, 0xdb, 0xf4, 0xf9, 0x03, 0x7d, 0xc6, 0xdb, 0x20, 0x9f, 0x43, 0x5f, 0xe7, 0x2f, - 0x71, 0x1b, 0xc6, 0x2b, 0x05, 0xba, 0x12, 0xd7, 0x6a, 0xb2, 0x7b, 0x1b, 0xe4, 0x18, 0xe0, 0x36, - 0x03, 0x48, 0x93, 0x97, 0x3b, 0x29, 0x38, 0x7d, 0xba, 0x66, 0xb5, 0x3a, 0xec, 0xbc, 0xaf, 0x3e, - 0x8e, 0x9f, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x25, 0xfc, 0xb7, 0x54, 0x48, 0x0a, 0x00, 
0x00, + // 1164 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x72, 0xdb, 0x44, + 0x14, 0xae, 0xe4, 0x5f, 0x1d, 0xc7, 0xae, 0xb3, 0x49, 0x8b, 0xea, 0xd2, 0xd6, 0x55, 0x6f, 0xcc, + 0x00, 0x99, 0x12, 0x2e, 0xb8, 0x60, 0x18, 0x86, 0x9a, 0x30, 0x64, 0x12, 0x68, 0xaa, 0xb4, 0x65, + 0x86, 0x19, 0x46, 0x6c, 0xa4, 0x93, 0xb0, 0x13, 0x59, 0x12, 0xd2, 0x3a, 0xb1, 0x7b, 0xc3, 0x43, + 0xf0, 0x3e, 0x70, 0x01, 0x77, 0x3c, 0x0a, 0x77, 0x3c, 0x01, 0xb3, 0x3f, 0x92, 0x65, 0xd9, 0x49, + 0x3a, 0xcc, 0x70, 0xb7, 0xfa, 0xf6, 0xec, 0xd9, 0xb3, 0xdf, 0x77, 0x7e, 0x6c, 0xd8, 0x98, 0xd0, + 0x8c, 0x63, 0xba, 0x93, 0xa4, 0x31, 0x8f, 0x89, 0xa5, 0xbe, 0xbc, 0xe4, 0xc4, 0xf9, 0xdb, 0x04, + 0xeb, 0x6b, 0xa4, 0x29, 0x3f, 0x41, 0xca, 0x49, 0x0f, 0x4c, 0x96, 0xd8, 0xc6, 0xd0, 0x18, 0x59, + 0xae, 0xc9, 0x12, 0x42, 0xa0, 0x9e, 0xc4, 0x29, 0xb7, 0xcd, 0xa1, 0x31, 0xea, 0xba, 0x72, 0x4d, + 0x1e, 0x00, 0x24, 0xd3, 0x93, 0x90, 0xf9, 0xde, 0x34, 0x0d, 0xed, 0x9a, 0xb4, 0xb5, 0x14, 0xf2, + 0x2a, 0x0d, 0xc9, 0x08, 0xfa, 0x13, 0x3a, 0xf3, 0x2e, 0xe2, 0x70, 0x3a, 0x41, 0xcf, 0x8f, 0xa7, + 0x11, 0xb7, 0xeb, 0xf2, 0x78, 0x6f, 0x42, 0x67, 0xaf, 0x25, 0x3c, 0x16, 0x28, 0x19, 0x8a, 0xa8, + 0x66, 0xde, 0x29, 0x0b, 0xd1, 0x3b, 0xc7, 0xb9, 0xdd, 0x18, 0x1a, 0xa3, 0xba, 0x0b, 0x13, 0x3a, + 0xfb, 0x8a, 0x85, 0x78, 0x80, 0x73, 0xf2, 0x08, 0x3a, 0x01, 0xe5, 0xd4, 0xf3, 0x31, 0xe2, 0x98, + 0xda, 0x4d, 0x79, 0x17, 0x08, 0x68, 0x2c, 0x11, 0x11, 0x5f, 0x4a, 0xfd, 0x73, 0xbb, 0x25, 0x77, + 0xe4, 0x5a, 0xc4, 0x47, 0x83, 0x09, 0x8b, 0x3c, 0x19, 0x79, 0x5b, 0x5e, 0x6d, 0x49, 0xe4, 0x48, + 0x84, 0xff, 0x19, 0xb4, 0x54, 0x6c, 0x99, 0x6d, 0x0d, 0x6b, 0xa3, 0xce, 0xee, 0x93, 0x9d, 0x82, + 0x8d, 0x1d, 0x15, 0xde, 0x7e, 0x74, 0x1a, 0xa7, 0x13, 0xca, 0x59, 0x1c, 0x7d, 0x83, 0x59, 0x46, + 0xcf, 0xd0, 0xcd, 0xcf, 0x90, 0x7b, 0xd0, 0x8e, 0xf0, 0xd2, 0xbb, 0x60, 0x41, 0x66, 0xc3, 0xb0, + 0x36, 0xea, 0xba, 0xad, 0x08, 0x2f, 0x5f, 0xb3, 0x20, 0x23, 0x8f, 0x61, 0x23, 0xc0, 0x10, 0x39, + 0x06, 0x6a, 0xbb, 0x23, 0xb7, 0x3b, 0x1a, 0x13, 0x26, 0xce, 0x2b, 0xd8, 0x2c, 0xc8, 0x76, 0x31, + 0x4b, 0xe2, 0x28, 0x43, 0x32, 0x82, 0xdb, 0xca, 0xfb, 0x31, 0x7b, 0x83, 0x87, 0x6c, 0xc2, 0xb8, + 0x54, 0xa0, 0xee, 0x56, 0x61, 0x72, 0x17, 0x9a, 0x21, 0xd2, 0x00, 0x53, 0x4d, 0xbb, 0xfe, 0x72, + 0xfe, 0x34, 0xc1, 0xbe, 0x2a, 0x74, 0xa9, 0x69, 0x20, 0x3d, 0x76, 0x5d, 0x93, 0x05, 0x82, 0xb3, + 0x8c, 0xbd, 0x41, 0xa9, 0x69, 0xdd, 0x95, 0x6b, 0xf2, 0x10, 0xc0, 0x8f, 0xc3, 0x10, 0x7d, 0x71, + 0x50, 0x3b, 0x2f, 0x21, 0x82, 0x53, 0x29, 0xd3, 0x42, 0xce, 0xba, 0x6b, 0x09, 0x44, 0x29, 0x59, + 0xbc, 0x5c, 0x1b, 0x28, 0x25, 0xf5, 0xcb, 0x95, 0xc9, 0x07, 0x40, 0x72, 0x72, 0x4e, 0xe6, 0x85, + 0x61, 0x53, 0x1a, 0xf6, 0xf5, 0xce, 0xb3, 0x79, 0x6e, 0x7d, 0x1f, 0xac, 0x14, 0x69, 0xe0, 0xc5, + 0x51, 0x38, 0x97, 0xe2, 0xb6, 0xdd, 0xb6, 0x00, 0x9e, 0x47, 0xe1, 0x9c, 0xbc, 0x0f, 0x9b, 0x29, + 0x26, 0x21, 0xf3, 0xa9, 0x97, 0x84, 0xd4, 0xc7, 0x09, 0x46, 0xb9, 0xce, 0x7d, 0xbd, 0x71, 0x94, + 0xe3, 0xc4, 0x86, 0xd6, 0x05, 0xa6, 0x99, 0x78, 0x96, 0x25, 0x4d, 0xf2, 0x4f, 0xd2, 0x87, 0x1a, + 0xe7, 0xa1, 0x0d, 0x12, 0x15, 0x4b, 0xa7, 0x05, 0x8d, 0xbd, 0x49, 0xc2, 0xe7, 0xce, 0x6f, 0x06, + 0xdc, 0x3e, 0x9e, 0x26, 0x98, 0x3e, 0x0b, 0x63, 0xff, 0x7c, 0x6f, 0xc6, 0x53, 0x4a, 0x9e, 0x43, + 0x0f, 0x53, 0x9a, 0x4d, 0x53, 0x11, 0x7b, 0xc0, 0xa2, 0x33, 0x49, 0x69, 0x67, 0x77, 0x54, 0x4a, + 0x9f, 0xca, 0x99, 0x9d, 0x3d, 0x75, 0x60, 0x2c, 0xed, 0xdd, 0x2e, 0x96, 0x3f, 0x07, 0xdf, 0x43, + 0x77, 0x69, 0x5f, 0x08, 0x23, 0x52, 0x5b, 0x4b, 0x25, 0xd7, 0x42, 
0xf1, 0x84, 0xa6, 0x8c, 0xcf, + 0x75, 0x09, 0xea, 0x2f, 0x21, 0x88, 0xae, 0x30, 0x91, 0x69, 0x35, 0x99, 0x69, 0x96, 0x42, 0xf6, + 0x83, 0xcc, 0x79, 0x0f, 0xb6, 0xc6, 0x21, 0xc3, 0x88, 0x1f, 0xb2, 0x8c, 0x63, 0xe4, 0xe2, 0xcf, + 0x53, 0xcc, 0xb8, 0xb8, 0x21, 0xa2, 0x13, 0xd4, 0x05, 0x2e, 0xd7, 0xce, 0x2f, 0xd0, 0x53, 0xa9, + 0x73, 0x18, 0xfb, 0x32, 0x6f, 0x04, 0x31, 0xa2, 0xb2, 0x95, 0x91, 0x58, 0x56, 0x4a, 0xde, 0xac, + 0x96, 0x7c, 0xb9, 0x26, 0x6a, 0xd7, 0xd7, 0x44, 0x7d, 0xb5, 0x26, 0x5e, 0xc2, 0xd6, 0x61, 0x1c, + 0x9f, 0x4f, 0x13, 0x15, 0x46, 0x1e, 0xeb, 0xf2, 0x0b, 0x8d, 0x61, 0x4d, 0xdc, 0x59, 0xbc, 0xb0, + 0x92, 0xb1, 0x66, 0x35, 0x63, 0x9d, 0x7f, 0x0c, 0xd8, 0x5e, 0x76, 0xab, 0xab, 0xed, 0x47, 0xd8, + 0x2a, 0xfc, 0x7a, 0xa1, 0x7e, 0xb3, 0xba, 0xa0, 0xb3, 0xfb, 0xb4, 0x24, 0xe6, 0xba, 0xd3, 0x79, + 0x83, 0x08, 0x72, 0xb2, 0xdc, 0xcd, 0x8b, 0x0a, 0x92, 0x0d, 0x66, 0xd0, 0xaf, 0x9a, 0x89, 0x84, + 0x2e, 0x6e, 0xd5, 0xcc, 0xb6, 0xf3, 0x93, 0xe4, 0x23, 0xb0, 0x16, 0x81, 0x98, 0x32, 0x90, 0xad, + 0xa5, 0x40, 0xf4, 0x5d, 0x0b, 0x2b, 0xb2, 0x0d, 0x0d, 0x4c, 0xd3, 0x38, 0x6f, 0x04, 0xea, 0xc3, + 0xf9, 0x14, 0xda, 0xff, 0x59, 0x45, 0xe7, 0x2f, 0x03, 0xba, 0x5f, 0x64, 0x19, 0x3b, 0x2b, 0xd2, + 0x65, 0x1b, 0x1a, 0xaa, 0x4c, 0x55, 0x3b, 0x52, 0x1f, 0x64, 0x08, 0x1d, 0x5d, 0x65, 0x25, 0xea, + 0xcb, 0xd0, 0x8d, 0xdd, 0x44, 0x57, 0x5e, 0x5d, 0x85, 0xc6, 0x79, 0x58, 0x6d, 0xf4, 0x8d, 0x2b, + 0x1b, 0x7d, 0xb3, 0xd4, 0xe8, 0xef, 0x83, 0x25, 0x0f, 0x45, 0x71, 0x80, 0x7a, 0x02, 0xb4, 0x05, + 0xf0, 0x6d, 0x1c, 0xa0, 0xf3, 0xab, 0x01, 0xbd, 0xfc, 0x35, 0x5a, 0xf9, 0x3e, 0xd4, 0x4e, 0x0b, + 0xf6, 0xc5, 0x32, 0xe7, 0xc8, 0xbc, 0x8a, 0xa3, 0x95, 0xe1, 0x56, 0x30, 0x52, 0x2f, 0x33, 0x52, + 0x88, 0xd1, 0x28, 0x89, 0x21, 0x42, 0xa6, 0x53, 0xfe, 0x53, 0x1e, 0xb2, 0x58, 0x3b, 0x67, 0xb0, + 0x79, 0xcc, 0x29, 0x67, 0x19, 0x67, 0x7e, 0x96, 0xd3, 0x5c, 0x21, 0xd4, 0xb8, 0x89, 0x50, 0xf3, + 0x2a, 0x42, 0x6b, 0x05, 0xa1, 0xce, 0x1f, 0x06, 0x90, 0xf2, 0x4d, 0x9a, 0x82, 0xff, 0xe1, 0x2a, + 0x41, 0x19, 0x8f, 0x39, 0x0d, 0x3d, 0x39, 0x55, 0xf4, 0x6c, 0x90, 0x88, 0x18, 0x5c, 0x42, 0xa5, + 0x69, 0x86, 0x81, 0xda, 0x55, 0x83, 0xa1, 0x2d, 0x00, 0xb9, 0xb9, 0x3c, 0x57, 0x9a, 0x95, 0xb9, + 0xe2, 0xbc, 0x04, 0x18, 0x2f, 0xae, 0x5e, 0xd3, 0xbd, 0xde, 0x22, 0x19, 0x57, 0xb9, 0x39, 0x80, + 0x3b, 0x0b, 0xaf, 0xa2, 0x41, 0xbe, 0xbd, 0x10, 0xda, 0x99, 0xb9, 0x70, 0xf6, 0x02, 0xee, 0x56, + 0x9d, 0x69, 0xae, 0x3f, 0x81, 0xce, 0x82, 0xb7, 0xbc, 0xc1, 0xdc, 0x29, 0xd5, 0xf5, 0xe2, 0x9c, + 0x5b, 0xb6, 0x74, 0x3e, 0x84, 0x77, 0x16, 0x5b, 0x5f, 0xca, 0x4e, 0x79, 0x5d, 0x03, 0x1f, 0x80, + 0xbd, 0x6a, 0xae, 0x62, 0xd8, 0xfd, 0xbd, 0x0e, 0xad, 0x63, 0xa4, 0x97, 0x88, 0x01, 0xd9, 0x87, + 0xee, 0x31, 0x46, 0xc1, 0xe2, 0xc7, 0xde, 0x76, 0x29, 0x96, 0x02, 0x1d, 0xbc, 0xbb, 0x0e, 0xcd, + 0x1d, 0x3a, 0xb7, 0x46, 0xc6, 0x53, 0x83, 0x1c, 0x41, 0xf7, 0x00, 0x31, 0x19, 0xc7, 0x51, 0x84, + 0x3e, 0xc7, 0x80, 0x3c, 0x2c, 0x3f, 0x6b, 0x75, 0xf0, 0x0c, 0xee, 0xad, 0xfc, 0xc6, 0xca, 0xfb, + 0x94, 0xf6, 0xf8, 0x02, 0x36, 0xca, 0xfd, 0x76, 0xc9, 0xe1, 0x9a, 0xe9, 0x30, 0x78, 0x74, 0x43, + 0xa3, 0x76, 0x6e, 0x91, 0xcf, 0xa1, 0xa9, 0x1a, 0x00, 0xb1, 0x4b, 0xc6, 0x4b, 0x1d, 0x6e, 0x29, + 0xae, 0xe5, 0x6e, 0xe1, 0xdc, 0x22, 0x07, 0x00, 0x8b, 0x12, 0x22, 0x65, 0x5e, 0x56, 0x6a, 0x78, + 0xf0, 0xe0, 0x8a, 0xdd, 0xc2, 0xd9, 0x77, 0xd0, 0x5b, 0xce, 0x13, 0x32, 0x5c, 0x9b, 0x0a, 0xa5, + 0x7c, 0x1c, 0x3c, 0xbe, 0xc6, 0xa2, 0x70, 0xfc, 0x03, 0xf4, 0xab, 0xf2, 0x13, 0x67, 0xed, 0xc1, + 0xa5, 0x54, 0x1a, 0x3c, 0xb9, 0xd6, 0x26, 0x77, 0x7f, 0xd2, 0x94, 0xff, 0x18, 0x3e, 0xfe, 
0x37, + 0x00, 0x00, 0xff, 0xff, 0xb1, 0x11, 0x1b, 0x49, 0x41, 0x0c, 0x00, 0x00, } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 4234af5f5..13c66543b 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -14,7 +14,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { @@ -239,9 +238,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - for _, master := range fs.option.Masters { - _, err = util.Get(fmt.Sprintf("http://%s/col/delete?collection=%s", master, req.Collection)) - } + err = fs.filer.MasterClient.CollectionDelete(ctx, req.GetCollection()) return &filer_pb.DeleteCollectionResponse{}, err } diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go new file mode 100644 index 000000000..23188d856 --- /dev/null +++ b/weed/server/master_grpc_server_collection.go @@ -0,0 +1,56 @@ +package weed_server + +import ( + "context" + "fmt" + "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" +) + +func (ms *MasterServer) CollectionList(ctx context.Context, req *master_pb.CollectionListRequest) (*master_pb.CollectionListResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.CollectionListResponse{} + collections := ms.Topo.ListCollections() + for _, c := range collections { + resp.Collections = append(resp.Collections, &master_pb.Collection{ + Name: c.Name, + }) + } + + return resp, nil +} + +func (ms *MasterServer) CollectionDelete(ctx context.Context, req *master_pb.CollectionDeleteRequest) (*master_pb.CollectionDeleteResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.CollectionDeleteResponse{} + + collection, ok := ms.Topo.FindCollection(req.GetName()) + if !ok { + return resp, fmt.Errorf("collection not found: %v", req.GetName()) + } + + for _, server := range collection.ListVolumeServers() { + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + Collection: collection.Name, + }) + return deleteErr + }) + if err != nil { + return nil, err + } + } + ms.Topo.DeleteCollection(req.GetName()) + + return resp, nil +} diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go new file mode 100644 index 000000000..aab347d09 --- /dev/null +++ b/weed/shell/command_collection_list.go @@ -0,0 +1,39 @@ +package shell + +import ( + "context" + "fmt" + "io" +) + +func init() { + commands = append(commands, &commandCollectionList{}) +} + +type commandCollectionList struct { +} + +func (c *commandCollectionList) Name() string { + return "collection.list" +} + +func (c *commandCollectionList) Help() string { + return "\t\t # list all 
collections" +} + +func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer io.Writer) error { + + resp, err := commandEnv.masterClient.CollectionList(context.Background()) + + if err != nil { + return err + } + + for _, c := range resp.Collections { + fmt.Fprintf(writer, "collection:\"%s\"\n", c.GetName()) + } + + fmt.Fprintf(writer, "Total %d collections.\n", len(resp.Collections)) + + return nil +} diff --git a/weed/shell/commands.go b/weed/shell/commands.go new file mode 100644 index 000000000..4df70ff55 --- /dev/null +++ b/weed/shell/commands.go @@ -0,0 +1,27 @@ +package shell + +import ( + "github.com/chrislusf/seaweedfs/weed/wdclient" + "google.golang.org/grpc" + "io" +) + +type ShellOptions struct { + Masters *string + GrpcDialOption grpc.DialOption +} + +type commandEnv struct { + env map[string]string + masterClient *wdclient.MasterClient +} + +type command interface { + Name() string + Help() string + Do([]string, *commandEnv, io.Writer) error +} + +var ( + commands = []command{} +) diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go new file mode 100644 index 000000000..e86220134 --- /dev/null +++ b/weed/shell/shell_liner.go @@ -0,0 +1,143 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "os" + "regexp" + "strings" + + "github.com/peterh/liner" + "sort" +) + +var ( + line *liner.State + historyPath = "/tmp/weed-shell" +) + +func RunShell(options ShellOptions) { + + line = liner.NewLiner() + defer line.Close() + + line.SetCtrlCAborts(true) + + setCompletionHandler() + loadHisotry() + + defer saveHisotry() + + reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`) + + commandEnv := &commandEnv{ + env: make(map[string]string), + masterClient: wdclient.NewMasterClient(context.Background(), + options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")), + } + + go commandEnv.masterClient.KeepConnectedToMaster() + commandEnv.masterClient.WaitUntilConnected() + + for { + cmd, err := line.Prompt("> ") + if err != nil { + fmt.Printf("%v\n", err) + return + } + + cmds := reg.FindAllString(cmd, -1) + if len(cmds) == 0 { + continue + } else { + line.AppendHistory(cmd) + + args := make([]string, len(cmds[1:])) + + for i := range args { + args[i] = strings.Trim(string(cmds[1+i]), "\"'") + } + + cmd := strings.ToLower(cmds[0]) + if cmd == "help" || cmd == "?" 
+				printHelp(cmds)
+			} else if cmd == "exit" || cmd == "quit" {
+				return
+			} else {
+				for _, c := range commands {
+					if c.Name() == cmd {
+						if err := c.Do(args, commandEnv, os.Stderr); err != nil {
+							fmt.Fprintf(os.Stderr, "error: %v\n", err)
+						}
+					}
+				}
+			}
+
+		}
+	}
+}
+
+func printGenericHelp() {
+	msg :=
+		`Type: "help <command>" for help on <command>
+`
+	fmt.Print(msg)
+
+	sort.Slice(commands, func(i, j int) bool {
+		return strings.Compare(commands[i].Name(), commands[j].Name()) < 0
+	})
+	for _, c := range commands {
+		fmt.Printf("\t%s %s \n", c.Name(), c.Help())
+	}
+}
+
+func printHelp(cmds []string) {
+	args := cmds[1:]
+	if len(args) == 0 {
+		printGenericHelp()
+	} else if len(args) > 1 {
+		fmt.Println()
+	} else {
+		cmd := strings.ToLower(args[0])
+
+		sort.Slice(commands, func(i, j int) bool {
+			return strings.Compare(commands[i].Name(), commands[j].Name()) < 0
+		})
+
+		for _, c := range commands {
+			if c.Name() == cmd {
+				fmt.Println()
+				fmt.Printf("\t%s %s \n", c.Name(), c.Help())
+				fmt.Println()
+			}
+		}
+	}
+}
+
+func setCompletionHandler() {
+	line.SetCompleter(func(line string) (c []string) {
+		for _, i := range commands {
+			if strings.HasPrefix(i.Name(), strings.ToLower(line)) {
+				c = append(c, i.Name())
+			}
+		}
+		return
+	})
+}
+
+func loadHisotry() {
+	if f, err := os.Open(historyPath); err == nil {
+		line.ReadHistory(f)
+		f.Close()
+	}
+}
+
+func saveHisotry() {
+	if f, err := os.Create(historyPath); err != nil {
+		fmt.Printf("Error writing history file: %v\n", err)
+	} else {
+		line.WriteHistory(f)
+		f.Close()
+	}
+}
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index 77716605a..208c9b5b7 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -117,6 +117,13 @@ func (t *Topology) GetVolumeLayout(collectionName string, rp *storage.ReplicaPla
 	}).(*Collection).GetOrCreateVolumeLayout(rp, ttl)
 }
 
+func (t *Topology) ListCollections() (ret []*Collection) {
+	for _, c := range t.collectionMap.Items() {
+		ret = append(ret, c.(*Collection))
+	}
+	return ret
+}
+
 func (t *Topology) FindCollection(collectionName string) (*Collection, bool) {
 	c, hasCollection := t.collectionMap.Find(collectionName)
 	if !hasCollection {
diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go
index 1686ad5ff..b3b277c74 100644
--- a/weed/wdclient/masterclient.go
+++ b/weed/wdclient/masterclient.go
@@ -43,7 +43,7 @@ func (mc *MasterClient) WaitUntilConnected() {
 }
 
 func (mc *MasterClient) KeepConnectedToMaster() {
-	glog.V(0).Infof("%s bootstraps with masters %v", mc.name, mc.masters)
+	glog.V(1).Infof("%s bootstraps with masters %v", mc.name, mc.masters)
 	for {
 		mc.tryAllMasters()
 		time.Sleep(time.Second)
@@ -52,7 +52,7 @@ func (mc *MasterClient) tryAllMasters() {
 	for _, master := range mc.masters {
-		glog.V(0).Infof("Connecting to master %v", master)
+		glog.V(1).Infof("Connecting to master %v", master)
 		gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error {
 
 			stream, err := client.KeepConnected(ctx)
@@ -67,7 +67,7 @@ func (mc *MasterClient) tryAllMasters() {
 			}
 
 			if mc.currentMaster == "" {
-				glog.V(0).Infof("Connected to %v", master)
+				glog.V(1).Infof("Connected to %v", master)
 				mc.currentMaster = master
 			}
 
diff --git a/weed/wdclient/masterclient_collection.go b/weed/wdclient/masterclient_collection.go
new file mode 100644
index 000000000..bdf791da0
--- /dev/null
+++ b/weed/wdclient/masterclient_collection.go
@@ -0,0 +1,23 @@
+package
wdclient + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +func (mc *MasterClient) CollectionDelete(ctx context.Context, collection string) error { + return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + Name: collection, + }) + return err + }) +} + +func (mc *MasterClient) CollectionList(ctx context.Context) (resp *master_pb.CollectionListResponse, err error) { + err = withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { + resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{}) + return err + }) + return +} From e48267e28731890c5b79d5baa8c91805f10a96f7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 17 Mar 2019 17:28:29 -0700 Subject: [PATCH 072/450] adjust output --- weed/shell/command_collection_list.go | 2 +- weed/shell/shell_liner.go | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index aab347d09..34a406d67 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -18,7 +18,7 @@ func (c *commandCollectionList) Name() string { } func (c *commandCollectionList) Help() string { - return "\t\t # list all collections" + return "# list all collections" } func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer io.Writer) error { diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go index e86220134..cd015fe85 100644 --- a/weed/shell/shell_liner.go +++ b/weed/shell/shell_liner.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "github.com/chrislusf/seaweedfs/weed/wdclient" + "io" "os" "regexp" "strings" @@ -43,7 +44,9 @@ func RunShell(options ShellOptions) { for { cmd, err := line.Prompt("> ") if err != nil { - fmt.Printf("%v\n", err) + if err != io.EOF { + fmt.Printf("%v\n", err) + } return } From 22fbbf023b180e3c51261b0746a52be715e86648 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 17 Mar 2019 17:31:05 -0700 Subject: [PATCH 073/450] adjust collection list output --- weed/shell/command_collection_list.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index 34a406d67..f5ed6cfa6 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -30,7 +30,7 @@ func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer } for _, c := range resp.Collections { - fmt.Fprintf(writer, "collection:\"%s\"\n", c.GetName()) + fmt.Fprintf(writer, "collection:\"%s\"\treplication:\"%s\"\tTTL:\"%s\"\n", c.GetName(), c.GetReplication(), c.GetTtl()) } fmt.Fprintf(writer, "Total %d collections.\n", len(resp.Collections)) From aca653c08bfaae205e3a62ae9e58ce327a5a583f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 17 Mar 2019 20:27:08 -0700 Subject: [PATCH 074/450] weed shell: list volumes --- weed/pb/master.proto | 43 ++ weed/pb/master_pb/master.pb.go | 529 ++++++++++++++++++----- weed/server/master_grpc_server_volume.go | 13 + weed/shell/command_collection_list.go | 2 +- weed/shell/command_volume_list.go | 64 +++ weed/storage/store.go | 16 +- weed/storage/volume.go | 16 + weed/storage/volume_info.go | 16 + weed/topology/data_center.go | 17 + weed/topology/data_node.go | 15 + weed/topology/rack.go | 16 + 
weed/topology/topology_map.go | 15 + weed/wdclient/masterclient_collection.go | 8 + 13 files changed, 643 insertions(+), 127 deletions(-) create mode 100644 weed/shell/command_volume_list.go diff --git a/weed/pb/master.proto b/weed/pb/master.proto index d4b02cdc5..ad530d909 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -19,6 +19,8 @@ service Seaweed { } rpc CollectionDelete (CollectionDeleteRequest) returns (CollectionDeleteResponse) { } + rpc VolumeList (VolumeListRequest) returns (VolumeListResponse) { + } } ////////////////////////////////////////////////// @@ -151,3 +153,44 @@ message CollectionDeleteRequest { } message CollectionDeleteResponse { } + +// +// volume related +// +message DataNodeInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated VolumeInformationMessage volume_infos = 6; +} +message RackInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated DataNodeInfo data_node_infos = 6; +} +message DataCenterInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated RackInfo rack_infos = 6; +} +message TopologyInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated DataCenterInfo data_center_infos = 6; +} +message VolumeListRequest { +} +message VolumeListResponse { + TopologyInfo topology_info = 1; +} diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index a046a887b..f24354cd7 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -23,11 +23,18 @@ It has these top-level messages: AssignResponse StatisticsRequest StatisticsResponse + StorageType Collection CollectionListRequest CollectionListResponse CollectionDeleteRequest CollectionDeleteResponse + DataNodeInfo + RackInfo + DataCenterInfo + TopologyInfo + VolumeListRequest + VolumeListResponse */ package master_pb @@ -680,16 +687,38 @@ func (m *StatisticsResponse) GetFileCount() uint64 { return 0 } +type StorageType struct { + Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,2,opt,name=ttl" json:"ttl,omitempty"` +} + +func (m *StorageType) Reset() { *m = StorageType{} } +func (m *StorageType) String() string { return proto.CompactTextString(m) } +func (*StorageType) ProtoMessage() {} +func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *StorageType) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *StorageType) GetTtl() string { + if m != nil { + return m.Ttl + } + return "" +} + type Collection struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *Collection) Reset() { *m = Collection{} } func (m *Collection) String() string { return proto.CompactTextString(m) } func (*Collection) ProtoMessage() {} -func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*Collection) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{15} } func (m *Collection) GetName() string { if m != nil { @@ -698,43 +727,13 @@ func (m *Collection) GetName() string { return "" } -func (m *Collection) GetReplication() string { - if m != nil { - return m.Replication - } - return "" -} - -func (m *Collection) GetTtl() string { - if m != nil { - return m.Ttl - } - return "" -} - type CollectionListRequest struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,2,opt,name=ttl" json:"ttl,omitempty"` } func (m *CollectionListRequest) Reset() { *m = CollectionListRequest{} } func (m *CollectionListRequest) String() string { return proto.CompactTextString(m) } func (*CollectionListRequest) ProtoMessage() {} -func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -func (m *CollectionListRequest) GetReplication() string { - if m != nil { - return m.Replication - } - return "" -} - -func (m *CollectionListRequest) GetTtl() string { - if m != nil { - return m.Ttl - } - return "" -} +func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } type CollectionListResponse struct { Collections []*Collection `protobuf:"bytes,1,rep,name=collections" json:"collections,omitempty"` @@ -743,7 +742,7 @@ type CollectionListResponse struct { func (m *CollectionListResponse) Reset() { *m = CollectionListResponse{} } func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) } func (*CollectionListResponse) ProtoMessage() {} -func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *CollectionListResponse) GetCollections() []*Collection { if m != nil { @@ -759,7 +758,7 @@ type CollectionDeleteRequest struct { func (m *CollectionDeleteRequest) Reset() { *m = CollectionDeleteRequest{} } func (m *CollectionDeleteRequest) String() string { return proto.CompactTextString(m) } func (*CollectionDeleteRequest) ProtoMessage() {} -func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *CollectionDeleteRequest) GetName() string { if m != nil { @@ -774,7 +773,258 @@ type CollectionDeleteResponse struct { func (m *CollectionDeleteResponse) Reset() { *m = CollectionDeleteResponse{} } func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) } func (*CollectionDeleteResponse) ProtoMessage() {} -func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +// +// volume related +// +type DataNodeInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + VolumeInfos 
[]*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos" json:"volume_infos,omitempty"` +} + +func (m *DataNodeInfo) Reset() { *m = DataNodeInfo{} } +func (m *DataNodeInfo) String() string { return proto.CompactTextString(m) } +func (*DataNodeInfo) ProtoMessage() {} +func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *DataNodeInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *DataNodeInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetVolumeInfos() []*VolumeInformationMessage { + if m != nil { + return m.VolumeInfos + } + return nil +} + +type RackInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos" json:"data_node_infos,omitempty"` +} + +func (m *RackInfo) Reset() { *m = RackInfo{} } +func (m *RackInfo) String() string { return proto.CompactTextString(m) } +func (*RackInfo) ProtoMessage() {} +func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *RackInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *RackInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *RackInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *RackInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *RackInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *RackInfo) GetDataNodeInfos() []*DataNodeInfo { + if m != nil { + return m.DataNodeInfos + } + return nil +} + +type DataCenterInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos" json:"rack_infos,omitempty"` +} + +func (m *DataCenterInfo) Reset() { *m = DataCenterInfo{} } +func (m *DataCenterInfo) String() string { return proto.CompactTextString(m) 
} +func (*DataCenterInfo) ProtoMessage() {} +func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *DataCenterInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *DataCenterInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetRackInfos() []*RackInfo { + if m != nil { + return m.RackInfos + } + return nil +} + +type TopologyInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos" json:"data_center_infos,omitempty"` +} + +func (m *TopologyInfo) Reset() { *m = TopologyInfo{} } +func (m *TopologyInfo) String() string { return proto.CompactTextString(m) } +func (*TopologyInfo) ProtoMessage() {} +func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *TopologyInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TopologyInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { + if m != nil { + return m.DataCenterInfos + } + return nil +} + +type VolumeListRequest struct { +} + +func (m *VolumeListRequest) Reset() { *m = VolumeListRequest{} } +func (m *VolumeListRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeListRequest) ProtoMessage() {} +func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +type VolumeListResponse struct { + TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo" json:"topology_info,omitempty"` +} + +func (m *VolumeListResponse) Reset() { *m = VolumeListResponse{} } +func (m *VolumeListResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeListResponse) ProtoMessage() {} +func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *VolumeListResponse) GetTopologyInfo() *TopologyInfo { + if m != nil { + return m.TopologyInfo + } + return nil +} func init() { proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") @@ -793,11 +1043,18 @@ func init() { 
proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse") proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest") proto.RegisterType((*StatisticsResponse)(nil), "master_pb.StatisticsResponse") + proto.RegisterType((*StorageType)(nil), "master_pb.StorageType") proto.RegisterType((*Collection)(nil), "master_pb.Collection") proto.RegisterType((*CollectionListRequest)(nil), "master_pb.CollectionListRequest") proto.RegisterType((*CollectionListResponse)(nil), "master_pb.CollectionListResponse") proto.RegisterType((*CollectionDeleteRequest)(nil), "master_pb.CollectionDeleteRequest") proto.RegisterType((*CollectionDeleteResponse)(nil), "master_pb.CollectionDeleteResponse") + proto.RegisterType((*DataNodeInfo)(nil), "master_pb.DataNodeInfo") + proto.RegisterType((*RackInfo)(nil), "master_pb.RackInfo") + proto.RegisterType((*DataCenterInfo)(nil), "master_pb.DataCenterInfo") + proto.RegisterType((*TopologyInfo)(nil), "master_pb.TopologyInfo") + proto.RegisterType((*VolumeListRequest)(nil), "master_pb.VolumeListRequest") + proto.RegisterType((*VolumeListResponse)(nil), "master_pb.VolumeListResponse") } // Reference imports to suppress errors if they are not otherwise used. @@ -818,6 +1075,7 @@ type SeaweedClient interface { Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) + VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) } type seaweedClient struct { @@ -935,6 +1193,15 @@ func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDele return out, nil } +func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) { + out := new(VolumeListResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for Seaweed service type SeaweedServer interface { @@ -945,6 +1212,7 @@ type SeaweedServer interface { Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) + VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -1093,6 +1361,24 @@ func _Seaweed_CollectionDelete_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Seaweed_VolumeList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).VolumeList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/VolumeList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).VolumeList(ctx, req.(*VolumeListRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -1117,6 +1403,10 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ MethodName: "CollectionDelete", Handler: _Seaweed_CollectionDelete_Handler, }, + { + MethodName: "VolumeList", + Handler: _Seaweed_VolumeList_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1138,78 +1428,93 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1164 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x72, 0xdb, 0x44, - 0x14, 0xae, 0xe4, 0x5f, 0x1d, 0xc7, 0xae, 0xb3, 0x49, 0x8b, 0xea, 0xd2, 0xd6, 0x55, 0x6f, 0xcc, - 0x00, 0x99, 0x12, 0x2e, 0xb8, 0x60, 0x18, 0x86, 0x9a, 0x30, 0x64, 0x12, 0x68, 0xaa, 0xb4, 0x65, - 0x86, 0x19, 0x46, 0x6c, 0xa4, 0x93, 0xb0, 0x13, 0x59, 0x12, 0xd2, 0x3a, 0xb1, 0x7b, 0xc3, 0x43, - 0xf0, 0x3e, 0x70, 0x01, 0x77, 0x3c, 0x0a, 0x77, 0x3c, 0x01, 0xb3, 0x3f, 0x92, 0x65, 0xd9, 0x49, - 0x3a, 0xcc, 0x70, 0xb7, 0xfa, 0xf6, 0xec, 0xd9, 0xb3, 0xdf, 0x77, 0x7e, 0x6c, 0xd8, 0x98, 0xd0, - 0x8c, 0x63, 0xba, 0x93, 0xa4, 0x31, 0x8f, 0x89, 0xa5, 0xbe, 0xbc, 0xe4, 0xc4, 0xf9, 0xdb, 0x04, - 0xeb, 0x6b, 0xa4, 0x29, 0x3f, 0x41, 0xca, 0x49, 0x0f, 0x4c, 0x96, 0xd8, 0xc6, 0xd0, 0x18, 0x59, - 0xae, 0xc9, 0x12, 0x42, 0xa0, 0x9e, 0xc4, 0x29, 0xb7, 0xcd, 0xa1, 0x31, 0xea, 0xba, 0x72, 0x4d, - 0x1e, 0x00, 0x24, 0xd3, 0x93, 0x90, 0xf9, 0xde, 0x34, 0x0d, 0xed, 0x9a, 0xb4, 0xb5, 0x14, 0xf2, - 0x2a, 0x0d, 0xc9, 0x08, 0xfa, 0x13, 0x3a, 0xf3, 0x2e, 0xe2, 0x70, 0x3a, 0x41, 0xcf, 0x8f, 0xa7, - 0x11, 0xb7, 0xeb, 0xf2, 0x78, 0x6f, 0x42, 0x67, 0xaf, 0x25, 0x3c, 0x16, 0x28, 0x19, 0x8a, 0xa8, - 0x66, 0xde, 0x29, 0x0b, 0xd1, 0x3b, 0xc7, 0xb9, 0xdd, 0x18, 0x1a, 0xa3, 0xba, 0x0b, 0x13, 0x3a, - 0xfb, 0x8a, 0x85, 0x78, 0x80, 0x73, 0xf2, 0x08, 0x3a, 0x01, 0xe5, 0xd4, 0xf3, 0x31, 0xe2, 0x98, - 0xda, 0x4d, 0x79, 0x17, 0x08, 0x68, 0x2c, 0x11, 0x11, 0x5f, 0x4a, 0xfd, 0x73, 0xbb, 0x25, 0x77, - 0xe4, 0x5a, 0xc4, 0x47, 0x83, 0x09, 0x8b, 0x3c, 0x19, 0x79, 0x5b, 0x5e, 0x6d, 0x49, 0xe4, 0x48, - 0x84, 0xff, 0x19, 0xb4, 
0x54, 0x6c, 0x99, 0x6d, 0x0d, 0x6b, 0xa3, 0xce, 0xee, 0x93, 0x9d, 0x82, - 0x8d, 0x1d, 0x15, 0xde, 0x7e, 0x74, 0x1a, 0xa7, 0x13, 0xca, 0x59, 0x1c, 0x7d, 0x83, 0x59, 0x46, - 0xcf, 0xd0, 0xcd, 0xcf, 0x90, 0x7b, 0xd0, 0x8e, 0xf0, 0xd2, 0xbb, 0x60, 0x41, 0x66, 0xc3, 0xb0, - 0x36, 0xea, 0xba, 0xad, 0x08, 0x2f, 0x5f, 0xb3, 0x20, 0x23, 0x8f, 0x61, 0x23, 0xc0, 0x10, 0x39, - 0x06, 0x6a, 0xbb, 0x23, 0xb7, 0x3b, 0x1a, 0x13, 0x26, 0xce, 0x2b, 0xd8, 0x2c, 0xc8, 0x76, 0x31, - 0x4b, 0xe2, 0x28, 0x43, 0x32, 0x82, 0xdb, 0xca, 0xfb, 0x31, 0x7b, 0x83, 0x87, 0x6c, 0xc2, 0xb8, - 0x54, 0xa0, 0xee, 0x56, 0x61, 0x72, 0x17, 0x9a, 0x21, 0xd2, 0x00, 0x53, 0x4d, 0xbb, 0xfe, 0x72, - 0xfe, 0x34, 0xc1, 0xbe, 0x2a, 0x74, 0xa9, 0x69, 0x20, 0x3d, 0x76, 0x5d, 0x93, 0x05, 0x82, 0xb3, - 0x8c, 0xbd, 0x41, 0xa9, 0x69, 0xdd, 0x95, 0x6b, 0xf2, 0x10, 0xc0, 0x8f, 0xc3, 0x10, 0x7d, 0x71, - 0x50, 0x3b, 0x2f, 0x21, 0x82, 0x53, 0x29, 0xd3, 0x42, 0xce, 0xba, 0x6b, 0x09, 0x44, 0x29, 0x59, - 0xbc, 0x5c, 0x1b, 0x28, 0x25, 0xf5, 0xcb, 0x95, 0xc9, 0x07, 0x40, 0x72, 0x72, 0x4e, 0xe6, 0x85, - 0x61, 0x53, 0x1a, 0xf6, 0xf5, 0xce, 0xb3, 0x79, 0x6e, 0x7d, 0x1f, 0xac, 0x14, 0x69, 0xe0, 0xc5, - 0x51, 0x38, 0x97, 0xe2, 0xb6, 0xdd, 0xb6, 0x00, 0x9e, 0x47, 0xe1, 0x9c, 0xbc, 0x0f, 0x9b, 0x29, - 0x26, 0x21, 0xf3, 0xa9, 0x97, 0x84, 0xd4, 0xc7, 0x09, 0x46, 0xb9, 0xce, 0x7d, 0xbd, 0x71, 0x94, - 0xe3, 0xc4, 0x86, 0xd6, 0x05, 0xa6, 0x99, 0x78, 0x96, 0x25, 0x4d, 0xf2, 0x4f, 0xd2, 0x87, 0x1a, - 0xe7, 0xa1, 0x0d, 0x12, 0x15, 0x4b, 0xa7, 0x05, 0x8d, 0xbd, 0x49, 0xc2, 0xe7, 0xce, 0x6f, 0x06, - 0xdc, 0x3e, 0x9e, 0x26, 0x98, 0x3e, 0x0b, 0x63, 0xff, 0x7c, 0x6f, 0xc6, 0x53, 0x4a, 0x9e, 0x43, - 0x0f, 0x53, 0x9a, 0x4d, 0x53, 0x11, 0x7b, 0xc0, 0xa2, 0x33, 0x49, 0x69, 0x67, 0x77, 0x54, 0x4a, - 0x9f, 0xca, 0x99, 0x9d, 0x3d, 0x75, 0x60, 0x2c, 0xed, 0xdd, 0x2e, 0x96, 0x3f, 0x07, 0xdf, 0x43, - 0x77, 0x69, 0x5f, 0x08, 0x23, 0x52, 0x5b, 0x4b, 0x25, 0xd7, 0x42, 0xf1, 0x84, 0xa6, 0x8c, 0xcf, - 0x75, 0x09, 0xea, 0x2f, 0x21, 0x88, 0xae, 0x30, 0x91, 0x69, 0x35, 0x99, 0x69, 0x96, 0x42, 0xf6, - 0x83, 0xcc, 0x79, 0x0f, 0xb6, 0xc6, 0x21, 0xc3, 0x88, 0x1f, 0xb2, 0x8c, 0x63, 0xe4, 0xe2, 0xcf, - 0x53, 0xcc, 0xb8, 0xb8, 0x21, 0xa2, 0x13, 0xd4, 0x05, 0x2e, 0xd7, 0xce, 0x2f, 0xd0, 0x53, 0xa9, - 0x73, 0x18, 0xfb, 0x32, 0x6f, 0x04, 0x31, 0xa2, 0xb2, 0x95, 0x91, 0x58, 0x56, 0x4a, 0xde, 0xac, - 0x96, 0x7c, 0xb9, 0x26, 0x6a, 0xd7, 0xd7, 0x44, 0x7d, 0xb5, 0x26, 0x5e, 0xc2, 0xd6, 0x61, 0x1c, - 0x9f, 0x4f, 0x13, 0x15, 0x46, 0x1e, 0xeb, 0xf2, 0x0b, 0x8d, 0x61, 0x4d, 0xdc, 0x59, 0xbc, 0xb0, - 0x92, 0xb1, 0x66, 0x35, 0x63, 0x9d, 0x7f, 0x0c, 0xd8, 0x5e, 0x76, 0xab, 0xab, 0xed, 0x47, 0xd8, - 0x2a, 0xfc, 0x7a, 0xa1, 0x7e, 0xb3, 0xba, 0xa0, 0xb3, 0xfb, 0xb4, 0x24, 0xe6, 0xba, 0xd3, 0x79, - 0x83, 0x08, 0x72, 0xb2, 0xdc, 0xcd, 0x8b, 0x0a, 0x92, 0x0d, 0x66, 0xd0, 0xaf, 0x9a, 0x89, 0x84, - 0x2e, 0x6e, 0xd5, 0xcc, 0xb6, 0xf3, 0x93, 0xe4, 0x23, 0xb0, 0x16, 0x81, 0x98, 0x32, 0x90, 0xad, - 0xa5, 0x40, 0xf4, 0x5d, 0x0b, 0x2b, 0xb2, 0x0d, 0x0d, 0x4c, 0xd3, 0x38, 0x6f, 0x04, 0xea, 0xc3, - 0xf9, 0x14, 0xda, 0xff, 0x59, 0x45, 0xe7, 0x2f, 0x03, 0xba, 0x5f, 0x64, 0x19, 0x3b, 0x2b, 0xd2, - 0x65, 0x1b, 0x1a, 0xaa, 0x4c, 0x55, 0x3b, 0x52, 0x1f, 0x64, 0x08, 0x1d, 0x5d, 0x65, 0x25, 0xea, - 0xcb, 0xd0, 0x8d, 0xdd, 0x44, 0x57, 0x5e, 0x5d, 0x85, 0xc6, 0x79, 0x58, 0x6d, 0xf4, 0x8d, 0x2b, - 0x1b, 0x7d, 0xb3, 0xd4, 0xe8, 0xef, 0x83, 0x25, 0x0f, 0x45, 0x71, 0x80, 0x7a, 0x02, 0xb4, 0x05, - 0xf0, 0x6d, 0x1c, 0xa0, 0xf3, 0xab, 0x01, 0xbd, 0xfc, 0x35, 0x5a, 0xf9, 0x3e, 0xd4, 0x4e, 0x0b, - 0xf6, 0xc5, 0x32, 0xe7, 0xc8, 0xbc, 0x8a, 0xa3, 
0x95, 0xe1, 0x56, 0x30, 0x52, 0x2f, 0x33, 0x52, - 0x88, 0xd1, 0x28, 0x89, 0x21, 0x42, 0xa6, 0x53, 0xfe, 0x53, 0x1e, 0xb2, 0x58, 0x3b, 0x67, 0xb0, - 0x79, 0xcc, 0x29, 0x67, 0x19, 0x67, 0x7e, 0x96, 0xd3, 0x5c, 0x21, 0xd4, 0xb8, 0x89, 0x50, 0xf3, - 0x2a, 0x42, 0x6b, 0x05, 0xa1, 0xce, 0x1f, 0x06, 0x90, 0xf2, 0x4d, 0x9a, 0x82, 0xff, 0xe1, 0x2a, - 0x41, 0x19, 0x8f, 0x39, 0x0d, 0x3d, 0x39, 0x55, 0xf4, 0x6c, 0x90, 0x88, 0x18, 0x5c, 0x42, 0xa5, - 0x69, 0x86, 0x81, 0xda, 0x55, 0x83, 0xa1, 0x2d, 0x00, 0xb9, 0xb9, 0x3c, 0x57, 0x9a, 0x95, 0xb9, - 0xe2, 0xbc, 0x04, 0x18, 0x2f, 0xae, 0x5e, 0xd3, 0xbd, 0xde, 0x22, 0x19, 0x57, 0xb9, 0x39, 0x80, - 0x3b, 0x0b, 0xaf, 0xa2, 0x41, 0xbe, 0xbd, 0x10, 0xda, 0x99, 0xb9, 0x70, 0xf6, 0x02, 0xee, 0x56, - 0x9d, 0x69, 0xae, 0x3f, 0x81, 0xce, 0x82, 0xb7, 0xbc, 0xc1, 0xdc, 0x29, 0xd5, 0xf5, 0xe2, 0x9c, - 0x5b, 0xb6, 0x74, 0x3e, 0x84, 0x77, 0x16, 0x5b, 0x5f, 0xca, 0x4e, 0x79, 0x5d, 0x03, 0x1f, 0x80, - 0xbd, 0x6a, 0xae, 0x62, 0xd8, 0xfd, 0xbd, 0x0e, 0xad, 0x63, 0xa4, 0x97, 0x88, 0x01, 0xd9, 0x87, - 0xee, 0x31, 0x46, 0xc1, 0xe2, 0xc7, 0xde, 0x76, 0x29, 0x96, 0x02, 0x1d, 0xbc, 0xbb, 0x0e, 0xcd, - 0x1d, 0x3a, 0xb7, 0x46, 0xc6, 0x53, 0x83, 0x1c, 0x41, 0xf7, 0x00, 0x31, 0x19, 0xc7, 0x51, 0x84, - 0x3e, 0xc7, 0x80, 0x3c, 0x2c, 0x3f, 0x6b, 0x75, 0xf0, 0x0c, 0xee, 0xad, 0xfc, 0xc6, 0xca, 0xfb, - 0x94, 0xf6, 0xf8, 0x02, 0x36, 0xca, 0xfd, 0x76, 0xc9, 0xe1, 0x9a, 0xe9, 0x30, 0x78, 0x74, 0x43, - 0xa3, 0x76, 0x6e, 0x91, 0xcf, 0xa1, 0xa9, 0x1a, 0x00, 0xb1, 0x4b, 0xc6, 0x4b, 0x1d, 0x6e, 0x29, - 0xae, 0xe5, 0x6e, 0xe1, 0xdc, 0x22, 0x07, 0x00, 0x8b, 0x12, 0x22, 0x65, 0x5e, 0x56, 0x6a, 0x78, - 0xf0, 0xe0, 0x8a, 0xdd, 0xc2, 0xd9, 0x77, 0xd0, 0x5b, 0xce, 0x13, 0x32, 0x5c, 0x9b, 0x0a, 0xa5, - 0x7c, 0x1c, 0x3c, 0xbe, 0xc6, 0xa2, 0x70, 0xfc, 0x03, 0xf4, 0xab, 0xf2, 0x13, 0x67, 0xed, 0xc1, - 0xa5, 0x54, 0x1a, 0x3c, 0xb9, 0xd6, 0x26, 0x77, 0x7f, 0xd2, 0x94, 0xff, 0x18, 0x3e, 0xfe, 0x37, - 0x00, 0x00, 0xff, 0xff, 0xb1, 0x11, 0x1b, 0x49, 0x41, 0x0c, 0x00, 0x00, + // 1394 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, + 0x13, 0x37, 0xa9, 0x87, 0xc5, 0xd1, 0xc3, 0xd2, 0xda, 0x49, 0x18, 0xe5, 0x9f, 0x44, 0x61, 0x2e, + 0xfa, 0xbf, 0x8c, 0xd4, 0x3d, 0xf4, 0xd0, 0x16, 0x41, 0xe2, 0x38, 0x68, 0x10, 0xb7, 0x49, 0xe8, + 0x24, 0x05, 0x0a, 0x14, 0xea, 0x9a, 0x1c, 0xbb, 0x84, 0x29, 0x92, 0x25, 0x57, 0x8e, 0x95, 0x4b, + 0x2f, 0x3d, 0x16, 0xed, 0xa1, 0xdf, 0xa7, 0x97, 0xf6, 0xd6, 0x8f, 0xd2, 0x5b, 0xef, 0x05, 0x8a, + 0x7d, 0x90, 0x5a, 0x52, 0xb2, 0x13, 0x14, 0xe8, 0x21, 0xb7, 0xdd, 0x99, 0xd9, 0xdd, 0xe1, 0xef, + 0x37, 0x2f, 0x09, 0x3a, 0x53, 0x9a, 0x31, 0x4c, 0xb7, 0x93, 0x34, 0x66, 0x31, 0xb1, 0xe4, 0x6e, + 0x92, 0x1c, 0x3a, 0xbf, 0x9b, 0x60, 0x7d, 0x82, 0x34, 0x65, 0x87, 0x48, 0x19, 0xe9, 0x81, 0x19, + 0x24, 0xb6, 0x31, 0x32, 0xc6, 0x96, 0x6b, 0x06, 0x09, 0x21, 0x50, 0x4f, 0xe2, 0x94, 0xd9, 0xe6, + 0xc8, 0x18, 0x77, 0x5d, 0xb1, 0x26, 0xd7, 0x01, 0x92, 0xd9, 0x61, 0x18, 0x78, 0x93, 0x59, 0x1a, + 0xda, 0x35, 0x61, 0x6b, 0x49, 0xc9, 0x8b, 0x34, 0x24, 0x63, 0xe8, 0x4f, 0xe9, 0xd9, 0xe4, 0x34, + 0x0e, 0x67, 0x53, 0x9c, 0x78, 0xf1, 0x2c, 0x62, 0x76, 0x5d, 0x1c, 0xef, 0x4d, 0xe9, 0xd9, 0x4b, + 0x21, 0xde, 0xe5, 0x52, 0x32, 0xe2, 0x5e, 0x9d, 0x4d, 0x8e, 0x82, 0x10, 0x27, 0x27, 0x38, 0xb7, + 0x1b, 0x23, 0x63, 0x5c, 0x77, 0x61, 0x4a, 0xcf, 0x1e, 0x06, 0x21, 0x3e, 0xc6, 0x39, 0xb9, 0x09, + 0x6d, 0x9f, 0x32, 0x3a, 0xf1, 0x30, 0x62, 0x98, 0xda, 0x4d, 0xf1, 0x16, 0x70, 0xd1, 0xae, 0x90, + 0x70, 0xff, 0x52, 0xea, 0x9d, 0xd8, 0xeb, 0x42, 
0x23, 0xd6, 0xdc, 0x3f, 0xea, 0x4f, 0x83, 0x68, + 0x22, 0x3c, 0x6f, 0x89, 0xa7, 0x2d, 0x21, 0x79, 0xca, 0xdd, 0xff, 0x18, 0xd6, 0xa5, 0x6f, 0x99, + 0x6d, 0x8d, 0x6a, 0xe3, 0xf6, 0xce, 0xed, 0xed, 0x02, 0x8d, 0x6d, 0xe9, 0xde, 0xa3, 0xe8, 0x28, + 0x4e, 0xa7, 0x94, 0x05, 0x71, 0xf4, 0x29, 0x66, 0x19, 0x3d, 0x46, 0x37, 0x3f, 0x43, 0xae, 0x42, + 0x2b, 0xc2, 0x57, 0x93, 0xd3, 0xc0, 0xcf, 0x6c, 0x18, 0xd5, 0xc6, 0x5d, 0x77, 0x3d, 0xc2, 0x57, + 0x2f, 0x03, 0x3f, 0x23, 0xb7, 0xa0, 0xe3, 0x63, 0x88, 0x0c, 0x7d, 0xa9, 0x6e, 0x0b, 0x75, 0x5b, + 0xc9, 0xb8, 0x89, 0xf3, 0x02, 0x06, 0x05, 0xd8, 0x2e, 0x66, 0x49, 0x1c, 0x65, 0x48, 0xc6, 0xb0, + 0x21, 0x6f, 0x3f, 0x08, 0x5e, 0xe3, 0x7e, 0x30, 0x0d, 0x98, 0x60, 0xa0, 0xee, 0x56, 0xc5, 0xe4, + 0x32, 0x34, 0x43, 0xa4, 0x3e, 0xa6, 0x0a, 0x76, 0xb5, 0x73, 0x7e, 0x35, 0xc1, 0x3e, 0xcf, 0x75, + 0xc1, 0xa9, 0x2f, 0x6e, 0xec, 0xba, 0x66, 0xe0, 0x73, 0xcc, 0xb2, 0xe0, 0x35, 0x0a, 0x4e, 0xeb, + 0xae, 0x58, 0x93, 0x1b, 0x00, 0x5e, 0x1c, 0x86, 0xe8, 0xf1, 0x83, 0xea, 0x72, 0x4d, 0xc2, 0x31, + 0x15, 0x34, 0x2d, 0xe8, 0xac, 0xbb, 0x16, 0x97, 0x48, 0x26, 0x8b, 0x2f, 0x57, 0x06, 0x92, 0x49, + 0xf5, 0xe5, 0xd2, 0xe4, 0x7f, 0x40, 0x72, 0x70, 0x0e, 0xe7, 0x85, 0x61, 0x53, 0x18, 0xf6, 0x95, + 0xe6, 0xfe, 0x3c, 0xb7, 0xbe, 0x06, 0x56, 0x8a, 0xd4, 0x9f, 0xc4, 0x51, 0x38, 0x17, 0xe4, 0xb6, + 0xdc, 0x16, 0x17, 0x3c, 0x89, 0xc2, 0x39, 0xf9, 0x2f, 0x0c, 0x52, 0x4c, 0xc2, 0xc0, 0xa3, 0x93, + 0x24, 0xa4, 0x1e, 0x4e, 0x31, 0xca, 0x79, 0xee, 0x2b, 0xc5, 0xd3, 0x5c, 0x4e, 0x6c, 0x58, 0x3f, + 0xc5, 0x34, 0xe3, 0x9f, 0x65, 0x09, 0x93, 0x7c, 0x4b, 0xfa, 0x50, 0x63, 0x2c, 0xb4, 0x41, 0x48, + 0xf9, 0xd2, 0x59, 0x87, 0xc6, 0xde, 0x34, 0x61, 0x73, 0xe7, 0x67, 0x03, 0x36, 0x0e, 0x66, 0x09, + 0xa6, 0xf7, 0xc3, 0xd8, 0x3b, 0xd9, 0x3b, 0x63, 0x29, 0x25, 0x4f, 0xa0, 0x87, 0x29, 0xcd, 0x66, + 0x29, 0xf7, 0xdd, 0x0f, 0xa2, 0x63, 0x01, 0x69, 0x7b, 0x67, 0xac, 0x85, 0x4f, 0xe5, 0xcc, 0xf6, + 0x9e, 0x3c, 0xb0, 0x2b, 0xec, 0xdd, 0x2e, 0xea, 0xdb, 0xe1, 0x17, 0xd0, 0x2d, 0xe9, 0x39, 0x31, + 0x3c, 0xb4, 0x15, 0x55, 0x62, 0xcd, 0x19, 0x4f, 0x68, 0x1a, 0xb0, 0xb9, 0x4a, 0x41, 0xb5, 0xe3, + 0x84, 0xa8, 0x0c, 0xe3, 0x91, 0x56, 0x13, 0x91, 0x66, 0x49, 0xc9, 0x23, 0x3f, 0x73, 0xfe, 0x0d, + 0x9b, 0xbb, 0x61, 0x80, 0x11, 0xdb, 0x0f, 0x32, 0x86, 0x91, 0x8b, 0xdf, 0xcc, 0x30, 0x63, 0xfc, + 0x85, 0x88, 0x4e, 0x51, 0x25, 0xb8, 0x58, 0x3b, 0xdf, 0x42, 0x4f, 0x86, 0xce, 0x7e, 0xec, 0x89, + 0xb8, 0xe1, 0xc0, 0xf0, 0xcc, 0x96, 0x46, 0x7c, 0x59, 0x49, 0x79, 0xb3, 0x9a, 0xf2, 0x7a, 0x4e, + 0xd4, 0x2e, 0xce, 0x89, 0xfa, 0x72, 0x4e, 0x3c, 0x87, 0xcd, 0xfd, 0x38, 0x3e, 0x99, 0x25, 0xd2, + 0x8d, 0xdc, 0xd7, 0xf2, 0x17, 0x1a, 0xa3, 0x1a, 0x7f, 0xb3, 0xf8, 0xc2, 0x4a, 0xc4, 0x9a, 0xd5, + 0x88, 0x75, 0xfe, 0x30, 0x60, 0xab, 0x7c, 0xad, 0xca, 0xb6, 0xaf, 0x60, 0xb3, 0xb8, 0x77, 0x12, + 0xaa, 0x6f, 0x96, 0x0f, 0xb4, 0x77, 0xee, 0x68, 0x64, 0xae, 0x3a, 0x9d, 0x17, 0x08, 0x3f, 0x07, + 0xcb, 0x1d, 0x9c, 0x56, 0x24, 0xd9, 0xf0, 0x0c, 0xfa, 0x55, 0x33, 0x1e, 0xd0, 0xc5, 0xab, 0x0a, + 0xd9, 0x56, 0x7e, 0x92, 0xbc, 0x07, 0xd6, 0xc2, 0x11, 0x53, 0x38, 0xb2, 0x59, 0x72, 0x44, 0xbd, + 0xb5, 0xb0, 0x22, 0x5b, 0xd0, 0xc0, 0x34, 0x8d, 0xf3, 0x42, 0x20, 0x37, 0xce, 0x87, 0xd0, 0xfa, + 0xdb, 0x2c, 0x3a, 0xbf, 0x19, 0xd0, 0xbd, 0x97, 0x65, 0xc1, 0x71, 0x11, 0x2e, 0x5b, 0xd0, 0x90, + 0x69, 0x2a, 0xcb, 0x91, 0xdc, 0x90, 0x11, 0xb4, 0x55, 0x96, 0x69, 0xd0, 0xeb, 0xa2, 0x37, 0x56, + 0x13, 0x95, 0x79, 0x75, 0xe9, 0x1a, 0x63, 0x61, 0xb5, 0xd0, 0x37, 0xce, 0x2d, 0xf4, 0x4d, 0xad, + 0xd0, 0x5f, 0x03, 0x4b, 0x1c, 0x8a, 0x62, 0x1f, 0x55, 0x07, 0x68, 0x71, 
0xc1, 0x67, 0xb1, 0x8f, + 0xce, 0x4f, 0x06, 0xf4, 0xf2, 0xaf, 0x51, 0xcc, 0xf7, 0xa1, 0x76, 0x54, 0xa0, 0xcf, 0x97, 0x39, + 0x46, 0xe6, 0x79, 0x18, 0x2d, 0x35, 0xb7, 0x02, 0x91, 0xba, 0x8e, 0x48, 0x41, 0x46, 0x43, 0x23, + 0x83, 0xbb, 0x4c, 0x67, 0xec, 0xeb, 0xdc, 0x65, 0xbe, 0x76, 0x8e, 0x61, 0x70, 0xc0, 0x28, 0x0b, + 0x32, 0x16, 0x78, 0x59, 0x0e, 0x73, 0x05, 0x50, 0xe3, 0x4d, 0x80, 0x9a, 0xe7, 0x01, 0x5a, 0x2b, + 0x00, 0x75, 0x7e, 0x31, 0x80, 0xe8, 0x2f, 0x29, 0x08, 0xfe, 0x81, 0xa7, 0x38, 0x64, 0x2c, 0x66, + 0x34, 0x9c, 0x88, 0xae, 0xa2, 0x7a, 0x83, 0x90, 0xf0, 0xc6, 0xc5, 0x59, 0x9a, 0x65, 0xe8, 0x4b, + 0xad, 0x6c, 0x0c, 0x2d, 0x2e, 0x10, 0xca, 0x72, 0x5f, 0x69, 0x56, 0xfa, 0x8a, 0x73, 0x0f, 0xda, + 0x07, 0x2c, 0x4e, 0xe9, 0x31, 0x3e, 0x9f, 0x27, 0x6f, 0xe3, 0xbd, 0xf2, 0xce, 0x5c, 0x00, 0x31, + 0x02, 0xd8, 0x5d, 0x78, 0xbf, 0xaa, 0x00, 0x5e, 0x81, 0x4b, 0x0b, 0x0b, 0x5e, 0x2f, 0x15, 0x2f, + 0xce, 0x33, 0xb8, 0x5c, 0x55, 0x28, 0x18, 0x3f, 0x80, 0xf6, 0x02, 0x92, 0xbc, 0x76, 0x5c, 0xd2, + 0x52, 0x76, 0x71, 0xce, 0xd5, 0x2d, 0x9d, 0xff, 0xc3, 0x95, 0x85, 0xea, 0x81, 0x28, 0x82, 0x17, + 0xd5, 0xe6, 0x21, 0xd8, 0xcb, 0xe6, 0xd2, 0x07, 0xe7, 0x47, 0x13, 0x3a, 0x0f, 0x54, 0xb4, 0xf3, + 0xae, 0xaf, 0xf5, 0x79, 0x4b, 0xf4, 0xf9, 0x5b, 0xd0, 0x29, 0x0d, 0x61, 0xb2, 0xdf, 0xb7, 0x4f, + 0xb5, 0x09, 0x6c, 0xd5, 0xac, 0x56, 0x13, 0x66, 0xd5, 0x59, 0xed, 0x3f, 0x30, 0x38, 0x4a, 0x11, + 0x97, 0xc7, 0xba, 0xba, 0xbb, 0xc1, 0x15, 0xba, 0xed, 0x36, 0x6c, 0x52, 0x8f, 0x05, 0xa7, 0x15, + 0x6b, 0xc9, 0xfd, 0x40, 0xaa, 0x74, 0xfb, 0x87, 0x85, 0xa3, 0x41, 0x74, 0x14, 0x67, 0x76, 0xf3, + 0xed, 0xc7, 0x32, 0xf5, 0x35, 0x5c, 0x93, 0x39, 0xdf, 0x99, 0xd0, 0x72, 0xa9, 0x77, 0xf2, 0x6e, + 0xa3, 0x71, 0x17, 0x36, 0x8a, 0xaa, 0x56, 0x02, 0xe4, 0x8a, 0x06, 0x88, 0x4e, 0xbc, 0xdb, 0xf5, + 0xb5, 0x5d, 0xe6, 0xfc, 0x69, 0x40, 0xef, 0x41, 0x51, 0x39, 0xdf, 0x6d, 0x30, 0x76, 0x00, 0x78, + 0xa9, 0x2f, 0xe1, 0xa0, 0xb7, 0xc6, 0x9c, 0x6e, 0xd7, 0x4a, 0xd5, 0x2a, 0x73, 0x7e, 0x30, 0xa1, + 0xf3, 0x3c, 0x4e, 0xe2, 0x30, 0x3e, 0x9e, 0xbf, 0xdb, 0x5f, 0xbf, 0x07, 0x03, 0xad, 0x2b, 0x96, + 0x40, 0xb8, 0x5a, 0x09, 0x86, 0x05, 0xd9, 0xee, 0x86, 0x5f, 0xda, 0x67, 0xce, 0x26, 0x0c, 0xd4, + 0x84, 0xa7, 0x15, 0x37, 0x17, 0x88, 0x2e, 0x54, 0x85, 0xed, 0x23, 0xe8, 0x32, 0x05, 0x9d, 0x78, + 0x4e, 0xcd, 0xb8, 0x7a, 0xe8, 0xe9, 0xd0, 0xba, 0x1d, 0xa6, 0xed, 0x76, 0xbe, 0x6f, 0xc0, 0xfa, + 0x01, 0xd2, 0x57, 0x88, 0x3e, 0x79, 0x04, 0xdd, 0x03, 0x8c, 0xfc, 0xc5, 0x4f, 0xcb, 0x2d, 0xed, + 0x8e, 0x42, 0x3a, 0xfc, 0xd7, 0x2a, 0x69, 0x51, 0xe3, 0xd6, 0xc6, 0xc6, 0x1d, 0x83, 0x3c, 0x85, + 0xee, 0x63, 0xc4, 0x64, 0x37, 0x8e, 0x22, 0xf4, 0x18, 0xfa, 0xe4, 0x86, 0x5e, 0x69, 0x97, 0xc7, + 0xdc, 0xe1, 0xd5, 0xa5, 0xd2, 0x91, 0x4f, 0x45, 0xea, 0xc6, 0x67, 0xd0, 0xd1, 0xa7, 0xbb, 0xd2, + 0x85, 0x2b, 0x66, 0xd1, 0xe1, 0xcd, 0x37, 0x8c, 0x85, 0xce, 0x1a, 0xb9, 0x0b, 0x4d, 0x39, 0x6e, + 0x10, 0x5b, 0x33, 0x2e, 0xcd, 0x53, 0x25, 0xbf, 0xca, 0xb3, 0x89, 0xb3, 0x46, 0x1e, 0x03, 0x2c, + 0x1a, 0x36, 0xd1, 0x71, 0x59, 0x9a, 0x18, 0x86, 0xd7, 0xcf, 0xd1, 0x16, 0x97, 0x7d, 0x0e, 0xbd, + 0x72, 0xeb, 0x22, 0xa3, 0x95, 0xdd, 0x49, 0x8b, 0x88, 0xe1, 0xad, 0x0b, 0x2c, 0x8a, 0x8b, 0xbf, + 0x84, 0x7e, 0xb5, 0x23, 0x11, 0x67, 0xe5, 0xc1, 0x52, 0x77, 0x1b, 0xde, 0xbe, 0xd0, 0x46, 0x07, + 0x61, 0x11, 0x95, 0x25, 0x10, 0x96, 0x22, 0xb8, 0x04, 0xc2, 0x72, 0x28, 0x3b, 0x6b, 0x87, 0x4d, + 0xf1, 0x67, 0xc7, 0xfb, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x50, 0x28, 0x66, 0xfc, 0x10, + 0x00, 0x00, } diff --git a/weed/server/master_grpc_server_volume.go 
b/weed/server/master_grpc_server_volume.go index 13f8b37d1..7b8efb933 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -126,3 +126,16 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic return resp, nil } + +func (ms *MasterServer) VolumeList(ctx context.Context, req *master_pb.VolumeListRequest) (*master_pb.VolumeListResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.VolumeListResponse{ + TopologyInfo: ms.Topo.ToTopologyInfo(), + } + + return resp, nil +} diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index f5ed6cfa6..34a406d67 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -30,7 +30,7 @@ func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer } for _, c := range resp.Collections { - fmt.Fprintf(writer, "collection:\"%s\"\treplication:\"%s\"\tTTL:\"%s\"\n", c.GetName(), c.GetReplication(), c.GetTtl()) + fmt.Fprintf(writer, "collection:\"%s\"\n", c.GetName()) } fmt.Fprintf(writer, "Total %d collections.\n", len(resp.Collections)) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go new file mode 100644 index 000000000..1d921e4b4 --- /dev/null +++ b/weed/shell/command_volume_list.go @@ -0,0 +1,64 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "io" +) + +func init() { + commands = append(commands, &commandVolumeList{}) +} + +type commandVolumeList struct { +} + +func (c *commandVolumeList) Name() string { + return "volume.list" +} + +func (c *commandVolumeList) Help() string { + return "# list all volumes" +} + +func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io.Writer) error { + + resp, err := commandEnv.masterClient.VolumeList(context.Background()) + + if err != nil { + return err + } + + writeTopologyInfo(writer,resp.TopologyInfo) + + return nil +} + +func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo) { + fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + for _, dc := range t.DataCenterInfos { + writeDataCenterInfo(writer, dc) + } +} +func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) { + fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + for _, r := range t.RackInfos { + writeRackInfo(writer, r) + } +} +func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) { + fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + for _, dn := range t.DataNodeInfos { + writeDataNodeInfo(writer, dn) + } +} +func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) { + fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + for _, vi := range t.VolumeInfos { + writeVolumeInformationMessage(writer, vi) + } +} +func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) { + fmt.Fprintf(writer, " volume %+v \n", t) +} diff --git a/weed/storage/store.go b/weed/storage/store.go index 96c819666..a29f3c163 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -144,24 +144,12 
@@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { for _, location := range s.Locations { maxVolumeCount = maxVolumeCount + location.MaxVolumeCount location.Lock() - for k, v := range location.volumes { + for _, v := range location.volumes { if maxFileKey < v.nm.MaxFileKey() { maxFileKey = v.nm.MaxFileKey() } if !v.expired(s.VolumeSizeLimit) { - volumeMessage := &master_pb.VolumeInformationMessage{ - Id: uint32(k), - Size: uint64(v.Size()), - Collection: v.Collection, - FileCount: uint64(v.nm.FileCount()), - DeleteCount: uint64(v.nm.DeletedCount()), - DeletedByteCount: v.nm.DeletedSize(), - ReadOnly: v.readOnly, - ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), - Version: uint32(v.Version()), - Ttl: v.Ttl.ToUint32(), - } - volumeMessages = append(volumeMessages, volumeMessage) + volumeMessages = append(volumeMessages, v.ToVolumeInformationMessage()) } else { if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) { location.deleteVolumeById(v.Id) diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 07c72ecb4..5cec0c5ed 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "os" "path" "sync" @@ -134,3 +135,18 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool { } return false } + +func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage { + return &master_pb.VolumeInformationMessage{ + Id: uint32(v.Id), + Size: uint64(v.Size()), + Collection: v.Collection, + FileCount: uint64(v.nm.FileCount()), + DeleteCount: uint64(v.nm.DeletedCount()), + DeletedByteCount: v.nm.DeletedSize(), + ReadOnly: v.readOnly, + ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), + Version: uint32(v.Version()), + Ttl: v.Ttl.ToUint32(), + } +} diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go index f6614a9de..450100e59 100644 --- a/weed/storage/volume_info.go +++ b/weed/storage/volume_info.go @@ -45,6 +45,22 @@ func (vi VolumeInfo) String() string { vi.Id, vi.Size, vi.ReplicaPlacement, vi.Collection, vi.Version, vi.FileCount, vi.DeleteCount, vi.DeletedByteCount, vi.ReadOnly) } +func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage { + return &master_pb.VolumeInformationMessage{ + Id: uint32(vi.Id), + Size: uint64(vi.Size), + Collection: vi.Collection, + FileCount: uint64(vi.FileCount), + DeleteCount: uint64(vi.DeleteCount), + DeletedByteCount: vi.DeletedByteCount, + ReadOnly: vi.ReadOnly, + ReplicaPlacement: uint32(vi.ReplicaPlacement.Byte()), + Version: uint32(vi.Version), + Ttl: vi.Ttl.ToUint32(), + } +} + + /*VolumesInfo sorting*/ type volumeInfos []*VolumeInfo diff --git a/weed/topology/data_center.go b/weed/topology/data_center.go index bcf2dfd31..640cb1937 100644 --- a/weed/topology/data_center.go +++ b/weed/topology/data_center.go @@ -1,5 +1,7 @@ package topology +import "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + type DataCenter struct { NodeImpl } @@ -38,3 +40,18 @@ func (dc *DataCenter) ToMap() interface{} { m["Racks"] = racks return m } + +func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo { + m := &master_pb.DataCenterInfo{ + Id: string(dc.Id()), + VolumeCount: uint64(dc.GetVolumeCount()), + MaxVolumeCount: uint64(dc.GetMaxVolumeCount()), + FreeVolumeCount: uint64(dc.FreeSpace()), + ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()), + } + for _, c := range dc.Children() { + rack := c.(*Rack) + m.RackInfos = append(m.RackInfos, rack.ToRackInfo()) + } + 
return m +} diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go index 6ea6d3938..84304512f 100644 --- a/weed/topology/data_node.go +++ b/weed/topology/data_node.go @@ -2,6 +2,7 @@ package topology import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "strconv" "github.com/chrislusf/seaweedfs/weed/glog" @@ -128,3 +129,17 @@ func (dn *DataNode) ToMap() interface{} { ret["PublicUrl"] = dn.PublicUrl return ret } + +func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo { + m := &master_pb.DataNodeInfo{ + Id: string(dn.Id()), + VolumeCount: uint64(dn.GetVolumeCount()), + MaxVolumeCount: uint64(dn.GetMaxVolumeCount()), + FreeVolumeCount: uint64(dn.FreeSpace()), + ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()), + } + for _, v := range dn.GetVolumes() { + m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage()) + } + return m +} diff --git a/weed/topology/rack.go b/weed/topology/rack.go index a48d64323..f8f8ce34a 100644 --- a/weed/topology/rack.go +++ b/weed/topology/rack.go @@ -1,6 +1,7 @@ package topology import ( + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "strconv" "time" ) @@ -58,3 +59,18 @@ func (r *Rack) ToMap() interface{} { m["DataNodes"] = dns return m } + +func (r *Rack) ToRackInfo() *master_pb.RackInfo { + m := &master_pb.RackInfo{ + Id: string(r.Id()), + VolumeCount: uint64(r.GetVolumeCount()), + MaxVolumeCount: uint64(r.GetMaxVolumeCount()), + FreeVolumeCount: uint64(r.FreeSpace()), + ActiveVolumeCount: uint64(r.GetActiveVolumeCount()), + } + for _, c := range r.Children() { + dn := c.(*DataNode) + m.DataNodeInfos = append(m.DataNodeInfos, dn.ToDataNodeInfo()) + } + return m +} diff --git a/weed/topology/topology_map.go b/weed/topology/topology_map.go index 769ba0e2a..7db11ea14 100644 --- a/weed/topology/topology_map.go +++ b/weed/topology/topology_map.go @@ -74,3 +74,18 @@ func (t *Topology) ToVolumeLocations() (volumeLocations []*master_pb.VolumeLocat } return } + +func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo { + m := &master_pb.TopologyInfo{ + Id: string(t.Id()), + VolumeCount: uint64(t.GetVolumeCount()), + MaxVolumeCount: uint64(t.GetMaxVolumeCount()), + FreeVolumeCount: uint64(t.FreeSpace()), + ActiveVolumeCount: uint64(t.GetActiveVolumeCount()), + } + for _, c := range t.Children() { + dc := c.(*DataCenter) + m.DataCenterInfos = append(m.DataCenterInfos, dc.ToDataCenterInfo()) + } + return m +} diff --git a/weed/wdclient/masterclient_collection.go b/weed/wdclient/masterclient_collection.go index bdf791da0..1a9215a7e 100644 --- a/weed/wdclient/masterclient_collection.go +++ b/weed/wdclient/masterclient_collection.go @@ -21,3 +21,11 @@ func (mc *MasterClient) CollectionList(ctx context.Context) (resp *master_pb.Col }) return } + +func (mc *MasterClient) VolumeList(ctx context.Context) (resp *master_pb.VolumeListResponse, err error) { + err = withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + return +} From 2fcc88116eafbf88a1d757a924cefe66cf8ffc74 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 17 Mar 2019 20:27:36 -0700 Subject: [PATCH 075/450] go fmt --- weed/shell/command_volume_list.go | 2 +- weed/storage/volume_info.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 1d921e4b4..971e18f10 100644 --- a/weed/shell/command_volume_list.go +++ 
b/weed/shell/command_volume_list.go @@ -30,7 +30,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io. return err } - writeTopologyInfo(writer,resp.TopologyInfo) + writeTopologyInfo(writer, resp.TopologyInfo) return nil } diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go index 450100e59..7afad412d 100644 --- a/weed/storage/volume_info.go +++ b/weed/storage/volume_info.go @@ -60,7 +60,6 @@ func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMe } } - /*VolumesInfo sorting*/ type volumeInfos []*VolumeInfo From 8b26d1574005a993659245599d3db40d3308e011 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 17 Mar 2019 22:32:01 -0700 Subject: [PATCH 076/450] refactoring: simplify function parameter --- weed/storage/disk_location.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index 9589d9281..cd81b2210 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -63,14 +63,7 @@ func (l *DiskLocation) loadExistingVolume(dir os.FileInfo, needleMapKind NeedleM } } -func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrentFlag bool) { - var concurrency int - if concurrentFlag { - //You could choose a better optimized concurency value after testing at your environment - concurrency = 10 - } else { - concurrency = 1 - } +func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrency int) { task_queue := make(chan os.FileInfo, 10*concurrency) go func() { @@ -101,7 +94,7 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { l.Lock() defer l.Unlock() - l.concurrentLoadingVolumes(needleMapKind, true) + l.concurrentLoadingVolumes(needleMapKind, 10) glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount) } From 44647a46c0dc82350cc994da0784f3c3936270d6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 17 Mar 2019 23:28:43 -0700 Subject: [PATCH 077/450] needle scanner read appendAtNs --- unmaintained/see_dat/see_dat.go | 6 ++++-- weed/storage/needle_read_write.go | 6 ++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go index f79c0a6a9..28d6447d6 100644 --- a/unmaintained/see_dat/see_dat.go +++ b/unmaintained/see_dat/see_dat.go @@ -4,6 +4,7 @@ import ( "flag" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "time" ) var ( @@ -22,11 +23,12 @@ func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.Supe } func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { - return false + return true } func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *storage.Needle, offset int64) error { - glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie) + t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second)) + glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x appendedAt %v", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie, t) return nil } diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go index c99395f8b..e69d70dc3 100644 --- a/weed/storage/needle_read_write.go +++ b/weed/storage/needle_read_write.go @@ -283,6 +283,7 @@ func NeedleBodyLength(needleSize uint32, version Version) int64 { //n should be a needle already read 
the header //the input stream will read until next file entry func (n *Needle) ReadNeedleBody(r *os.File, version Version, offset int64, bodyLength int64) (err error) { + if bodyLength <= 0 { return nil } @@ -301,6 +302,11 @@ func (n *Needle) ReadNeedleBody(r *os.File, version Version, offset int64, bodyL } n.readNeedleDataVersion2(bytes[0:n.Size]) n.Checksum = NewCRC(n.Data) + + if version == Version3 { + tsOffset := n.Size+NeedleChecksumSize + n.AppendAtNs = util.BytesToUint64(bytes[tsOffset : tsOffset+TimestampSize]) + } default: err = fmt.Errorf("Unsupported Version! (%d)", version) } From 104922a3db5068e0192ce2597cdc43714a474412 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Mar 2019 00:35:15 -0700 Subject: [PATCH 078/450] text wrapping --- weed/server/filer_server_handlers_write_autochunk.go | 10 ++++++---- weed/server/volume_server_handlers_write.go | 3 +-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index d1e1e7a09..112f46c0c 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -19,7 +19,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string) bool { +func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, + replication string, collection string, dataCenter string) bool { if r.Method != "POST" { glog.V(4).Infoln("AutoChunking not supported for method", r.Method) return false @@ -64,7 +65,8 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * return true } -func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { +func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, + contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { @@ -177,8 +179,8 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r return } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { - err = nil +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, + chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 6b78cea40..9fb252eb7 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -41,8 +41,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { } ret := operation.UploadResult{} - _, errorStatus := topology.ReplicatedWrite(vs.GetMaster(), - vs.store, volumeId, needle, r) + _, 
errorStatus := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) httpStatus := http.StatusCreated if errorStatus != "" { httpStatus = http.StatusInternalServerError From ece9d133122fd15faa4efd1b4cec34974e3b37d0 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 18 Mar 2019 09:32:21 -0700 Subject: [PATCH 079/450] volume info collect compact revision number --- weed/pb/master.proto | 1 + weed/pb/master_pb/master.pb.go | 187 +++++++++++++++++---------------- weed/storage/store.go | 4 +- weed/storage/volume.go | 1 + weed/storage/volume_info.go | 3 + 5 files changed, 106 insertions(+), 90 deletions(-) diff --git a/weed/pb/master.proto b/weed/pb/master.proto index ad530d909..ff47a16d4 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -56,6 +56,7 @@ message VolumeInformationMessage { uint32 replica_placement = 8; uint32 version = 9; uint32 ttl = 10; + uint32 compact_revision = 11; } message Empty { diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index f24354cd7..a586d11b3 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -190,6 +190,7 @@ type VolumeInformationMessage struct { ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` + CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` } func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } @@ -267,6 +268,13 @@ func (m *VolumeInformationMessage) GetTtl() uint32 { return 0 } +func (m *VolumeInformationMessage) GetCompactRevision() uint32 { + if m != nil { + return m.CompactRevision + } + return 0 +} + type Empty struct { } @@ -1428,93 +1436,94 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1394 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, - 0x13, 0x37, 0xa9, 0x87, 0xc5, 0xd1, 0xc3, 0xd2, 0xda, 0x49, 0x18, 0xe5, 0x9f, 0x44, 0x61, 0x2e, - 0xfa, 0xbf, 0x8c, 0xd4, 0x3d, 0xf4, 0xd0, 0x16, 0x41, 0xe2, 0x38, 0x68, 0x10, 0xb7, 0x49, 0xe8, - 0x24, 0x05, 0x0a, 0x14, 0xea, 0x9a, 0x1c, 0xbb, 0x84, 0x29, 0x92, 0x25, 0x57, 0x8e, 0x95, 0x4b, - 0x2f, 0x3d, 0x16, 0xed, 0xa1, 0xdf, 0xa7, 0x97, 0xf6, 0xd6, 0x8f, 0xd2, 0x5b, 0xef, 0x05, 0x8a, - 0x7d, 0x90, 0x5a, 0x52, 0xb2, 0x13, 0x14, 0xe8, 0x21, 0xb7, 0xdd, 0x99, 0xd9, 0xdd, 0xe1, 0xef, - 0x37, 0x2f, 0x09, 0x3a, 0x53, 0x9a, 0x31, 0x4c, 0xb7, 0x93, 0x34, 0x66, 0x31, 0xb1, 0xe4, 0x6e, - 0x92, 0x1c, 0x3a, 0xbf, 0x9b, 0x60, 0x7d, 0x82, 0x34, 0x65, 0x87, 0x48, 0x19, 0xe9, 0x81, 0x19, - 0x24, 0xb6, 0x31, 0x32, 0xc6, 0x96, 0x6b, 0x06, 0x09, 0x21, 0x50, 0x4f, 0xe2, 0x94, 0xd9, 0xe6, - 0xc8, 0x18, 0x77, 0x5d, 0xb1, 0x26, 0xd7, 0x01, 0x92, 0xd9, 0x61, 0x18, 0x78, 0x93, 0x59, 0x1a, - 0xda, 0x35, 0x61, 0x6b, 0x49, 0xc9, 0x8b, 0x34, 0x24, 0x63, 0xe8, 0x4f, 0xe9, 0xd9, 0xe4, 0x34, - 0x0e, 0x67, 0x53, 0x9c, 0x78, 0xf1, 0x2c, 0x62, 0x76, 0x5d, 0x1c, 0xef, 0x4d, 0xe9, 0xd9, 0x4b, - 0x21, 0xde, 0xe5, 0x52, 0x32, 0xe2, 0x5e, 0x9d, 0x4d, 0x8e, 0x82, 0x10, 0x27, 0x27, 0x38, 0xb7, - 0x1b, 0x23, 0x63, 0x5c, 0x77, 0x61, 0x4a, 0xcf, 0x1e, 0x06, 0x21, 0x3e, 0xc6, 0x39, 0xb9, 0x09, - 0x6d, 0x9f, 0x32, 0x3a, 0xf1, 0x30, 0x62, 0x98, 0xda, 
0x4d, 0xf1, 0x16, 0x70, 0xd1, 0xae, 0x90, - 0x70, 0xff, 0x52, 0xea, 0x9d, 0xd8, 0xeb, 0x42, 0x23, 0xd6, 0xdc, 0x3f, 0xea, 0x4f, 0x83, 0x68, - 0x22, 0x3c, 0x6f, 0x89, 0xa7, 0x2d, 0x21, 0x79, 0xca, 0xdd, 0xff, 0x18, 0xd6, 0xa5, 0x6f, 0x99, - 0x6d, 0x8d, 0x6a, 0xe3, 0xf6, 0xce, 0xed, 0xed, 0x02, 0x8d, 0x6d, 0xe9, 0xde, 0xa3, 0xe8, 0x28, - 0x4e, 0xa7, 0x94, 0x05, 0x71, 0xf4, 0x29, 0x66, 0x19, 0x3d, 0x46, 0x37, 0x3f, 0x43, 0xae, 0x42, - 0x2b, 0xc2, 0x57, 0x93, 0xd3, 0xc0, 0xcf, 0x6c, 0x18, 0xd5, 0xc6, 0x5d, 0x77, 0x3d, 0xc2, 0x57, - 0x2f, 0x03, 0x3f, 0x23, 0xb7, 0xa0, 0xe3, 0x63, 0x88, 0x0c, 0x7d, 0xa9, 0x6e, 0x0b, 0x75, 0x5b, - 0xc9, 0xb8, 0x89, 0xf3, 0x02, 0x06, 0x05, 0xd8, 0x2e, 0x66, 0x49, 0x1c, 0x65, 0x48, 0xc6, 0xb0, - 0x21, 0x6f, 0x3f, 0x08, 0x5e, 0xe3, 0x7e, 0x30, 0x0d, 0x98, 0x60, 0xa0, 0xee, 0x56, 0xc5, 0xe4, - 0x32, 0x34, 0x43, 0xa4, 0x3e, 0xa6, 0x0a, 0x76, 0xb5, 0x73, 0x7e, 0x35, 0xc1, 0x3e, 0xcf, 0x75, - 0xc1, 0xa9, 0x2f, 0x6e, 0xec, 0xba, 0x66, 0xe0, 0x73, 0xcc, 0xb2, 0xe0, 0x35, 0x0a, 0x4e, 0xeb, - 0xae, 0x58, 0x93, 0x1b, 0x00, 0x5e, 0x1c, 0x86, 0xe8, 0xf1, 0x83, 0xea, 0x72, 0x4d, 0xc2, 0x31, - 0x15, 0x34, 0x2d, 0xe8, 0xac, 0xbb, 0x16, 0x97, 0x48, 0x26, 0x8b, 0x2f, 0x57, 0x06, 0x92, 0x49, - 0xf5, 0xe5, 0xd2, 0xe4, 0x7f, 0x40, 0x72, 0x70, 0x0e, 0xe7, 0x85, 0x61, 0x53, 0x18, 0xf6, 0x95, - 0xe6, 0xfe, 0x3c, 0xb7, 0xbe, 0x06, 0x56, 0x8a, 0xd4, 0x9f, 0xc4, 0x51, 0x38, 0x17, 0xe4, 0xb6, - 0xdc, 0x16, 0x17, 0x3c, 0x89, 0xc2, 0x39, 0xf9, 0x2f, 0x0c, 0x52, 0x4c, 0xc2, 0xc0, 0xa3, 0x93, - 0x24, 0xa4, 0x1e, 0x4e, 0x31, 0xca, 0x79, 0xee, 0x2b, 0xc5, 0xd3, 0x5c, 0x4e, 0x6c, 0x58, 0x3f, - 0xc5, 0x34, 0xe3, 0x9f, 0x65, 0x09, 0x93, 0x7c, 0x4b, 0xfa, 0x50, 0x63, 0x2c, 0xb4, 0x41, 0x48, - 0xf9, 0xd2, 0x59, 0x87, 0xc6, 0xde, 0x34, 0x61, 0x73, 0xe7, 0x67, 0x03, 0x36, 0x0e, 0x66, 0x09, - 0xa6, 0xf7, 0xc3, 0xd8, 0x3b, 0xd9, 0x3b, 0x63, 0x29, 0x25, 0x4f, 0xa0, 0x87, 0x29, 0xcd, 0x66, - 0x29, 0xf7, 0xdd, 0x0f, 0xa2, 0x63, 0x01, 0x69, 0x7b, 0x67, 0xac, 0x85, 0x4f, 0xe5, 0xcc, 0xf6, - 0x9e, 0x3c, 0xb0, 0x2b, 0xec, 0xdd, 0x2e, 0xea, 0xdb, 0xe1, 0x17, 0xd0, 0x2d, 0xe9, 0x39, 0x31, - 0x3c, 0xb4, 0x15, 0x55, 0x62, 0xcd, 0x19, 0x4f, 0x68, 0x1a, 0xb0, 0xb9, 0x4a, 0x41, 0xb5, 0xe3, - 0x84, 0xa8, 0x0c, 0xe3, 0x91, 0x56, 0x13, 0x91, 0x66, 0x49, 0xc9, 0x23, 0x3f, 0x73, 0xfe, 0x0d, - 0x9b, 0xbb, 0x61, 0x80, 0x11, 0xdb, 0x0f, 0x32, 0x86, 0x91, 0x8b, 0xdf, 0xcc, 0x30, 0x63, 0xfc, - 0x85, 0x88, 0x4e, 0x51, 0x25, 0xb8, 0x58, 0x3b, 0xdf, 0x42, 0x4f, 0x86, 0xce, 0x7e, 0xec, 0x89, - 0xb8, 0xe1, 0xc0, 0xf0, 0xcc, 0x96, 0x46, 0x7c, 0x59, 0x49, 0x79, 0xb3, 0x9a, 0xf2, 0x7a, 0x4e, - 0xd4, 0x2e, 0xce, 0x89, 0xfa, 0x72, 0x4e, 0x3c, 0x87, 0xcd, 0xfd, 0x38, 0x3e, 0x99, 0x25, 0xd2, - 0x8d, 0xdc, 0xd7, 0xf2, 0x17, 0x1a, 0xa3, 0x1a, 0x7f, 0xb3, 0xf8, 0xc2, 0x4a, 0xc4, 0x9a, 0xd5, - 0x88, 0x75, 0xfe, 0x30, 0x60, 0xab, 0x7c, 0xad, 0xca, 0xb6, 0xaf, 0x60, 0xb3, 0xb8, 0x77, 0x12, - 0xaa, 0x6f, 0x96, 0x0f, 0xb4, 0x77, 0xee, 0x68, 0x64, 0xae, 0x3a, 0x9d, 0x17, 0x08, 0x3f, 0x07, - 0xcb, 0x1d, 0x9c, 0x56, 0x24, 0xd9, 0xf0, 0x0c, 0xfa, 0x55, 0x33, 0x1e, 0xd0, 0xc5, 0xab, 0x0a, - 0xd9, 0x56, 0x7e, 0x92, 0xbc, 0x07, 0xd6, 0xc2, 0x11, 0x53, 0x38, 0xb2, 0x59, 0x72, 0x44, 0xbd, - 0xb5, 0xb0, 0x22, 0x5b, 0xd0, 0xc0, 0x34, 0x8d, 0xf3, 0x42, 0x20, 0x37, 0xce, 0x87, 0xd0, 0xfa, - 0xdb, 0x2c, 0x3a, 0xbf, 0x19, 0xd0, 0xbd, 0x97, 0x65, 0xc1, 0x71, 0x11, 0x2e, 0x5b, 0xd0, 0x90, - 0x69, 0x2a, 0xcb, 0x91, 0xdc, 0x90, 0x11, 0xb4, 0x55, 0x96, 0x69, 0xd0, 0xeb, 0xa2, 0x37, 0x56, - 0x13, 0x95, 0x79, 0x75, 0xe9, 0x1a, 0x63, 0x61, 0xb5, 0xd0, 0x37, 0xce, 0x2d, 
0xf4, 0x4d, 0xad, - 0xd0, 0x5f, 0x03, 0x4b, 0x1c, 0x8a, 0x62, 0x1f, 0x55, 0x07, 0x68, 0x71, 0xc1, 0x67, 0xb1, 0x8f, - 0xce, 0x4f, 0x06, 0xf4, 0xf2, 0xaf, 0x51, 0xcc, 0xf7, 0xa1, 0x76, 0x54, 0xa0, 0xcf, 0x97, 0x39, - 0x46, 0xe6, 0x79, 0x18, 0x2d, 0x35, 0xb7, 0x02, 0x91, 0xba, 0x8e, 0x48, 0x41, 0x46, 0x43, 0x23, - 0x83, 0xbb, 0x4c, 0x67, 0xec, 0xeb, 0xdc, 0x65, 0xbe, 0x76, 0x8e, 0x61, 0x70, 0xc0, 0x28, 0x0b, - 0x32, 0x16, 0x78, 0x59, 0x0e, 0x73, 0x05, 0x50, 0xe3, 0x4d, 0x80, 0x9a, 0xe7, 0x01, 0x5a, 0x2b, - 0x00, 0x75, 0x7e, 0x31, 0x80, 0xe8, 0x2f, 0x29, 0x08, 0xfe, 0x81, 0xa7, 0x38, 0x64, 0x2c, 0x66, - 0x34, 0x9c, 0x88, 0xae, 0xa2, 0x7a, 0x83, 0x90, 0xf0, 0xc6, 0xc5, 0x59, 0x9a, 0x65, 0xe8, 0x4b, - 0xad, 0x6c, 0x0c, 0x2d, 0x2e, 0x10, 0xca, 0x72, 0x5f, 0x69, 0x56, 0xfa, 0x8a, 0x73, 0x0f, 0xda, - 0x07, 0x2c, 0x4e, 0xe9, 0x31, 0x3e, 0x9f, 0x27, 0x6f, 0xe3, 0xbd, 0xf2, 0xce, 0x5c, 0x00, 0x31, - 0x02, 0xd8, 0x5d, 0x78, 0xbf, 0xaa, 0x00, 0x5e, 0x81, 0x4b, 0x0b, 0x0b, 0x5e, 0x2f, 0x15, 0x2f, - 0xce, 0x33, 0xb8, 0x5c, 0x55, 0x28, 0x18, 0x3f, 0x80, 0xf6, 0x02, 0x92, 0xbc, 0x76, 0x5c, 0xd2, - 0x52, 0x76, 0x71, 0xce, 0xd5, 0x2d, 0x9d, 0xff, 0xc3, 0x95, 0x85, 0xea, 0x81, 0x28, 0x82, 0x17, - 0xd5, 0xe6, 0x21, 0xd8, 0xcb, 0xe6, 0xd2, 0x07, 0xe7, 0x47, 0x13, 0x3a, 0x0f, 0x54, 0xb4, 0xf3, - 0xae, 0xaf, 0xf5, 0x79, 0x4b, 0xf4, 0xf9, 0x5b, 0xd0, 0x29, 0x0d, 0x61, 0xb2, 0xdf, 0xb7, 0x4f, - 0xb5, 0x09, 0x6c, 0xd5, 0xac, 0x56, 0x13, 0x66, 0xd5, 0x59, 0xed, 0x3f, 0x30, 0x38, 0x4a, 0x11, - 0x97, 0xc7, 0xba, 0xba, 0xbb, 0xc1, 0x15, 0xba, 0xed, 0x36, 0x6c, 0x52, 0x8f, 0x05, 0xa7, 0x15, - 0x6b, 0xc9, 0xfd, 0x40, 0xaa, 0x74, 0xfb, 0x87, 0x85, 0xa3, 0x41, 0x74, 0x14, 0x67, 0x76, 0xf3, - 0xed, 0xc7, 0x32, 0xf5, 0x35, 0x5c, 0x93, 0x39, 0xdf, 0x99, 0xd0, 0x72, 0xa9, 0x77, 0xf2, 0x6e, - 0xa3, 0x71, 0x17, 0x36, 0x8a, 0xaa, 0x56, 0x02, 0xe4, 0x8a, 0x06, 0x88, 0x4e, 0xbc, 0xdb, 0xf5, - 0xb5, 0x5d, 0xe6, 0xfc, 0x69, 0x40, 0xef, 0x41, 0x51, 0x39, 0xdf, 0x6d, 0x30, 0x76, 0x00, 0x78, - 0xa9, 0x2f, 0xe1, 0xa0, 0xb7, 0xc6, 0x9c, 0x6e, 0xd7, 0x4a, 0xd5, 0x2a, 0x73, 0x7e, 0x30, 0xa1, - 0xf3, 0x3c, 0x4e, 0xe2, 0x30, 0x3e, 0x9e, 0xbf, 0xdb, 0x5f, 0xbf, 0x07, 0x03, 0xad, 0x2b, 0x96, - 0x40, 0xb8, 0x5a, 0x09, 0x86, 0x05, 0xd9, 0xee, 0x86, 0x5f, 0xda, 0x67, 0xce, 0x26, 0x0c, 0xd4, - 0x84, 0xa7, 0x15, 0x37, 0x17, 0x88, 0x2e, 0x54, 0x85, 0xed, 0x23, 0xe8, 0x32, 0x05, 0x9d, 0x78, - 0x4e, 0xcd, 0xb8, 0x7a, 0xe8, 0xe9, 0xd0, 0xba, 0x1d, 0xa6, 0xed, 0x76, 0xbe, 0x6f, 0xc0, 0xfa, - 0x01, 0xd2, 0x57, 0x88, 0x3e, 0x79, 0x04, 0xdd, 0x03, 0x8c, 0xfc, 0xc5, 0x4f, 0xcb, 0x2d, 0xed, - 0x8e, 0x42, 0x3a, 0xfc, 0xd7, 0x2a, 0x69, 0x51, 0xe3, 0xd6, 0xc6, 0xc6, 0x1d, 0x83, 0x3c, 0x85, - 0xee, 0x63, 0xc4, 0x64, 0x37, 0x8e, 0x22, 0xf4, 0x18, 0xfa, 0xe4, 0x86, 0x5e, 0x69, 0x97, 0xc7, - 0xdc, 0xe1, 0xd5, 0xa5, 0xd2, 0x91, 0x4f, 0x45, 0xea, 0xc6, 0x67, 0xd0, 0xd1, 0xa7, 0xbb, 0xd2, - 0x85, 0x2b, 0x66, 0xd1, 0xe1, 0xcd, 0x37, 0x8c, 0x85, 0xce, 0x1a, 0xb9, 0x0b, 0x4d, 0x39, 0x6e, - 0x10, 0x5b, 0x33, 0x2e, 0xcd, 0x53, 0x25, 0xbf, 0xca, 0xb3, 0x89, 0xb3, 0x46, 0x1e, 0x03, 0x2c, - 0x1a, 0x36, 0xd1, 0x71, 0x59, 0x9a, 0x18, 0x86, 0xd7, 0xcf, 0xd1, 0x16, 0x97, 0x7d, 0x0e, 0xbd, - 0x72, 0xeb, 0x22, 0xa3, 0x95, 0xdd, 0x49, 0x8b, 0x88, 0xe1, 0xad, 0x0b, 0x2c, 0x8a, 0x8b, 0xbf, - 0x84, 0x7e, 0xb5, 0x23, 0x11, 0x67, 0xe5, 0xc1, 0x52, 0x77, 0x1b, 0xde, 0xbe, 0xd0, 0x46, 0x07, - 0x61, 0x11, 0x95, 0x25, 0x10, 0x96, 0x22, 0xb8, 0x04, 0xc2, 0x72, 0x28, 0x3b, 0x6b, 0x87, 0x4d, - 0xf1, 0x67, 0xc7, 0xfb, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x50, 0x28, 0x66, 0xfc, 0x10, - 0x00, 
0x00, + // 1416 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0xdc, 0x44, + 0x14, 0x8f, 0xbd, 0x1f, 0x59, 0xbf, 0xfd, 0xc8, 0xee, 0x24, 0x6d, 0xdd, 0x2d, 0x6d, 0xb7, 0xee, + 0x65, 0xcb, 0x47, 0x54, 0xc2, 0x81, 0x03, 0xa0, 0xaa, 0x4d, 0x53, 0x51, 0x35, 0xd0, 0xd6, 0x69, + 0x8b, 0x84, 0x84, 0xcc, 0xc4, 0x7e, 0x09, 0x56, 0xbc, 0xb6, 0xb1, 0x67, 0xb7, 0xd9, 0x5e, 0xb8, + 0x70, 0x44, 0x70, 0xe0, 0xff, 0xe1, 0xc2, 0x91, 0x3f, 0x85, 0x03, 0x12, 0x77, 0x24, 0x34, 0xe3, + 0xb1, 0x77, 0xec, 0xdd, 0xa4, 0x15, 0x12, 0x87, 0xde, 0x3c, 0xbf, 0xf7, 0x66, 0xe6, 0xcd, 0xef, + 0x7d, 0xee, 0x42, 0x67, 0x42, 0x53, 0x86, 0xc9, 0x76, 0x9c, 0x44, 0x2c, 0x22, 0x46, 0xb6, 0x72, + 0xe2, 0x43, 0xeb, 0x4f, 0x1d, 0x8c, 0xcf, 0x91, 0x26, 0xec, 0x10, 0x29, 0x23, 0x3d, 0xd0, 0xfd, + 0xd8, 0xd4, 0x46, 0xda, 0xd8, 0xb0, 0x75, 0x3f, 0x26, 0x04, 0xea, 0x71, 0x94, 0x30, 0x53, 0x1f, + 0x69, 0xe3, 0xae, 0x2d, 0xbe, 0xc9, 0x55, 0x80, 0x78, 0x7a, 0x18, 0xf8, 0xae, 0x33, 0x4d, 0x02, + 0xb3, 0x26, 0x74, 0x8d, 0x0c, 0x79, 0x9e, 0x04, 0x64, 0x0c, 0xfd, 0x09, 0x3d, 0x75, 0x66, 0x51, + 0x30, 0x9d, 0xa0, 0xe3, 0x46, 0xd3, 0x90, 0x99, 0x75, 0xb1, 0xbd, 0x37, 0xa1, 0xa7, 0x2f, 0x04, + 0xbc, 0xcb, 0x51, 0x32, 0xe2, 0x56, 0x9d, 0x3a, 0x47, 0x7e, 0x80, 0xce, 0x09, 0xce, 0xcd, 0xc6, + 0x48, 0x1b, 0xd7, 0x6d, 0x98, 0xd0, 0xd3, 0x07, 0x7e, 0x80, 0x8f, 0x70, 0x4e, 0xae, 0x43, 0xdb, + 0xa3, 0x8c, 0x3a, 0x2e, 0x86, 0x0c, 0x13, 0xb3, 0x29, 0xee, 0x02, 0x0e, 0xed, 0x0a, 0x84, 0xdb, + 0x97, 0x50, 0xf7, 0xc4, 0x5c, 0x17, 0x12, 0xf1, 0xcd, 0xed, 0xa3, 0xde, 0xc4, 0x0f, 0x1d, 0x61, + 0x79, 0x4b, 0x5c, 0x6d, 0x08, 0xe4, 0x09, 0x37, 0xff, 0x33, 0x58, 0xcf, 0x6c, 0x4b, 0x4d, 0x63, + 0x54, 0x1b, 0xb7, 0x77, 0x6e, 0x6e, 0x17, 0x6c, 0x6c, 0x67, 0xe6, 0x3d, 0x0c, 0x8f, 0xa2, 0x64, + 0x42, 0x99, 0x1f, 0x85, 0x5f, 0x60, 0x9a, 0xd2, 0x63, 0xb4, 0xf3, 0x3d, 0xe4, 0x32, 0xb4, 0x42, + 0x7c, 0xe9, 0xcc, 0x7c, 0x2f, 0x35, 0x61, 0x54, 0x1b, 0x77, 0xed, 0xf5, 0x10, 0x5f, 0xbe, 0xf0, + 0xbd, 0x94, 0xdc, 0x80, 0x8e, 0x87, 0x01, 0x32, 0xf4, 0x32, 0x71, 0x5b, 0x88, 0xdb, 0x12, 0xe3, + 0x2a, 0xd6, 0x73, 0x18, 0x14, 0x64, 0xdb, 0x98, 0xc6, 0x51, 0x98, 0x22, 0x19, 0xc3, 0x46, 0x76, + 0xfa, 0x81, 0xff, 0x0a, 0xf7, 0xfd, 0x89, 0xcf, 0x84, 0x07, 0xea, 0x76, 0x15, 0x26, 0x17, 0xa1, + 0x19, 0x20, 0xf5, 0x30, 0x91, 0xb4, 0xcb, 0x95, 0xf5, 0x97, 0x0e, 0xe6, 0x59, 0xa6, 0x0b, 0x9f, + 0x7a, 0xe2, 0xc4, 0xae, 0xad, 0xfb, 0x1e, 0xe7, 0x2c, 0xf5, 0x5f, 0xa1, 0xf0, 0x69, 0xdd, 0x16, + 0xdf, 0xe4, 0x1a, 0x80, 0x1b, 0x05, 0x01, 0xba, 0x7c, 0xa3, 0x3c, 0x5c, 0x41, 0x38, 0xa7, 0xc2, + 0x4d, 0x0b, 0x77, 0xd6, 0x6d, 0x83, 0x23, 0x99, 0x27, 0x8b, 0x97, 0x4b, 0x85, 0xcc, 0x93, 0xf2, + 0xe5, 0x99, 0xca, 0xfb, 0x40, 0x72, 0x72, 0x0e, 0xe7, 0x85, 0x62, 0x53, 0x28, 0xf6, 0xa5, 0xe4, + 0xde, 0x3c, 0xd7, 0xbe, 0x02, 0x46, 0x82, 0xd4, 0x73, 0xa2, 0x30, 0x98, 0x0b, 0xe7, 0xb6, 0xec, + 0x16, 0x07, 0x1e, 0x87, 0xc1, 0x9c, 0xbc, 0x07, 0x83, 0x04, 0xe3, 0xc0, 0x77, 0xa9, 0x13, 0x07, + 0xd4, 0xc5, 0x09, 0x86, 0xb9, 0x9f, 0xfb, 0x52, 0xf0, 0x24, 0xc7, 0x89, 0x09, 0xeb, 0x33, 0x4c, + 0x52, 0xfe, 0x2c, 0x43, 0xa8, 0xe4, 0x4b, 0xd2, 0x87, 0x1a, 0x63, 0x81, 0x09, 0x02, 0xe5, 0x9f, + 0xe4, 0x16, 0xf4, 0xdd, 0x68, 0x12, 0x53, 0x97, 0x39, 0x09, 0xce, 0x7c, 0xb1, 0xa9, 0x2d, 0xc4, + 0x1b, 0x12, 0xb7, 0x25, 0x6c, 0xad, 0x43, 0x63, 0x6f, 0x12, 0xb3, 0xb9, 0xf5, 0x9b, 0x06, 0x1b, + 0x07, 0xd3, 0x18, 0x93, 0x7b, 0x41, 0xe4, 0x9e, 0xec, 0x9d, 0xb2, 0x84, 0x92, 0xc7, 0xd0, 0xc3, + 0x84, 0xa6, 0xd3, 0x84, 0x3f, 0xd3, 0xf3, 0xc3, 0x63, 0xc1, 0x7e, 
0x7b, 0x67, 0xac, 0x44, 0x5a, + 0x65, 0xcf, 0xf6, 0x5e, 0xb6, 0x61, 0x57, 0xe8, 0xdb, 0x5d, 0x54, 0x97, 0xc3, 0xaf, 0xa1, 0x5b, + 0x92, 0x73, 0x1f, 0xf2, 0x2c, 0x90, 0x5e, 0x15, 0xdf, 0x3c, 0x38, 0x62, 0x9a, 0xf8, 0x6c, 0x2e, + 0xb3, 0x55, 0xae, 0xb8, 0xef, 0x64, 0x32, 0xf2, 0xa0, 0xac, 0x89, 0xa0, 0x34, 0x32, 0xe4, 0xa1, + 0x97, 0x5a, 0xb7, 0x60, 0x73, 0x37, 0xf0, 0x31, 0x64, 0xfb, 0x7e, 0xca, 0x30, 0xb4, 0xf1, 0xfb, + 0x29, 0xa6, 0x8c, 0xdf, 0x10, 0xd2, 0x09, 0xca, 0x5a, 0x20, 0xbe, 0xad, 0x1f, 0xa0, 0x97, 0x45, + 0xd9, 0x7e, 0xe4, 0x8a, 0x10, 0xe3, 0x1c, 0xf2, 0x22, 0x90, 0x29, 0xf1, 0xcf, 0x4a, 0x75, 0xd0, + 0xab, 0xd5, 0x41, 0x4d, 0x9f, 0xda, 0xf9, 0xe9, 0x53, 0x5f, 0x4e, 0x9f, 0x67, 0xb0, 0xb9, 0x1f, + 0x45, 0x27, 0xd3, 0x38, 0x33, 0x23, 0xb7, 0xb5, 0xfc, 0x42, 0x6d, 0x54, 0xe3, 0x77, 0x16, 0x2f, + 0xac, 0x04, 0xb7, 0x5e, 0x0d, 0x6e, 0xeb, 0x6f, 0x0d, 0xb6, 0xca, 0xc7, 0xca, 0xc4, 0xfc, 0x16, + 0x36, 0x8b, 0x73, 0x9d, 0x40, 0xbe, 0x39, 0xbb, 0xa0, 0xbd, 0x73, 0x5b, 0x71, 0xe6, 0xaa, 0xdd, + 0x79, 0x2d, 0xf1, 0x72, 0xb2, 0xec, 0xc1, 0xac, 0x82, 0xa4, 0xc3, 0x53, 0xe8, 0x57, 0xd5, 0x78, + 0xec, 0x17, 0xb7, 0x4a, 0x66, 0x5b, 0xf9, 0x4e, 0xf2, 0x21, 0x18, 0x0b, 0x43, 0x74, 0x61, 0xc8, + 0x66, 0xc9, 0x10, 0x79, 0xd7, 0x42, 0x8b, 0x6c, 0x41, 0x03, 0x93, 0x24, 0xca, 0x6b, 0x46, 0xb6, + 0xb0, 0x3e, 0x81, 0xd6, 0x7f, 0xf6, 0xa2, 0xf5, 0x87, 0x06, 0xdd, 0xbb, 0x69, 0xea, 0x1f, 0x17, + 0xe1, 0xb2, 0x05, 0x8d, 0x2c, 0xa3, 0xb3, 0xca, 0x95, 0x2d, 0xc8, 0x08, 0xda, 0x32, 0x21, 0x15, + 0xea, 0x55, 0xe8, 0xb5, 0x85, 0x47, 0x26, 0x69, 0x3d, 0x33, 0x8d, 0x27, 0x69, 0xa5, 0x27, 0x34, + 0xce, 0xec, 0x09, 0x4d, 0xa5, 0x27, 0x5c, 0x01, 0x43, 0x6c, 0x0a, 0x23, 0x0f, 0x65, 0xb3, 0x68, + 0x71, 0xe0, 0xcb, 0xc8, 0x43, 0xeb, 0x57, 0x0d, 0x7a, 0xf9, 0x6b, 0xa4, 0xe7, 0xfb, 0x50, 0x3b, + 0x2a, 0xd8, 0xe7, 0x9f, 0x39, 0x47, 0xfa, 0x59, 0x1c, 0x2d, 0xf5, 0xc1, 0x82, 0x91, 0xba, 0xca, + 0x48, 0xe1, 0x8c, 0x86, 0xe2, 0x0c, 0x6e, 0x32, 0x9d, 0xb2, 0xef, 0x72, 0x93, 0xf9, 0xb7, 0x75, + 0x0c, 0x83, 0x03, 0x46, 0x99, 0x9f, 0x32, 0xdf, 0x4d, 0x73, 0x9a, 0x2b, 0x84, 0x6a, 0xaf, 0x23, + 0x54, 0x3f, 0x8b, 0xd0, 0x5a, 0x41, 0xa8, 0xf5, 0xbb, 0x06, 0x44, 0xbd, 0x49, 0x52, 0xf0, 0x3f, + 0x5c, 0xc5, 0x29, 0x63, 0x11, 0xa3, 0x81, 0x23, 0x1a, 0x90, 0x6c, 0x23, 0x02, 0xe1, 0x3d, 0x8e, + 0x7b, 0x69, 0x9a, 0xa2, 0x97, 0x49, 0xb3, 0x1e, 0xd2, 0xe2, 0x80, 0x10, 0x96, 0x5b, 0x50, 0xb3, + 0xd2, 0x82, 0xac, 0xbb, 0xd0, 0x3e, 0x60, 0x51, 0x42, 0x8f, 0xf1, 0xd9, 0x3c, 0x7e, 0x13, 0xeb, + 0xa5, 0x75, 0xfa, 0x82, 0x88, 0x11, 0xc0, 0xee, 0xc2, 0xfa, 0x55, 0x05, 0xf0, 0x12, 0x5c, 0x58, + 0x68, 0xf0, 0x7a, 0x29, 0xfd, 0x62, 0x3d, 0x85, 0x8b, 0x55, 0x81, 0xa4, 0xf1, 0x63, 0x68, 0x2f, + 0x28, 0xc9, 0x6b, 0xc7, 0x05, 0x25, 0x65, 0x17, 0xfb, 0x6c, 0x55, 0xd3, 0xfa, 0x00, 0x2e, 0x2d, + 0x44, 0xf7, 0x45, 0x11, 0x3c, 0xaf, 0x36, 0x0f, 0xc1, 0x5c, 0x56, 0xcf, 0x6c, 0xb0, 0x7e, 0xd1, + 0xa1, 0x73, 0x5f, 0x46, 0x3b, 0x1f, 0x10, 0x94, 0x91, 0xc0, 0x10, 0x23, 0xc1, 0x0d, 0xe8, 0x94, + 0xe6, 0xb5, 0x6c, 0x34, 0x68, 0xcf, 0x94, 0x61, 0x6d, 0xd5, 0x58, 0x57, 0x13, 0x6a, 0xd5, 0xb1, + 0xee, 0x5d, 0x18, 0x1c, 0x25, 0x88, 0xcb, 0x13, 0x60, 0xdd, 0xde, 0xe0, 0x02, 0x55, 0x77, 0x1b, + 0x36, 0xa9, 0xcb, 0xfc, 0x59, 0x45, 0x3b, 0xf3, 0xfd, 0x20, 0x13, 0xa9, 0xfa, 0x0f, 0x0a, 0x43, + 0xfd, 0xf0, 0x28, 0x4a, 0xcd, 0xe6, 0x9b, 0x4f, 0x70, 0xf2, 0x35, 0x5c, 0x92, 0x5a, 0x3f, 0xea, + 0xd0, 0xb2, 0xa9, 0x7b, 0xf2, 0x76, 0xb3, 0x71, 0x07, 0x36, 0x8a, 0xaa, 0x56, 0x22, 0xe4, 0x92, + 0x42, 0x88, 0xea, 0x78, 0xbb, 0xeb, 0x29, 0xab, 0xd4, 0xfa, 0x47, 0x83, 0xde, 0xfd, 0xa2, 
0x72, + 0xbe, 0xdd, 0x64, 0xec, 0x00, 0xf0, 0x52, 0x5f, 0xe2, 0x41, 0x6d, 0x8d, 0xb9, 0xbb, 0x6d, 0x23, + 0x91, 0x5f, 0xa9, 0xf5, 0xb3, 0x0e, 0x9d, 0x67, 0x51, 0x1c, 0x05, 0xd1, 0xf1, 0xfc, 0xed, 0x7e, + 0xfd, 0x1e, 0x0c, 0x94, 0xae, 0x58, 0x22, 0xe1, 0x72, 0x25, 0x18, 0x16, 0xce, 0xb6, 0x37, 0xbc, + 0xd2, 0x3a, 0xb5, 0x36, 0x61, 0x20, 0x27, 0x3c, 0xa5, 0xb8, 0xd9, 0x40, 0x54, 0x50, 0x16, 0xb6, + 0x4f, 0xa1, 0xcb, 0x24, 0x75, 0xe2, 0x3a, 0x39, 0xe3, 0xaa, 0xa1, 0xa7, 0x52, 0x6b, 0x77, 0x98, + 0xb2, 0xda, 0xf9, 0xa9, 0x01, 0xeb, 0x07, 0x48, 0x5f, 0x22, 0x7a, 0xe4, 0x21, 0x74, 0x0f, 0x30, + 0xf4, 0x16, 0xbf, 0x42, 0xb7, 0x94, 0x33, 0x0a, 0x74, 0xf8, 0xce, 0x2a, 0xb4, 0xa8, 0x71, 0x6b, + 0x63, 0xed, 0xb6, 0x46, 0x9e, 0x40, 0xf7, 0x11, 0x62, 0xbc, 0x1b, 0x85, 0x21, 0xba, 0x0c, 0x3d, + 0x72, 0x4d, 0xad, 0xb4, 0xcb, 0x63, 0xee, 0xf0, 0xf2, 0x52, 0xe9, 0xc8, 0xa7, 0x22, 0x79, 0xe2, + 0x53, 0xe8, 0xa8, 0xd3, 0x5d, 0xe9, 0xc0, 0x15, 0xb3, 0xe8, 0xf0, 0xfa, 0x6b, 0xc6, 0x42, 0x6b, + 0x8d, 0xdc, 0x81, 0x66, 0x36, 0x6e, 0x10, 0x53, 0x51, 0x2e, 0xcd, 0x53, 0x25, 0xbb, 0xca, 0xb3, + 0x89, 0xb5, 0x46, 0x1e, 0x01, 0x2c, 0x1a, 0x36, 0x51, 0x79, 0x59, 0x9a, 0x18, 0x86, 0x57, 0xcf, + 0x90, 0x16, 0x87, 0x7d, 0x05, 0xbd, 0x72, 0xeb, 0x22, 0xa3, 0x95, 0xdd, 0x49, 0x89, 0x88, 0xe1, + 0x8d, 0x73, 0x34, 0x8a, 0x83, 0xbf, 0x81, 0x7e, 0xb5, 0x23, 0x11, 0x6b, 0xe5, 0xc6, 0x52, 0x77, + 0x1b, 0xde, 0x3c, 0x57, 0x47, 0x25, 0x61, 0x11, 0x95, 0x25, 0x12, 0x96, 0x22, 0xb8, 0x44, 0xc2, + 0x72, 0x28, 0x5b, 0x6b, 0x87, 0x4d, 0xf1, 0xbf, 0xc8, 0x47, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x88, 0xac, 0x76, 0x9c, 0x27, 0x11, 0x00, 0x00, } diff --git a/weed/storage/store.go b/weed/storage/store.go index a29f3c163..8d4d9c55e 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -121,7 +121,9 @@ func (s *Store) Status() []*VolumeInfo { DeleteCount: v.nm.DeletedCount(), DeletedByteCount: v.nm.DeletedSize(), ReadOnly: v.readOnly, - Ttl: v.Ttl} + Ttl: v.Ttl, + CompactRevision: uint32(v.CompactRevision), + } stats = append(stats, s) } location.RUnlock() diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 5cec0c5ed..22acf1653 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -148,5 +148,6 @@ func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessag ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), + CompactRevision: uint32(v.SuperBlock.CompactRevision), } } diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go index 7afad412d..f5ddeca14 100644 --- a/weed/storage/volume_info.go +++ b/weed/storage/volume_info.go @@ -18,6 +18,7 @@ type VolumeInfo struct { DeleteCount int DeletedByteCount uint64 ReadOnly bool + CompactRevision uint32 } func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err error) { @@ -30,6 +31,7 @@ func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err er DeletedByteCount: m.DeletedByteCount, ReadOnly: m.ReadOnly, Version: Version(m.Version), + CompactRevision: m.CompactRevision, } rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) if e != nil { @@ -57,6 +59,7 @@ func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMe ReplicaPlacement: uint32(vi.ReplicaPlacement.Byte()), Version: uint32(vi.Version), Ttl: vi.Ttl.ToUint32(), + CompactRevision: vi.CompactRevision, } } From eb4a54d9fe865eafb4de12db96b3e668b333fd1a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Mar 2019 05:19:37 -0700 Subject: [PATCH 
080/450] refactoring --- weed/server/filer_grpc_server.go | 7 +++++- weed/shell/command_collection_list.go | 10 ++++++-- weed/shell/command_volume_list.go | 10 +++++--- weed/wdclient/masterclient.go | 6 +++++ weed/wdclient/masterclient_collection.go | 31 ------------------------ 5 files changed, 26 insertions(+), 38 deletions(-) delete mode 100644 weed/wdclient/masterclient_collection.go diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 13c66543b..6f7cf1ad6 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -238,7 +238,12 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - err = fs.filer.MasterClient.CollectionDelete(ctx, req.GetCollection()) + err = fs.filer.MasterClient.WithClient(ctx, func(ctx context.Context, client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + Name: req.GetCollection(), + }) + return err + }) return &filer_pb.DeleteCollectionResponse{}, err } diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index 34a406d67..0797e56fb 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -3,6 +3,7 @@ package shell import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "io" ) @@ -21,9 +22,14 @@ func (c *commandCollectionList) Help() string { return "# list all collections" } -func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer io.Writer) error { +func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { - resp, err := commandEnv.masterClient.CollectionList(context.Background()) + var resp *master_pb.CollectionListResponse + + err = commandEnv.masterClient.WithClient(context.Background(), func(ctx context.Context, client master_pb.SeaweedClient) error { + resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{}) + return err + }) if err != nil { return err diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 971e18f10..52ac4865a 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -22,16 +22,18 @@ func (c *commandVolumeList) Help() string { return "# list all volumes" } -func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io.Writer) error { - - resp, err := commandEnv.masterClient.VolumeList(context.Background()) +func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + var resp *master_pb.VolumeListResponse + err = commandEnv.masterClient.WithClient(context.Background(), func(ctx context.Context, client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) if err != nil { return err } writeTopologyInfo(writer, resp.TopologyInfo) - return nil } diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index b3b277c74..5f147e594 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -116,3 +116,9 @@ func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.Di return fn(ctx, client) } + +func (mc *MasterClient) WithClient(ctx context.Context, fn func(ctx context.Context, client 
master_pb.SeaweedClient) error) error { + return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { + return fn(ctx, client) + }) +} diff --git a/weed/wdclient/masterclient_collection.go b/weed/wdclient/masterclient_collection.go deleted file mode 100644 index 1a9215a7e..000000000 --- a/weed/wdclient/masterclient_collection.go +++ /dev/null @@ -1,31 +0,0 @@ -package wdclient - -import ( - "context" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" -) - -func (mc *MasterClient) CollectionDelete(ctx context.Context, collection string) error { - return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { - _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ - Name: collection, - }) - return err - }) -} - -func (mc *MasterClient) CollectionList(ctx context.Context) (resp *master_pb.CollectionListResponse, err error) { - err = withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { - resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{}) - return err - }) - return -} - -func (mc *MasterClient) VolumeList(ctx context.Context) (resp *master_pb.VolumeListResponse, err error) { - err = withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) - return err - }) - return -} From 916b809c086e94208eabe2af1086bcb1289ed298 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Mar 2019 05:34:43 -0700 Subject: [PATCH 081/450] add error checking --- weed/storage/needle_read_write.go | 39 +++++++++++++++++++++++-------- weed/storage/volume_vacuum.go | 5 +++- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go index e69d70dc3..4e6bd9cc7 100644 --- a/weed/storage/needle_read_write.go +++ b/weed/storage/needle_read_write.go @@ -182,10 +182,10 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version case Version1: n.Data = bytes[NeedleEntrySize : NeedleEntrySize+size] case Version2, Version3: - n.readNeedleDataVersion2(bytes[NeedleEntrySize : NeedleEntrySize+int(n.Size)]) + err = n.readNeedleDataVersion2(bytes[NeedleEntrySize : NeedleEntrySize+int(n.Size)]) } - if size == 0 { - return nil + if size == 0 || err != nil { + return err } checksum := util.BytesToUint32(bytes[NeedleEntrySize+size : NeedleEntrySize+size+NeedleChecksumSize]) newChecksum := NewCRC(n.Data) @@ -206,15 +206,15 @@ func (n *Needle) ParseNeedleHeader(bytes []byte) { n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize : NeedleEntrySize]) } -func (n *Needle) readNeedleDataVersion2(bytes []byte) { +var ErrIndexOutOfRange = fmt.Errorf("index out of range") + +func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) { index, lenBytes := 0, len(bytes) if index < lenBytes { n.DataSize = util.BytesToUint32(bytes[index : index+4]) index = index + 4 - if int(n.DataSize)+index > lenBytes { - // this if clause is due to bug #87 and #93, fixed in v0.69 - // remove this clause later - return + if int(n.DataSize)+index >= lenBytes { + return ErrIndexOutOfRange } n.Data = bytes[index : index+int(n.DataSize)] index = index + int(n.DataSize) @@ -224,30 +224,49 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) { if index < lenBytes && 
n.HasName() { n.NameSize = uint8(bytes[index]) index = index + 1 + if int(n.NameSize)+index >= lenBytes { + return ErrIndexOutOfRange + } n.Name = bytes[index : index+int(n.NameSize)] index = index + int(n.NameSize) } if index < lenBytes && n.HasMime() { n.MimeSize = uint8(bytes[index]) index = index + 1 + if int(n.MimeSize)+index >= lenBytes { + return ErrIndexOutOfRange + } n.Mime = bytes[index : index+int(n.MimeSize)] index = index + int(n.MimeSize) } if index < lenBytes && n.HasLastModifiedDate() { + if LastModifiedBytesLength+index >= lenBytes { + return ErrIndexOutOfRange + } n.LastModified = util.BytesToUint64(bytes[index : index+LastModifiedBytesLength]) index = index + LastModifiedBytesLength } if index < lenBytes && n.HasTtl() { + if TtlBytesLength+index >= lenBytes { + return ErrIndexOutOfRange + } n.Ttl = LoadTTLFromBytes(bytes[index : index+TtlBytesLength]) index = index + TtlBytesLength } if index < lenBytes && n.HasPairs() { + if 2+index >= lenBytes { + return ErrIndexOutOfRange + } n.PairsSize = util.BytesToUint16(bytes[index : index+2]) index += 2 + if int(n.PairsSize)+index >= lenBytes { + return ErrIndexOutOfRange + } end := index + int(n.PairsSize) n.Pairs = bytes[index:end] index = end } + return nil } func ReadNeedleHeader(r *os.File, version Version, offset int64) (n *Needle, bodyLength int64, err error) { @@ -300,11 +319,11 @@ func (n *Needle) ReadNeedleBody(r *os.File, version Version, offset int64, bodyL if _, err = r.ReadAt(bytes, offset); err != nil { return } - n.readNeedleDataVersion2(bytes[0:n.Size]) + err = n.readNeedleDataVersion2(bytes[0:n.Size]) n.Checksum = NewCRC(n.Data) if version == Version3 { - tsOffset := n.Size+NeedleChecksumSize + tsOffset := n.Size + NeedleChecksumSize n.AppendAtNs = util.BytesToUint64(bytes[tsOffset : tsOffset+TimestampSize]) } default: diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index b4870423c..63aada2c2 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -335,7 +335,10 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { } n := new(Needle) - n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version()) + err :=n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version()) + if err != nil { + return nil + } if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) { return nil From da871896c345372780c35de7364b42d910935c53 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Mar 2019 05:47:41 -0700 Subject: [PATCH 082/450] weed filer: set grpc port to port + 10000 --- weed/command/mount.go | 7 +------ weed/command/mount_std.go | 2 +- weed/command/s3.go | 2 +- weed/operation/grpc_client.go | 2 +- weed/server/raft_server.go | 4 ++-- weed/server/volume_grpc_client_to_master.go | 2 +- weed/storage/volume_vacuum.go | 2 +- weed/util/grpc_client_server.go | 11 ++++------- weed/wdclient/masterclient.go | 2 +- 9 files changed, 13 insertions(+), 21 deletions(-) diff --git a/weed/command/mount.go b/weed/command/mount.go index 760c68e40..ec790c999 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -8,7 +8,6 @@ import ( type MountOptions struct { filer *string - filerGrpcPort *int filerMountRootPath *string dir *string dirListingLimit *int @@ -29,7 +28,6 @@ var ( func init() { cmdMount.Run = runMount // break init cycle mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location") - mountOptions.filerGrpcPort = cmdMount.Flag.Int("filer.grpc.port", 0, "filer grpc server 
listen port, default to http port + 10000") mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server") mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory") mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size") @@ -61,7 +59,7 @@ var cmdMount = &Command{ `, } -func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress string, err error) { +func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { hostnameAndPort := strings.Split(filer, ":") if len(hostnameAndPort) != 2 { return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort) @@ -73,9 +71,6 @@ func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress } filerGrpcPort := int(filerPort) + 10000 - if optionalGrpcPort != 0 { - filerGrpcPort = optionalGrpcPort - } return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil } diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 2b274e200..c047b94c3 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -87,7 +87,7 @@ func runMount(cmd *Command, args []string) bool { c.Close() }) - filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer, *mountOptions.filerGrpcPort) + filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer) if err != nil { glog.Fatal(err) return false diff --git a/weed/command/s3.go b/weed/command/s3.go index a54ddd2f7..9aa9f8e2f 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -51,7 +51,7 @@ func runS3(cmd *Command, args []string) bool { weed_server.LoadConfiguration("security", false) - filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort) + filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer) if err != nil { glog.Fatal(err) return false diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index eb97f5ce1..ea7a82044 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -42,7 +42,7 @@ func withMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, ctx := context.Background() - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0) + masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v", masterServer) } diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 4be13810f..88320ed98 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -61,7 +61,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr st s.raftServer.Start() for _, peer := range s.peers { - s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer, 19333)) + s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer)) } s.GrpcServer = raft.NewGrpcServer(s.raftServer) @@ -72,7 +72,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr st _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ Name: s.raftServer.Name(), - ConnectionString: util.ServerToGrpcAddress(s.serverAddr, 19333), + ConnectionString: util.ServerToGrpcAddress(s.serverAddr), }) if err != nil { diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index f6ed8ee23..94e99c8f6 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ 
b/weed/server/volume_grpc_client_to_master.go @@ -31,7 +31,7 @@ func (vs *VolumeServer) heartbeat() { if newLeader != "" { master = newLeader } - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0) + masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) if parseErr != nil { glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress) continue diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 63aada2c2..b29e15843 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -335,7 +335,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { } n := new(Needle) - err :=n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version()) + err := n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version()) if err != nil { return nil } diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index 361d245b8..e5993aeab 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -81,7 +81,7 @@ func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, return err } -func ParseServerToGrpcAddress(server string, optionalGrpcPort int) (serverGrpcAddress string, err error) { +func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) { hostnameAndPort := strings.Split(server, ":") if len(hostnameAndPort) != 2 { return "", fmt.Errorf("server should have hostname:port format: %v", hostnameAndPort) @@ -93,22 +93,19 @@ func ParseServerToGrpcAddress(server string, optionalGrpcPort int) (serverGrpcAd } grpcPort := int(port) + 10000 - if optionalGrpcPort != 0 { - grpcPort = optionalGrpcPort - } return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort), nil } -func ServerToGrpcAddress(server string, defaultGrpcPort int) (serverGrpcAddress string) { +func ServerToGrpcAddress(server string) (serverGrpcAddress string) { hostnameAndPort := strings.Split(server, ":") if len(hostnameAndPort) != 2 { - return fmt.Sprintf("%s:%d", server, defaultGrpcPort) + return fmt.Sprintf("unexpected server address: %s", server) } port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) if parseErr != nil { - return fmt.Sprintf("%s:%d", hostnameAndPort[0], defaultGrpcPort) + return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1]) } grpcPort := int(port) + 10000 diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 5f147e594..29fe50d80 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -101,7 +101,7 @@ func (mc *MasterClient) tryAllMasters() { func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(ctx context.Context, client master_pb.SeaweedClient) error) error { - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0) + masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v", master) } From cea5c53bf7943f7735d74c1e9f568ba633474e5d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Mar 2019 20:56:27 -0700 Subject: [PATCH 083/450] fix length checking fix https://github.com/chrislusf/seaweedfs/issues/890 --- weed/storage/needle_read_write.go | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go index 4e6bd9cc7..0a73b2977 100644 --- 
a/weed/storage/needle_read_write.go +++ b/weed/storage/needle_read_write.go @@ -206,15 +206,13 @@ func (n *Needle) ParseNeedleHeader(bytes []byte) { n.Size = util.BytesToUint32(bytes[CookieSize+NeedleIdSize : NeedleEntrySize]) } -var ErrIndexOutOfRange = fmt.Errorf("index out of range") - func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) { index, lenBytes := 0, len(bytes) if index < lenBytes { n.DataSize = util.BytesToUint32(bytes[index : index+4]) index = index + 4 - if int(n.DataSize)+index >= lenBytes { - return ErrIndexOutOfRange + if int(n.DataSize)+index > lenBytes { + return fmt.Errorf("index out of range %d", 1) } n.Data = bytes[index : index+int(n.DataSize)] index = index + int(n.DataSize) @@ -224,8 +222,8 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) { if index < lenBytes && n.HasName() { n.NameSize = uint8(bytes[index]) index = index + 1 - if int(n.NameSize)+index >= lenBytes { - return ErrIndexOutOfRange + if int(n.NameSize)+index > lenBytes { + return fmt.Errorf("index out of range %d", 2) } n.Name = bytes[index : index+int(n.NameSize)] index = index + int(n.NameSize) @@ -233,34 +231,34 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) (err error) { if index < lenBytes && n.HasMime() { n.MimeSize = uint8(bytes[index]) index = index + 1 - if int(n.MimeSize)+index >= lenBytes { - return ErrIndexOutOfRange + if int(n.MimeSize)+index > lenBytes { + return fmt.Errorf("index out of range %d", 3) } n.Mime = bytes[index : index+int(n.MimeSize)] index = index + int(n.MimeSize) } if index < lenBytes && n.HasLastModifiedDate() { - if LastModifiedBytesLength+index >= lenBytes { - return ErrIndexOutOfRange + if LastModifiedBytesLength+index > lenBytes { + return fmt.Errorf("index out of range %d", 4) } n.LastModified = util.BytesToUint64(bytes[index : index+LastModifiedBytesLength]) index = index + LastModifiedBytesLength } if index < lenBytes && n.HasTtl() { - if TtlBytesLength+index >= lenBytes { - return ErrIndexOutOfRange + if TtlBytesLength+index > lenBytes { + return fmt.Errorf("index out of range %d", 5) } n.Ttl = LoadTTLFromBytes(bytes[index : index+TtlBytesLength]) index = index + TtlBytesLength } if index < lenBytes && n.HasPairs() { - if 2+index >= lenBytes { - return ErrIndexOutOfRange + if 2+index > lenBytes { + return fmt.Errorf("index out of range %d", 6) } n.PairsSize = util.BytesToUint16(bytes[index : index+2]) index += 2 - if int(n.PairsSize)+index >= lenBytes { - return ErrIndexOutOfRange + if int(n.PairsSize)+index > lenBytes { + return fmt.Errorf("index out of range %d", 7) } end := index + int(n.PairsSize) n.Pairs = bytes[index:end] From f3d316a846473fe8f33a90172291608d764a264d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Mar 2019 21:58:00 -0700 Subject: [PATCH 084/450] weed shell: adding file system disk usage fs.du fix https://github.com/chrislusf/seaweedfs/issues/889 echo "fs.du http://localhost:8888/some/path" | weed shell --- weed/shell/command_fs_du.go | 137 ++++++++++++++++++++++++++++++++++++ weed/shell/commands.go | 1 + weed/shell/shell_liner.go | 1 + 3 files changed, 139 insertions(+) create mode 100644 weed/shell/command_fs_du.go diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go new file mode 100644 index 000000000..cdc9d98ef --- /dev/null +++ b/weed/shell/command_fs_du.go @@ -0,0 +1,137 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + 
"google.golang.org/grpc" + "io" + "net/url" + "strconv" + "strings" +) + +func init() { + commands = append(commands, &commandFsDu{}) +} + +type commandFsDu struct { +} + +func (c *commandFsDu) Name() string { + return "fs.du" +} + +func (c *commandFsDu) Help() string { + return "http://:/dir[/file] # show disk usage" +} + +func (c *commandFsDu) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + filerServer, filerPort, path, err := parseFilerUrl(args[0]) + if err != nil { + return err + } + + dir, name := filer2.FullPath(path).DirAndName() + if strings.HasSuffix(path, "/") { + if path == "/"{ + dir, name = "/", "" + }else{ + dir, name = path[0 : len(path)-1], "" + } + } + + ctx := context.Background() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + _, _, err = paginateDirectory(ctx, writer, client, dir, name, 1000) + + return err + + }) + +} + +func paginateDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int) (blockCount uint64, byteCount uint64, err error) { + + paginatedCount := -1 + startFromFileName := "" + + for paginatedCount == -1 || paginatedCount == paginateSize { + resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: name, + StartFromFileName: startFromFileName, + InclusiveStartFrom: false, + Limit: uint32(paginateSize), + }) + if listErr != nil { + err = listErr + return + } + + paginatedCount = len(resp.Entries) + + for _, entry := range resp.Entries { + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name + } + numBlock, numByte, err := paginateDirectory(ctx, writer, client, subDir, "", paginateSize) + if err == nil { + blockCount += numBlock + byteCount += numByte + } + } else { + blockCount += uint64(len(entry.Chunks)) + byteCount += filer2.TotalSize(entry.Chunks) + } + startFromFileName = entry.Name + + if name != "" && !entry.IsDirectory { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + } + } + } + + if name == "" { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) + } + + return + +} + +func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { + if strings.HasPrefix(entryPath, "http") { + var u *url.URL + u, err = url.Parse(entryPath) + if err != nil { + return + } + filerServer = u.Hostname() + portString := u.Port() + if portString != "" { + filerPort, err = strconv.ParseInt(portString, 10, 32) + } + path = u.Path + } else { + err = fmt.Errorf("path should have full url http://:/path/to/dirOrFile : %s", entryPath) + } + return +} + +func (env *commandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, env.option.GrpcDialOption) + +} diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 4df70ff55..280900c80 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -14,6 +14,7 @@ type ShellOptions struct { type commandEnv struct { env map[string]string masterClient *wdclient.MasterClient + option ShellOptions } type command 
interface { diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go index cd015fe85..096532fdf 100644 --- a/weed/shell/shell_liner.go +++ b/weed/shell/shell_liner.go @@ -36,6 +36,7 @@ func RunShell(options ShellOptions) { env: make(map[string]string), masterClient: wdclient.NewMasterClient(context.Background(), options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")), + option: options, } go commandEnv.masterClient.KeepConnectedToMaster() From 5ae4b963a47a98426392b5434a9c3e26ad550e27 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Mar 2019 22:20:14 -0700 Subject: [PATCH 085/450] avoid using global rand --- weed/wdclient/vid_map.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/wdclient/vid_map.go b/weed/wdclient/vid_map.go index aef29f56f..64c24a809 100644 --- a/weed/wdclient/vid_map.go +++ b/weed/wdclient/vid_map.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -19,11 +20,13 @@ type Location struct { type vidMap struct { sync.RWMutex vid2Locations map[uint32][]Location + r *rand.Rand } func newVidMap() vidMap { return vidMap{ vid2Locations: make(map[uint32][]Location), + r: rand.New(rand.NewSource(time.Now().UnixNano())), } } @@ -39,7 +42,7 @@ func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrl string, err error return "", fmt.Errorf("volume %d not found", id) } - return locations[rand.Intn(len(locations))].Url, nil + return locations[vc.r.Intn(len(locations))].Url, nil } func (vc *vidMap) LookupFileId(fileId string) (fullUrl string, err error) { From 88ab932f7d439715698183fbdfbd4ade71596b67 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 19 Mar 2019 23:01:23 -0700 Subject: [PATCH 086/450] refactoring function parameter --- weed/server/filer_grpc_server.go | 2 +- weed/shell/command_collection_list.go | 4 ++-- weed/shell/command_volume_list.go | 3 ++- weed/wdclient/masterclient.go | 4 ++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 6f7cf1ad6..c9cb6dbe7 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -238,7 +238,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - err = fs.filer.MasterClient.WithClient(ctx, func(ctx context.Context, client master_pb.SeaweedClient) error { + err = fs.filer.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ Name: req.GetCollection(), }) diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index 0797e56fb..0db74ef20 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -25,8 +25,8 @@ func (c *commandCollectionList) Help() string { func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { var resp *master_pb.CollectionListResponse - - err = commandEnv.masterClient.WithClient(context.Background(), func(ctx context.Context, client master_pb.SeaweedClient) error { + ctx := context.Background() + err = commandEnv.masterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{}) return err }) diff --git 
a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 52ac4865a..5be5be569 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -25,7 +25,8 @@ func (c *commandVolumeList) Help() string { func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { var resp *master_pb.VolumeListResponse - err = commandEnv.masterClient.WithClient(context.Background(), func(ctx context.Context, client master_pb.SeaweedClient) error { + ctx := context.Background() + err = commandEnv.masterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) return err }) diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 29fe50d80..794471f7b 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -117,8 +117,8 @@ func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.Di return fn(ctx, client) } -func (mc *MasterClient) WithClient(ctx context.Context, fn func(ctx context.Context, client master_pb.SeaweedClient) error) error { +func (mc *MasterClient) WithClient(ctx context.Context, fn func(client master_pb.SeaweedClient) error) error { return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { - return fn(ctx, client) + return fn(client) }) } From 45a52b17fd83a9ffcda494de5cd800a93de97c9c Mon Sep 17 00:00:00 2001 From: chenwanli Date: Wed, 20 Mar 2019 16:38:11 +0800 Subject: [PATCH 087/450] benchmark: add replication flag --- weed/command/benchmark.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 44601e567..aa54946a7 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -36,6 +36,7 @@ type BenchmarkOptions struct { read *bool sequentialRead *bool collection *string + replication *string cpuprofile *string maxCpu *int grpcDialOption grpc.DialOption @@ -61,6 +62,7 @@ func init() { b.read = cmdBenchmark.Flag.Bool("read", true, "enable read") b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file") b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection") + b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") sharedBytes = make([]byte, 1024) @@ -228,8 +230,9 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { MimeType: "image/bench", // prevent gzip benchmark content } ar := &operation.VolumeAssignRequest{ - Count: 1, - Collection: *b.collection, + Count: 1, + Collection: *b.collection, + Replication: *b.replication, } if assignResult, err := operation.Assign(masterClient.GetMaster(), b.grpcDialOption, ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection From 8db82e2b75eb67bc58a4869cc70fb8c5b2c0b97c Mon Sep 17 00:00:00 2001 From: Jonathan Amsterdam Date: Tue, 19 Mar 2019 10:10:43 -0400 Subject: [PATCH 088/450] notification: add Go CDK pubsub support Add the gocdk_pub_sub package, which supports the Go Cloud Development Kit pubsub API. Link in all current providers. Update the notification scaffold. 
--- weed/command/scaffold.go | 10 +++ .../gocdk_pub_sub/gocdk_pub_sub.go | 71 +++++++++++++++++++ weed/server/filer_server.go | 4 +- 3 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 weed/notification/gocdk_pub_sub/gocdk_pub_sub.go diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 9e45d7381..d72bd6f2f 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -180,6 +180,16 @@ google_application_credentials = "/path/to/x.json" # path to json credential fil project_id = "" # an existing project id topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists +[notification.gocdk_pub_sub] +# The Go Cloud Development Kit (https://gocloud.dev). +# PubSub API (https://godoc.org/gocloud.dev/pubsub). +# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ. +enabled = false +# This URL will Dial the RabbitMQ server at the URL in the environment +# variable RABBIT_SERVER_URL and open the exchange "myexchange". +# The exchange must have already been created by some other means, like +# the RabbitMQ management plugin. +topic_url = "rabbit://myexchange" ` REPLICATION_TOML_EXAMPLE = ` diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go new file mode 100644 index 000000000..94a413ac0 --- /dev/null +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -0,0 +1,71 @@ +// Package gocdk_pub_sub supports the Go CDK (Cloud Development Kit) PubSub API, +// which in turn supports many providers, including Amazon SNS/SQS, Azure Service Bus, +// Google Cloud PubSub, and RabbitMQ. +// +// In the config, select a provider and topic using a URL. See +// https://godoc.org/gocloud.dev/pubsub and its sub-packages for details. +// +// The Go CDK PubSub API does not support administrative operations like topic +// creation. Create the topic using a UI, CLI or provider-specific API before running +// weed. +// +// The Go CDK obtains credentials via environment variables and other +// provider-specific default mechanisms. See the provider's documentation for +// details. 
+package gocdk_pub_sub + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + _ "gocloud.dev/pubsub/azuresb" + _ "gocloud.dev/pubsub/gcppubsub" + _ "gocloud.dev/pubsub/natspubsub" + _ "gocloud.dev/pubsub/rabbitpubsub" +) + +func init() { + notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{}) +} + +type GoCDKPubSub struct { + topicURL string + topic *pubsub.Topic +} + +func (k *GoCDKPubSub) GetName() string { + return "gocdk_pub_sub" +} + +func (k *GoCDKPubSub) Initialize(config util.Configuration) error { + k.topicURL = config.GetString("topic_url") + glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) + topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) + if err != nil { + glog.Fatalf("Failed to open topic: %v", err) + } + k.topic = topic + return nil +} + +func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error { + bytes, err := proto.Marshal(message) + if err != nil { + return err + } + ctx := context.Background() + err = k.topic.Send(ctx, &pubsub.Message{ + Body: bytes, + Metadata: map[string]string{"key": key}, + }) + if err != nil { + return fmt.Errorf("send message via Go CDK pubsub %s: %v", k.topicURL, err) + } + return nil +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 83998a009..43d319398 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -1,10 +1,11 @@ package weed_server import ( - "google.golang.org/grpc" "net/http" "os" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" @@ -15,6 +16,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" + _ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub" _ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub" _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" From 977b30e992108e480c883cdd461bf0c8f6598f25 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 20 Mar 2019 10:13:51 -0700 Subject: [PATCH 089/450] weed filer: http HEAD response header add ETag fix https://github.com/chrislusf/seaweedfs/issues/892 --- weed/server/filer_server_handlers_read.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 4d1f41fd4..89d47b0b8 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -52,6 +52,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) + setEtag(w, filer2.ETag(entry.Chunks)) return } From 531add52c27f2754c36626f1efeab15029a47e29 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 21 Mar 2019 09:49:04 -0700 Subject: [PATCH 090/450] weed master: skip proxied file read in http read only mode --- weed/server/master_server.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/weed/server/master_server.go b/weed/server/master_server.go index a77c8fa19..4ecb9f192 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -93,7 +93,9 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) - r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler)) + if !httpReadOnly { + r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler)) + } ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOpiton, garbageThreshold, ms.preallocate) From a3490b600cad88ced4ea9bf3672b8566f207f57f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 21 Mar 2019 16:00:46 -0700 Subject: [PATCH 091/450] weed filer, weed master: add option to disable http --- weed/command/filer.go | 3 +++ weed/command/master.go | 4 ++-- weed/command/server.go | 5 +++-- weed/server/filer_server.go | 5 ++++- weed/server/master_server.go | 18 ++++++++---------- 5 files changed, 20 insertions(+), 15 deletions(-) diff --git a/weed/command/filer.go b/weed/command/filer.go index d12d661a8..5b3f733bd 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -33,6 +33,7 @@ type FilerOptions struct { dirListingLimit *int dataCenter *string enableNotification *bool + disableHttp *bool // default leveldb directory, used in "weed server" mode defaultLevelDbDirectory *string @@ -52,6 +53,7 @@ func init() { f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") + f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") } var cmdFiler = &Command{ @@ -108,6 +110,7 @@ func (fo *FilerOptions) startFiler() { DirListingLimit: *fo.dirListingLimit, DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, + DisableHttp: *fo.disableHttp, }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) diff --git a/weed/command/master.go b/weed/command/master.go index 4207a331c..15d1171e0 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -47,7 +47,7 @@ var ( mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. 
No limit if empty.") - httpReadOnly = cmdMaster.Flag.Bool("httpReadOnly", false, "disable http operations, only gRPC operations are allowed.") + disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file") @@ -79,7 +79,7 @@ func runMaster(cmd *Command, args []string) bool { *volumeSizeLimitMB, *volumePreallocate, *mpulse, *defaultReplicaPlacement, *garbageThreshold, masterWhiteList, - *httpReadOnly, + *disableHttp, ) listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport) diff --git a/weed/command/server.go b/weed/command/server.go index cc63c96e1..d88ded0ee 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -60,7 +60,7 @@ var ( serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") - masterHttpReadOnly = cmdServer.Flag.Bool("master.httpReadOnly", false, "disable http operations, only gRPC operations are allowed.") + serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") @@ -127,6 +127,7 @@ func runServer(cmd *Command, args []string) bool { serverOptions.v.pulseSeconds = pulseSeconds filerOptions.dataCenter = serverDataCenter + filerOptions.disableHttp = serverDisableHttp if *filerOptions.defaultReplicaPlacement == "" { *filerOptions.defaultReplicaPlacement = *masterDefaultReplicaPlacement @@ -173,7 +174,7 @@ func runServer(cmd *Command, args []string) bool { ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder, *masterVolumeSizeLimitMB, *masterVolumePreallocate, *pulseSeconds, *masterDefaultReplicaPlacement, *serverGarbageThreshold, - serverWhiteList, *masterHttpReadOnly, + serverWhiteList, *serverDisableHttp, ) glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterPort) diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 43d319398..39238cc1f 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -34,6 +34,7 @@ type FilerOption struct { DirListingLimit int DataCenter string DefaultLevelDbDir string + DisableHttp bool } type FilerServer struct { @@ -74,7 +75,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) notification.LoadConfiguration(v.Sub("notification")) handleStaticResources(defaultMux) - defaultMux.HandleFunc("/", fs.filerHandler) + if !option.DisableHttp { + defaultMux.HandleFunc("/", fs.filerHandler) + } if defaultMux != readonlyMux { readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 4ecb9f192..a70de5e6e 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -49,7 +49,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, 
defaultReplicaPlacement string, garbageThreshold float64, whiteList []string, - httpReadOnly bool, + disableHttp bool, ) *MasterServer { v := viper.GetViper() @@ -77,10 +77,10 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, ms.guard = security.NewGuard(whiteList, signingKey) - handleStaticResources2(r) - r.HandleFunc("/", ms.uiStatusHandler) - r.HandleFunc("/ui/index.html", ms.uiStatusHandler) - if !httpReadOnly { + if !disableHttp { + handleStaticResources2(r) + r.HandleFunc("/", ms.uiStatusHandler) + r.HandleFunc("/ui/index.html", ms.uiStatusHandler) r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) @@ -89,11 +89,9 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) - } - r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) - r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) - r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) + r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) + r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler)) } From 95e0520182eeeb57921916dc694b64ff342c93e1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 23 Mar 2019 11:33:34 -0700 Subject: [PATCH 092/450] weed volume: add grpc operation to replicate a volume to local --- weed/operation/sync_volume.go | 4 +- weed/pb/volume_server.proto | 60 ++- weed/pb/volume_server_pb/volume_server.pb.go | 536 +++++++++++++++---- weed/server/volume_grpc_admin.go | 8 +- weed/server/volume_grpc_replicate.go | 155 ++++++ weed/server/volume_grpc_sync.go | 20 +- weed/server/volume_grpc_vacuum.go | 22 +- weed/shell/shell_liner.go | 2 +- weed/storage/store.go | 4 +- weed/storage/volume.go | 13 +- weed/storage/volume_sync.go | 2 +- weed/topology/allocate_volume.go | 2 +- weed/topology/topology_vacuum.go | 8 +- 13 files changed, 662 insertions(+), 174 deletions(-) create mode 100644 weed/server/volume_grpc_replicate.go diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index c979254f4..6af2404c0 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -15,7 +15,7 @@ func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{ - VolumdId: vid, + VolumeId: vid, }) return nil }) @@ -27,7 +27,7 @@ func GetVolumeIdxEntries(server string, grpcDialOption grpc.DialOption, vid uint return WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{ - VolumdId: vid, + VolumeId: vid, }) if err != nil { return err } diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index
8ab67a1bf..93db5b981 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -36,7 +36,12 @@ service VolumeServer { rpc VolumeDelete (VolumeDeleteRequest) returns (VolumeDeleteResponse) { } - // rpc VolumeUiPage (VolumeUiPageRequest) returns (VolumeUiPageResponse) {} + rpc ReplicateVolume (ReplicateVolumeRequest) returns (ReplicateVolumeResponse) { + } + rpc ReadVolumeFileStatus (ReadVolumeFileStatusRequest) returns (ReadVolumeFileStatusResponse) { + } + rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) { + } } @@ -60,27 +65,27 @@ message Empty { } message VacuumVolumeCheckRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCheckResponse { double garbage_ratio = 1; } message VacuumVolumeCompactRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; int64 preallocate = 2; } message VacuumVolumeCompactResponse { } message VacuumVolumeCommitRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCommitResponse { } message VacuumVolumeCleanupRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCleanupResponse { } @@ -92,7 +97,7 @@ message DeleteCollectionResponse { } message AssignVolumeRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; string collection = 2; int64 preallocate = 3; string replication = 4; @@ -102,10 +107,10 @@ message AssignVolumeResponse { } message VolumeSyncStatusRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeSyncStatusResponse { - uint32 volumd_id = 1; + uint32 volume_id = 1; string collection = 2; string replication = 4; string ttl = 5; @@ -115,14 +120,14 @@ message VolumeSyncStatusResponse { } message VolumeSyncIndexRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeSyncIndexResponse { bytes index_file_content = 1; } message VolumeSyncDataRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; uint32 revision = 2; uint32 offset = 3; uint32 size = 4; @@ -133,26 +138,51 @@ message VolumeSyncDataResponse { } message VolumeMountRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeMountResponse { } message VolumeUnmountRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeUnmountResponse { } message VolumeDeleteRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeDeleteResponse { } -message VolumeUiPageRequest { +message ReplicateVolumeRequest { + uint32 volume_id = 1; + string collection = 2; + string replication = 3; + string ttl = 4; + string source_data_node = 5; } -message VolumeUiPageResponse { +message ReplicateVolumeResponse { +} + +message CopyFileRequest { + uint32 volume_id = 1; + bool is_idx_file = 2; + bool is_dat_file = 3; +} +message CopyFileResponse { + bytes file_content = 1; +} + +message ReadVolumeFileStatusRequest { + uint32 volume_id = 1; +} +message ReadVolumeFileStatusResponse { + uint32 volume_id = 1; + uint64 idx_file_timestamp = 2; + uint64 idx_file_size = 3; + uint64 dat_file_timestamp = 4; + uint64 dat_file_size = 5; } message DiskStatus { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index fa700e2e5..d84a5b099 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -37,8 +37,12 @@ It has these top-level messages: VolumeUnmountResponse VolumeDeleteRequest VolumeDeleteResponse - VolumeUiPageRequest - VolumeUiPageResponse + ReplicateVolumeRequest + ReplicateVolumeResponse + CopyFileRequest + 
CopyFileResponse + ReadVolumeFileStatusRequest + ReadVolumeFileStatusResponse DiskStatus MemStatus */ @@ -145,7 +149,7 @@ func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type VacuumVolumeCheckRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} } @@ -153,9 +157,9 @@ func (m *VacuumVolumeCheckRequest) String() string { return proto.Com func (*VacuumVolumeCheckRequest) ProtoMessage() {} func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } -func (m *VacuumVolumeCheckRequest) GetVolumdId() uint32 { +func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -177,7 +181,7 @@ func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { } type VacuumVolumeCompactRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Preallocate int64 `protobuf:"varint,2,opt,name=preallocate" json:"preallocate,omitempty"` } @@ -186,9 +190,9 @@ func (m *VacuumVolumeCompactRequest) String() string { return proto.C func (*VacuumVolumeCompactRequest) ProtoMessage() {} func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } -func (m *VacuumVolumeCompactRequest) GetVolumdId() uint32 { +func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -209,7 +213,7 @@ func (*VacuumVolumeCompactResponse) ProtoMessage() {} func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } type VacuumVolumeCommitRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} } @@ -217,9 +221,9 @@ func (m *VacuumVolumeCommitRequest) String() string { return proto.Co func (*VacuumVolumeCommitRequest) ProtoMessage() {} func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } -func (m *VacuumVolumeCommitRequest) GetVolumdId() uint32 { +func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -233,7 +237,7 @@ func (*VacuumVolumeCommitResponse) ProtoMessage() {} func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } type VacuumVolumeCleanupRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} } @@ -241,9 +245,9 @@ func (m *VacuumVolumeCleanupRequest) String() string { return proto.C func (*VacuumVolumeCleanupRequest) ProtoMessage() {} func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } -func (m *VacuumVolumeCleanupRequest) 
GetVolumdId() uint32 { +func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -281,7 +285,7 @@ func (*DeleteCollectionResponse) ProtoMessage() {} func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } type AssignVolumeRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"` Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` @@ -293,9 +297,9 @@ func (m *AssignVolumeRequest) String() string { return proto.CompactT func (*AssignVolumeRequest) ProtoMessage() {} func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } -func (m *AssignVolumeRequest) GetVolumdId() uint32 { +func (m *AssignVolumeRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -337,7 +341,7 @@ func (*AssignVolumeResponse) ProtoMessage() {} func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } type VolumeSyncStatusRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} } @@ -345,15 +349,15 @@ func (m *VolumeSyncStatusRequest) String() string { return proto.Comp func (*VolumeSyncStatusRequest) ProtoMessage() {} func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } -func (m *VolumeSyncStatusRequest) GetVolumdId() uint32 { +func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } type VolumeSyncStatusResponse struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` @@ -367,9 +371,9 @@ func (m *VolumeSyncStatusResponse) String() string { return proto.Com func (*VolumeSyncStatusResponse) ProtoMessage() {} func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } -func (m *VolumeSyncStatusResponse) GetVolumdId() uint32 { +func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -417,7 +421,7 @@ func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { } type VolumeSyncIndexRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeSyncIndexRequest) Reset() { *m = VolumeSyncIndexRequest{} } @@ -425,9 +429,9 @@ func (m *VolumeSyncIndexRequest) String() string { return proto.Compa func 
(*VolumeSyncIndexRequest) ProtoMessage() {} func (*VolumeSyncIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } -func (m *VolumeSyncIndexRequest) GetVolumdId() uint32 { +func (m *VolumeSyncIndexRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -449,7 +453,7 @@ func (m *VolumeSyncIndexResponse) GetIndexFileContent() []byte { } type VolumeSyncDataRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Revision uint32 `protobuf:"varint,2,opt,name=revision" json:"revision,omitempty"` Offset uint32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` @@ -461,9 +465,9 @@ func (m *VolumeSyncDataRequest) String() string { return proto.Compac func (*VolumeSyncDataRequest) ProtoMessage() {} func (*VolumeSyncDataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } -func (m *VolumeSyncDataRequest) GetVolumdId() uint32 { +func (m *VolumeSyncDataRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -513,7 +517,7 @@ func (m *VolumeSyncDataResponse) GetFileContent() []byte { } type VolumeMountRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } @@ -521,9 +525,9 @@ func (m *VolumeMountRequest) String() string { return proto.CompactTe func (*VolumeMountRequest) ProtoMessage() {} func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } -func (m *VolumeMountRequest) GetVolumdId() uint32 { +func (m *VolumeMountRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -537,7 +541,7 @@ func (*VolumeMountResponse) ProtoMessage() {} func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } type VolumeUnmountRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } @@ -545,9 +549,9 @@ func (m *VolumeUnmountRequest) String() string { return proto.Compact func (*VolumeUnmountRequest) ProtoMessage() {} func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } -func (m *VolumeUnmountRequest) GetVolumdId() uint32 { +func (m *VolumeUnmountRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -561,7 +565,7 @@ func (*VolumeUnmountResponse) ProtoMessage() {} func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } type VolumeDeleteRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } @@ -569,9 +573,9 @@ func (m *VolumeDeleteRequest) String() string { return proto.CompactT func (*VolumeDeleteRequest) ProtoMessage() 
{} func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } -func (m *VolumeDeleteRequest) GetVolumdId() uint32 { +func (m *VolumeDeleteRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -584,21 +588,173 @@ func (m *VolumeDeleteResponse) String() string { return proto.Compact func (*VolumeDeleteResponse) ProtoMessage() {} func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } -type VolumeUiPageRequest struct { +type ReplicateVolumeRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` } -func (m *VolumeUiPageRequest) Reset() { *m = VolumeUiPageRequest{} } -func (m *VolumeUiPageRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeUiPageRequest) ProtoMessage() {} -func (*VolumeUiPageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (m *ReplicateVolumeRequest) Reset() { *m = ReplicateVolumeRequest{} } +func (m *ReplicateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ReplicateVolumeRequest) ProtoMessage() {} +func (*ReplicateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } -type VolumeUiPageResponse struct { +func (m *ReplicateVolumeRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 } -func (m *VolumeUiPageResponse) Reset() { *m = VolumeUiPageResponse{} } -func (m *VolumeUiPageResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeUiPageResponse) ProtoMessage() {} -func (*VolumeUiPageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (m *ReplicateVolumeRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *ReplicateVolumeRequest) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *ReplicateVolumeRequest) GetTtl() string { + if m != nil { + return m.Ttl + } + return "" +} + +func (m *ReplicateVolumeRequest) GetSourceDataNode() string { + if m != nil { + return m.SourceDataNode + } + return "" +} + +type ReplicateVolumeResponse struct { +} + +func (m *ReplicateVolumeResponse) Reset() { *m = ReplicateVolumeResponse{} } +func (m *ReplicateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ReplicateVolumeResponse) ProtoMessage() {} +func (*ReplicateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type CopyFileRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + IsIdxFile bool `protobuf:"varint,2,opt,name=is_idx_file,json=isIdxFile" json:"is_idx_file,omitempty"` + IsDatFile bool `protobuf:"varint,3,opt,name=is_dat_file,json=isDatFile" json:"is_dat_file,omitempty"` +} + +func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } +func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } +func (*CopyFileRequest) ProtoMessage() {} +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{30} } + +func (m *CopyFileRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *CopyFileRequest) GetIsIdxFile() bool { + if m != nil { + return m.IsIdxFile + } + return false +} + +func (m *CopyFileRequest) GetIsDatFile() bool { + if m != nil { + return m.IsDatFile + } + return false +} + +type CopyFileResponse struct { + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +} + +func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } +func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } +func (*CopyFileResponse) ProtoMessage() {} +func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *CopyFileResponse) GetFileContent() []byte { + if m != nil { + return m.FileContent + } + return nil +} + +type ReadVolumeFileStatusRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +} + +func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } +func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } +func (*ReadVolumeFileStatusRequest) ProtoMessage() {} +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +type ReadVolumeFileStatusResponse struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + IdxFileTimestamp uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp,json=idxFileTimestamp" json:"idx_file_timestamp,omitempty"` + IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"` + DatFileTimestamp uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp,json=datFileTimestamp" json:"dat_file_timestamp,omitempty"` + DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize" json:"dat_file_size,omitempty"` +} + +func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } +func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } +func (*ReadVolumeFileStatusResponse) ProtoMessage() {} +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetIdxFileTimestamp() uint64 { + if m != nil { + return m.IdxFileTimestamp + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { + if m != nil { + return m.IdxFileSize + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetDatFileTimestamp() uint64 { + if m != nil { + return m.DatFileTimestamp + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { + if m != nil { + return m.DatFileSize + } + return 0 +} type DiskStatus struct { Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` @@ -610,7 +766,7 @@ type DiskStatus struct { func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*DiskStatus) 
Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -653,7 +809,7 @@ type MemStatus struct { func (m *MemStatus) Reset() { *m = MemStatus{} } func (m *MemStatus) String() string { return proto.CompactTextString(m) } func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *MemStatus) GetGoroutines() int32 { if m != nil { @@ -733,8 +889,12 @@ func init() { proto.RegisterType((*VolumeUnmountResponse)(nil), "volume_server_pb.VolumeUnmountResponse") proto.RegisterType((*VolumeDeleteRequest)(nil), "volume_server_pb.VolumeDeleteRequest") proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse") - proto.RegisterType((*VolumeUiPageRequest)(nil), "volume_server_pb.VolumeUiPageRequest") - proto.RegisterType((*VolumeUiPageResponse)(nil), "volume_server_pb.VolumeUiPageResponse") + proto.RegisterType((*ReplicateVolumeRequest)(nil), "volume_server_pb.ReplicateVolumeRequest") + proto.RegisterType((*ReplicateVolumeResponse)(nil), "volume_server_pb.ReplicateVolumeResponse") + proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest") + proto.RegisterType((*CopyFileResponse)(nil), "volume_server_pb.CopyFileResponse") + proto.RegisterType((*ReadVolumeFileStatusRequest)(nil), "volume_server_pb.ReadVolumeFileStatusRequest") + proto.RegisterType((*ReadVolumeFileStatusResponse)(nil), "volume_server_pb.ReadVolumeFileStatusResponse") proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus") proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus") } @@ -764,6 +924,9 @@ type VolumeServerClient interface { VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) + ReplicateVolume(ctx context.Context, in *ReplicateVolumeRequest, opts ...grpc.CallOption) (*ReplicateVolumeResponse, error) + ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) + CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) } type volumeServerClient struct { @@ -937,6 +1100,56 @@ func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteR return out, nil } +func (c *volumeServerClient) ReplicateVolume(ctx context.Context, in *ReplicateVolumeRequest, opts ...grpc.CallOption) (*ReplicateVolumeResponse, error) { + out := new(ReplicateVolumeResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReplicateVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { + out := new(ReadVolumeFileStatusResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) + if err != nil { + return nil, err + } + x := &volumeServerCopyFileClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_CopyFileClient interface { + Recv() (*CopyFileResponse, error) + grpc.ClientStream +} + +type volumeServerCopyFileClient struct { + grpc.ClientStream +} + +func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { + m := new(CopyFileResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // Server API for VolumeServer service type VolumeServerServer interface { @@ -954,6 +1167,9 @@ type VolumeServerServer interface { VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) + ReplicateVolume(context.Context, *ReplicateVolumeRequest) (*ReplicateVolumeResponse, error) + ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) + CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error } func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { @@ -1200,6 +1416,63 @@ func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _VolumeServer_ReplicateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReplicateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).ReplicateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/ReplicateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).ReplicateVolume(ctx, req.(*ReplicateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadVolumeFileStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, req.(*ReadVolumeFileStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CopyFileRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).CopyFile(m, &volumeServerCopyFileServer{stream}) +} + +type 
VolumeServer_CopyFileServer interface { + Send(*CopyFileResponse) error + grpc.ServerStream +} + +type volumeServerCopyFileServer struct { + grpc.ServerStream +} + +func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { + return x.ServerStream.SendMsg(m) +} + var _VolumeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "volume_server_pb.VolumeServer", HandlerType: (*VolumeServerServer)(nil), @@ -1248,6 +1521,14 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeDelete", Handler: _VolumeServer_VolumeDelete_Handler, }, + { + MethodName: "ReplicateVolume", + Handler: _VolumeServer_ReplicateVolume_Handler, + }, + { + MethodName: "ReadVolumeFileStatus", + Handler: _VolumeServer_ReadVolumeFileStatus_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1260,6 +1541,11 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_VolumeSyncData_Handler, ServerStreams: true, }, + { + StreamName: "CopyFile", + Handler: _VolumeServer_CopyFile_Handler, + ServerStreams: true, + }, }, Metadata: "volume_server.proto", } @@ -1267,71 +1553,83 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1044 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x57, 0xdd, 0x72, 0xdb, 0x44, - 0x14, 0x8e, 0x6a, 0x3b, 0x76, 0x8e, 0x6d, 0x6a, 0xd6, 0x69, 0xa2, 0xaa, 0x10, 0x8c, 0x80, 0xd4, - 0x69, 0x43, 0x80, 0x74, 0x80, 0x32, 0xdc, 0x00, 0x09, 0x30, 0xb9, 0xe8, 0x94, 0xd9, 0x4c, 0x3b, - 0xcc, 0xd0, 0x19, 0x8f, 0x22, 0xad, 0x9d, 0x25, 0xb2, 0xe4, 0x6a, 0x57, 0x99, 0x94, 0x37, 0xe1, - 0x9a, 0x1b, 0x9e, 0x8e, 0x17, 0xe0, 0x86, 0xd9, 0x1f, 0xd9, 0xfa, 0x73, 0x24, 0xe0, 0x6e, 0xf7, - 0xec, 0x39, 0xdf, 0xf9, 0xd9, 0xa3, 0xf3, 0xad, 0x60, 0x78, 0x1d, 0xfa, 0xf1, 0x9c, 0x4c, 0x18, - 0x89, 0xae, 0x49, 0x74, 0xb4, 0x88, 0x42, 0x1e, 0xa2, 0x41, 0x46, 0x38, 0x59, 0x5c, 0xd8, 0x9f, - 0x00, 0xfa, 0xce, 0xe1, 0xee, 0xe5, 0x29, 0xf1, 0x09, 0x27, 0x98, 0xbc, 0x8e, 0x09, 0xe3, 0xe8, - 0x3e, 0x74, 0xa6, 0xd4, 0x27, 0x13, 0xea, 0x31, 0xd3, 0x18, 0x35, 0xc6, 0x5b, 0xb8, 0x2d, 0xf6, - 0x67, 0x1e, 0xb3, 0x9f, 0xc3, 0x30, 0x63, 0xc0, 0x16, 0x61, 0xc0, 0x08, 0x7a, 0x0a, 0xed, 0x88, - 0xb0, 0xd8, 0xe7, 0xca, 0xa0, 0x7b, 0xbc, 0x77, 0x94, 0xf7, 0x75, 0xb4, 0x34, 0x89, 0x7d, 0x8e, - 0x13, 0x75, 0x9b, 0x42, 0x2f, 0x7d, 0x80, 0x76, 0xa1, 0xad, 0x7d, 0x9b, 0xc6, 0xc8, 0x18, 0x6f, - 0xe1, 0x4d, 0xe5, 0x1a, 0xed, 0xc0, 0x26, 0xe3, 0x0e, 0x8f, 0x99, 0x79, 0x67, 0x64, 0x8c, 0x5b, - 0x58, 0xef, 0xd0, 0x36, 0xb4, 0x48, 0x14, 0x85, 0x91, 0xd9, 0x90, 0xea, 0x6a, 0x83, 0x10, 0x34, - 0x19, 0xfd, 0x8d, 0x98, 0xcd, 0x91, 0x31, 0xee, 0x63, 0xb9, 0xb6, 0xdb, 0xd0, 0xfa, 0x7e, 0xbe, - 0xe0, 0x6f, 0xec, 0x2f, 0xc1, 0x7c, 0xe9, 0xb8, 0x71, 0x3c, 0x7f, 0x29, 0x63, 0x3c, 0xb9, 0x24, - 0xee, 0x55, 0x92, 0xfb, 0x03, 0xd8, 0x92, 0x91, 0x7b, 0x49, 0x04, 0x7d, 0xdc, 0x51, 0x82, 0x33, - 0xcf, 0xfe, 0x06, 0xee, 0x97, 0x18, 0xea, 0x1a, 0x7c, 0x00, 0xfd, 0x99, 0x13, 0x5d, 0x38, 0x33, - 0x32, 0x89, 0x1c, 0x4e, 0x43, 0x69, 0x6d, 0xe0, 0x9e, 0x16, 0x62, 0x21, 0xb3, 0x7f, 0x01, 0x2b, - 0x83, 0x10, 0xce, 0x17, 0x8e, 0xcb, 0xeb, 0x38, 0x47, 0x23, 0xe8, 0x2e, 0x22, 0xe2, 0xf8, 0x7e, - 0xe8, 0x3a, 0x9c, 0xc8, 0x2a, 0x34, 0x70, 0x5a, 0x64, 0xbf, 0x0b, 0x0f, 0x4a, 0xc1, 0x55, 0x80, - 0xf6, 0xd3, 0x5c, 0xf4, 0xe1, 0x7c, 0x4e, 0x6b, 0xb9, 0xb6, 0xdf, 0x29, 0x44, 0x2d, 0x2d, 0x35, - 0xee, 0x57, 0xb9, 0x53, 0x9f, 0x38, 0x41, 0xbc, 0xa8, 0x05, 0x9c, 0x8f, 0x38, 0x31, 0x5d, 0x22, 
- 0xef, 0xaa, 0xe6, 0x38, 0x09, 0x7d, 0x9f, 0xb8, 0x9c, 0x86, 0x41, 0x02, 0xbb, 0x07, 0xe0, 0x2e, - 0x85, 0xba, 0x55, 0x52, 0x12, 0xdb, 0x02, 0xb3, 0x68, 0xaa, 0x61, 0xff, 0x34, 0x60, 0xf8, 0x2d, - 0x63, 0x74, 0x16, 0x28, 0xb7, 0xb5, 0xca, 0x9f, 0x75, 0x78, 0x27, 0xef, 0x30, 0x7f, 0x3d, 0x8d, - 0xc2, 0xf5, 0x08, 0x8d, 0x88, 0x2c, 0x7c, 0xea, 0x3a, 0x12, 0xa2, 0x29, 0x21, 0xd2, 0x22, 0x34, - 0x80, 0x06, 0xe7, 0xbe, 0xd9, 0x92, 0x27, 0x62, 0x69, 0xef, 0xc0, 0x76, 0x36, 0x52, 0x9d, 0xc2, - 0x17, 0xb0, 0xab, 0x24, 0xe7, 0x6f, 0x02, 0xf7, 0x5c, 0x7e, 0x09, 0xb5, 0x0a, 0xfe, 0xb7, 0x01, - 0x66, 0xd1, 0x50, 0x77, 0xf0, 0xff, 0xcd, 0xff, 0xdf, 0x66, 0x87, 0xde, 0x83, 0x2e, 0x77, 0xa8, - 0x3f, 0x09, 0xa7, 0x53, 0x46, 0xb8, 0xb9, 0x39, 0x32, 0xc6, 0x4d, 0x0c, 0x42, 0xf4, 0x5c, 0x4a, - 0xd0, 0x01, 0x0c, 0x5c, 0xd5, 0xc5, 0x93, 0x88, 0x5c, 0x53, 0x26, 0x90, 0xdb, 0x32, 0xb0, 0xbb, - 0x6e, 0xd2, 0xdd, 0x4a, 0x8c, 0x6c, 0xe8, 0x53, 0xef, 0x66, 0x22, 0x87, 0x87, 0xfc, 0xf4, 0x3b, - 0x12, 0xad, 0x4b, 0xbd, 0x9b, 0x1f, 0xa8, 0x4f, 0xce, 0xc5, 0x04, 0xf8, 0x1c, 0x76, 0x56, 0xc9, - 0x9f, 0x05, 0x1e, 0xb9, 0xa9, 0x55, 0xb4, 0x1f, 0xd3, 0xc5, 0xd6, 0x66, 0xba, 0x64, 0x87, 0x80, - 0xa8, 0x10, 0x28, 0xbf, 0x6e, 0x18, 0x70, 0x12, 0x70, 0x09, 0xd0, 0xc3, 0x03, 0x79, 0x22, 0x9c, - 0x9f, 0x28, 0xb9, 0xfd, 0xbb, 0x01, 0xf7, 0x56, 0x48, 0xa7, 0x0e, 0x77, 0x6a, 0xb5, 0x9e, 0x05, - 0x9d, 0x65, 0xf6, 0x77, 0xd4, 0x59, 0xb2, 0x17, 0x63, 0x51, 0x57, 0xaf, 0x21, 0x4f, 0xf4, 0xae, - 0x6c, 0x00, 0x0a, 0x27, 0x01, 0x21, 0x9e, 0x9a, 0xae, 0xea, 0x1a, 0x3a, 0x4a, 0x70, 0xe6, 0xd9, - 0x5f, 0xa7, 0x6b, 0xa3, 0x42, 0xd3, 0x39, 0xbe, 0x0f, 0xbd, 0x92, 0xec, 0xba, 0xd3, 0x54, 0x62, - 0x9f, 0x01, 0x52, 0xc6, 0xcf, 0xc2, 0x38, 0xa8, 0x37, 0x53, 0xee, 0xc1, 0x30, 0x63, 0xa2, 0x1b, - 0xfb, 0x09, 0x6c, 0x2b, 0xf1, 0x8b, 0x60, 0x5e, 0x1b, 0x6b, 0x37, 0x29, 0xeb, 0xd2, 0x48, 0xa3, - 0x1d, 0x27, 0x4e, 0xb2, 0x04, 0x77, 0x2b, 0xd8, 0x4e, 0x12, 0x41, 0x96, 0xe3, 0x56, 0x01, 0xbf, - 0xa0, 0x3f, 0x89, 0x79, 0xae, 0xb0, 0x56, 0xea, 0x89, 0x58, 0xab, 0xff, 0x0c, 0x70, 0x4a, 0xd9, - 0x95, 0xfa, 0xc4, 0x44, 0xef, 0x7b, 0x34, 0xd2, 0x73, 0x4a, 0x2c, 0x85, 0xc4, 0xf1, 0x7d, 0x79, - 0x9f, 0x4d, 0x2c, 0x96, 0xe2, 0xca, 0x62, 0x46, 0x3c, 0x79, 0x91, 0x4d, 0x2c, 0xd7, 0x42, 0x36, - 0x8d, 0x88, 0xba, 0xc6, 0x26, 0x96, 0x6b, 0xfb, 0x0f, 0x03, 0xb6, 0x9e, 0x91, 0xb9, 0x46, 0xde, - 0x03, 0x98, 0x85, 0x51, 0x18, 0x73, 0x1a, 0x10, 0x26, 0x1d, 0xb4, 0x70, 0x4a, 0xf2, 0xdf, 0xfd, - 0xc8, 0x16, 0x22, 0xfe, 0x54, 0x76, 0x4a, 0x13, 0xcb, 0xb5, 0x90, 0x5d, 0x12, 0x67, 0xa1, 0x3f, - 0x55, 0xb9, 0x16, 0x0c, 0xcc, 0xb8, 0xe3, 0x5e, 0xc9, 0x2f, 0xb3, 0x89, 0xd5, 0xe6, 0xf8, 0x2f, - 0x80, 0x9e, 0x6e, 0x28, 0xf9, 0x04, 0x40, 0xaf, 0xa0, 0x9b, 0x7a, 0x3a, 0xa0, 0x0f, 0x8b, 0x2f, - 0x84, 0xe2, 0x53, 0xc4, 0xfa, 0xa8, 0x42, 0x4b, 0x17, 0x7b, 0x03, 0x05, 0xf0, 0x76, 0x81, 0x9a, - 0xd1, 0xa3, 0xa2, 0xf5, 0x3a, 0xe2, 0xb7, 0x1e, 0xd7, 0xd2, 0x5d, 0xfa, 0xe3, 0x30, 0x2c, 0xe1, - 0x5a, 0x74, 0x58, 0x81, 0x92, 0xe1, 0x7b, 0xeb, 0xe3, 0x9a, 0xda, 0x4b, 0xaf, 0xaf, 0x01, 0x15, - 0x89, 0x18, 0x3d, 0xae, 0x84, 0x59, 0x11, 0xbd, 0x75, 0x58, 0x4f, 0x79, 0x6d, 0xa2, 0x8a, 0xa2, - 0x2b, 0x13, 0xcd, 0x3c, 0x02, 0x2a, 0x13, 0xcd, 0xf1, 0xfe, 0x06, 0xba, 0x82, 0x41, 0x9e, 0xbe, - 0xd1, 0xc1, 0xba, 0x37, 0x65, 0xe1, 0x75, 0x60, 0x3d, 0xaa, 0xa3, 0xba, 0x74, 0x36, 0x81, 0x5e, - 0x9a, 0x64, 0x51, 0x49, 0xd3, 0x95, 0x3c, 0x17, 0xac, 0xfd, 0x2a, 0xb5, 0x74, 0x36, 0x79, 0xd2, - 0x2d, 0xcb, 0x66, 0x0d, 0xa3, 0x97, 0x65, 0xb3, 0x8e, 0xc3, 0xed, 0x0d, 0xf4, 0x2b, 0xdc, 0xcd, - 0xb1, 0x15, 0x1a, 0xdf, 
0x06, 0x90, 0xe6, 0x41, 0xeb, 0xa0, 0x86, 0x66, 0xe2, 0xe9, 0x53, 0x03, - 0xcd, 0xe0, 0xad, 0x2c, 0x69, 0xa0, 0x87, 0xb7, 0x01, 0xa4, 0x18, 0xcf, 0x1a, 0x57, 0x2b, 0xa6, - 0x1c, 0xbd, 0x82, 0x6e, 0x8a, 0x2d, 0xca, 0x86, 0x47, 0x91, 0x7f, 0xca, 0x86, 0x47, 0x19, 0xe5, - 0x6c, 0xa0, 0x0b, 0xe8, 0x67, 0xf8, 0x03, 0xed, 0xaf, 0xb3, 0xcc, 0xb2, 0x92, 0xf5, 0xb0, 0x52, - 0x2f, 0xdd, 0x64, 0x69, 0x5a, 0x41, 0x6b, 0x83, 0xcb, 0x0e, 0xc0, 0xfd, 0x2a, 0xb5, 0xc4, 0xc1, - 0xc5, 0xa6, 0xfc, 0xc9, 0x7b, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x3c, 0x6d, 0xd7, - 0xfb, 0x0d, 0x00, 0x00, + // 1247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0x5b, 0x73, 0xdb, 0xc4, + 0x17, 0x8f, 0x62, 0x3b, 0x71, 0x8e, 0xed, 0xc6, 0xff, 0x4d, 0x9a, 0x38, 0x4a, 0xff, 0xc1, 0x15, + 0x90, 0x3a, 0x6d, 0x1a, 0x20, 0x9d, 0x42, 0x81, 0x17, 0x20, 0x01, 0x26, 0x0f, 0xa5, 0x33, 0x0a, + 0xed, 0x30, 0x43, 0x67, 0x34, 0x1b, 0x69, 0x9d, 0x88, 0xc8, 0x92, 0xaa, 0x5d, 0x85, 0x84, 0x6f, + 0xc2, 0x33, 0x2f, 0x7d, 0xe7, 0x03, 0xf1, 0x41, 0x78, 0x61, 0xf6, 0x22, 0x59, 0x37, 0xc7, 0xe2, + 0xf2, 0xb6, 0x3a, 0x7b, 0xce, 0xef, 0x5c, 0xf6, 0xec, 0xd9, 0x9f, 0x0d, 0x6b, 0x57, 0x81, 0x17, + 0x4f, 0x88, 0x45, 0x49, 0x74, 0x45, 0xa2, 0x83, 0x30, 0x0a, 0x58, 0x80, 0xfa, 0x39, 0xa1, 0x15, + 0x9e, 0x19, 0x1f, 0x00, 0xfa, 0x0a, 0x33, 0xfb, 0xe2, 0x98, 0x78, 0x84, 0x11, 0x93, 0xbc, 0x89, + 0x09, 0x65, 0x68, 0x0b, 0xda, 0x63, 0xd7, 0x23, 0x96, 0xeb, 0xd0, 0x81, 0x36, 0x6c, 0x8c, 0x56, + 0xcc, 0x65, 0xfe, 0x7d, 0xe2, 0x50, 0xe3, 0x05, 0xac, 0xe5, 0x0c, 0x68, 0x18, 0xf8, 0x94, 0xa0, + 0x67, 0xb0, 0x1c, 0x11, 0x1a, 0x7b, 0x4c, 0x1a, 0x74, 0x0e, 0x77, 0x0e, 0x8a, 0xbe, 0x0e, 0x52, + 0x93, 0xd8, 0x63, 0x66, 0xa2, 0x6e, 0xb8, 0xd0, 0xcd, 0x6e, 0xa0, 0x4d, 0x58, 0x56, 0xbe, 0x07, + 0xda, 0x50, 0x1b, 0xad, 0x98, 0x4b, 0xd2, 0x35, 0xda, 0x80, 0x25, 0xca, 0x30, 0x8b, 0xe9, 0x60, + 0x71, 0xa8, 0x8d, 0x5a, 0xa6, 0xfa, 0x42, 0xeb, 0xd0, 0x22, 0x51, 0x14, 0x44, 0x83, 0x86, 0x50, + 0x97, 0x1f, 0x08, 0x41, 0x93, 0xba, 0xbf, 0x90, 0x41, 0x73, 0xa8, 0x8d, 0x7a, 0xa6, 0x58, 0x1b, + 0xcb, 0xd0, 0xfa, 0x7a, 0x12, 0xb2, 0x1b, 0xe3, 0x13, 0x18, 0xbc, 0xc2, 0x76, 0x1c, 0x4f, 0x5e, + 0x89, 0x18, 0x8f, 0x2e, 0x88, 0x7d, 0x99, 0xe4, 0xbe, 0x0d, 0x2b, 0x2a, 0x72, 0x15, 0x41, 0xcf, + 0x6c, 0x4b, 0xc1, 0x89, 0x63, 0x7c, 0x01, 0x5b, 0x15, 0x86, 0xaa, 0x06, 0xef, 0x42, 0xef, 0x1c, + 0x47, 0x67, 0xf8, 0x9c, 0x58, 0x11, 0x66, 0x6e, 0x20, 0xac, 0x35, 0xb3, 0xab, 0x84, 0x26, 0x97, + 0x19, 0x3f, 0x82, 0x9e, 0x43, 0x08, 0x26, 0x21, 0xb6, 0x59, 0x1d, 0xe7, 0x68, 0x08, 0x9d, 0x30, + 0x22, 0xd8, 0xf3, 0x02, 0x1b, 0x33, 0x22, 0xaa, 0xd0, 0x30, 0xb3, 0x22, 0xe3, 0xff, 0xb0, 0x5d, + 0x09, 0x2e, 0x03, 0x34, 0x9e, 0x15, 0xa2, 0x0f, 0x26, 0x13, 0xb7, 0x96, 0x6b, 0xe3, 0x5e, 0x29, + 0x6a, 0x61, 0xa9, 0x70, 0x3f, 0x2d, 0xec, 0x7a, 0x04, 0xfb, 0x71, 0x58, 0x0b, 0xb8, 0x18, 0x71, + 0x62, 0x9a, 0x22, 0x6f, 0xca, 0xe6, 0x38, 0x0a, 0x3c, 0x8f, 0xd8, 0xcc, 0x0d, 0xfc, 0x04, 0x76, + 0x07, 0xc0, 0x4e, 0x85, 0xaa, 0x55, 0x32, 0x12, 0x43, 0x87, 0x41, 0xd9, 0x54, 0xc1, 0xbe, 0xd5, + 0x60, 0xed, 0x4b, 0x4a, 0xdd, 0x73, 0x5f, 0xba, 0xad, 0x55, 0xfe, 0xbc, 0xc3, 0xc5, 0xa2, 0xc3, + 0xe2, 0xf1, 0x34, 0x4a, 0xc7, 0xc3, 0x35, 0x22, 0x12, 0x7a, 0xae, 0x8d, 0x05, 0x44, 0x53, 0x40, + 0x64, 0x45, 0xa8, 0x0f, 0x0d, 0xc6, 0xbc, 0x41, 0x4b, 0xec, 0xf0, 0xa5, 0xb1, 0x01, 0xeb, 0xf9, + 0x48, 0x55, 0x0a, 0x1f, 0xc3, 0xa6, 0x94, 0x9c, 0xde, 0xf8, 0xf6, 0xa9, 0xb8, 0x09, 0xb5, 0x0a, + 0xfe, 0xa7, 0x06, 0x83, 0xb2, 0xa1, 0xea, 0xe0, 0x7f, 0x9b, 0xff, 0xdf, 
0xcd, 0x0e, 0xbd, 0x03, + 0x1d, 0x86, 0x5d, 0xcf, 0x0a, 0xc6, 0x63, 0x4a, 0xd8, 0x60, 0x69, 0xa8, 0x8d, 0x9a, 0x26, 0x70, + 0xd1, 0x0b, 0x21, 0x41, 0x7b, 0xd0, 0xb7, 0x65, 0x17, 0x5b, 0x11, 0xb9, 0x72, 0x29, 0x47, 0x5e, + 0x16, 0x81, 0xad, 0xda, 0x49, 0x77, 0x4b, 0x31, 0x32, 0xa0, 0xe7, 0x3a, 0xd7, 0x96, 0x18, 0x1e, + 0xe2, 0xea, 0xb7, 0x05, 0x5a, 0xc7, 0x75, 0xae, 0xbf, 0x71, 0x3d, 0x72, 0xca, 0x27, 0xc0, 0x53, + 0xd8, 0x98, 0x26, 0x7f, 0xe2, 0x3b, 0xe4, 0xba, 0x56, 0xd1, 0xbe, 0xcd, 0x16, 0x5b, 0x99, 0xa9, + 0x92, 0xed, 0x03, 0x72, 0xb9, 0x40, 0xfa, 0xb5, 0x03, 0x9f, 0x11, 0x9f, 0x09, 0x80, 0xae, 0xd9, + 0x17, 0x3b, 0xdc, 0xf9, 0x91, 0x94, 0x1b, 0xbf, 0x6a, 0x70, 0x77, 0x8a, 0x74, 0x8c, 0x19, 0xae, + 0xd5, 0x7a, 0x3a, 0xb4, 0xd3, 0xec, 0x17, 0xe5, 0x5e, 0xf2, 0xcd, 0xc7, 0xa2, 0xaa, 0x5e, 0x43, + 0xec, 0xa8, 0xaf, 0xaa, 0x01, 0xc8, 0x9d, 0xf8, 0x84, 0x38, 0x72, 0xba, 0xca, 0x63, 0x68, 0x4b, + 0xc1, 0x89, 0x63, 0x7c, 0x9e, 0xad, 0x8d, 0x0c, 0x4d, 0xe5, 0x78, 0x1f, 0xba, 0x15, 0xd9, 0x75, + 0xc6, 0x99, 0xc4, 0x3e, 0x02, 0x24, 0x8d, 0x9f, 0x07, 0xb1, 0x5f, 0x6f, 0xa6, 0xdc, 0x85, 0xb5, + 0x9c, 0x89, 0x6a, 0xec, 0x27, 0xb0, 0x2e, 0xc5, 0x2f, 0xfd, 0x49, 0x6d, 0xac, 0xcd, 0xa4, 0xac, + 0xa9, 0x91, 0x42, 0x3b, 0x4c, 0x9c, 0xe4, 0x1f, 0xb8, 0x5b, 0xc1, 0x36, 0x92, 0x08, 0xf2, 0x6f, + 0x9c, 0xf1, 0xbb, 0x06, 0x1b, 0xa6, 0x6a, 0x67, 0xf2, 0xdf, 0x0e, 0x8e, 0xec, 0xc5, 0x69, 0xcc, + 0xbc, 0x38, 0xcd, 0xe9, 0xc5, 0x19, 0x41, 0x9f, 0x06, 0x71, 0x64, 0x13, 0xcb, 0xc1, 0x0c, 0x5b, + 0x7e, 0xe0, 0x10, 0x75, 0xa0, 0x77, 0xa4, 0x9c, 0x1f, 0xe0, 0x77, 0x81, 0x43, 0x8c, 0x2d, 0xd8, + 0x2c, 0x05, 0xad, 0x12, 0xf2, 0x61, 0xf5, 0x28, 0x08, 0x6f, 0x78, 0x83, 0xd6, 0x4c, 0xa4, 0xe3, + 0x52, 0x2b, 0xb9, 0x64, 0x22, 0x93, 0xb6, 0xb9, 0xe2, 0xd2, 0x13, 0x79, 0xc3, 0xd4, 0xbe, 0x83, + 0x99, 0xdc, 0x6f, 0x24, 0xfb, 0xc7, 0x98, 0xf1, 0x7d, 0xe3, 0x29, 0xf4, 0xa7, 0xfe, 0xea, 0xf7, + 0xd6, 0x67, 0xb0, 0x6d, 0x12, 0xec, 0xc8, 0xe0, 0xc5, 0x55, 0xae, 0x3f, 0xee, 0xfe, 0xd0, 0xe0, + 0x5e, 0xb5, 0x71, 0x9d, 0x91, 0xc7, 0x2f, 0x77, 0x32, 0x52, 0x98, 0x3b, 0x21, 0x94, 0xe1, 0x49, + 0x28, 0xf2, 0x6e, 0x9a, 0x7d, 0x35, 0x57, 0xbe, 0x4f, 0xe4, 0xe5, 0x01, 0xd4, 0x28, 0x0d, 0x20, + 0x8e, 0x98, 0xd4, 0x27, 0x83, 0xd8, 0x94, 0x88, 0x8e, 0xac, 0x53, 0x0e, 0x31, 0xd5, 0x16, 0x88, + 0x2d, 0x89, 0xa8, 0x14, 0xc5, 0x48, 0xfb, 0x01, 0xe0, 0xd8, 0xa5, 0x97, 0x32, 0x2d, 0xde, 0x29, + 0x8e, 0x1b, 0xa9, 0xe7, 0x90, 0x2f, 0xb9, 0x04, 0x7b, 0x9e, 0x0a, 0x9a, 0x2f, 0xf9, 0x64, 0x88, + 0x29, 0x71, 0x54, 0x78, 0x62, 0xcd, 0x65, 0xe3, 0x88, 0x10, 0x15, 0x89, 0x58, 0x1b, 0xbf, 0x69, + 0xb0, 0xf2, 0x9c, 0x4c, 0x14, 0xf2, 0x0e, 0xc0, 0x79, 0x10, 0x05, 0x31, 0x73, 0x7d, 0x42, 0x85, + 0x83, 0x96, 0x99, 0x91, 0xfc, 0x73, 0x3f, 0x62, 0x52, 0x11, 0x6f, 0xac, 0x92, 0x13, 0x6b, 0x2e, + 0xbb, 0x20, 0x38, 0x54, 0x2f, 0x82, 0x58, 0x73, 0xa2, 0x47, 0x19, 0xb6, 0x2f, 0xc5, 0x03, 0xd0, + 0x34, 0xe5, 0xc7, 0xe1, 0xdb, 0x1e, 0x74, 0xd5, 0xdc, 0x12, 0x4c, 0x13, 0xbd, 0x86, 0x4e, 0x86, + 0xa1, 0xa2, 0xf7, 0xca, 0x44, 0xb4, 0xcc, 0x78, 0xf5, 0xf7, 0xe7, 0x68, 0xa9, 0x1b, 0xb3, 0x80, + 0x7c, 0xf8, 0x5f, 0x89, 0x01, 0xa2, 0x87, 0x65, 0xeb, 0x59, 0xfc, 0x52, 0x7f, 0x54, 0x4b, 0x37, + 0xf5, 0xc7, 0x60, 0xad, 0x82, 0xd2, 0xa1, 0xfd, 0x39, 0x28, 0x39, 0x5a, 0xa9, 0x3f, 0xae, 0xa9, + 0x9d, 0x7a, 0x7d, 0x03, 0xa8, 0xcc, 0xf7, 0xd0, 0xa3, 0xb9, 0x30, 0x53, 0x3e, 0xa9, 0xef, 0xd7, + 0x53, 0x9e, 0x99, 0xa8, 0x64, 0x82, 0x73, 0x13, 0xcd, 0x71, 0xcd, 0xb9, 0x89, 0x16, 0xe8, 0xe5, + 0x02, 0xba, 0x84, 0x7e, 0x91, 0x25, 0xa2, 0xbd, 0x59, 0x3f, 0x5d, 0x4a, 0x24, 0x54, 0x7f, 0x58, + 
0x47, 0x35, 0x75, 0x66, 0x41, 0x37, 0xcb, 0xe5, 0x50, 0x45, 0xd3, 0x55, 0xb0, 0x52, 0x7d, 0x77, + 0x9e, 0x5a, 0x36, 0x9b, 0x22, 0xb7, 0xab, 0xca, 0x66, 0x06, 0x71, 0xac, 0xca, 0x66, 0x16, 0x55, + 0x34, 0x16, 0xd0, 0x4f, 0xb0, 0x5a, 0x20, 0x45, 0x68, 0x74, 0x1b, 0x40, 0x96, 0x6e, 0xe9, 0x7b, + 0x35, 0x34, 0x13, 0x4f, 0x1f, 0x6a, 0xe8, 0x1c, 0xee, 0xe4, 0xb9, 0x09, 0x7a, 0x70, 0x1b, 0x40, + 0x86, 0x58, 0xe9, 0xa3, 0xf9, 0x8a, 0x19, 0x47, 0xaf, 0xa1, 0x93, 0x21, 0x25, 0x55, 0xc3, 0xa3, + 0x4c, 0x73, 0xaa, 0x86, 0x47, 0x15, 0xb3, 0x59, 0x40, 0x67, 0xd0, 0xcb, 0xd1, 0x14, 0xb4, 0x3b, + 0xcb, 0x32, 0x4f, 0x7e, 0xf4, 0x07, 0x73, 0xf5, 0xb2, 0x4d, 0x96, 0x65, 0x2f, 0x68, 0x66, 0x70, + 0xf9, 0x01, 0xb8, 0x3b, 0x4f, 0x2d, 0x75, 0x70, 0x01, 0xab, 0x05, 0x42, 0x51, 0x75, 0xee, 0xd5, + 0x44, 0xa9, 0xea, 0xdc, 0x67, 0xb1, 0x93, 0x05, 0xf4, 0x33, 0xac, 0x57, 0xbd, 0xdd, 0xe8, 0x71, + 0x15, 0xc8, 0x4c, 0x82, 0xa0, 0x1f, 0xd4, 0x55, 0x4f, 0x1d, 0xbf, 0x84, 0x76, 0x42, 0x54, 0xd0, + 0xfd, 0xb2, 0x75, 0x81, 0x34, 0xe9, 0xc6, 0x6d, 0x2a, 0xd3, 0xe6, 0x3a, 0x5b, 0x12, 0xff, 0xc2, + 0x3c, 0xf9, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x7d, 0x0d, 0xbb, 0x9c, 0x11, 0x00, 0x00, } diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index 429ca9b68..c924b7a62 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -29,7 +29,7 @@ func (vs *VolumeServer) AssignVolume(ctx context.Context, req *volume_server_pb. resp := &volume_server_pb.AssignVolumeResponse{} err := vs.store.AddVolume( - storage.VolumeId(req.VolumdId), + storage.VolumeId(req.VolumeId), req.Collection, vs.needleMapKind, req.Replication, @@ -51,7 +51,7 @@ func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.V resp := &volume_server_pb.VolumeMountResponse{} - err := vs.store.MountVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.MountVolume(storage.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume mount %v: %v", req, err) @@ -67,7 +67,7 @@ func (vs *VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb resp := &volume_server_pb.VolumeUnmountResponse{} - err := vs.store.UnmountVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.UnmountVolume(storage.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume unmount %v: %v", req, err) @@ -83,7 +83,7 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. 
resp := &volume_server_pb.VolumeDeleteResponse{} - err := vs.store.DeleteVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.DeleteVolume(storage.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume delete %v: %v", req, err) diff --git a/weed/server/volume_grpc_replicate.go b/weed/server/volume_grpc_replicate.go new file mode 100644 index 000000000..20a85fd6f --- /dev/null +++ b/weed/server/volume_grpc_replicate.go @@ -0,0 +1,155 @@ +package weed_server + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "io" + "os" +) + +func (vs *VolumeServer) ReplicateVolume(ctx context.Context, req *volume_server_pb.ReplicateVolumeRequest) (*volume_server_pb.ReplicateVolumeResponse, error) { + + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) + if v != nil { + // unmount the volume + err := vs.store.UnmountVolume(storage.VolumeId(req.VolumeId)) + if err != nil { + return nil, fmt.Errorf("failed to unmount volume %d: %v", req.VolumeId, err) + } + } + + location := vs.store.FindFreeLocation() + if location == nil { + return nil, fmt.Errorf("no space left") + } + + volumeFileName := storage.VolumeFileName(req.Collection, location.Directory, int(req.VolumeId)) + + // the master will not start compaction for read-only volumes, so it is safe to just copy files directly + // copy .dat and .idx files + // read .idx .dat file size and timestamp + // send .idx file + // send .dat file + // confirm size and timestamp + + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + + // TODO read file sizes before copying + client.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{}) + + copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ + VolumeId: req.VolumeId, + IsIdxFile: true, + }) + + if err != nil { + return fmt.Errorf("failed to start copying volume %d idx file: %v", req.VolumeId, err) + } + + err = writeToFile(copyFileClient, volumeFileName+".idx") + if err != nil { + return fmt.Errorf("failed to copy volume %d idx file: %v", req.VolumeId, err) + } + + copyFileClient, err = client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ + VolumeId: req.VolumeId, + IsDatFile: true, + }) + + if err != nil { + return fmt.Errorf("failed to start copying volume %d dat file: %v", req.VolumeId, err) + } + + err = writeToFile(copyFileClient, volumeFileName+".dat") + if err != nil { + return fmt.Errorf("failed to copy volume %d dat file: %v", req.VolumeId, err) + } + + return nil + }) + + if err != nil { + return nil, err + } + + // TODO: check the timestamp and size + + // mount the volume + err = vs.store.MountVolume(storage.VolumeId(req.VolumeId)) + if err != nil { + return nil, fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err) + } + + return &volume_server_pb.ReplicateVolumeResponse{}, err + +} + +func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error { + println("writing to ", fileName) + dst, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer dst.Close() + + for { + resp, receiveErr := client.Recv() + if receiveErr == io.EOF { + break + } + if receiveErr != nil { + return fmt.Errorf("receiving %s: %v", fileName, receiveErr) + } + dst.Write(resp.FileContent) + } + return nil +} + +func (vs *VolumeServer) 
ReadVolumeFileStatus(ctx context.Context, req *volume_server_pb.ReadVolumeFileStatusRequest) (*volume_server_pb.ReadVolumeFileStatusResponse, error) { + resp := &volume_server_pb.ReadVolumeFileStatusResponse{} + return resp, nil +} + +func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) (error) { + + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + const BufferSize = 1024 * 16 + var fileName = v.FileName() + if req.IsDatFile { + fileName += ".dat" + } else if req.IsIdxFile { + fileName += ".idx" + } + file, err := os.Open(fileName) + if err != nil { + return err + } + defer file.Close() + + buffer := make([]byte, BufferSize) + + for { + bytesread, err := file.Read(buffer) + + if err != nil { + if err != io.EOF { + return err + } + break + } + + stream.Send(&volume_server_pb.CopyFileResponse{ + FileContent: buffer[:bytesread], + }) + + } + + return nil +} diff --git a/weed/server/volume_grpc_sync.go b/weed/server/volume_grpc_sync.go index 0114b38a4..971258689 100644 --- a/weed/server/volume_grpc_sync.go +++ b/weed/server/volume_grpc_sync.go @@ -12,14 +12,14 @@ import ( func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) if v == nil { - return nil, fmt.Errorf("not found volume id %d", req.VolumdId) + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) } resp := v.GetVolumeSyncStatus() - glog.V(2).Infof("volume sync status %d", req.VolumdId) + glog.V(2).Infof("volume sync status %d", req.VolumeId) return resp, nil @@ -27,17 +27,17 @@ func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server func (vs *VolumeServer) VolumeSyncIndex(req *volume_server_pb.VolumeSyncIndexRequest, stream volume_server_pb.VolumeServer_VolumeSyncIndexServer) error { - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) if v == nil { - return fmt.Errorf("not found volume id %d", req.VolumdId) + return fmt.Errorf("not found volume id %d", req.VolumeId) } content, err := v.IndexFileContent() if err != nil { - glog.Errorf("sync volume %d index: %v", req.VolumdId, err) + glog.Errorf("sync volume %d index: %v", req.VolumeId, err) } else { - glog.V(2).Infof("sync volume %d index", req.VolumdId) + glog.V(2).Infof("sync volume %d index", req.VolumeId) } const blockSizeLimit = 1024 * 1024 * 2 @@ -57,9 +57,9 @@ func (vs *VolumeServer) VolumeSyncIndex(req *volume_server_pb.VolumeSyncIndexReq func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataRequest, stream volume_server_pb.VolumeServer_VolumeSyncDataServer) error { - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) if v == nil { - return fmt.Errorf("not found volume id %d", req.VolumdId) + return fmt.Errorf("not found volume id %d", req.VolumeId) } if uint32(v.SuperBlock.CompactRevision) != req.Revision { @@ -82,7 +82,7 @@ func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataReque } if err != nil { - glog.Errorf("sync volume %d data: %v", req.VolumdId, err) + glog.Errorf("sync volume %d data: %v", req.VolumeId, err) } const blockSizeLimit = 1024 * 1024 * 2 diff --git 
a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go index f0c87b582..d31b8f8e7 100644 --- a/weed/server/volume_grpc_vacuum.go +++ b/weed/server/volume_grpc_vacuum.go @@ -12,12 +12,12 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve resp := &volume_server_pb.VacuumVolumeCheckResponse{} - garbageRatio, err := vs.store.CheckCompactVolume(storage.VolumeId(req.VolumdId)) + garbageRatio, err := vs.store.CheckCompactVolume(storage.VolumeId(req.VolumeId)) resp.GarbageRatio = garbageRatio if err != nil { - glog.V(3).Infof("check volume %d: %v", req.VolumdId, err) + glog.V(3).Infof("check volume %d: %v", req.VolumeId, err) } return resp, err @@ -28,12 +28,12 @@ func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_ser resp := &volume_server_pb.VacuumVolumeCompactResponse{} - err := vs.store.CompactVolume(storage.VolumeId(req.VolumdId), req.Preallocate) + err := vs.store.CompactVolume(storage.VolumeId(req.VolumeId), req.Preallocate) if err != nil { - glog.Errorf("compact volume %d: %v", req.VolumdId, err) + glog.Errorf("compact volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("compact volume %d", req.VolumdId) + glog.V(1).Infof("compact volume %d", req.VolumeId) } return resp, err @@ -44,12 +44,12 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv resp := &volume_server_pb.VacuumVolumeCommitResponse{} - err := vs.store.CommitCompactVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.CommitCompactVolume(storage.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("commit volume %d: %v", req.VolumdId, err) + glog.Errorf("commit volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("commit volume %d", req.VolumdId) + glog.V(1).Infof("commit volume %d", req.VolumeId) } return resp, err @@ -60,12 +60,12 @@ func (vs *VolumeServer) VacuumVolumeCleanup(ctx context.Context, req *volume_ser resp := &volume_server_pb.VacuumVolumeCleanupResponse{} - err := vs.store.CommitCleanupVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.CommitCleanupVolume(storage.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("cleanup volume %d: %v", req.VolumdId, err) + glog.Errorf("cleanup volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("cleanup volume %d", req.VolumdId) + glog.V(1).Infof("cleanup volume %d", req.VolumeId) } return resp, err diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go index 096532fdf..7d09661dc 100644 --- a/weed/shell/shell_liner.go +++ b/weed/shell/shell_liner.go @@ -71,7 +71,7 @@ func RunShell(options ShellOptions) { } else { for _, c := range commands { if c.Name() == cmd { - if err := c.Do(args, commandEnv, os.Stderr); err != nil { + if err := c.Do(args, commandEnv, os.Stdout); err != nil { fmt.Fprintf(os.Stderr, "error: %v\n", err) } } diff --git a/weed/storage/store.go b/weed/storage/store.go index 8d4d9c55e..56e973738 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -77,7 +77,7 @@ func (s *Store) findVolume(vid VolumeId) *Volume { } return nil } -func (s *Store) findFreeLocation() (ret *DiskLocation) { +func (s *Store) FindFreeLocation() (ret *DiskLocation) { max := 0 for _, location := range s.Locations { currentFreeCount := location.MaxVolumeCount - location.VolumesLen() @@ -92,7 +92,7 @@ func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleM if s.findVolume(vid) != nil { return fmt.Errorf("Volume Id %d already exists!", vid) } - if location := s.findFreeLocation(); location != nil 
{ + if location := s.FindFreeLocation(); location != nil { glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", location.Directory, vid, collection, replicaPlacement, ttl) if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate); err == nil { diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 22acf1653..807fefa38 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -5,6 +5,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "os" "path" + "strconv" "sync" "time" @@ -42,14 +43,18 @@ func (v *Volume) String() string { return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, readOnly:%v", v.Id, v.dir, v.Collection, v.dataFile, v.nm, v.readOnly) } -func (v *Volume) FileName() (fileName string) { - if v.Collection == "" { - fileName = path.Join(v.dir, v.Id.String()) +func VolumeFileName(collection string, dir string, id int) (fileName string) { + idString := strconv.Itoa(id) + if collection == "" { + fileName = path.Join(dir, idString) } else { - fileName = path.Join(v.dir, v.Collection+"_"+v.Id.String()) + fileName = path.Join(dir, collection+"_"+idString) } return } +func (v *Volume) FileName() (fileName string) { + return VolumeFileName(v.Collection, v.dir, int(v.Id)) +} func (v *Volume) DataFile() *os.File { return v.dataFile } diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go index 8d90a729d..827e6685a 100644 --- a/weed/storage/volume_sync.go +++ b/weed/storage/volume_sync.go @@ -192,7 +192,7 @@ func (v *Volume) fetchNeedle(volumeServer string, grpcDialOption grpc.DialOption return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { stream, err := client.VolumeSyncData(context.Background(), &volume_server_pb.VolumeSyncDataRequest{ - VolumdId: uint32(v.Id), + VolumeId: uint32(v.Id), Revision: uint32(compactRevision), Offset: uint32(needleValue.Offset), Size: uint32(needleValue.Size), diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index 66b1b3af5..1360988b3 100644 --- a/weed/topology/allocate_volume.go +++ b/weed/topology/allocate_volume.go @@ -17,7 +17,7 @@ func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid storage.Vo return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { _, deleteErr := client.AssignVolume(context.Background(), &volume_server_pb.AssignVolumeRequest{ - VolumdId: uint32(vid), + VolumeId: uint32(vid), Collection: option.Collection, Replication: option.ReplicaPlacement.String(), Ttl: option.Ttl.String(), diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 840821efa..ea65b2ff9 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -17,7 +17,7 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi go func(index int, url string, vid storage.VolumeId) { err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{ - VolumdId: uint32(vid), + VolumeId: uint32(vid), }) if err != nil { ch <- false @@ -52,7 +52,7 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) err := 
operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{ - VolumdId: uint32(vid), + VolumeId: uint32(vid), }) return err }) @@ -83,7 +83,7 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ - VolumdId: uint32(vid), + VolumeId: uint32(vid), }) return err }) @@ -104,7 +104,7 @@ func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{ - VolumdId: uint32(vid), + VolumeId: uint32(vid), }) return err }) From 6b70b3610530d9b3637fd4179a0f2cecc5541fdd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 23 Mar 2019 11:34:09 -0700 Subject: [PATCH 093/450] weed shell: add "volume.fix.replication" --- weed/shell/command_volume_fix_replication.go | 194 +++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 weed/shell/command_volume_fix_replication.go diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go new file mode 100644 index 000000000..6a76ba458 --- /dev/null +++ b/weed/shell/command_volume_fix_replication.go @@ -0,0 +1,194 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "io" + "math/rand" + "sort" +) + +func init() { + commands = append(commands, &commandVolumeFixReplication{}) +} + +type commandVolumeFixReplication struct { +} + +func (c *commandVolumeFixReplication) Name() string { + return "volume.fix.replication" +} + +func (c *commandVolumeFixReplication) Help() string { + return `add replicas to volumes that are missing replicas + + -n do not take action +` +} + +func (c *commandVolumeFixReplication) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + takeAction := true + if len(args) > 0 && args[0] == "-n" { + takeAction = false + } + + var resp *master_pb.VolumeListResponse + ctx := context.Background() + err = commandEnv.masterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return err + } + + // find all volumes that needs replication + // collect all data nodes + replicatedVolumeLocations := make(map[uint32][]location) + replicatedVolumeInfo := make(map[uint32]*master_pb.VolumeInformationMessage) + var allLocations []location + for _, dc := range resp.TopologyInfo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, dn := range rack.DataNodeInfos { + loc := newLocation(dc.Id, rack.Id, dn) + for _, v := range dn.VolumeInfos { + if v.ReplicaPlacement > 0 { + replicatedVolumeLocations[v.Id] = 
append(replicatedVolumeLocations[v.Id], loc) + replicatedVolumeInfo[v.Id] = v + } + } + allLocations = append(allLocations, loc) + } + } + } + + // find all under replicated volumes + underReplicatedVolumeLocations := make(map[uint32][]location) + for vid, locations := range replicatedVolumeLocations { + volumeInfo := replicatedVolumeInfo[vid] + replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + if replicaPlacement.GetCopyCount() > len(locations) { + underReplicatedVolumeLocations[vid] = locations + } + } + + if len(underReplicatedVolumeLocations) == 0 { + return fmt.Errorf("no under replicated volumes") + } + + if len(allLocations) == 0 { + return fmt.Errorf("no data nodes at all") + } + + // find the most under populated data nodes + keepDataNodesSorted(allLocations) + + for vid, locations := range underReplicatedVolumeLocations { + volumeInfo := replicatedVolumeInfo[vid] + replicaPlacement, _ := storage.NewReplicaPlacementFromByte(byte(volumeInfo.ReplicaPlacement)) + foundNewLocation := false + for _, dst := range allLocations { + // check whether data nodes satisfy the constraints + if dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, locations, dst) { + // ask the volume server to replicate the volume + sourceNodes := underReplicatedVolumeLocations[vid] + sourceNode := sourceNodes[rand.Intn(len(sourceNodes))] + foundNewLocation = true + fmt.Fprintf(writer, "replicating volume %d %s from %s to dataNode %s ...\n", volumeInfo.Id, replicaPlacement, sourceNode.dataNode.Id, dst.dataNode.Id) + + if !takeAction { + break + } + + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, replicateErr := volumeServerClient.ReplicateVolume(ctx, &volume_server_pb.ReplicateVolumeRequest{ + VolumeId: volumeInfo.Id, + Collection: volumeInfo.Collection, + SourceDataNode: sourceNode.dataNode.Id, + }) + return replicateErr + }) + + if err != nil { + return err + } + + // adjust free volume count + dst.dataNode.FreeVolumeCount-- + keepDataNodesSorted(allLocations) + break + } + } + if !foundNewLocation { + fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", volumeInfo.Id, replicaPlacement, locations) + } + + } + + return nil +} + +func keepDataNodesSorted(dataNodes []location) { + sort.Slice(dataNodes, func(i, j int) bool { + return dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount + }) +} + +func satisfyReplicaPlacement(replicaPlacement *storage.ReplicaPlacement, existingLocations []location, possibleLocation location) bool { + + existingDataCenters := make(map[string]bool) + existingRacks := make(map[string]bool) + existingDataNodes := make(map[string]bool) + for _, loc := range existingLocations { + existingDataCenters[loc.DataCenter()] = true + existingRacks[loc.Rack()] = true + existingDataNodes[loc.String()] = true + } + + if replicaPlacement.DiffDataCenterCount >= len(existingDataCenters) { + // check dc, good if different from any existing data centers + _, found := existingDataCenters[possibleLocation.DataCenter()] + return !found + } else if replicaPlacement.DiffRackCount >= len(existingRacks) { + // check rack, good if different from any existing racks + _, found := existingRacks[possibleLocation.Rack()] + return !found + } else if replicaPlacement.SameRackCount >= len(existingDataNodes) { + // check data node, good if different from any existing data nodes + _, 
found := existingDataNodes[possibleLocation.String()] + return !found + } + + return false +} + +type location struct { + dc string + rack string + dataNode *master_pb.DataNodeInfo +} + +func newLocation(dc, rack string, dataNode *master_pb.DataNodeInfo) location { + return location{ + dc: dc, + rack: rack, + dataNode: dataNode, + } +} + +func (l location) String() string { + return fmt.Sprintf("%s %s %s", l.dc, l.rack, l.dataNode.Id) +} + +func (l location) Rack() string { + return fmt.Sprintf("%s %s", l.dc, l.rack) +} + +func (l location) DataCenter() string { + return l.dc +} From bd1c0735e0bc3bf7a24ec948372b72a4a9652d03 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 23 Mar 2019 11:54:26 -0700 Subject: [PATCH 094/450] weed shell: adjust help text format --- weed/shell/command_collection_list.go | 2 +- weed/shell/command_fs_du.go | 7 ++++++- weed/shell/command_volume_fix_replication.go | 7 ++++++- weed/shell/command_volume_list.go | 6 +++++- weed/shell/shell_liner.go | 7 +++---- 5 files changed, 21 insertions(+), 8 deletions(-) diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index 0db74ef20..cc16fd291 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -19,7 +19,7 @@ func (c *commandCollectionList) Name() string { } func (c *commandCollectionList) Help() string { - return "# list all collections" + return `list all collections` } func (c *commandCollectionList) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index cdc9d98ef..1206596b0 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -25,7 +25,12 @@ func (c *commandFsDu) Name() string { } func (c *commandFsDu) Help() string { - return "http://<filer_server>:<port>/dir[/file] # show disk usage" + return `show disk usage + + fs.du http://<filer_server>:<port>/dir + fs.du http://<filer_server>:<port>/dir/file_name + fs.du http://<filer_server>:<port>/dir/file_prefix +` } func (c *commandFsDu) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go index 6a76ba458..2ec850140 100644 --- a/weed/shell/command_volume_fix_replication.go +++ b/weed/shell/command_volume_fix_replication.go @@ -26,7 +26,12 @@ func (c *commandVolumeFixReplication) Name() string { func (c *commandVolumeFixReplication) Help() string { return `add replicas to volumes that are missing replicas - -n do not take action + This command finds all under-replicated volumes, and finds volume servers with free slots. + If the free slots satisfy the replication requirement, the volume content is copied over and mounted. + + volume.fix.replication -n # do not take action + volume.fix.replication # actually copying the volume files and mount the volume + ` } diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 5be5be569..f3f843d58 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -19,7 +19,11 @@ func (c *commandVolumeList) Name() string { } func (c *commandVolumeList) Help() string { - return "# list all volumes" + return `list all volumes + + This command lists all volumes as a tree of dataCenter > rack > dataNode > volume. 
+ +` } func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go index 7d09661dc..403980568 100644 --- a/weed/shell/shell_liner.go +++ b/weed/shell/shell_liner.go @@ -92,7 +92,8 @@ func printGenericHelp() { return strings.Compare(commands[i].Name(), commands[j].Name()) < 0 }) for _, c := range commands { - fmt.Printf("\t%s %s \n", c.Name(), c.Help()) + helpTexts := strings.SplitN(c.Help(), "\n", 2) + fmt.Printf(" %-30s\t# %s \n", c.Name(), helpTexts[0]) } } @@ -111,9 +112,7 @@ func printHelp(cmds []string) { for _, c := range commands { if c.Name() == cmd { - fmt.Println() - fmt.Printf("\t%s %s \n", c.Name(), c.Help()) - fmt.Println() + fmt.Printf(" %s\t# %s\n", c.Name(), c.Help()) } } } From cd8a3b99bb095bf19b7b8fac54ec1360bc754e9b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 23 Mar 2019 12:57:35 -0700 Subject: [PATCH 095/450] textual changes --- weed/shell/command_volume_fix_replication.go | 6 ++++++ weed/shell/shell_liner.go | 11 ++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go index 2ec850140..b8daa2d9f 100644 --- a/weed/shell/command_volume_fix_replication.go +++ b/weed/shell/command_volume_fix_replication.go @@ -32,6 +32,12 @@ func (c *commandVolumeFixReplication) Help() string { volume.fix.replication -n # do not take action volume.fix.replication # actually copying the volume files and mount the volume + Note: + * each time this will only add back one replica for one volume id. If multiple replicas + are missing, e.g. multiple volume servers are new, you may need to run this multiple times. + * do not run this again too quickly, since the new volume replica may take a few seconds + to register itself to the master. 
+ ` } diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go index 403980568..2fc64b60f 100644 --- a/weed/shell/shell_liner.go +++ b/weed/shell/shell_liner.go @@ -6,6 +6,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/wdclient" "io" "os" + "path" "regexp" "strings" @@ -15,7 +16,7 @@ import ( var ( line *liner.State - historyPath = "/tmp/weed-shell" + historyPath = path.Join(os.TempDir(), "weed-shell") ) func RunShell(options ShellOptions) { @@ -26,9 +27,9 @@ func RunShell(options ShellOptions) { line.SetCtrlCAborts(true) setCompletionHandler() - loadHisotry() + loadHistory() - defer saveHisotry() + defer saveHistory() reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`) @@ -129,14 +130,14 @@ func setCompletionHandler() { }) } -func loadHisotry() { +func loadHistory() { if f, err := os.Open(historyPath); err == nil { line.ReadHistory(f) f.Close() } } -func saveHisotry() { +func saveHistory() { if f, err := os.Create(historyPath); err != nil { fmt.Printf("Error writing history file: %v\n", err) } else { From eaa42c3865f65153d12fc8e9b63bdf45b13ea9c3 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 23 Mar 2019 14:14:40 -0700 Subject: [PATCH 096/450] 1.27 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 7fbb6971a..ee6125a8c 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,5 @@ package util const ( - VERSION = "1.26" + VERSION = "1.27" ) From 70815e91249f481b71ca1fbca14ff41430e42681 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Mar 2019 09:16:12 -0700 Subject: [PATCH 097/450] WIP --- weed/command/backup.go | 27 +- weed/pb/volume_server.proto | 10 + weed/pb/volume_server_pb/volume_server.pb.go | 307 +++++++++++++------ weed/server/volume_grpc_follow.go | 53 ++++ weed/storage/store_vacuum.go | 2 +- weed/storage/volume_follow.go | 220 +++++++++++++ weed/storage/volume_follow_test.go | 39 +++ weed/storage/volume_sync.go | 24 +- weed/storage/volume_vacuum.go | 4 +- weed/storage/volume_vacuum_test.go | 2 +- 10 files changed, 568 insertions(+), 120 deletions(-) create mode 100644 weed/server/volume_grpc_follow.go create mode 100644 weed/storage/volume_follow.go create mode 100644 weed/storage/volume_follow_test.go diff --git a/weed/command/backup.go b/weed/command/backup.go index 86391f9c4..9c0bcbc52 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -88,7 +88,32 @@ func runBackup(cmd *Command, args []string) bool { return true } - if err := v.Synchronize(volumeServer, grpcDialOption); err != nil { + if v.SuperBlock.CompactRevision < uint16(stats.CompactRevision) { + if err = v.Compact(0); err != nil { + fmt.Printf("Compact Volume before synchronizing %v\n", err) + return true + } + if err = v.CommitCompact(); err != nil { + fmt.Printf("Commit Compact before synchronizing %v\n", err) + return true + } + v.SuperBlock.CompactRevision = uint16(stats.CompactRevision) + v.DataFile().WriteAt(v.SuperBlock.Bytes(),0) + } + + if uint64(v.Size()) > stats.TailOffset { + // remove the old data + v.Destroy() + // recreate an empty volume + v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0) + if err != nil { + fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) + return true + } + } + defer v.Close() + + if err := v.Follow(volumeServer, grpcDialOption); err != nil { fmt.Printf("Error synchronizing volume %d: %v\n", vid, err) return true } diff --git a/weed/pb/volume_server.proto 
b/weed/pb/volume_server.proto index 93db5b981..7686cc614 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -24,6 +24,8 @@ service VolumeServer { rpc VolumeSyncStatus (VolumeSyncStatusRequest) returns (VolumeSyncStatusResponse) { } + rpc VolumeFollow (VolumeFollowRequest) returns (stream VolumeFollowResponse) { + } rpc VolumeSyncIndex (VolumeSyncIndexRequest) returns (stream VolumeSyncIndexResponse) { } rpc VolumeSyncData (VolumeSyncDataRequest) returns (stream VolumeSyncDataResponse) { @@ -119,6 +121,14 @@ message VolumeSyncStatusResponse { uint64 idx_file_size = 8; } +message VolumeFollowRequest { + uint32 volume_id = 1; + uint64 since = 2; +} +message VolumeFollowResponse { + bytes file_content = 1; +} + message VolumeSyncIndexRequest { uint32 volume_id = 1; } diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index d84a5b099..4c5f1effa 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -27,6 +27,8 @@ It has these top-level messages: AssignVolumeResponse VolumeSyncStatusRequest VolumeSyncStatusResponse + VolumeFollowRequest + VolumeFollowResponse VolumeSyncIndexRequest VolumeSyncIndexResponse VolumeSyncDataRequest @@ -420,6 +422,46 @@ func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { return 0 } +type VolumeFollowRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Since uint64 `protobuf:"varint,2,opt,name=since" json:"since,omitempty"` +} + +func (m *VolumeFollowRequest) Reset() { *m = VolumeFollowRequest{} } +func (m *VolumeFollowRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeFollowRequest) ProtoMessage() {} +func (*VolumeFollowRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *VolumeFollowRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeFollowRequest) GetSince() uint64 { + if m != nil { + return m.Since + } + return 0 +} + +type VolumeFollowResponse struct { + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +} + +func (m *VolumeFollowResponse) Reset() { *m = VolumeFollowResponse{} } +func (m *VolumeFollowResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeFollowResponse) ProtoMessage() {} +func (*VolumeFollowResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *VolumeFollowResponse) GetFileContent() []byte { + if m != nil { + return m.FileContent + } + return nil +} + type VolumeSyncIndexRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } @@ -427,7 +469,7 @@ type VolumeSyncIndexRequest struct { func (m *VolumeSyncIndexRequest) Reset() { *m = VolumeSyncIndexRequest{} } func (m *VolumeSyncIndexRequest) String() string { return proto.CompactTextString(m) } func (*VolumeSyncIndexRequest) ProtoMessage() {} -func (*VolumeSyncIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*VolumeSyncIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *VolumeSyncIndexRequest) GetVolumeId() uint32 { if m != nil { @@ -443,7 +485,7 @@ type VolumeSyncIndexResponse struct { func (m *VolumeSyncIndexResponse) Reset() { *m = VolumeSyncIndexResponse{} } func (m *VolumeSyncIndexResponse) String() string { return 
proto.CompactTextString(m) } func (*VolumeSyncIndexResponse) ProtoMessage() {} -func (*VolumeSyncIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*VolumeSyncIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *VolumeSyncIndexResponse) GetIndexFileContent() []byte { if m != nil { @@ -463,7 +505,7 @@ type VolumeSyncDataRequest struct { func (m *VolumeSyncDataRequest) Reset() { *m = VolumeSyncDataRequest{} } func (m *VolumeSyncDataRequest) String() string { return proto.CompactTextString(m) } func (*VolumeSyncDataRequest) ProtoMessage() {} -func (*VolumeSyncDataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*VolumeSyncDataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *VolumeSyncDataRequest) GetVolumeId() uint32 { if m != nil { @@ -507,7 +549,7 @@ type VolumeSyncDataResponse struct { func (m *VolumeSyncDataResponse) Reset() { *m = VolumeSyncDataResponse{} } func (m *VolumeSyncDataResponse) String() string { return proto.CompactTextString(m) } func (*VolumeSyncDataResponse) ProtoMessage() {} -func (*VolumeSyncDataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*VolumeSyncDataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *VolumeSyncDataResponse) GetFileContent() []byte { if m != nil { @@ -523,7 +565,7 @@ type VolumeMountRequest struct { func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeMountRequest) ProtoMessage() {} -func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *VolumeMountRequest) GetVolumeId() uint32 { if m != nil { @@ -538,7 +580,7 @@ type VolumeMountResponse struct { func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} } func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeMountResponse) ProtoMessage() {} -func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } type VolumeUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -547,7 +589,7 @@ type VolumeUnmountRequest struct { func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountRequest) ProtoMessage() {} -func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *VolumeUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -562,7 +604,7 @@ type VolumeUnmountResponse struct { func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} } func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountResponse) ProtoMessage() {} -func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } type VolumeDeleteRequest struct { VolumeId uint32 
`protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -571,7 +613,7 @@ type VolumeDeleteRequest struct { func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteRequest) ProtoMessage() {} -func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *VolumeDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -586,7 +628,7 @@ type VolumeDeleteResponse struct { func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} } func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteResponse) ProtoMessage() {} -func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } type ReplicateVolumeRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -599,7 +641,7 @@ type ReplicateVolumeRequest struct { func (m *ReplicateVolumeRequest) Reset() { *m = ReplicateVolumeRequest{} } func (m *ReplicateVolumeRequest) String() string { return proto.CompactTextString(m) } func (*ReplicateVolumeRequest) ProtoMessage() {} -func (*ReplicateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*ReplicateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *ReplicateVolumeRequest) GetVolumeId() uint32 { if m != nil { @@ -642,7 +684,7 @@ type ReplicateVolumeResponse struct { func (m *ReplicateVolumeResponse) Reset() { *m = ReplicateVolumeResponse{} } func (m *ReplicateVolumeResponse) String() string { return proto.CompactTextString(m) } func (*ReplicateVolumeResponse) ProtoMessage() {} -func (*ReplicateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*ReplicateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } type CopyFileRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -653,7 +695,7 @@ type CopyFileRequest struct { func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } func (*CopyFileRequest) ProtoMessage() {} -func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *CopyFileRequest) GetVolumeId() uint32 { if m != nil { @@ -683,7 +725,7 @@ type CopyFileResponse struct { func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } func (m *CopyFileResponse) GetFileContent() []byte { if m != nil { @@ -699,7 +741,7 @@ type ReadVolumeFileStatusRequest struct { func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } func 
(*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -719,7 +761,7 @@ type ReadVolumeFileStatusResponse struct { func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -766,7 +808,7 @@ type DiskStatus struct { func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -809,7 +851,7 @@ type MemStatus struct { func (m *MemStatus) Reset() { *m = MemStatus{} } func (m *MemStatus) String() string { return proto.CompactTextString(m) } func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } func (m *MemStatus) GetGoroutines() int32 { if m != nil { @@ -879,6 +921,8 @@ func init() { proto.RegisterType((*AssignVolumeResponse)(nil), "volume_server_pb.AssignVolumeResponse") proto.RegisterType((*VolumeSyncStatusRequest)(nil), "volume_server_pb.VolumeSyncStatusRequest") proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse") + proto.RegisterType((*VolumeFollowRequest)(nil), "volume_server_pb.VolumeFollowRequest") + proto.RegisterType((*VolumeFollowResponse)(nil), "volume_server_pb.VolumeFollowResponse") proto.RegisterType((*VolumeSyncIndexRequest)(nil), "volume_server_pb.VolumeSyncIndexRequest") proto.RegisterType((*VolumeSyncIndexResponse)(nil), "volume_server_pb.VolumeSyncIndexResponse") proto.RegisterType((*VolumeSyncDataRequest)(nil), "volume_server_pb.VolumeSyncDataRequest") @@ -919,6 +963,7 @@ type VolumeServerClient interface { DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) + VolumeFollow(ctx context.Context, in *VolumeFollowRequest, opts ...grpc.CallOption) (VolumeServer_VolumeFollowClient, error) VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) @@ -1009,8 +1054,40 @@ func (c *volumeServerClient) 
VolumeSyncStatus(ctx context.Context, in *VolumeSyn return out, nil } +func (c *volumeServerClient) VolumeFollow(ctx context.Context, in *VolumeFollowRequest, opts ...grpc.CallOption) (VolumeServer_VolumeFollowClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeFollow", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeFollowClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeFollowClient interface { + Recv() (*VolumeFollowResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeFollowClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeFollowClient) Recv() (*VolumeFollowResponse, error) { + m := new(VolumeFollowResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *volumeServerClient) VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncIndex", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncIndex", opts...) if err != nil { return nil, err } @@ -1042,7 +1119,7 @@ func (x *volumeServerVolumeSyncIndexClient) Recv() (*VolumeSyncIndexResponse, er } func (c *volumeServerClient) VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncData", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncData", opts...) if err != nil { return nil, err } @@ -1119,7 +1196,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV } func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) 
if err != nil { return nil, err } @@ -1162,6 +1239,7 @@ type VolumeServerServer interface { DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) + VolumeFollow(*VolumeFollowRequest, VolumeServer_VolumeFollowServer) error VolumeSyncIndex(*VolumeSyncIndexRequest, VolumeServer_VolumeSyncIndexServer) error VolumeSyncData(*VolumeSyncDataRequest, VolumeServer_VolumeSyncDataServer) error VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) @@ -1320,6 +1398,27 @@ func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeFollow_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeFollowRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeFollow(m, &volumeServerVolumeFollowServer{stream}) +} + +type VolumeServer_VolumeFollowServer interface { + Send(*VolumeFollowResponse) error + grpc.ServerStream +} + +type volumeServerVolumeFollowServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeFollowServer) Send(m *VolumeFollowResponse) error { + return x.ServerStream.SendMsg(m) +} + func _VolumeServer_VolumeSyncIndex_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeSyncIndexRequest) if err := stream.RecvMsg(m); err != nil { @@ -1531,6 +1630,11 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{ + { + StreamName: "VolumeFollow", + Handler: _VolumeServer_VolumeFollow_Handler, + ServerStreams: true, + }, { StreamName: "VolumeSyncIndex", Handler: _VolumeServer_VolumeSyncIndex_Handler, @@ -1553,83 +1657,86 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1247 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0x5b, 0x73, 0xdb, 0xc4, - 0x17, 0x8f, 0x62, 0x3b, 0x71, 0x8e, 0xed, 0xc6, 0xff, 0x4d, 0x9a, 0x38, 0x4a, 0xff, 0xc1, 0x15, - 0x90, 0x3a, 0x6d, 0x1a, 0x20, 0x9d, 0x42, 0x81, 0x17, 0x20, 0x01, 0x26, 0x0f, 0xa5, 0x33, 0x0a, - 0xed, 0x30, 0x43, 0x67, 0x34, 0x1b, 0x69, 0x9d, 0x88, 0xc8, 0x92, 0xaa, 0x5d, 0x85, 0x84, 0x6f, - 0xc2, 0x33, 0x2f, 0x7d, 0xe7, 0x03, 0xf1, 0x41, 0x78, 0x61, 0xf6, 0x22, 0x59, 0x37, 0xc7, 0xe2, - 0xf2, 0xb6, 0x3a, 0x7b, 0xce, 0xef, 0x5c, 0xf6, 0xec, 0xd9, 0x9f, 0x0d, 0x6b, 0x57, 0x81, 0x17, - 0x4f, 0x88, 0x45, 0x49, 0x74, 0x45, 0xa2, 0x83, 0x30, 0x0a, 0x58, 0x80, 0xfa, 0x39, 0xa1, 0x15, - 0x9e, 0x19, 0x1f, 0x00, 0xfa, 0x0a, 0x33, 0xfb, 0xe2, 0x98, 0x78, 0x84, 0x11, 0x93, 0xbc, 0x89, - 0x09, 0x65, 0x68, 0x0b, 0xda, 0x63, 0xd7, 0x23, 0x96, 0xeb, 0xd0, 0x81, 0x36, 0x6c, 0x8c, 0x56, - 0xcc, 0x65, 0xfe, 0x7d, 0xe2, 0x50, 0xe3, 0x05, 0xac, 0xe5, 0x0c, 0x68, 0x18, 0xf8, 0x94, 0xa0, - 0x67, 0xb0, 0x1c, 0x11, 0x1a, 0x7b, 0x4c, 0x1a, 0x74, 0x0e, 0x77, 0x0e, 0x8a, 0xbe, 0x0e, 0x52, - 0x93, 0xd8, 0x63, 0x66, 0xa2, 0x6e, 0xb8, 0xd0, 0xcd, 0x6e, 0xa0, 0x4d, 0x58, 0x56, 0xbe, 0x07, - 0xda, 0x50, 0x1b, 0xad, 0x98, 0x4b, 0xd2, 0x35, 0xda, 0x80, 0x25, 0xca, 0x30, 0x8b, 0xe9, 0x60, - 0x71, 0xa8, 0x8d, 0x5a, 0xa6, 0xfa, 0x42, 0xeb, 0xd0, 0x22, 0x51, 0x14, 0x44, 0x83, 0x86, 0x50, - 0x97, 0x1f, 0x08, 0x41, 0x93, 0xba, 0xbf, 0x90, 0x41, 0x73, 0xa8, 
0x8d, 0x7a, 0xa6, 0x58, 0x1b, - 0xcb, 0xd0, 0xfa, 0x7a, 0x12, 0xb2, 0x1b, 0xe3, 0x13, 0x18, 0xbc, 0xc2, 0x76, 0x1c, 0x4f, 0x5e, - 0x89, 0x18, 0x8f, 0x2e, 0x88, 0x7d, 0x99, 0xe4, 0xbe, 0x0d, 0x2b, 0x2a, 0x72, 0x15, 0x41, 0xcf, - 0x6c, 0x4b, 0xc1, 0x89, 0x63, 0x7c, 0x01, 0x5b, 0x15, 0x86, 0xaa, 0x06, 0xef, 0x42, 0xef, 0x1c, - 0x47, 0x67, 0xf8, 0x9c, 0x58, 0x11, 0x66, 0x6e, 0x20, 0xac, 0x35, 0xb3, 0xab, 0x84, 0x26, 0x97, - 0x19, 0x3f, 0x82, 0x9e, 0x43, 0x08, 0x26, 0x21, 0xb6, 0x59, 0x1d, 0xe7, 0x68, 0x08, 0x9d, 0x30, - 0x22, 0xd8, 0xf3, 0x02, 0x1b, 0x33, 0x22, 0xaa, 0xd0, 0x30, 0xb3, 0x22, 0xe3, 0xff, 0xb0, 0x5d, - 0x09, 0x2e, 0x03, 0x34, 0x9e, 0x15, 0xa2, 0x0f, 0x26, 0x13, 0xb7, 0x96, 0x6b, 0xe3, 0x5e, 0x29, - 0x6a, 0x61, 0xa9, 0x70, 0x3f, 0x2d, 0xec, 0x7a, 0x04, 0xfb, 0x71, 0x58, 0x0b, 0xb8, 0x18, 0x71, - 0x62, 0x9a, 0x22, 0x6f, 0xca, 0xe6, 0x38, 0x0a, 0x3c, 0x8f, 0xd8, 0xcc, 0x0d, 0xfc, 0x04, 0x76, - 0x07, 0xc0, 0x4e, 0x85, 0xaa, 0x55, 0x32, 0x12, 0x43, 0x87, 0x41, 0xd9, 0x54, 0xc1, 0xbe, 0xd5, - 0x60, 0xed, 0x4b, 0x4a, 0xdd, 0x73, 0x5f, 0xba, 0xad, 0x55, 0xfe, 0xbc, 0xc3, 0xc5, 0xa2, 0xc3, - 0xe2, 0xf1, 0x34, 0x4a, 0xc7, 0xc3, 0x35, 0x22, 0x12, 0x7a, 0xae, 0x8d, 0x05, 0x44, 0x53, 0x40, - 0x64, 0x45, 0xa8, 0x0f, 0x0d, 0xc6, 0xbc, 0x41, 0x4b, 0xec, 0xf0, 0xa5, 0xb1, 0x01, 0xeb, 0xf9, - 0x48, 0x55, 0x0a, 0x1f, 0xc3, 0xa6, 0x94, 0x9c, 0xde, 0xf8, 0xf6, 0xa9, 0xb8, 0x09, 0xb5, 0x0a, - 0xfe, 0xa7, 0x06, 0x83, 0xb2, 0xa1, 0xea, 0xe0, 0x7f, 0x9b, 0xff, 0xdf, 0xcd, 0x0e, 0xbd, 0x03, - 0x1d, 0x86, 0x5d, 0xcf, 0x0a, 0xc6, 0x63, 0x4a, 0xd8, 0x60, 0x69, 0xa8, 0x8d, 0x9a, 0x26, 0x70, - 0xd1, 0x0b, 0x21, 0x41, 0x7b, 0xd0, 0xb7, 0x65, 0x17, 0x5b, 0x11, 0xb9, 0x72, 0x29, 0x47, 0x5e, - 0x16, 0x81, 0xad, 0xda, 0x49, 0x77, 0x4b, 0x31, 0x32, 0xa0, 0xe7, 0x3a, 0xd7, 0x96, 0x18, 0x1e, - 0xe2, 0xea, 0xb7, 0x05, 0x5a, 0xc7, 0x75, 0xae, 0xbf, 0x71, 0x3d, 0x72, 0xca, 0x27, 0xc0, 0x53, - 0xd8, 0x98, 0x26, 0x7f, 0xe2, 0x3b, 0xe4, 0xba, 0x56, 0xd1, 0xbe, 0xcd, 0x16, 0x5b, 0x99, 0xa9, - 0x92, 0xed, 0x03, 0x72, 0xb9, 0x40, 0xfa, 0xb5, 0x03, 0x9f, 0x11, 0x9f, 0x09, 0x80, 0xae, 0xd9, - 0x17, 0x3b, 0xdc, 0xf9, 0x91, 0x94, 0x1b, 0xbf, 0x6a, 0x70, 0x77, 0x8a, 0x74, 0x8c, 0x19, 0xae, - 0xd5, 0x7a, 0x3a, 0xb4, 0xd3, 0xec, 0x17, 0xe5, 0x5e, 0xf2, 0xcd, 0xc7, 0xa2, 0xaa, 0x5e, 0x43, - 0xec, 0xa8, 0xaf, 0xaa, 0x01, 0xc8, 0x9d, 0xf8, 0x84, 0x38, 0x72, 0xba, 0xca, 0x63, 0x68, 0x4b, - 0xc1, 0x89, 0x63, 0x7c, 0x9e, 0xad, 0x8d, 0x0c, 0x4d, 0xe5, 0x78, 0x1f, 0xba, 0x15, 0xd9, 0x75, - 0xc6, 0x99, 0xc4, 0x3e, 0x02, 0x24, 0x8d, 0x9f, 0x07, 0xb1, 0x5f, 0x6f, 0xa6, 0xdc, 0x85, 0xb5, - 0x9c, 0x89, 0x6a, 0xec, 0x27, 0xb0, 0x2e, 0xc5, 0x2f, 0xfd, 0x49, 0x6d, 0xac, 0xcd, 0xa4, 0xac, - 0xa9, 0x91, 0x42, 0x3b, 0x4c, 0x9c, 0xe4, 0x1f, 0xb8, 0x5b, 0xc1, 0x36, 0x92, 0x08, 0xf2, 0x6f, - 0x9c, 0xf1, 0xbb, 0x06, 0x1b, 0xa6, 0x6a, 0x67, 0xf2, 0xdf, 0x0e, 0x8e, 0xec, 0xc5, 0x69, 0xcc, - 0xbc, 0x38, 0xcd, 0xe9, 0xc5, 0x19, 0x41, 0x9f, 0x06, 0x71, 0x64, 0x13, 0xcb, 0xc1, 0x0c, 0x5b, - 0x7e, 0xe0, 0x10, 0x75, 0xa0, 0x77, 0xa4, 0x9c, 0x1f, 0xe0, 0x77, 0x81, 0x43, 0x8c, 0x2d, 0xd8, - 0x2c, 0x05, 0xad, 0x12, 0xf2, 0x61, 0xf5, 0x28, 0x08, 0x6f, 0x78, 0x83, 0xd6, 0x4c, 0xa4, 0xe3, - 0x52, 0x2b, 0xb9, 0x64, 0x22, 0x93, 0xb6, 0xb9, 0xe2, 0xd2, 0x13, 0x79, 0xc3, 0xd4, 0xbe, 0x83, - 0x99, 0xdc, 0x6f, 0x24, 0xfb, 0xc7, 0x98, 0xf1, 0x7d, 0xe3, 0x29, 0xf4, 0xa7, 0xfe, 0xea, 0xf7, - 0xd6, 0x67, 0xb0, 0x6d, 0x12, 0xec, 0xc8, 0xe0, 0xc5, 0x55, 0xae, 0x3f, 0xee, 0xfe, 0xd0, 0xe0, - 0x5e, 0xb5, 0x71, 0x9d, 0x91, 0xc7, 0x2f, 0x77, 0x32, 0x52, 0x98, 0x3b, 0x21, 0x94, 0xe1, 
0x49, - 0x28, 0xf2, 0x6e, 0x9a, 0x7d, 0x35, 0x57, 0xbe, 0x4f, 0xe4, 0xe5, 0x01, 0xd4, 0x28, 0x0d, 0x20, - 0x8e, 0x98, 0xd4, 0x27, 0x83, 0xd8, 0x94, 0x88, 0x8e, 0xac, 0x53, 0x0e, 0x31, 0xd5, 0x16, 0x88, - 0x2d, 0x89, 0xa8, 0x14, 0xc5, 0x48, 0xfb, 0x01, 0xe0, 0xd8, 0xa5, 0x97, 0x32, 0x2d, 0xde, 0x29, - 0x8e, 0x1b, 0xa9, 0xe7, 0x90, 0x2f, 0xb9, 0x04, 0x7b, 0x9e, 0x0a, 0x9a, 0x2f, 0xf9, 0x64, 0x88, - 0x29, 0x71, 0x54, 0x78, 0x62, 0xcd, 0x65, 0xe3, 0x88, 0x10, 0x15, 0x89, 0x58, 0x1b, 0xbf, 0x69, - 0xb0, 0xf2, 0x9c, 0x4c, 0x14, 0xf2, 0x0e, 0xc0, 0x79, 0x10, 0x05, 0x31, 0x73, 0x7d, 0x42, 0x85, - 0x83, 0x96, 0x99, 0x91, 0xfc, 0x73, 0x3f, 0x62, 0x52, 0x11, 0x6f, 0xac, 0x92, 0x13, 0x6b, 0x2e, - 0xbb, 0x20, 0x38, 0x54, 0x2f, 0x82, 0x58, 0x73, 0xa2, 0x47, 0x19, 0xb6, 0x2f, 0xc5, 0x03, 0xd0, - 0x34, 0xe5, 0xc7, 0xe1, 0xdb, 0x1e, 0x74, 0xd5, 0xdc, 0x12, 0x4c, 0x13, 0xbd, 0x86, 0x4e, 0x86, - 0xa1, 0xa2, 0xf7, 0xca, 0x44, 0xb4, 0xcc, 0x78, 0xf5, 0xf7, 0xe7, 0x68, 0xa9, 0x1b, 0xb3, 0x80, - 0x7c, 0xf8, 0x5f, 0x89, 0x01, 0xa2, 0x87, 0x65, 0xeb, 0x59, 0xfc, 0x52, 0x7f, 0x54, 0x4b, 0x37, - 0xf5, 0xc7, 0x60, 0xad, 0x82, 0xd2, 0xa1, 0xfd, 0x39, 0x28, 0x39, 0x5a, 0xa9, 0x3f, 0xae, 0xa9, - 0x9d, 0x7a, 0x7d, 0x03, 0xa8, 0xcc, 0xf7, 0xd0, 0xa3, 0xb9, 0x30, 0x53, 0x3e, 0xa9, 0xef, 0xd7, - 0x53, 0x9e, 0x99, 0xa8, 0x64, 0x82, 0x73, 0x13, 0xcd, 0x71, 0xcd, 0xb9, 0x89, 0x16, 0xe8, 0xe5, - 0x02, 0xba, 0x84, 0x7e, 0x91, 0x25, 0xa2, 0xbd, 0x59, 0x3f, 0x5d, 0x4a, 0x24, 0x54, 0x7f, 0x58, - 0x47, 0x35, 0x75, 0x66, 0x41, 0x37, 0xcb, 0xe5, 0x50, 0x45, 0xd3, 0x55, 0xb0, 0x52, 0x7d, 0x77, - 0x9e, 0x5a, 0x36, 0x9b, 0x22, 0xb7, 0xab, 0xca, 0x66, 0x06, 0x71, 0xac, 0xca, 0x66, 0x16, 0x55, - 0x34, 0x16, 0xd0, 0x4f, 0xb0, 0x5a, 0x20, 0x45, 0x68, 0x74, 0x1b, 0x40, 0x96, 0x6e, 0xe9, 0x7b, - 0x35, 0x34, 0x13, 0x4f, 0x1f, 0x6a, 0xe8, 0x1c, 0xee, 0xe4, 0xb9, 0x09, 0x7a, 0x70, 0x1b, 0x40, - 0x86, 0x58, 0xe9, 0xa3, 0xf9, 0x8a, 0x19, 0x47, 0xaf, 0xa1, 0x93, 0x21, 0x25, 0x55, 0xc3, 0xa3, - 0x4c, 0x73, 0xaa, 0x86, 0x47, 0x15, 0xb3, 0x59, 0x40, 0x67, 0xd0, 0xcb, 0xd1, 0x14, 0xb4, 0x3b, - 0xcb, 0x32, 0x4f, 0x7e, 0xf4, 0x07, 0x73, 0xf5, 0xb2, 0x4d, 0x96, 0x65, 0x2f, 0x68, 0x66, 0x70, - 0xf9, 0x01, 0xb8, 0x3b, 0x4f, 0x2d, 0x75, 0x70, 0x01, 0xab, 0x05, 0x42, 0x51, 0x75, 0xee, 0xd5, - 0x44, 0xa9, 0xea, 0xdc, 0x67, 0xb1, 0x93, 0x05, 0xf4, 0x33, 0xac, 0x57, 0xbd, 0xdd, 0xe8, 0x71, - 0x15, 0xc8, 0x4c, 0x82, 0xa0, 0x1f, 0xd4, 0x55, 0x4f, 0x1d, 0xbf, 0x84, 0x76, 0x42, 0x54, 0xd0, - 0xfd, 0xb2, 0x75, 0x81, 0x34, 0xe9, 0xc6, 0x6d, 0x2a, 0xd3, 0xe6, 0x3a, 0x5b, 0x12, 0xff, 0xc2, - 0x3c, 0xf9, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x7d, 0x0d, 0xbb, 0x9c, 0x11, 0x00, 0x00, + // 1296 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0x5f, 0x73, 0xdb, 0x44, + 0x10, 0x8f, 0x62, 0x3b, 0x71, 0xd6, 0x4e, 0x62, 0x2e, 0x69, 0xe2, 0x28, 0x25, 0xb8, 0x07, 0x6d, + 0x9d, 0x36, 0x0d, 0x90, 0x4e, 0xa1, 0x85, 0x17, 0x20, 0x21, 0x90, 0x87, 0xd2, 0x19, 0x85, 0x76, + 0x98, 0xa1, 0x33, 0x9a, 0x8b, 0x74, 0x49, 0x44, 0x64, 0x49, 0x95, 0x4e, 0x69, 0xc2, 0x57, 0xe0, + 0x13, 0xf0, 0xcc, 0x0b, 0xef, 0x7c, 0x20, 0x3e, 0x08, 0x2f, 0xcc, 0xfd, 0x91, 0xac, 0x7f, 0x8e, + 0xc5, 0x9f, 0xb7, 0xd3, 0xde, 0xee, 0x6f, 0x77, 0x6f, 0x6f, 0xf7, 0x7e, 0x36, 0xac, 0x5c, 0xfa, + 0x6e, 0x3c, 0xa2, 0x66, 0x44, 0xc3, 0x4b, 0x1a, 0xee, 0x06, 0xa1, 0xcf, 0x7c, 0xd4, 0xcb, 0x09, + 0xcd, 0xe0, 0x04, 0x7f, 0x08, 0xe8, 0x2b, 0xc2, 0xac, 0xf3, 0x03, 0xea, 0x52, 0x46, 0x0d, 0xfa, + 0x26, 0xa6, 0x11, 0x43, 0x1b, 0xd0, 0x3e, 0x75, 0x5c, 0x6a, 0x3a, 0x76, 
0xd4, 0xd7, 0x06, 0x8d, + 0xe1, 0x82, 0x31, 0xcf, 0xbf, 0x8f, 0xec, 0x08, 0xbf, 0x80, 0x95, 0x9c, 0x41, 0x14, 0xf8, 0x5e, + 0x44, 0xd1, 0x53, 0x98, 0x0f, 0x69, 0x14, 0xbb, 0x4c, 0x1a, 0x74, 0xf6, 0xb6, 0x76, 0x8b, 0xbe, + 0x76, 0x53, 0x93, 0xd8, 0x65, 0x46, 0xa2, 0x8e, 0x1d, 0xe8, 0x66, 0x37, 0xd0, 0x3a, 0xcc, 0x2b, + 0xdf, 0x7d, 0x6d, 0xa0, 0x0d, 0x17, 0x8c, 0x39, 0xe9, 0x1a, 0xad, 0xc1, 0x5c, 0xc4, 0x08, 0x8b, + 0xa3, 0xfe, 0xec, 0x40, 0x1b, 0xb6, 0x0c, 0xf5, 0x85, 0x56, 0xa1, 0x45, 0xc3, 0xd0, 0x0f, 0xfb, + 0x0d, 0xa1, 0x2e, 0x3f, 0x10, 0x82, 0x66, 0xe4, 0xfc, 0x4c, 0xfb, 0xcd, 0x81, 0x36, 0x5c, 0x34, + 0xc4, 0x1a, 0xcf, 0x43, 0xeb, 0xeb, 0x51, 0xc0, 0xae, 0xf1, 0xa7, 0xd0, 0x7f, 0x45, 0xac, 0x38, + 0x1e, 0xbd, 0x12, 0x31, 0xee, 0x9f, 0x53, 0xeb, 0x22, 0xc9, 0x7d, 0x13, 0x16, 0x54, 0xe4, 0x2a, + 0x82, 0x45, 0xa3, 0x2d, 0x05, 0x47, 0x36, 0xfe, 0x02, 0x36, 0x2a, 0x0c, 0xd5, 0x19, 0xbc, 0x0f, + 0x8b, 0x67, 0x24, 0x3c, 0x21, 0x67, 0xd4, 0x0c, 0x09, 0x73, 0x7c, 0x61, 0xad, 0x19, 0x5d, 0x25, + 0x34, 0xb8, 0x0c, 0xff, 0x08, 0x7a, 0x0e, 0xc1, 0x1f, 0x05, 0xc4, 0x62, 0x75, 0x9c, 0xa3, 0x01, + 0x74, 0x82, 0x90, 0x12, 0xd7, 0xf5, 0x2d, 0xc2, 0xa8, 0x38, 0x85, 0x86, 0x91, 0x15, 0xe1, 0x77, + 0x61, 0xb3, 0x12, 0x5c, 0x06, 0x88, 0x9f, 0x16, 0xa2, 0xf7, 0x47, 0x23, 0xa7, 0x96, 0x6b, 0x7c, + 0xbb, 0x14, 0xb5, 0xb0, 0x54, 0xb8, 0xcf, 0x0a, 0xbb, 0x2e, 0x25, 0x5e, 0x1c, 0xd4, 0x02, 0x2e, + 0x46, 0x9c, 0x98, 0xa6, 0xc8, 0xeb, 0xf2, 0x72, 0xec, 0xfb, 0xae, 0x4b, 0x2d, 0xe6, 0xf8, 0x5e, + 0x02, 0xbb, 0x05, 0x60, 0xa5, 0x42, 0x75, 0x55, 0x32, 0x12, 0xac, 0x43, 0xbf, 0x6c, 0xaa, 0x60, + 0x7f, 0xd7, 0x60, 0xe5, 0xcb, 0x28, 0x72, 0xce, 0x3c, 0xe9, 0xb6, 0xd6, 0xf1, 0xe7, 0x1d, 0xce, + 0x16, 0x1d, 0x16, 0xcb, 0xd3, 0x28, 0x95, 0x87, 0x6b, 0x84, 0x34, 0x70, 0x1d, 0x8b, 0x08, 0x88, + 0xa6, 0x80, 0xc8, 0x8a, 0x50, 0x0f, 0x1a, 0x8c, 0xb9, 0xfd, 0x96, 0xd8, 0xe1, 0x4b, 0xbc, 0x06, + 0xab, 0xf9, 0x48, 0x55, 0x0a, 0x9f, 0xc0, 0xba, 0x94, 0x1c, 0x5f, 0x7b, 0xd6, 0xb1, 0xe8, 0x84, + 0x5a, 0x07, 0xfe, 0x97, 0x06, 0xfd, 0xb2, 0xa1, 0xba, 0xc1, 0xff, 0x35, 0xff, 0x7f, 0x9a, 0x1d, + 0x7a, 0x0f, 0x3a, 0x8c, 0x38, 0xae, 0xe9, 0x9f, 0x9e, 0x46, 0x94, 0xf5, 0xe7, 0x06, 0xda, 0xb0, + 0x69, 0x00, 0x17, 0xbd, 0x10, 0x12, 0xb4, 0x0d, 0x3d, 0x4b, 0xde, 0x62, 0x33, 0xa4, 0x97, 0x4e, + 0xc4, 0x91, 0xe7, 0x45, 0x60, 0xcb, 0x56, 0x72, 0xbb, 0xa5, 0x18, 0x61, 0x58, 0x74, 0xec, 0x2b, + 0x53, 0x0c, 0x0f, 0xd1, 0xfa, 0x6d, 0x81, 0xd6, 0x71, 0xec, 0xab, 0x43, 0xc7, 0xa5, 0xc7, 0x7c, + 0x02, 0x7c, 0x0b, 0x2b, 0x32, 0xf9, 0x43, 0xdf, 0x75, 0xfd, 0xb7, 0xb5, 0xea, 0xbe, 0x0a, 0xad, + 0xc8, 0xf1, 0x2c, 0xd9, 0x70, 0x4d, 0x43, 0x7e, 0xe0, 0x67, 0xb0, 0x9a, 0x47, 0x52, 0x47, 0x78, + 0x07, 0xba, 0x22, 0x02, 0xcb, 0xf7, 0x18, 0xf5, 0x98, 0x40, 0xeb, 0x1a, 0x1d, 0x2e, 0xdb, 0x97, + 0x22, 0xfc, 0x04, 0xd6, 0xc6, 0x15, 0x38, 0xf2, 0x6c, 0x7a, 0x55, 0xab, 0x72, 0xdf, 0x64, 0x2b, + 0xae, 0xcc, 0x94, 0xd3, 0x1d, 0x40, 0x0e, 0x17, 0x98, 0x15, 0xae, 0x7b, 0x62, 0xe7, 0x30, 0xe3, + 0xff, 0x57, 0x0d, 0x6e, 0x8d, 0x91, 0x0e, 0x08, 0x23, 0xb5, 0xce, 0x41, 0x87, 0x76, 0x5a, 0x82, + 0x59, 0xb9, 0x97, 0x7c, 0xf3, 0xd9, 0xac, 0x4a, 0xd8, 0x10, 0x3b, 0xea, 0xab, 0x6a, 0x0a, 0x73, + 0x27, 0x1e, 0xa5, 0xb6, 0x1c, 0xf1, 0xf2, 0x2e, 0xb4, 0xa5, 0xe0, 0xc8, 0xc6, 0x9f, 0x67, 0xcf, + 0x46, 0x86, 0x56, 0xff, 0x60, 0x3f, 0x06, 0x24, 0x8d, 0x9f, 0xfb, 0xb1, 0x57, 0x6f, 0xb0, 0xdd, + 0x4a, 0x2e, 0x84, 0x32, 0x51, 0xdd, 0xf5, 0x38, 0xa9, 0xee, 0x4b, 0x6f, 0x54, 0x1b, 0x6b, 0x3d, + 0x39, 0xd6, 0xd4, 0x48, 0xa1, 0xed, 0x25, 0x4e, 0xf2, 0xaf, 0xec, 0x8d, 0x60, 0x6b, 0x49, 0x04, + 
0xf9, 0x87, 0x16, 0xff, 0xa1, 0xc1, 0x9a, 0xa1, 0x7a, 0x8a, 0xfe, 0xbf, 0xd3, 0x2b, 0xdb, 0xbd, + 0x8d, 0x89, 0xdd, 0xdb, 0x1c, 0x77, 0xef, 0x10, 0x7a, 0x91, 0x1f, 0x87, 0x16, 0x35, 0x6d, 0xc2, + 0x88, 0xe9, 0xf9, 0x36, 0x55, 0x05, 0x5d, 0x92, 0x72, 0x5e, 0xc0, 0xef, 0x7c, 0x9b, 0xe2, 0x0d, + 0x58, 0x2f, 0x05, 0xad, 0x12, 0xf2, 0x60, 0x79, 0xdf, 0x0f, 0xae, 0xf9, 0x05, 0xad, 0x99, 0x48, + 0xc7, 0x89, 0xcc, 0xa4, 0xd3, 0x45, 0x26, 0x6d, 0x63, 0xc1, 0x89, 0x8e, 0x64, 0x9b, 0xab, 0x7d, + 0x9b, 0x30, 0xb9, 0xdf, 0x48, 0xf6, 0x0f, 0x08, 0xe3, 0xfb, 0xf8, 0x09, 0xf4, 0xc6, 0xfe, 0xea, + 0xdf, 0xad, 0xcf, 0x60, 0xd3, 0xa0, 0xc4, 0x56, 0x3d, 0xcf, 0xe7, 0x49, 0xfd, 0x99, 0xfb, 0xa7, + 0x06, 0xb7, 0xab, 0x8d, 0xeb, 0xcc, 0x5d, 0xde, 0xdc, 0xc9, 0x5c, 0x63, 0xce, 0x88, 0x46, 0x8c, + 0x8c, 0x02, 0x35, 0x8c, 0x7a, 0x6a, 0xb8, 0x7d, 0x9f, 0xc8, 0xcb, 0x53, 0xb0, 0x51, 0x9a, 0x82, + 0x1c, 0x31, 0x39, 0x9f, 0x0c, 0x62, 0x53, 0x22, 0xda, 0xf2, 0x9c, 0x72, 0x88, 0xa9, 0xb6, 0x40, + 0x6c, 0x49, 0x44, 0xa5, 0x28, 0xe6, 0xea, 0x0f, 0x00, 0x07, 0x4e, 0x74, 0x21, 0xd3, 0xe2, 0x37, + 0xc5, 0x76, 0x42, 0xf5, 0x26, 0xf3, 0x25, 0x97, 0x10, 0xd7, 0x55, 0x41, 0xf3, 0x25, 0x9f, 0x0c, + 0x71, 0x44, 0x6d, 0x15, 0x9e, 0x58, 0x73, 0xd9, 0x69, 0x48, 0xa9, 0x8a, 0x44, 0xac, 0xf1, 0x6f, + 0x1a, 0x2c, 0x3c, 0xa7, 0x23, 0x85, 0xbc, 0x05, 0x70, 0xe6, 0x87, 0x7e, 0xcc, 0x1c, 0x8f, 0x46, + 0xc2, 0x41, 0xcb, 0xc8, 0x48, 0xfe, 0xbd, 0x1f, 0x31, 0xa9, 0xa8, 0x7b, 0xaa, 0x92, 0x13, 0x6b, + 0x2e, 0x3b, 0xa7, 0x24, 0x50, 0xcf, 0x92, 0x58, 0x8b, 0xd7, 0x80, 0x11, 0xeb, 0x42, 0xbc, 0x42, + 0xfc, 0x35, 0xe0, 0x1f, 0x7b, 0xbf, 0x2c, 0x41, 0x57, 0xcd, 0x2d, 0x41, 0x77, 0xd1, 0x6b, 0xe8, + 0x64, 0x68, 0x32, 0xfa, 0xa0, 0xcc, 0x86, 0xcb, 0xb4, 0x5b, 0xbf, 0x3b, 0x45, 0x4b, 0x75, 0xcc, + 0x0c, 0xf2, 0xe0, 0x9d, 0x12, 0x0d, 0x45, 0x0f, 0xca, 0xd6, 0x93, 0x48, 0xae, 0xfe, 0xb0, 0x96, + 0x6e, 0xea, 0x8f, 0xc1, 0x4a, 0x05, 0xaf, 0x44, 0x3b, 0x53, 0x50, 0x72, 0xdc, 0x56, 0x7f, 0x54, + 0x53, 0x3b, 0xf5, 0xfa, 0x06, 0x50, 0x99, 0x74, 0xa2, 0x87, 0x53, 0x61, 0xc6, 0xa4, 0x56, 0xdf, + 0xa9, 0xa7, 0x3c, 0x31, 0x51, 0x49, 0x47, 0xa7, 0x26, 0x9a, 0x23, 0xbc, 0x53, 0x13, 0x2d, 0x70, + 0xdc, 0x19, 0x74, 0x01, 0xbd, 0x22, 0x55, 0x45, 0xdb, 0x93, 0x7e, 0x3f, 0x95, 0x98, 0xb0, 0xfe, + 0xa0, 0x8e, 0x6a, 0xea, 0xcc, 0x84, 0x6e, 0x96, 0x50, 0xa2, 0x8a, 0x4b, 0x57, 0x41, 0x8d, 0xf5, + 0x7b, 0xd3, 0xd4, 0xb2, 0xd9, 0x14, 0x09, 0x66, 0x55, 0x36, 0x13, 0xd8, 0x6b, 0x55, 0x36, 0x93, + 0xf8, 0x2a, 0x9e, 0x41, 0x24, 0xe9, 0x3b, 0x49, 0xc3, 0xaa, 0xb2, 0xa9, 0x20, 0x7c, 0x55, 0xd9, + 0x54, 0xb1, 0x39, 0x3c, 0xf3, 0x91, 0x86, 0x7e, 0x82, 0xe5, 0x02, 0xef, 0x42, 0xc3, 0x9b, 0x62, + 0xcc, 0x32, 0x3a, 0x7d, 0xbb, 0x86, 0x66, 0xc6, 0xd7, 0x19, 0x2c, 0xe5, 0xe9, 0x0f, 0xba, 0x7f, + 0x13, 0x40, 0x86, 0xbb, 0xe9, 0xc3, 0xe9, 0x8a, 0x19, 0x47, 0xaf, 0xa1, 0x93, 0xe1, 0x3d, 0x55, + 0xf3, 0xa9, 0xcc, 0xa4, 0xf4, 0xbb, 0x53, 0xb4, 0xd2, 0xaa, 0x9c, 0xc0, 0x62, 0x8e, 0x09, 0xa1, + 0x89, 0xe7, 0x9d, 0xe7, 0x57, 0xfa, 0xfd, 0xa9, 0x7a, 0xd9, 0x7b, 0x9c, 0x25, 0x48, 0x93, 0x2b, + 0x9f, 0x9f, 0xb1, 0xf7, 0xa6, 0xa9, 0xa5, 0x0e, 0xce, 0x61, 0xb9, 0xc0, 0x59, 0xaa, 0xea, 0x5e, + 0xcd, 0xc5, 0xaa, 0xea, 0x3e, 0x89, 0x00, 0xcd, 0xa0, 0xb7, 0xb0, 0x5a, 0x45, 0x0f, 0xd0, 0xa3, + 0x2a, 0x90, 0x89, 0x1c, 0x44, 0xdf, 0xad, 0xab, 0x9e, 0x3a, 0x7e, 0x09, 0xed, 0x84, 0x0b, 0xa1, + 0x3b, 0x65, 0xeb, 0x02, 0x2f, 0xd3, 0xf1, 0x4d, 0x2a, 0xe3, 0xcb, 0x75, 0x32, 0x27, 0xfe, 0x6d, + 0x7a, 0xfc, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0xe2, 0xb6, 0x81, 0x84, 0x12, 0x00, 0x00, } diff --git 
a/weed/server/volume_grpc_follow.go b/weed/server/volume_grpc_follow.go new file mode 100644 index 000000000..bdd0ef6f5 --- /dev/null +++ b/weed/server/volume_grpc_follow.go @@ -0,0 +1,53 @@ +package weed_server + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" +) + +func (vs *VolumeServer) VolumeFollow(req *volume_server_pb.VolumeFollowRequest, stream volume_server_pb.VolumeServer_VolumeFollowServer) error { + + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + stopOffset := v.Size() + foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(req.Since) + if err != nil { + return fmt.Errorf("fail to locate by appendAtNs: %s", err) + } + + if isLastOne { + return nil + } + + startOffset := int64(foundOffset) * int64(types.NeedleEntrySize) + + buf := make([]byte, 1024*1024*2) + return sendFileContent(v.DataFile(), buf, startOffset, stopOffset, stream) + +} + +func sendFileContent(datFile *os.File, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeFollowServer) error { + var blockSizeLimit = int64(len(buf)) + for i := int64(0); i < stopOffset-startOffset; i += blockSizeLimit { + n, readErr := datFile.ReadAt(buf, startOffset+i) + if readErr == nil || readErr == io.EOF { + resp := &volume_server_pb.VolumeFollowResponse{} + resp.FileContent = buf[i : i+int64(n)] + sendErr := stream.Send(resp) + if sendErr != nil { + return sendErr + } + } else { + return readErr + } + } + return nil +} diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index cc0521491..0036315c8 100644 --- a/weed/storage/store_vacuum.go +++ b/weed/storage/store_vacuum.go @@ -20,7 +20,7 @@ func (s *Store) CompactVolume(vid VolumeId, preallocate int64) error { } func (s *Store) CommitCompactVolume(vid VolumeId) error { if v := s.findVolume(vid); v != nil { - return v.commitCompact() + return v.CommitCompact() } return fmt.Errorf("volume id %d is not found during commit compact", vid) } diff --git a/weed/storage/volume_follow.go b/weed/storage/volume_follow.go new file mode 100644 index 000000000..2aedd1682 --- /dev/null +++ b/weed/storage/volume_follow.go @@ -0,0 +1,220 @@ +package storage + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + . "github.com/chrislusf/seaweedfs/weed/storage/types" + "google.golang.org/grpc" + "io" + "os" +) + +// The volume syncs with a master volume in 2 steps: +// 1. The slave checks the master side to find the subscription checkpoint +// to set up the replication. +// 2. The slave receives the updates from the master + +/* +Assume the slave volume needs to follow the master volume. + +The master volume could be compacted, and could be many files ahead of +the slave volume. + +Step 0: // implemented in command/backup.go, to avoid dat file size overflow. +0.1 If the slave compact version is less than the master's, do a local compaction, and set +the local compact version to be the same as the master's. +0.2 If the slave size is still bigger than the master's, discard the local copy and do a full copy. + +Step 1: +The slave volume asks the master with its last modification time t. +The master does a binary search in the volume (using the .idx file as an array, and checking the appendAtNs in the .dat file), +to find the first entry with appendAtNs > t.
+ +Step 2: +The master sends content bytes to the slave. The bytes are not chunked by needle. + +Step 3: +The slave generates the needle map for the new bytes. (This may be optimized to incrementally +update the needle map when receiving new .dat bytes. But that seems not necessary now.) + +*/ + +func (v *Volume) Follow(volumeServer string, grpcDialOption grpc.DialOption) (error) { + + ctx := context.Background() + + startFromOffset := v.Size() + appendAtNs, err := v.findLastAppendAtNs() + if err != nil { + return err + } + + err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + + stream, err := client.VolumeFollow(ctx, &volume_server_pb.VolumeFollowRequest{ + VolumeId: uint32(v.Id), + Since: appendAtNs, + }) + if err != nil { + return err + } + + v.dataFile.Seek(startFromOffset, io.SeekStart) + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + _, writeErr := v.dataFile.Write(resp.FileContent) + if writeErr != nil { + return writeErr + } + } + + return nil + + }) + + if err != nil { + return err + } + + // TODO add to needle map + + return nil +} + +func (v *Volume) findLastAppendAtNs() (uint64, error) { + offset, err := v.locateLastAppendEntry() + if err != nil { + return 0, err + } + if offset == 0 { + return 0, nil + } + return v.readAppendAtNs(offset) +} + +func (v *Volume) locateLastAppendEntry() (Offset, error) { + indexFile, e := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644) + if e != nil { + return 0, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e) + } + defer indexFile.Close() + + fi, err := indexFile.Stat() + if err != nil { + return 0, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err) + } + fileSize := fi.Size() + if fileSize%NeedleEntrySize != 0 { + return 0, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize) + } + if fileSize == 0 { + return 0, nil + } + + bytes := make([]byte, NeedleEntrySize) + n, e := indexFile.ReadAt(bytes, fileSize-NeedleEntrySize) + if n != NeedleEntrySize { + return 0, fmt.Errorf("file %s read error: %v", indexFile.Name(), e) + } + _, offset, _ := IdxFileEntry(bytes) + + return offset, nil +} + +func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) { + + n, bodyLength, err := ReadNeedleHeader(v.dataFile, v.SuperBlock.version, int64(offset)*NeedlePaddingSize) + if err != nil { + return 0, fmt.Errorf("ReadNeedleHeader: %v", err) + } + err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, int64(offset)*NeedlePaddingSize, bodyLength) + if err != nil { + return 0, fmt.Errorf("ReadNeedleBody offset %d: %v", int64(offset)*NeedlePaddingSize, err) + } + return n.AppendAtNs, nil + +} + +// on server side +func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast bool, err error) { + indexFile, openErr := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644) + if openErr != nil { + err = fmt.Errorf("cannot read %s.idx: %v", v.FileName(), openErr) + return + } + defer indexFile.Close() + + fi, statErr := indexFile.Stat() + if statErr != nil { + err = fmt.Errorf("file %s stat error: %v", indexFile.Name(), statErr) + return + } + fileSize := fi.Size() + if fileSize%NeedleEntrySize != 0 { + err = fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize) + return + } + + bytes := make([]byte, NeedleEntrySize) + entryCount := fileSize / NeedleEntrySize + l := int64(0) + h := entryCount + + for l < h { + + m := (l + h) / 2 + + if m ==
entryCount { + return 0, true, nil + } + + // read the appendAtNs for entry m + offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, m) + if err != nil { + return + } + + mNs, nsReadErr := v.readAppendAtNs(offset) + if nsReadErr != nil { + err = nsReadErr + return + } + + // move the boundary + if mNs <= sinceNs { + l = m + 1 + } else { + h = m + } + + } + + if l == entryCount { + return 0, true, nil + } + + offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, l) + + return offset, false, err + +} + +// bytes is of size NeedleEntrySize +func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m int64) (Offset, error) { + if _, readErr := indexFile.ReadAt(bytes, m*NeedleEntrySize); readErr != nil && readErr != io.EOF { + return 0, readErr + } + _, offset, _ := IdxFileEntry(bytes) + return offset, nil +} diff --git a/weed/storage/volume_follow_test.go b/weed/storage/volume_follow_test.go new file mode 100644 index 000000000..3291d203a --- /dev/null +++ b/weed/storage/volume_follow_test.go @@ -0,0 +1,39 @@ +package storage + +import "testing" + +func TestBinarySearch(t *testing.T) { + var testInput []int + testInput = []int{-1, 0, 3, 5, 9, 12} + + if 3 != binarySearchForLargerThanTarget(testInput, 4) { + t.Errorf("failed to find target %d", 4) + } + if 3 != binarySearchForLargerThanTarget(testInput, 3) { + t.Errorf("failed to find target %d", 3) + } + if 6 != binarySearchForLargerThanTarget(testInput, 12) { + t.Errorf("failed to find target %d", 12) + } + if 1 != binarySearchForLargerThanTarget(testInput, -1) { + t.Errorf("failed to find target %d", -1) + } + if 0 != binarySearchForLargerThanTarget(testInput, -2) { + t.Errorf("failed to find target %d", -2) + } + +} + +func binarySearchForLargerThanTarget(nums []int, target int) int { + l := 0 + h := len(nums) + for l < h { + m := (l + h) / 2 + if nums[m] <= target { + l = m + 1 + } else { + h = m + } + } + return l +} diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go index 827e6685a..7e5432417 100644 --- a/weed/storage/volume_sync.go +++ b/weed/storage/volume_sync.go @@ -26,23 +26,17 @@ Assume the slave volume needs to follow the master volume. The master volume could be compacted, and could be many files ahead of slave volume. +Step 0: +If the slave compact version is less than the master's, do a local compaction. +If the slave size is still bigger than the master's, discard the local copy and do a full copy. + Step 1: -The slave volume will ask the master volume for a snapshot -of (existing file entries, last offset, number of compacted times). - -For each entry x in master existing file entries: - if x does not exist locally: - add x locally - -For each entry y in local slave existing file entries: - if y does not exist on master: - delete y locally +The slave volume asks the master with its last modification time t. +The master does a binary search in the volume (using the .idx file as an array, and checking the appendAtNs in the .dat file), +to find the first entry with appendAtNs > t. Step 2: -After this, use the last offset and number of compacted times to request -the master volume to send a new file, and keep looping. If the number of -compacted times is changed, go back to step 1 (very likely this can be -optimized more later). +The master iterates over the following entries (including the first one) and sends them to the follower.
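+ +For example, if the .idx entries point to needles whose appendAtNs values are 100, 200, 300 and 400 (an illustrative sequence, not real data), and the follower reports t = 250, the binary search returns the entry with appendAtNs 300, so streaming resumes at that needle's offset; if t = 400, the search runs past the last entry, isLast is true, and nothing needs to be sent.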
*/ @@ -58,7 +52,7 @@ func (v *Volume) Synchronize(volumeServer string, grpcDialOption grpc.DialOption if err = v.Compact(0); err != nil { return fmt.Errorf("Compact Volume before synchronizing %v", err) } - if err = v.commitCompact(); err != nil { + if err = v.CommitCompact(); err != nil { return fmt.Errorf("Commit Compact before synchronizing %v", err) } } diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index b29e15843..de39628db 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -38,7 +38,7 @@ func (v *Volume) Compact2() error { return v.copyDataBasedOnIndexFile(filePath+".cpd", filePath+".cpx") } -func (v *Volume) commitCompact() error { +func (v *Volume) CommitCompact() error { glog.V(0).Infof("Committing volume %d vacuuming...", v.Id) v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() @@ -53,7 +53,7 @@ func (v *Volume) commitCompact() error { var e error if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil { - glog.V(0).Infof("makeupDiff in commitCompact volume %d failed %v", v.Id, e) + glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) e = os.Remove(v.FileName() + ".cpd") if e != nil { return e diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go index 0bc24037d..4909885b9 100644 --- a/weed/storage/volume_vacuum_test.go +++ b/weed/storage/volume_vacuum_test.go @@ -85,7 +85,7 @@ func TestCompaction(t *testing.T) { doSomeWritesDeletes(i+beforeCommitFileCount, v, t, infos) } - v.commitCompact() + v.CommitCompact() v.Close() From a32797518bfadf0982c5519ab636943c90d51bec Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Mar 2019 09:20:05 -0700 Subject: [PATCH 098/450] weed master: redirect to leader for default admin UI fix https://github.com/chrislusf/seaweedfs/issues/898 --- weed/server/master_server.go | 2 +- weed/server/master_ui/templates.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/server/master_server.go b/weed/server/master_server.go index a70de5e6e..ef32809b0 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -79,7 +79,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, if !disableHttp { handleStaticResources2(r) - r.HandleFunc("/", ms.uiStatusHandler) + r.HandleFunc("/", ms.proxyToLeader(ms.uiStatusHandler)) r.HandleFunc("/ui/index.html", ms.uiStatusHandler) r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go index ce632b099..ceb0528cf 100644 --- a/weed/server/master_ui/templates.go +++ b/weed/server/master_ui/templates.go @@ -41,7 +41,7 @@ var StatusTpl = template.Must(template.New("status").Parse(`

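The proxyToLeader wrapper applied to the root handler above follows a common decorator pattern: serve the request locally when this master is the raft leader, otherwise reverse-proxy it to whichever node currently is, so any master answers the admin UI correctly. Below is only a minimal, self-contained sketch of that pattern; the leaderLookup callback, the localhost addresses, and main are illustrative assumptions, not the actual SeaweedFS implementation.

package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

// leaderLookup reports the current leader address and whether this node is the leader.
// In a real master this would come from the raft server; here it is just a callback.
type leaderLookup func() (leader string, isSelf bool)

// proxyToLeader wraps a handler: the leader serves the request itself,
// a follower reverse-proxies the request to the leader and relays the response.
func proxyToLeader(lookup leaderLookup, h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		leader, isSelf := lookup()
		if isSelf {
			h(w, r)
			return
		}
		target, err := url.Parse("http://" + leader)
		if err != nil {
			http.Error(w, "no leader available", http.StatusServiceUnavailable)
			return
		}
		httputil.NewSingleHostReverseProxy(target).ServeHTTP(w, r)
	}
}

func main() {
	// Illustrative lookup: pretend this node is the leader at localhost:9333.
	lookup := func() (string, bool) { return "localhost:9333", true }
	http.HandleFunc("/", proxyToLeader(lookup, func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("served by the leader"))
	}))
	log.Fatal(http.ListenAndServe(":9334", nil))
}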
From 91b9a684933138b21b9453b5cdc5ab97f54ed85d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Mar 2019 09:39:54 -0700 Subject: [PATCH 099/450] udpate glide --- weed/glide.lock | 352 ------------------------------------------------ weed/glide.yaml | 9 ++ 2 files changed, 9 insertions(+), 352 deletions(-) delete mode 100644 weed/glide.lock diff --git a/weed/glide.lock b/weed/glide.lock deleted file mode 100644 index f5039bb71..000000000 --- a/weed/glide.lock +++ /dev/null @@ -1,352 +0,0 @@ -hash: 306ab43df769fe8072733ba28c3d2b6308288f248ee65806ac7d0bfa3349ab73 -updated: 2019-02-25T00:25:56.429024-08:00 -imports: -- name: cloud.google.com/go - version: e2c125ceac8b663cfcf4610477d4d67827377cb7 - subpackages: - - compute/metadata - - iam - - internal - - internal/optional - - internal/trace - - internal/version - - pubsub - - pubsub/apiv1 - - pubsub/internal/distribution - - storage -- name: github.com/aws/aws-sdk-go - version: 5604f1add1ce6b18465fd50f7fe8de7561cc8a62 - subpackages: - - aws - - aws/awserr - - aws/awsutil - - aws/client - - aws/client/metadata - - aws/corehandlers - - aws/credentials - - aws/credentials/ec2rolecreds - - aws/credentials/endpointcreds - - aws/credentials/processcreds - - aws/credentials/stscreds - - aws/csm - - aws/defaults - - aws/ec2metadata - - aws/endpoints - - aws/request - - aws/session - - aws/signer/v4 - - internal/ini - - internal/s3err - - internal/sdkio - - internal/sdkrand - - internal/sdkuri - - internal/shareddefaults - - private/protocol - - private/protocol/eventstream - - private/protocol/eventstream/eventstreamapi - - private/protocol/query - - private/protocol/query/queryutil - - private/protocol/rest - - private/protocol/restxml - - private/protocol/xml/xmlutil - - service/s3 - - service/s3/s3iface - - service/sqs - - service/sts -- name: github.com/Azure/azure-pipeline-go - version: 76b57228f36adfbb7e6990ba1347a7fbbf3043da - subpackages: - - pipeline -- name: github.com/Azure/azure-storage-blob-go - version: 457680cc0804810f6d02958481e0ffdda51d5c60 - subpackages: - - azblob -- name: github.com/boltdb/bolt - version: fd01fc79c553a8e99d512a07e8e0c63d4a3ccfc5 -- name: github.com/chrislusf/raft - version: 10d6e2182d923e93ec0cc1aa1d556e5b1f8a39e7 - subpackages: - - protobuf -- name: github.com/DataDog/zstd - version: 1e382f59b41eebd6f592c5db4fd1958ec38a0eba -- name: github.com/davecgh/go-spew - version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d - subpackages: - - spew -- name: github.com/dgrijalva/jwt-go - version: 3af4c746e1c248ee8491a3e0c6f7a9cd831e95f8 -- name: github.com/disintegration/imaging - version: 5362c131d56305ce787e79a5b94ffc956df00d62 -- name: github.com/dustin/go-humanize - version: 9f541cc9db5d55bce703bd99987c9d5cb8eea45e -- name: github.com/eapache/go-resiliency - version: 842e16ec2c98ef0c59eebfe60d2d3500a793ba19 - subpackages: - - breaker -- name: github.com/eapache/go-xerial-snappy - version: 776d5712da21bc4762676d614db1d8a64f4238b0 -- name: github.com/eapache/queue - version: 093482f3f8ce946c05bcba64badd2c82369e084d -- name: github.com/fsnotify/fsnotify - version: ccc981bf80385c528a65fbfdd49bf2d8da22aa23 -- name: github.com/go-redis/redis - version: bd542089bb6e776e6fced5038edac8a0f526aa53 - subpackages: - - internal - - internal/consistenthash - - internal/hashtag - - internal/pool - - internal/proto - - internal/util -- name: github.com/go-sql-driver/mysql - version: 972a708cf97995463843c08c8585b26997daf0e1 -- name: github.com/gocql/gocql - version: ec4793573d1447b6f92a1b359a0594566fad9d0e - subpackages: - - internal/lru 
- - internal/murmur - - internal/streams -- name: github.com/golang/protobuf - version: c823c79ea1570fb5ff454033735a8e68575d1d0f - subpackages: - - proto - - protoc-gen-go/descriptor - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/empty - - ptypes/timestamp -- name: github.com/golang/snappy - version: 2a8bb927dd31d8daada140a5d09578521ce5c36a -- name: github.com/google/btree - version: 4030bb1f1f0c35b30ca7009e9ebd06849dd45306 -- name: github.com/googleapis/gax-go - version: ddfab93c3faef4935403ac75a7c11f0e731dc181 - subpackages: - - v2 -- name: github.com/gorilla/mux - version: 8559a4f775fc329165fe32bd4c2543de8ada8fce -- name: github.com/hailocab/go-hostpool - version: e80d13ce29ede4452c43dea11e79b9bc8a15b478 -- name: github.com/hashicorp/golang-lru - version: 20f1fb78b0740ba8c3cb143a61e86ba5c8669768 - subpackages: - - simplelru -- name: github.com/hashicorp/hcl - version: 65a6292f0157eff210d03ed1bf6c59b190b8b906 - subpackages: - - hcl/ast - - hcl/parser - - hcl/printer - - hcl/scanner - - hcl/strconv - - hcl/token - - json/parser - - json/scanner - - json/token -- name: github.com/jmespath/go-jmespath - version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5 -- name: github.com/karlseguin/ccache - version: ec06cd93a07565b373789b0078ba88fe697fddd9 -- name: github.com/klauspost/crc32 - version: bab58d77464aa9cf4e84200c3276da0831fe0c03 -- name: github.com/kurin/blazer - version: f20ef4f2aa8ccc2e94a1981dc37f199e90fa4ba5 - subpackages: - - b2 - - base - - internal/b2assets - - internal/b2types - - internal/blog - - x/window -- name: github.com/lib/pq - version: 9eb73efc1fcc404148b56765b0d3f61d9a5ef8ee - subpackages: - - oid -- name: github.com/magiconair/properties - version: 7757cc9fdb852f7579b24170bcacda2c7471bb6a -- name: github.com/mitchellh/mapstructure - version: 3536a929edddb9a5b34bd6861dc4a9647cb459fe -- name: github.com/pelletier/go-toml - version: 27c6b39a135b7dc87a14afb068809132fb7a9a8f -- name: github.com/pierrec/lz4 - version: 062282ea0dcff40c9fb8525789eef9644b1fbd6e - subpackages: - - internal/xxh32 -- name: github.com/rakyll/statik - version: 79258177a57a85a8ab2eca7ce0936aad80307f4e - subpackages: - - fs -- name: github.com/rcrowley/go-metrics - version: 3113b8401b8a98917cde58f8bbd42a1b1c03b1fd -- name: github.com/rwcarlsen/goexif - version: b1fd11e07dc5bc0d2ca3b79d28cbdf3c6d186247 - subpackages: - - exif - - tiff -- name: github.com/satori/go.uuid - version: b2ce2384e17bbe0c6d34077efa39dbab3e09123b -- name: github.com/seaweedfs/fuse - version: 1aae43e32cadcfa182fc60777f20fb02673e8f82 - subpackages: - - fs - - fuseutil -- name: github.com/Shopify/sarama - version: 4602b5a8c6e826f9e0737865818dd43b2339a092 -- name: github.com/spaolacci/murmur3 - version: f09979ecbc725b9e6d41a297405f65e7e8804acc -- name: github.com/spf13/afero - version: f4711e4db9e9a1d3887343acb72b2bbfc2f686f5 - subpackages: - - mem -- name: github.com/spf13/cast - version: 8c9545af88b134710ab1cd196795e7f2388358d7 -- name: github.com/spf13/jwalterweatherman - version: 94f6ae3ed3bceceafa716478c5fbf8d29ca601a1 -- name: github.com/spf13/pflag - version: 24fa6976df40757dce6aea913e7b81ade90530e1 -- name: github.com/spf13/viper - version: d104d259b3380cb653bb793756823c3c41b37b53 -- name: github.com/syndtr/goleveldb - version: 9d007e481048296f09f59bd19bb7ae584563cd95 - subpackages: - - leveldb - - leveldb/cache - - leveldb/comparer - - leveldb/errors - - leveldb/filter - - leveldb/iterator - - leveldb/journal - - leveldb/memdb - - leveldb/opt - - leveldb/storage - - leveldb/table - - leveldb/util -- name: 
github.com/willf/bitset - version: 20ad246f50b49590afcb1ed8ad143da7163869cb -- name: github.com/willf/bloom - version: 54e3b963ee1652b06c4562cb9b6020ebc6e36e59 -- name: go.opencensus.io - version: beafb2a85a579a4918ba259877a1625e9213a263 - subpackages: - - exemplar - - internal - - internal/tagencoding - - plugin/ocgrpc - - plugin/ochttp - - plugin/ochttp/propagation/b3 - - stats - - stats/internal - - stats/view - - tag - - trace - - trace/internal - - trace/propagation - - trace/tracestate -- name: golang.org/x/image - version: 31aff87c08e9a5e5d524279a564f96968336f886 - subpackages: - - bmp - - tiff - - tiff/lzw -- name: golang.org/x/net - version: 3a22650c66bd7f4fb6d1e8072ffd7b75c8a27898 - subpackages: - - context - - context/ctxhttp - - http/httpguts - - http2 - - http2/hpack - - idna - - internal/timeseries - - trace -- name: golang.org/x/oauth2 - version: 9b3c75971fc92dd27c6436a37c05c831498658f1 - subpackages: - - google - - internal - - jws - - jwt -- name: golang.org/x/sync - version: 37e7f081c4d4c64e13b10787722085407fe5d15f - subpackages: - - errgroup - - semaphore -- name: golang.org/x/text - version: 6c92c7dc7f53607809182301b96e4cc1975143f1 - subpackages: - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: golang.org/x/tools - version: 83362c3779f5f48611068d488a03ea7bbaddc81e - subpackages: - - godoc/util - - godoc/vfs -- name: google.golang.org/api - version: 8a550ba84cafabe9b2262c41303f31e5a4626318 - subpackages: - - gensupport - - googleapi - - googleapi/internal/uritemplates - - googleapi/transport - - internal - - iterator - - option - - storage/v1 - - support/bundler - - transport - - transport/grpc - - transport/http - - transport/http/internal/propagation -- name: google.golang.org/genproto - version: 082222b4a5c572e33e82ee9162d1352c7cf38682 - subpackages: - - googleapis/api/annotations - - googleapis/iam/v1 - - googleapis/pubsub/v1 - - googleapis/rpc/code - - googleapis/rpc/status - - protobuf/field_mask -- name: google.golang.org/grpc - version: 2773c7bbcf81cf358d3f0038b1469b2d44062acb - subpackages: - - balancer - - balancer/base - - balancer/roundrobin - - binarylog/grpc_binarylog_v1 - - codes - - connectivity - - credentials - - credentials/internal - - credentials/oauth - - encoding - - encoding/proto - - grpclog - - internal - - internal/backoff - - internal/binarylog - - internal/channelz - - internal/envconfig - - internal/grpcrand - - internal/grpcsync - - internal/syscall - - internal/transport - - keepalive - - metadata - - naming - - peer - - reflection - - reflection/grpc_reflection_v1alpha - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap -- name: gopkg.in/inf.v0 - version: d2d2541c53f18d2a059457998ce2876cc8e67cbf -- name: gopkg.in/yaml.v2 - version: 51d6538a90f86fe93ac480b35f37b2be17fef232 -testImports: [] diff --git a/weed/glide.yaml b/weed/glide.yaml index dd00f8522..8e4c090aa 100644 --- a/weed/glide.yaml +++ b/weed/glide.yaml @@ -38,6 +38,7 @@ import: subpackages: - b2 - package: github.com/lib/pq +- package: github.com/peterh/liner - package: github.com/rakyll/statik subpackages: - fs @@ -54,6 +55,14 @@ import: - leveldb - leveldb/util - package: github.com/willf/bloom +- package: gocloud.dev + subpackages: + - pubsub + - pubsub/awssnssqs + - pubsub/azuresb + - pubsub/gcppubsub + - pubsub/natspubsub + - pubsub/rabbitpubsub - package: golang.org/x/net subpackages: - context From df95ce0b6cc5e4dc91f7dcb0d2fe1ad2bbe47dd7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Mar 2019 23:01:53 
-0700 Subject: [PATCH 100/450] weed backup: efficient delta backup fix https://github.com/chrislusf/seaweedfs/issues/399 --- weed/command/export.go | 2 +- weed/command/fix.go | 2 +- weed/server/volume_grpc_follow.go | 6 +++--- weed/storage/volume_follow.go | 28 ++++++++++++++++++++++++---- weed/storage/volume_read_write.go | 28 +++++++++++++++++----------- weed/storage/volume_vacuum.go | 2 +- 6 files changed, 47 insertions(+), 21 deletions(-) diff --git a/weed/command/export.go b/weed/command/export.go index 5c7e064ce..cdced5936 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -107,7 +107,7 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *storage.Needle, offset i nv, ok := needleMap.Get(n.Id) glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv) - if ok && nv.Size > 0 && int64(nv.Offset)*types.NeedlePaddingSize == offset { + if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && int64(nv.Offset)*types.NeedlePaddingSize == offset { if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) { glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", n.LastModified, newerThanUnix) diff --git a/weed/command/fix.go b/weed/command/fix.go index a800978c6..42ae23a3c 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -44,7 +44,7 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *storage.Needle, offset int64) error { glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) - if n.Size > 0 { + if n.Size > 0 && n.Size != types.TombstoneFileSize { pe := scanner.nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { diff --git a/weed/server/volume_grpc_follow.go b/weed/server/volume_grpc_follow.go index bdd0ef6f5..6b3330a0d 100644 --- a/weed/server/volume_grpc_follow.go +++ b/weed/server/volume_grpc_follow.go @@ -20,14 +20,14 @@ func (vs *VolumeServer) VolumeFollow(req *volume_server_pb.VolumeFollowRequest, stopOffset := v.Size() foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(req.Since) if err != nil { - return fmt.Errorf("fail to locate by appendAtNs: %s", err) + return fmt.Errorf("fail to locate by appendAtNs %d: %s", req.Since, err) } if isLastOne { return nil } - startOffset := int64(foundOffset) * int64(types.NeedleEntrySize) + startOffset := int64(foundOffset) * int64(types.NeedlePaddingSize) buf := make([]byte, 1024*1024*2) return sendFileContent(v.DataFile(), buf, startOffset, stopOffset, stream) @@ -40,7 +40,7 @@ func sendFileContent(datFile *os.File, buf []byte, startOffset, stopOffset int64 n, readErr := datFile.ReadAt(buf, startOffset+i) if readErr == nil || readErr == io.EOF { resp := &volume_server_pb.VolumeFollowResponse{} - resp.FileContent = buf[i : i+int64(n)] + resp.FileContent = buf[:int64(n)] sendErr := stream.Send(resp) if sendErr != nil { return sendErr diff --git a/weed/storage/volume_follow.go b/weed/storage/volume_follow.go index 2aedd1682..e1a5fcb83 100644 --- a/weed/storage/volume_follow.go +++ b/weed/storage/volume_follow.go @@ -87,9 +87,9 @@ func (v *Volume) Follow(volumeServer string, grpcDialOption grpc.DialOption) (er return err } - // TODO add to needle map + // add to needle map + return ScanVolumeFileFrom(v.version, v.dataFile, startFromOffset, 
&VolumeFileScanner4GenIdx{v:v}) - return nil } func (v *Volume) findLastAppendAtNs() (uint64, error) { @@ -138,9 +138,9 @@ func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) { if err != nil { return 0, fmt.Errorf("ReadNeedleHeader: %v", err) } - err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, int64(offset)*NeedlePaddingSize, bodyLength) + err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, int64(offset)*NeedlePaddingSize+int64(NeedleEntrySize), bodyLength) if err != nil { - return 0, fmt.Errorf("ReadNeedleBody offset %d: %v", int64(offset)*NeedlePaddingSize, err) + return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", int64(offset)*NeedlePaddingSize, bodyLength, err) } return n.AppendAtNs, nil @@ -218,3 +218,23 @@ func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m _, offset, _ := IdxFileEntry(bytes) return offset, nil } + +// generate the volume idx +type VolumeFileScanner4GenIdx struct { + v *Volume +} + +func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock SuperBlock) error { + return nil + +} +func (scanner *VolumeFileScanner4GenIdx) ReadNeedleBody() bool { + return false +} + +func (scanner *VolumeFileScanner4GenIdx) VisitNeedle(n *Needle, offset int64) error { + if n.Size > 0 && n.Size != TombstoneFileSize { + return scanner.v.nm.Put(n.Id, Offset(offset/NeedlePaddingSize), n.Size) + } + return scanner.v.nm.Delete(n.Id, Offset(offset/NeedlePaddingSize)) +} diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go index ed9729c84..5366a547d 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_read_write.go @@ -180,30 +180,37 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId, volumeFileScanner VolumeFileScanner) (err error) { var v *Volume if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil { - return fmt.Errorf("Failed to load volume %d: %v", id, err) + return fmt.Errorf("failed to load volume %d: %v", id, err) } if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { - return fmt.Errorf("Failed to process volume %d super block: %v", id, err) + return fmt.Errorf("failed to process volume %d super block: %v", id, err) } defer v.Close() version := v.Version() offset := int64(v.SuperBlock.BlockSize()) - n, rest, e := ReadNeedleHeader(v.dataFile, version, offset) + + return ScanVolumeFileFrom(version, v.dataFile, offset, volumeFileScanner) +} + +func ScanVolumeFileFrom(version Version, dataFile *os.File, offset int64, volumeFileScanner VolumeFileScanner) (err error) { + n, rest, e := ReadNeedleHeader(dataFile, version, offset) if e != nil { - err = fmt.Errorf("cannot read needle header: %v", e) - return + if e == io.EOF { + return nil + } + return fmt.Errorf("cannot read %s at offset %d: %v", dataFile.Name(), offset, e) } for n != nil { if volumeFileScanner.ReadNeedleBody() { - if err = n.ReadNeedleBody(v.dataFile, version, offset+NeedleEntrySize, rest); err != nil { + if err = n.ReadNeedleBody(dataFile, version, offset+NeedleEntrySize, rest); err != nil { glog.V(0).Infof("cannot read needle body: %v", err) //err = fmt.Errorf("cannot read needle body: %v", err) //return } } - err = volumeFileScanner.VisitNeedle(n, offset) + err := volumeFileScanner.VisitNeedle(n, offset) if err == io.EOF { return nil } @@ -212,14 +219,13 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId, } offset += NeedleEntrySize + rest glog.V(4).Infof("==> new entry offset %d", offset) - if n, rest, err = 
ReadNeedleHeader(v.dataFile, version, offset); err != nil { + if n, rest, err = ReadNeedleHeader(dataFile, version, offset); err != nil { if err == io.EOF { return nil } - return fmt.Errorf("cannot read needle header: %v", err) + return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err) } glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest) } - - return + return nil } diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index de39628db..b575277cd 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -261,7 +261,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *Needle, offset int64) er } nv, ok := scanner.v.nm.Get(n.Id) glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) - if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 { + if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 && nv.Size != TombstoneFileSize { if err := scanner.nm.Put(n.Id, Offset(scanner.newOffset/NeedlePaddingSize), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } From 19728fe3f6dd66047fe2efd769932a7706833170 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Mar 2019 23:12:14 -0700 Subject: [PATCH 101/450] remove deprecated code --- weed/operation/sync_volume.go | 41 -- weed/pb/volume_server.proto | 22 - weed/pb/volume_server_pb/volume_server.pb.go | 411 ++++--------------- weed/server/volume_grpc_sync.go | 76 ---- weed/storage/volume_sync.go | 203 +-------- 5 files changed, 91 insertions(+), 662 deletions(-) diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index 6af2404c0..5562f12ab 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -2,12 +2,8 @@ package operation import ( "context" - "fmt" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - . "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" - "io" ) func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { @@ -22,40 +18,3 @@ func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint return } - -func GetVolumeIdxEntries(server string, grpcDialOption grpc.DialOption, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error { - - return WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{ - VolumeId: vid, - }) - if err != nil { - return err - } - - var indexFileContent []byte - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("read index entries: %v", err) - } - indexFileContent = append(indexFileContent, resp.IndexFileContent...) 
- } - - dataSize := len(indexFileContent) - - for idx := 0; idx+NeedleEntrySize <= dataSize; idx += NeedleEntrySize { - line := indexFileContent[idx : idx+NeedleEntrySize] - key := BytesToNeedleId(line[:NeedleIdSize]) - offset := BytesToOffset(line[NeedleIdSize : NeedleIdSize+OffsetSize]) - size := util.BytesToUint32(line[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize]) - eachEntryFn(key, offset, size) - } - - return nil - }) -} diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 7686cc614..3b5b36a21 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -26,10 +26,6 @@ service VolumeServer { } rpc VolumeFollow (VolumeFollowRequest) returns (stream VolumeFollowResponse) { } - rpc VolumeSyncIndex (VolumeSyncIndexRequest) returns (stream VolumeSyncIndexResponse) { - } - rpc VolumeSyncData (VolumeSyncDataRequest) returns (stream VolumeSyncDataResponse) { - } rpc VolumeMount (VolumeMountRequest) returns (VolumeMountResponse) { } @@ -129,24 +125,6 @@ message VolumeFollowResponse { bytes file_content = 1; } -message VolumeSyncIndexRequest { - uint32 volume_id = 1; -} -message VolumeSyncIndexResponse { - bytes index_file_content = 1; -} - -message VolumeSyncDataRequest { - uint32 volume_id = 1; - uint32 revision = 2; - uint32 offset = 3; - uint32 size = 4; - string needle_id = 5; -} -message VolumeSyncDataResponse { - bytes file_content = 1; -} - message VolumeMountRequest { uint32 volume_id = 1; } diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 4c5f1effa..0f3b47ee0 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -29,10 +29,6 @@ It has these top-level messages: VolumeSyncStatusResponse VolumeFollowRequest VolumeFollowResponse - VolumeSyncIndexRequest - VolumeSyncIndexResponse - VolumeSyncDataRequest - VolumeSyncDataResponse VolumeMountRequest VolumeMountResponse VolumeUnmountRequest @@ -462,102 +458,6 @@ func (m *VolumeFollowResponse) GetFileContent() []byte { return nil } -type VolumeSyncIndexRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` -} - -func (m *VolumeSyncIndexRequest) Reset() { *m = VolumeSyncIndexRequest{} } -func (m *VolumeSyncIndexRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncIndexRequest) ProtoMessage() {} -func (*VolumeSyncIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *VolumeSyncIndexRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId - } - return 0 -} - -type VolumeSyncIndexResponse struct { - IndexFileContent []byte `protobuf:"bytes,1,opt,name=index_file_content,json=indexFileContent,proto3" json:"index_file_content,omitempty"` -} - -func (m *VolumeSyncIndexResponse) Reset() { *m = VolumeSyncIndexResponse{} } -func (m *VolumeSyncIndexResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncIndexResponse) ProtoMessage() {} -func (*VolumeSyncIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -func (m *VolumeSyncIndexResponse) GetIndexFileContent() []byte { - if m != nil { - return m.IndexFileContent - } - return nil -} - -type VolumeSyncDataRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Revision uint32 `protobuf:"varint,2,opt,name=revision" json:"revision,omitempty"` - Offset uint32 
`protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` - NeedleId string `protobuf:"bytes,5,opt,name=needle_id,json=needleId" json:"needle_id,omitempty"` -} - -func (m *VolumeSyncDataRequest) Reset() { *m = VolumeSyncDataRequest{} } -func (m *VolumeSyncDataRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncDataRequest) ProtoMessage() {} -func (*VolumeSyncDataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *VolumeSyncDataRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId - } - return 0 -} - -func (m *VolumeSyncDataRequest) GetRevision() uint32 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *VolumeSyncDataRequest) GetOffset() uint32 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *VolumeSyncDataRequest) GetSize() uint32 { - if m != nil { - return m.Size - } - return 0 -} - -func (m *VolumeSyncDataRequest) GetNeedleId() string { - if m != nil { - return m.NeedleId - } - return "" -} - -type VolumeSyncDataResponse struct { - FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` -} - -func (m *VolumeSyncDataResponse) Reset() { *m = VolumeSyncDataResponse{} } -func (m *VolumeSyncDataResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncDataResponse) ProtoMessage() {} -func (*VolumeSyncDataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -func (m *VolumeSyncDataResponse) GetFileContent() []byte { - if m != nil { - return m.FileContent - } - return nil -} - type VolumeMountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } @@ -565,7 +465,7 @@ type VolumeMountRequest struct { func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeMountRequest) ProtoMessage() {} -func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *VolumeMountRequest) GetVolumeId() uint32 { if m != nil { @@ -580,7 +480,7 @@ type VolumeMountResponse struct { func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} } func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeMountResponse) ProtoMessage() {} -func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } type VolumeUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -589,7 +489,7 @@ type VolumeUnmountRequest struct { func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountRequest) ProtoMessage() {} -func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *VolumeUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -604,7 +504,7 @@ type VolumeUnmountResponse struct { func (m *VolumeUnmountResponse) Reset() { *m = 
VolumeUnmountResponse{} } func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountResponse) ProtoMessage() {} -func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } type VolumeDeleteRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -613,7 +513,7 @@ type VolumeDeleteRequest struct { func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteRequest) ProtoMessage() {} -func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *VolumeDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -628,7 +528,7 @@ type VolumeDeleteResponse struct { func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} } func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteResponse) ProtoMessage() {} -func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } type ReplicateVolumeRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -641,7 +541,7 @@ type ReplicateVolumeRequest struct { func (m *ReplicateVolumeRequest) Reset() { *m = ReplicateVolumeRequest{} } func (m *ReplicateVolumeRequest) String() string { return proto.CompactTextString(m) } func (*ReplicateVolumeRequest) ProtoMessage() {} -func (*ReplicateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*ReplicateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *ReplicateVolumeRequest) GetVolumeId() uint32 { if m != nil { @@ -684,7 +584,7 @@ type ReplicateVolumeResponse struct { func (m *ReplicateVolumeResponse) Reset() { *m = ReplicateVolumeResponse{} } func (m *ReplicateVolumeResponse) String() string { return proto.CompactTextString(m) } func (*ReplicateVolumeResponse) ProtoMessage() {} -func (*ReplicateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*ReplicateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } type CopyFileRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -695,7 +595,7 @@ type CopyFileRequest struct { func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } func (*CopyFileRequest) ProtoMessage() {} -func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *CopyFileRequest) GetVolumeId() uint32 { if m != nil { @@ -725,7 +625,7 @@ type CopyFileResponse struct { func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func 
(*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } func (m *CopyFileResponse) GetFileContent() []byte { if m != nil { @@ -741,7 +641,7 @@ type ReadVolumeFileStatusRequest struct { func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -761,7 +661,7 @@ type ReadVolumeFileStatusResponse struct { func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -808,7 +708,7 @@ type DiskStatus struct { func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -851,7 +751,7 @@ type MemStatus struct { func (m *MemStatus) Reset() { *m = MemStatus{} } func (m *MemStatus) String() string { return proto.CompactTextString(m) } func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } func (m *MemStatus) GetGoroutines() int32 { if m != nil { @@ -923,10 +823,6 @@ func init() { proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse") proto.RegisterType((*VolumeFollowRequest)(nil), "volume_server_pb.VolumeFollowRequest") proto.RegisterType((*VolumeFollowResponse)(nil), "volume_server_pb.VolumeFollowResponse") - proto.RegisterType((*VolumeSyncIndexRequest)(nil), "volume_server_pb.VolumeSyncIndexRequest") - proto.RegisterType((*VolumeSyncIndexResponse)(nil), "volume_server_pb.VolumeSyncIndexResponse") - proto.RegisterType((*VolumeSyncDataRequest)(nil), "volume_server_pb.VolumeSyncDataRequest") - proto.RegisterType((*VolumeSyncDataResponse)(nil), "volume_server_pb.VolumeSyncDataResponse") proto.RegisterType((*VolumeMountRequest)(nil), "volume_server_pb.VolumeMountRequest") proto.RegisterType((*VolumeMountResponse)(nil), "volume_server_pb.VolumeMountResponse") proto.RegisterType((*VolumeUnmountRequest)(nil), "volume_server_pb.VolumeUnmountRequest") @@ -964,8 +860,6 @@ type VolumeServerClient interface { AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) VolumeFollow(ctx context.Context, in *VolumeFollowRequest, opts ...grpc.CallOption) 
(VolumeServer_VolumeFollowClient, error) - VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) - VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) @@ -1086,70 +980,6 @@ func (x *volumeServerVolumeFollowClient) Recv() (*VolumeFollowResponse, error) { return m, nil } -func (c *volumeServerClient) VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncIndex", opts...) - if err != nil { - return nil, err - } - x := &volumeServerVolumeSyncIndexClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type VolumeServer_VolumeSyncIndexClient interface { - Recv() (*VolumeSyncIndexResponse, error) - grpc.ClientStream -} - -type volumeServerVolumeSyncIndexClient struct { - grpc.ClientStream -} - -func (x *volumeServerVolumeSyncIndexClient) Recv() (*VolumeSyncIndexResponse, error) { - m := new(VolumeSyncIndexResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *volumeServerClient) VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncData", opts...) - if err != nil { - return nil, err - } - x := &volumeServerVolumeSyncDataClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type VolumeServer_VolumeSyncDataClient interface { - Recv() (*VolumeSyncDataResponse, error) - grpc.ClientStream -} - -type volumeServerVolumeSyncDataClient struct { - grpc.ClientStream -} - -func (x *volumeServerVolumeSyncDataClient) Recv() (*VolumeSyncDataResponse, error) { - m := new(VolumeSyncDataResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) { out := new(VolumeMountResponse) err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, c.cc, opts...) @@ -1196,7 +1026,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV } func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) 
if err != nil { return nil, err } @@ -1240,8 +1070,6 @@ type VolumeServerServer interface { AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) VolumeFollow(*VolumeFollowRequest, VolumeServer_VolumeFollowServer) error - VolumeSyncIndex(*VolumeSyncIndexRequest, VolumeServer_VolumeSyncIndexServer) error - VolumeSyncData(*VolumeSyncDataRequest, VolumeServer_VolumeSyncDataServer) error VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) @@ -1419,48 +1247,6 @@ func (x *volumeServerVolumeFollowServer) Send(m *VolumeFollowResponse) error { return x.ServerStream.SendMsg(m) } -func _VolumeServer_VolumeSyncIndex_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(VolumeSyncIndexRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(VolumeServerServer).VolumeSyncIndex(m, &volumeServerVolumeSyncIndexServer{stream}) -} - -type VolumeServer_VolumeSyncIndexServer interface { - Send(*VolumeSyncIndexResponse) error - grpc.ServerStream -} - -type volumeServerVolumeSyncIndexServer struct { - grpc.ServerStream -} - -func (x *volumeServerVolumeSyncIndexServer) Send(m *VolumeSyncIndexResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _VolumeServer_VolumeSyncData_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(VolumeSyncDataRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(VolumeServerServer).VolumeSyncData(m, &volumeServerVolumeSyncDataServer{stream}) -} - -type VolumeServer_VolumeSyncDataServer interface { - Send(*VolumeSyncDataResponse) error - grpc.ServerStream -} - -type volumeServerVolumeSyncDataServer struct { - grpc.ServerStream -} - -func (x *volumeServerVolumeSyncDataServer) Send(m *VolumeSyncDataResponse) error { - return x.ServerStream.SendMsg(m) -} - func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeMountRequest) if err := dec(in); err != nil { @@ -1635,16 +1421,6 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_VolumeFollow_Handler, ServerStreams: true, }, - { - StreamName: "VolumeSyncIndex", - Handler: _VolumeServer_VolumeSyncIndex_Handler, - ServerStreams: true, - }, - { - StreamName: "VolumeSyncData", - Handler: _VolumeServer_VolumeSyncData_Handler, - ServerStreams: true, - }, { StreamName: "CopyFile", Handler: _VolumeServer_CopyFile_Handler, @@ -1657,86 +1433,79 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1296 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0x5f, 0x73, 0xdb, 0x44, - 0x10, 0x8f, 0x62, 0x3b, 0x71, 0xd6, 0x4e, 0x62, 0x2e, 0x69, 0xe2, 0x28, 0x25, 0xb8, 0x07, 0x6d, - 0x9d, 0x36, 0x0d, 0x90, 0x4e, 0xa1, 0x85, 0x17, 0x20, 0x21, 0x90, 0x87, 0xd2, 0x19, 0x85, 0x76, - 0x98, 0xa1, 0x33, 0x9a, 0x8b, 0x74, 0x49, 0x44, 0x64, 0x49, 0x95, 0x4e, 0x69, 0xc2, 0x57, 0xe0, - 0x13, 0xf0, 0xcc, 0x0b, 0xef, 0x7c, 0x20, 0x3e, 0x08, 0x2f, 0xcc, 0xfd, 0x91, 0xac, 0x7f, 0x8e, - 0xc5, 0x9f, 0xb7, 0xd3, 0xde, 0xee, 0x6f, 0x77, 0x6f, 
0x6f, 0xf7, 0x7e, 0x36, 0xac, 0x5c, 0xfa, - 0x6e, 0x3c, 0xa2, 0x66, 0x44, 0xc3, 0x4b, 0x1a, 0xee, 0x06, 0xa1, 0xcf, 0x7c, 0xd4, 0xcb, 0x09, - 0xcd, 0xe0, 0x04, 0x7f, 0x08, 0xe8, 0x2b, 0xc2, 0xac, 0xf3, 0x03, 0xea, 0x52, 0x46, 0x0d, 0xfa, - 0x26, 0xa6, 0x11, 0x43, 0x1b, 0xd0, 0x3e, 0x75, 0x5c, 0x6a, 0x3a, 0x76, 0xd4, 0xd7, 0x06, 0x8d, - 0xe1, 0x82, 0x31, 0xcf, 0xbf, 0x8f, 0xec, 0x08, 0xbf, 0x80, 0x95, 0x9c, 0x41, 0x14, 0xf8, 0x5e, - 0x44, 0xd1, 0x53, 0x98, 0x0f, 0x69, 0x14, 0xbb, 0x4c, 0x1a, 0x74, 0xf6, 0xb6, 0x76, 0x8b, 0xbe, - 0x76, 0x53, 0x93, 0xd8, 0x65, 0x46, 0xa2, 0x8e, 0x1d, 0xe8, 0x66, 0x37, 0xd0, 0x3a, 0xcc, 0x2b, - 0xdf, 0x7d, 0x6d, 0xa0, 0x0d, 0x17, 0x8c, 0x39, 0xe9, 0x1a, 0xad, 0xc1, 0x5c, 0xc4, 0x08, 0x8b, - 0xa3, 0xfe, 0xec, 0x40, 0x1b, 0xb6, 0x0c, 0xf5, 0x85, 0x56, 0xa1, 0x45, 0xc3, 0xd0, 0x0f, 0xfb, - 0x0d, 0xa1, 0x2e, 0x3f, 0x10, 0x82, 0x66, 0xe4, 0xfc, 0x4c, 0xfb, 0xcd, 0x81, 0x36, 0x5c, 0x34, - 0xc4, 0x1a, 0xcf, 0x43, 0xeb, 0xeb, 0x51, 0xc0, 0xae, 0xf1, 0xa7, 0xd0, 0x7f, 0x45, 0xac, 0x38, - 0x1e, 0xbd, 0x12, 0x31, 0xee, 0x9f, 0x53, 0xeb, 0x22, 0xc9, 0x7d, 0x13, 0x16, 0x54, 0xe4, 0x2a, - 0x82, 0x45, 0xa3, 0x2d, 0x05, 0x47, 0x36, 0xfe, 0x02, 0x36, 0x2a, 0x0c, 0xd5, 0x19, 0xbc, 0x0f, - 0x8b, 0x67, 0x24, 0x3c, 0x21, 0x67, 0xd4, 0x0c, 0x09, 0x73, 0x7c, 0x61, 0xad, 0x19, 0x5d, 0x25, - 0x34, 0xb8, 0x0c, 0xff, 0x08, 0x7a, 0x0e, 0xc1, 0x1f, 0x05, 0xc4, 0x62, 0x75, 0x9c, 0xa3, 0x01, - 0x74, 0x82, 0x90, 0x12, 0xd7, 0xf5, 0x2d, 0xc2, 0xa8, 0x38, 0x85, 0x86, 0x91, 0x15, 0xe1, 0x77, - 0x61, 0xb3, 0x12, 0x5c, 0x06, 0x88, 0x9f, 0x16, 0xa2, 0xf7, 0x47, 0x23, 0xa7, 0x96, 0x6b, 0x7c, - 0xbb, 0x14, 0xb5, 0xb0, 0x54, 0xb8, 0xcf, 0x0a, 0xbb, 0x2e, 0x25, 0x5e, 0x1c, 0xd4, 0x02, 0x2e, - 0x46, 0x9c, 0x98, 0xa6, 0xc8, 0xeb, 0xf2, 0x72, 0xec, 0xfb, 0xae, 0x4b, 0x2d, 0xe6, 0xf8, 0x5e, - 0x02, 0xbb, 0x05, 0x60, 0xa5, 0x42, 0x75, 0x55, 0x32, 0x12, 0xac, 0x43, 0xbf, 0x6c, 0xaa, 0x60, - 0x7f, 0xd7, 0x60, 0xe5, 0xcb, 0x28, 0x72, 0xce, 0x3c, 0xe9, 0xb6, 0xd6, 0xf1, 0xe7, 0x1d, 0xce, - 0x16, 0x1d, 0x16, 0xcb, 0xd3, 0x28, 0x95, 0x87, 0x6b, 0x84, 0x34, 0x70, 0x1d, 0x8b, 0x08, 0x88, - 0xa6, 0x80, 0xc8, 0x8a, 0x50, 0x0f, 0x1a, 0x8c, 0xb9, 0xfd, 0x96, 0xd8, 0xe1, 0x4b, 0xbc, 0x06, - 0xab, 0xf9, 0x48, 0x55, 0x0a, 0x9f, 0xc0, 0xba, 0x94, 0x1c, 0x5f, 0x7b, 0xd6, 0xb1, 0xe8, 0x84, - 0x5a, 0x07, 0xfe, 0x97, 0x06, 0xfd, 0xb2, 0xa1, 0xba, 0xc1, 0xff, 0x35, 0xff, 0x7f, 0x9a, 0x1d, - 0x7a, 0x0f, 0x3a, 0x8c, 0x38, 0xae, 0xe9, 0x9f, 0x9e, 0x46, 0x94, 0xf5, 0xe7, 0x06, 0xda, 0xb0, - 0x69, 0x00, 0x17, 0xbd, 0x10, 0x12, 0xb4, 0x0d, 0x3d, 0x4b, 0xde, 0x62, 0x33, 0xa4, 0x97, 0x4e, - 0xc4, 0x91, 0xe7, 0x45, 0x60, 0xcb, 0x56, 0x72, 0xbb, 0xa5, 0x18, 0x61, 0x58, 0x74, 0xec, 0x2b, - 0x53, 0x0c, 0x0f, 0xd1, 0xfa, 0x6d, 0x81, 0xd6, 0x71, 0xec, 0xab, 0x43, 0xc7, 0xa5, 0xc7, 0x7c, - 0x02, 0x7c, 0x0b, 0x2b, 0x32, 0xf9, 0x43, 0xdf, 0x75, 0xfd, 0xb7, 0xb5, 0xea, 0xbe, 0x0a, 0xad, - 0xc8, 0xf1, 0x2c, 0xd9, 0x70, 0x4d, 0x43, 0x7e, 0xe0, 0x67, 0xb0, 0x9a, 0x47, 0x52, 0x47, 0x78, - 0x07, 0xba, 0x22, 0x02, 0xcb, 0xf7, 0x18, 0xf5, 0x98, 0x40, 0xeb, 0x1a, 0x1d, 0x2e, 0xdb, 0x97, - 0x22, 0xfc, 0x04, 0xd6, 0xc6, 0x15, 0x38, 0xf2, 0x6c, 0x7a, 0x55, 0xab, 0x72, 0xdf, 0x64, 0x2b, - 0xae, 0xcc, 0x94, 0xd3, 0x1d, 0x40, 0x0e, 0x17, 0x98, 0x15, 0xae, 0x7b, 0x62, 0xe7, 0x30, 0xe3, - 0xff, 0x57, 0x0d, 0x6e, 0x8d, 0x91, 0x0e, 0x08, 0x23, 0xb5, 0xce, 0x41, 0x87, 0x76, 0x5a, 0x82, - 0x59, 0xb9, 0x97, 0x7c, 0xf3, 0xd9, 0xac, 0x4a, 0xd8, 0x10, 0x3b, 0xea, 0xab, 0x6a, 0x0a, 0x73, - 0x27, 0x1e, 0xa5, 0xb6, 0x1c, 0xf1, 0xf2, 0x2e, 0xb4, 0xa5, 0xe0, 0xc8, 0xc6, 
0x9f, 0x67, 0xcf, - 0x46, 0x86, 0x56, 0xff, 0x60, 0x3f, 0x06, 0x24, 0x8d, 0x9f, 0xfb, 0xb1, 0x57, 0x6f, 0xb0, 0xdd, - 0x4a, 0x2e, 0x84, 0x32, 0x51, 0xdd, 0xf5, 0x38, 0xa9, 0xee, 0x4b, 0x6f, 0x54, 0x1b, 0x6b, 0x3d, - 0x39, 0xd6, 0xd4, 0x48, 0xa1, 0xed, 0x25, 0x4e, 0xf2, 0xaf, 0xec, 0x8d, 0x60, 0x6b, 0x49, 0x04, - 0xf9, 0x87, 0x16, 0xff, 0xa1, 0xc1, 0x9a, 0xa1, 0x7a, 0x8a, 0xfe, 0xbf, 0xd3, 0x2b, 0xdb, 0xbd, - 0x8d, 0x89, 0xdd, 0xdb, 0x1c, 0x77, 0xef, 0x10, 0x7a, 0x91, 0x1f, 0x87, 0x16, 0x35, 0x6d, 0xc2, - 0x88, 0xe9, 0xf9, 0x36, 0x55, 0x05, 0x5d, 0x92, 0x72, 0x5e, 0xc0, 0xef, 0x7c, 0x9b, 0xe2, 0x0d, - 0x58, 0x2f, 0x05, 0xad, 0x12, 0xf2, 0x60, 0x79, 0xdf, 0x0f, 0xae, 0xf9, 0x05, 0xad, 0x99, 0x48, - 0xc7, 0x89, 0xcc, 0xa4, 0xd3, 0x45, 0x26, 0x6d, 0x63, 0xc1, 0x89, 0x8e, 0x64, 0x9b, 0xab, 0x7d, - 0x9b, 0x30, 0xb9, 0xdf, 0x48, 0xf6, 0x0f, 0x08, 0xe3, 0xfb, 0xf8, 0x09, 0xf4, 0xc6, 0xfe, 0xea, - 0xdf, 0xad, 0xcf, 0x60, 0xd3, 0xa0, 0xc4, 0x56, 0x3d, 0xcf, 0xe7, 0x49, 0xfd, 0x99, 0xfb, 0xa7, - 0x06, 0xb7, 0xab, 0x8d, 0xeb, 0xcc, 0x5d, 0xde, 0xdc, 0xc9, 0x5c, 0x63, 0xce, 0x88, 0x46, 0x8c, - 0x8c, 0x02, 0x35, 0x8c, 0x7a, 0x6a, 0xb8, 0x7d, 0x9f, 0xc8, 0xcb, 0x53, 0xb0, 0x51, 0x9a, 0x82, - 0x1c, 0x31, 0x39, 0x9f, 0x0c, 0x62, 0x53, 0x22, 0xda, 0xf2, 0x9c, 0x72, 0x88, 0xa9, 0xb6, 0x40, - 0x6c, 0x49, 0x44, 0xa5, 0x28, 0xe6, 0xea, 0x0f, 0x00, 0x07, 0x4e, 0x74, 0x21, 0xd3, 0xe2, 0x37, - 0xc5, 0x76, 0x42, 0xf5, 0x26, 0xf3, 0x25, 0x97, 0x10, 0xd7, 0x55, 0x41, 0xf3, 0x25, 0x9f, 0x0c, - 0x71, 0x44, 0x6d, 0x15, 0x9e, 0x58, 0x73, 0xd9, 0x69, 0x48, 0xa9, 0x8a, 0x44, 0xac, 0xf1, 0x6f, - 0x1a, 0x2c, 0x3c, 0xa7, 0x23, 0x85, 0xbc, 0x05, 0x70, 0xe6, 0x87, 0x7e, 0xcc, 0x1c, 0x8f, 0x46, - 0xc2, 0x41, 0xcb, 0xc8, 0x48, 0xfe, 0xbd, 0x1f, 0x31, 0xa9, 0xa8, 0x7b, 0xaa, 0x92, 0x13, 0x6b, - 0x2e, 0x3b, 0xa7, 0x24, 0x50, 0xcf, 0x92, 0x58, 0x8b, 0xd7, 0x80, 0x11, 0xeb, 0x42, 0xbc, 0x42, - 0xfc, 0x35, 0xe0, 0x1f, 0x7b, 0xbf, 0x2c, 0x41, 0x57, 0xcd, 0x2d, 0x41, 0x77, 0xd1, 0x6b, 0xe8, - 0x64, 0x68, 0x32, 0xfa, 0xa0, 0xcc, 0x86, 0xcb, 0xb4, 0x5b, 0xbf, 0x3b, 0x45, 0x4b, 0x75, 0xcc, - 0x0c, 0xf2, 0xe0, 0x9d, 0x12, 0x0d, 0x45, 0x0f, 0xca, 0xd6, 0x93, 0x48, 0xae, 0xfe, 0xb0, 0x96, - 0x6e, 0xea, 0x8f, 0xc1, 0x4a, 0x05, 0xaf, 0x44, 0x3b, 0x53, 0x50, 0x72, 0xdc, 0x56, 0x7f, 0x54, - 0x53, 0x3b, 0xf5, 0xfa, 0x06, 0x50, 0x99, 0x74, 0xa2, 0x87, 0x53, 0x61, 0xc6, 0xa4, 0x56, 0xdf, - 0xa9, 0xa7, 0x3c, 0x31, 0x51, 0x49, 0x47, 0xa7, 0x26, 0x9a, 0x23, 0xbc, 0x53, 0x13, 0x2d, 0x70, - 0xdc, 0x19, 0x74, 0x01, 0xbd, 0x22, 0x55, 0x45, 0xdb, 0x93, 0x7e, 0x3f, 0x95, 0x98, 0xb0, 0xfe, - 0xa0, 0x8e, 0x6a, 0xea, 0xcc, 0x84, 0x6e, 0x96, 0x50, 0xa2, 0x8a, 0x4b, 0x57, 0x41, 0x8d, 0xf5, - 0x7b, 0xd3, 0xd4, 0xb2, 0xd9, 0x14, 0x09, 0x66, 0x55, 0x36, 0x13, 0xd8, 0x6b, 0x55, 0x36, 0x93, - 0xf8, 0x2a, 0x9e, 0x41, 0x24, 0xe9, 0x3b, 0x49, 0xc3, 0xaa, 0xb2, 0xa9, 0x20, 0x7c, 0x55, 0xd9, - 0x54, 0xb1, 0x39, 0x3c, 0xf3, 0x91, 0x86, 0x7e, 0x82, 0xe5, 0x02, 0xef, 0x42, 0xc3, 0x9b, 0x62, - 0xcc, 0x32, 0x3a, 0x7d, 0xbb, 0x86, 0x66, 0xc6, 0xd7, 0x19, 0x2c, 0xe5, 0xe9, 0x0f, 0xba, 0x7f, - 0x13, 0x40, 0x86, 0xbb, 0xe9, 0xc3, 0xe9, 0x8a, 0x19, 0x47, 0xaf, 0xa1, 0x93, 0xe1, 0x3d, 0x55, - 0xf3, 0xa9, 0xcc, 0xa4, 0xf4, 0xbb, 0x53, 0xb4, 0xd2, 0xaa, 0x9c, 0xc0, 0x62, 0x8e, 0x09, 0xa1, - 0x89, 0xe7, 0x9d, 0xe7, 0x57, 0xfa, 0xfd, 0xa9, 0x7a, 0xd9, 0x7b, 0x9c, 0x25, 0x48, 0x93, 0x2b, - 0x9f, 0x9f, 0xb1, 0xf7, 0xa6, 0xa9, 0xa5, 0x0e, 0xce, 0x61, 0xb9, 0xc0, 0x59, 0xaa, 0xea, 0x5e, - 0xcd, 0xc5, 0xaa, 0xea, 0x3e, 0x89, 0x00, 0xcd, 0xa0, 0xb7, 0xb0, 0x5a, 0x45, 0x0f, 0xd0, 0xa3, - 0x2a, 
0x90, 0x89, 0x1c, 0x44, 0xdf, 0xad, 0xab, 0x9e, 0x3a, 0x7e, 0x09, 0xed, 0x84, 0x0b, 0xa1, - 0x3b, 0x65, 0xeb, 0x02, 0x2f, 0xd3, 0xf1, 0x4d, 0x2a, 0xe3, 0xcb, 0x75, 0x32, 0x27, 0xfe, 0x6d, - 0x7a, 0xfc, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0xe2, 0xb6, 0x81, 0x84, 0x12, 0x00, 0x00, + // 1169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0xdd, 0x6f, 0xdc, 0x44, + 0x10, 0x8f, 0x7b, 0x77, 0xf9, 0x98, 0xbb, 0xd0, 0x63, 0x93, 0x26, 0x57, 0xb7, 0x84, 0xeb, 0x42, + 0xcb, 0xa5, 0x4d, 0x03, 0xa4, 0x02, 0x5a, 0x9e, 0x80, 0x84, 0x8a, 0x3c, 0x94, 0x4a, 0x0e, 0xad, + 0x90, 0x40, 0xb2, 0x36, 0xf6, 0x26, 0xb1, 0xe2, 0xf3, 0xba, 0xde, 0x75, 0xda, 0xf0, 0xef, 0xf0, + 0xc2, 0x3b, 0x12, 0xff, 0x0e, 0x7f, 0x08, 0x2f, 0x68, 0x3f, 0xec, 0xf8, 0xeb, 0x7a, 0xe6, 0xe3, + 0x6d, 0x3d, 0x3b, 0xf3, 0x9b, 0x99, 0xdd, 0x99, 0xd9, 0x9f, 0x0c, 0x6b, 0x17, 0x2c, 0x4c, 0xa7, + 0xd4, 0xe5, 0x34, 0xb9, 0xa0, 0xc9, 0x6e, 0x9c, 0x30, 0xc1, 0xd0, 0xb0, 0x24, 0x74, 0xe3, 0x63, + 0xfc, 0x31, 0xa0, 0x6f, 0x88, 0xf0, 0xce, 0x0e, 0x68, 0x48, 0x05, 0x75, 0xe8, 0xab, 0x94, 0x72, + 0x81, 0x6e, 0xc2, 0xf2, 0x49, 0x10, 0x52, 0x37, 0xf0, 0xf9, 0xc8, 0x1a, 0x77, 0x26, 0x2b, 0xce, + 0x92, 0xfc, 0x3e, 0xf4, 0x39, 0x7e, 0x0e, 0x6b, 0x25, 0x03, 0x1e, 0xb3, 0x88, 0x53, 0xf4, 0x18, + 0x96, 0x12, 0xca, 0xd3, 0x50, 0x68, 0x83, 0xfe, 0xde, 0xd6, 0x6e, 0xd5, 0xd7, 0x6e, 0x6e, 0x92, + 0x86, 0xc2, 0xc9, 0xd4, 0x71, 0x00, 0x83, 0xe2, 0x06, 0xda, 0x84, 0x25, 0xe3, 0x7b, 0x64, 0x8d, + 0xad, 0xc9, 0x8a, 0xb3, 0xa8, 0x5d, 0xa3, 0x0d, 0x58, 0xe4, 0x82, 0x88, 0x94, 0x8f, 0xae, 0x8d, + 0xad, 0x49, 0xcf, 0x31, 0x5f, 0x68, 0x1d, 0x7a, 0x34, 0x49, 0x58, 0x32, 0xea, 0x28, 0x75, 0xfd, + 0x81, 0x10, 0x74, 0x79, 0xf0, 0x0b, 0x1d, 0x75, 0xc7, 0xd6, 0x64, 0xd5, 0x51, 0x6b, 0xbc, 0x04, + 0xbd, 0x6f, 0xa7, 0xb1, 0xb8, 0xc4, 0x5f, 0xc0, 0xe8, 0x25, 0xf1, 0xd2, 0x74, 0xfa, 0x52, 0xc5, + 0xb8, 0x7f, 0x46, 0xbd, 0xf3, 0x2c, 0xf7, 0x5b, 0xb0, 0x62, 0x22, 0x37, 0x11, 0xac, 0x3a, 0xcb, + 0x5a, 0x70, 0xe8, 0xe3, 0xaf, 0xe0, 0x66, 0x83, 0xa1, 0x39, 0x83, 0x0f, 0x60, 0xf5, 0x94, 0x24, + 0xc7, 0xe4, 0x94, 0xba, 0x09, 0x11, 0x01, 0x53, 0xd6, 0x96, 0x33, 0x30, 0x42, 0x47, 0xca, 0xf0, + 0x4f, 0x60, 0x97, 0x10, 0xd8, 0x34, 0x26, 0x9e, 0x68, 0xe3, 0x1c, 0x8d, 0xa1, 0x1f, 0x27, 0x94, + 0x84, 0x21, 0xf3, 0x88, 0xa0, 0xea, 0x14, 0x3a, 0x4e, 0x51, 0x84, 0xdf, 0x83, 0x5b, 0x8d, 0xe0, + 0x3a, 0x40, 0xfc, 0xb8, 0x12, 0x3d, 0x9b, 0x4e, 0x83, 0x56, 0xae, 0xf1, 0xed, 0x5a, 0xd4, 0xca, + 0xd2, 0xe0, 0x3e, 0xa9, 0xec, 0x86, 0x94, 0x44, 0x69, 0xdc, 0x0a, 0xb8, 0x1a, 0x71, 0x66, 0x9a, + 0x23, 0x6f, 0xea, 0xe2, 0xd8, 0x67, 0x61, 0x48, 0x3d, 0x11, 0xb0, 0x28, 0x83, 0xdd, 0x02, 0xf0, + 0x72, 0xa1, 0x29, 0x95, 0x82, 0x04, 0xdb, 0x30, 0xaa, 0x9b, 0x1a, 0xd8, 0xdf, 0x2c, 0x58, 0xfb, + 0x9a, 0xf3, 0xe0, 0x34, 0xd2, 0x6e, 0x5b, 0x1d, 0x7f, 0xd9, 0xe1, 0xb5, 0xaa, 0xc3, 0xea, 0xf5, + 0x74, 0x6a, 0xd7, 0x23, 0x35, 0x12, 0x1a, 0x87, 0x81, 0x47, 0x14, 0x44, 0x57, 0x41, 0x14, 0x45, + 0x68, 0x08, 0x1d, 0x21, 0xc2, 0x51, 0x4f, 0xed, 0xc8, 0x25, 0xde, 0x80, 0xf5, 0x72, 0xa4, 0x26, + 0x85, 0xcf, 0x61, 0x53, 0x4b, 0x8e, 0x2e, 0x23, 0xef, 0x48, 0x75, 0x42, 0xab, 0x03, 0xff, 0xcb, + 0x82, 0x51, 0xdd, 0xd0, 0x54, 0xf0, 0x7f, 0xcd, 0xff, 0x9f, 0x66, 0x87, 0xde, 0x87, 0xbe, 0x20, + 0x41, 0xe8, 0xb2, 0x93, 0x13, 0x4e, 0xc5, 0x68, 0x71, 0x6c, 0x4d, 0xba, 0x0e, 0x48, 0xd1, 0x73, + 0x25, 0x41, 0xdb, 0x30, 0xf4, 0x74, 0x15, 0xbb, 0x09, 0xbd, 0x08, 0xb8, 0x44, 0x5e, 0x52, 0x81, + 0x5d, 0xf7, 0xb2, 0xea, 0xd6, 0x62, 0x84, 0x61, 0x35, 0xf0, 0xdf, 0xb8, 0x6a, 
0x78, 0xa8, 0xd6, + 0x5f, 0x56, 0x68, 0xfd, 0xc0, 0x7f, 0xf3, 0x34, 0x08, 0xe9, 0x91, 0x9c, 0x00, 0xdf, 0xc1, 0x9a, + 0x4e, 0xfe, 0x29, 0x0b, 0x43, 0xf6, 0xba, 0xd5, 0xbd, 0xaf, 0x43, 0x8f, 0x07, 0x91, 0xa7, 0x1b, + 0xae, 0xeb, 0xe8, 0x0f, 0xfc, 0x04, 0xd6, 0xcb, 0x48, 0xe6, 0x08, 0xef, 0xc0, 0x40, 0x45, 0xe0, + 0xb1, 0x48, 0xd0, 0x48, 0x28, 0xb4, 0x81, 0xd3, 0x97, 0xb2, 0x7d, 0x2d, 0xc2, 0x9f, 0x02, 0xd2, + 0xa6, 0xcf, 0x58, 0x1a, 0xb5, 0xeb, 0xbf, 0x1b, 0x59, 0xdc, 0xc6, 0xc4, 0x14, 0xc1, 0xa3, 0x2c, + 0x88, 0x17, 0xd1, 0xb4, 0x35, 0xd6, 0x26, 0xdc, 0xa8, 0x18, 0x19, 0xb4, 0xbd, 0xcc, 0x49, 0xf9, + 0x31, 0x78, 0x2b, 0xd8, 0x46, 0x16, 0x41, 0xf9, 0x3d, 0xc0, 0xbf, 0x5b, 0xb0, 0xe1, 0x98, 0xab, + 0xa7, 0xff, 0x6f, 0x93, 0x15, 0x8b, 0xac, 0x33, 0xb3, 0xc8, 0xba, 0x57, 0x45, 0x36, 0x81, 0x21, + 0x67, 0x69, 0xe2, 0x51, 0xd7, 0x27, 0x82, 0xb8, 0x11, 0xf3, 0xa9, 0xa9, 0xc1, 0x77, 0xb4, 0xfc, + 0x80, 0x08, 0xf2, 0x3d, 0xf3, 0x29, 0xbe, 0x09, 0x9b, 0xb5, 0xa0, 0x4d, 0x42, 0x11, 0x5c, 0xdf, + 0x67, 0xf1, 0xa5, 0xac, 0xa4, 0x96, 0x89, 0xf4, 0x03, 0xee, 0x66, 0x05, 0xa9, 0x32, 0x59, 0x76, + 0x56, 0x02, 0x7e, 0xa8, 0xab, 0xd1, 0xec, 0xfb, 0x44, 0xe8, 0xfd, 0x4e, 0xb6, 0x7f, 0x40, 0x84, + 0xdc, 0xc7, 0x9f, 0xc1, 0xf0, 0xca, 0x5f, 0xfb, 0xda, 0xfa, 0x12, 0x6e, 0x39, 0x94, 0xf8, 0xa6, + 0x34, 0x65, 0xd9, 0xb7, 0x1f, 0x0d, 0x7f, 0x5a, 0x70, 0xbb, 0xd9, 0xb8, 0xcd, 0x78, 0xd8, 0x01, + 0x94, 0xb7, 0x9f, 0x08, 0xa6, 0x94, 0x0b, 0x32, 0x8d, 0x4d, 0xcf, 0x0c, 0x4d, 0x0f, 0xfe, 0x90, + 0xc9, 0xeb, 0xcd, 0xda, 0xa9, 0x35, 0xab, 0x44, 0xcc, 0xce, 0xa7, 0x80, 0xd8, 0xd5, 0x88, 0xbe, + 0x3e, 0xa7, 0x12, 0x62, 0xae, 0xad, 0x10, 0x7b, 0x1a, 0xd1, 0x28, 0xaa, 0xf6, 0xff, 0x11, 0xe0, + 0x20, 0xe0, 0xe7, 0x3a, 0x2d, 0x59, 0x29, 0x7e, 0x90, 0x98, 0xa7, 0x43, 0x2e, 0xa5, 0x84, 0x84, + 0xa1, 0x09, 0x5a, 0x2e, 0x25, 0x8d, 0x48, 0x39, 0xf5, 0x4d, 0x78, 0x6a, 0x2d, 0x65, 0x27, 0x09, + 0xa5, 0x26, 0x12, 0xb5, 0xc6, 0xbf, 0x5a, 0xb0, 0xf2, 0x8c, 0x4e, 0x0d, 0xf2, 0x16, 0xc0, 0x29, + 0x4b, 0x58, 0x2a, 0x82, 0x88, 0x72, 0xe5, 0xa0, 0xe7, 0x14, 0x24, 0xff, 0xde, 0x8f, 0xa2, 0x35, + 0x34, 0x3c, 0x31, 0xc9, 0xa9, 0xb5, 0x94, 0x9d, 0x51, 0x12, 0x9b, 0xe9, 0xa9, 0xd6, 0x6a, 0x68, + 0x09, 0xe2, 0x9d, 0xab, 0x61, 0x29, 0x87, 0x96, 0xfc, 0xd8, 0xfb, 0x63, 0x00, 0x03, 0x33, 0xfc, + 0x15, 0x2b, 0x43, 0x3f, 0x43, 0xbf, 0xc0, 0xe6, 0xd0, 0x87, 0x75, 0xd2, 0x56, 0x67, 0x87, 0xf6, + 0xdd, 0x39, 0x5a, 0xa6, 0x63, 0x16, 0x50, 0x04, 0xef, 0xd6, 0xd8, 0x12, 0xba, 0x5f, 0xb7, 0x9e, + 0xc5, 0xc5, 0xec, 0x07, 0xad, 0x74, 0x73, 0x7f, 0x02, 0xd6, 0x1a, 0xe8, 0x0f, 0xda, 0x99, 0x83, + 0x52, 0xa2, 0x60, 0xf6, 0xc3, 0x96, 0xda, 0xb9, 0xd7, 0x57, 0x80, 0xea, 0xdc, 0x08, 0x3d, 0x98, + 0x0b, 0x73, 0xc5, 0xbd, 0xec, 0x9d, 0x76, 0xca, 0x33, 0x13, 0xd5, 0xac, 0x69, 0x6e, 0xa2, 0x25, + 0x5e, 0x36, 0x37, 0xd1, 0x0a, 0x15, 0x5b, 0x40, 0xe7, 0x30, 0xac, 0x32, 0x2a, 0xb4, 0x3d, 0x8b, + 0xe6, 0xd7, 0x08, 0x9b, 0x7d, 0xbf, 0x8d, 0x6a, 0xee, 0xcc, 0x85, 0x41, 0x91, 0xf7, 0xa0, 0x86, + 0xa2, 0x6b, 0x60, 0x70, 0xf6, 0xbd, 0x79, 0x6a, 0xc5, 0x6c, 0xaa, 0x3c, 0xa8, 0x29, 0x9b, 0x19, + 0x24, 0xab, 0x29, 0x9b, 0x59, 0xb4, 0x0a, 0x2f, 0x20, 0x92, 0xf5, 0x9d, 0x66, 0x0b, 0x4d, 0xd9, + 0x34, 0xf0, 0x92, 0xa6, 0x6c, 0x9a, 0x48, 0x07, 0x5e, 0xf8, 0xc4, 0x92, 0xad, 0x5c, 0xa0, 0x08, + 0x4d, 0xad, 0x5c, 0x27, 0x1d, 0xf6, 0xdd, 0x39, 0x5a, 0x79, 0x02, 0xc7, 0xb0, 0x5a, 0x22, 0x0d, + 0x68, 0x66, 0x68, 0x65, 0x2a, 0x62, 0x7f, 0x34, 0x57, 0xaf, 0x78, 0xe5, 0x45, 0x2e, 0x31, 0xfb, + 0x90, 0xca, 0xe3, 0xe8, 0xde, 0x3c, 0xb5, 0xdc, 0xc1, 0x19, 0x5c, 0xaf, 0x3c, 0xef, 0x68, 0x52, + 0x37, 
0x6e, 0xa6, 0x2d, 0xf6, 0x76, 0x0b, 0xcd, 0xdc, 0xd3, 0x6b, 0x58, 0x6f, 0x7a, 0x49, 0xd1, + 0xc3, 0x26, 0x90, 0x99, 0xcf, 0xb5, 0xbd, 0xdb, 0x56, 0x3d, 0x77, 0xfc, 0x02, 0x96, 0x33, 0xda, + 0x80, 0xee, 0xd4, 0xad, 0x2b, 0x14, 0xc6, 0xc6, 0x6f, 0x53, 0xb9, 0x2a, 0xae, 0xe3, 0x45, 0xf5, + 0xff, 0xe0, 0xd1, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13, 0x86, 0xf1, 0x0f, 0x56, 0x10, 0x00, + 0x00, } diff --git a/weed/server/volume_grpc_sync.go b/weed/server/volume_grpc_sync.go index 971258689..8b7b5934c 100644 --- a/weed/server/volume_grpc_sync.go +++ b/weed/server/volume_grpc_sync.go @@ -7,7 +7,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/chrislusf/seaweedfs/weed/storage/types" ) func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { @@ -24,78 +23,3 @@ func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server return resp, nil } - -func (vs *VolumeServer) VolumeSyncIndex(req *volume_server_pb.VolumeSyncIndexRequest, stream volume_server_pb.VolumeServer_VolumeSyncIndexServer) error { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) - if v == nil { - return fmt.Errorf("not found volume id %d", req.VolumeId) - } - - content, err := v.IndexFileContent() - - if err != nil { - glog.Errorf("sync volume %d index: %v", req.VolumeId, err) - } else { - glog.V(2).Infof("sync volume %d index", req.VolumeId) - } - - const blockSizeLimit = 1024 * 1024 * 2 - for i := 0; i < len(content); i += blockSizeLimit { - blockSize := len(content) - i - if blockSize > blockSizeLimit { - blockSize = blockSizeLimit - } - resp := &volume_server_pb.VolumeSyncIndexResponse{} - resp.IndexFileContent = content[i : i+blockSize] - stream.Send(resp) - } - - return nil - -} - -func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataRequest, stream volume_server_pb.VolumeServer_VolumeSyncDataServer) error { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) - if v == nil { - return fmt.Errorf("not found volume id %d", req.VolumeId) - } - - if uint32(v.SuperBlock.CompactRevision) != req.Revision { - return fmt.Errorf("requested volume revision is %d, but current revision is %d", req.Revision, v.SuperBlock.CompactRevision) - } - - content, err := storage.ReadNeedleBlob(v.DataFile(), int64(req.Offset)*types.NeedlePaddingSize, req.Size, v.Version()) - if err != nil { - return fmt.Errorf("read offset:%d size:%d", req.Offset, req.Size) - } - - id, err := types.ParseNeedleId(req.NeedleId) - if err != nil { - return fmt.Errorf("parsing needle id %s: %v", req.NeedleId, err) - } - n := new(storage.Needle) - n.ParseNeedleHeader(content) - if id != n.Id { - return fmt.Errorf("expected file entry id %d, but found %d", id, n.Id) - } - - if err != nil { - glog.Errorf("sync volume %d data: %v", req.VolumeId, err) - } - - const blockSizeLimit = 1024 * 1024 * 2 - for i := 0; i < len(content); i += blockSizeLimit { - blockSize := len(content) - i - if blockSize > blockSizeLimit { - blockSize = blockSizeLimit - } - resp := &volume_server_pb.VolumeSyncDataResponse{} - resp.FileContent = content[i : i+blockSize] - stream.Send(resp) - } - - return nil - -} diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go index 7e5432417..199561302 100644 --- a/weed/storage/volume_sync.go +++ b/weed/storage/volume_sync.go @@ -1,160 +1,9 @@ package storage 
import ( - "context" - "fmt" - "google.golang.org/grpc" - "io" - "os" - "sort" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage/needle" - . "github.com/chrislusf/seaweedfs/weed/storage/types" ) -// The volume sync with a master volume via 2 steps: -// 1. The slave checks master side to find subscription checkpoint -// to setup the replication. -// 2. The slave receives the updates from master - -/* -Assume the slave volume needs to follow the master volume. - -The master volume could be compacted, and could be many files ahead of -slave volume. - -Step 0: -If slave compact version is less than the master, do a local compaction. -If the slave size is still less than the master, discard local copy and do a full copy. - -Step 1: -The slave volume ask the master by the last modification time t. -The master do a binary search in volume (use .idx as an array, and check the appendAtNs in .dat file), -to find the first entry with appendAtNs > t. - -Step 2: -The master iterate following entries (including the first one) and send it to the follower. - -*/ - -func (v *Volume) Synchronize(volumeServer string, grpcDialOption grpc.DialOption) (err error) { - var lastCompactRevision uint16 = 0 - var compactRevision uint16 = 0 - var masterMap *needle.CompactMap - for i := 0; i < 3; i++ { - if masterMap, _, compactRevision, err = fetchVolumeFileEntries(volumeServer, grpcDialOption, v.Id); err != nil { - return fmt.Errorf("Failed to sync volume %d entries with %s: %v", v.Id, volumeServer, err) - } - if lastCompactRevision != compactRevision && lastCompactRevision != 0 { - if err = v.Compact(0); err != nil { - return fmt.Errorf("Compact Volume before synchronizing %v", err) - } - if err = v.CommitCompact(); err != nil { - return fmt.Errorf("Commit Compact before synchronizing %v", err) - } - } - lastCompactRevision = compactRevision - if err = v.trySynchronizing(volumeServer, grpcDialOption, masterMap, compactRevision); err == nil { - return - } - } - return -} - -type ByOffset []needle.NeedleValue - -func (a ByOffset) Len() int { return len(a) } -func (a ByOffset) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByOffset) Less(i, j int) bool { return a[i].Offset < a[j].Offset } - -// trySynchronizing sync with remote volume server incrementally by -// make up the local and remote delta. 
-func (v *Volume) trySynchronizing(volumeServer string, grpcDialOption grpc.DialOption, masterMap *needle.CompactMap, compactRevision uint16) error { - slaveIdxFile, err := os.Open(v.nm.IndexFileName()) - if err != nil { - return fmt.Errorf("Open volume %d index file: %v", v.Id, err) - } - defer slaveIdxFile.Close() - slaveMap, err := LoadBtreeNeedleMap(slaveIdxFile) - if err != nil { - return fmt.Errorf("Load volume %d index file: %v", v.Id, err) - } - var delta []needle.NeedleValue - if err := masterMap.Visit(func(needleValue needle.NeedleValue) error { - if needleValue.Key == NeedleIdEmpty { - return nil - } - if _, ok := slaveMap.Get(needleValue.Key); ok { - return nil // skip intersection - } - delta = append(delta, needleValue) - return nil - }); err != nil { - return fmt.Errorf("Add master entry: %v", err) - } - if err := slaveMap.m.Visit(func(needleValue needle.NeedleValue) error { - if needleValue.Key == NeedleIdEmpty { - return nil - } - if _, ok := masterMap.Get(needleValue.Key); ok { - return nil // skip intersection - } - needleValue.Size = 0 - delta = append(delta, needleValue) - return nil - }); err != nil { - return fmt.Errorf("Remove local entry: %v", err) - } - - // simulate to same ordering of remote .dat file needle entries - sort.Sort(ByOffset(delta)) - - // make up the delta - fetchCount := 0 - for _, needleValue := range delta { - if needleValue.Size == 0 { - // remove file entry from local - v.removeNeedle(needleValue.Key) - continue - } - // add master file entry to local data file - if err := v.fetchNeedle(volumeServer, grpcDialOption, needleValue, compactRevision); err != nil { - glog.V(0).Infof("Fetch needle %v from %s: %v", needleValue, volumeServer, err) - return err - } - fetchCount++ - } - glog.V(1).Infof("Fetched %d needles from %s", fetchCount, volumeServer) - return nil -} - -func fetchVolumeFileEntries(volumeServer string, grpcDialOption grpc.DialOption, vid VolumeId) (m *needle.CompactMap, lastOffset uint64, compactRevision uint16, err error) { - m = needle.NewCompactMap() - - syncStatus, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid)) - if err != nil { - return m, 0, 0, err - } - - total := 0 - err = operation.GetVolumeIdxEntries(volumeServer, grpcDialOption, uint32(vid), func(key NeedleId, offset Offset, size uint32) { - // println("remote key", key, "offset", offset*NeedlePaddingSize, "size", size) - if offset > 0 && size != TombstoneFileSize { - m.Set(NeedleId(key), offset, size) - } else { - m.Delete(NeedleId(key)) - } - total++ - }) - - glog.V(2).Infof("server %s volume %d, entries %d, last offset %d, revision %d", volumeServer, vid, total, syncStatus.TailOffset, syncStatus.CompactRevision) - return m, syncStatus.TailOffset, uint16(syncStatus.CompactRevision), err - -} - func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse { var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{} if stat, err := v.dataFile.Stat(); err == nil { @@ -166,54 +15,4 @@ func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusRespons syncStatus.Ttl = v.SuperBlock.Ttl.String() syncStatus.Replication = v.SuperBlock.ReplicaPlacement.String() return syncStatus -} - -func (v *Volume) IndexFileContent() ([]byte, error) { - return v.nm.IndexFileContent() -} - -// removeNeedle removes one needle by needle key -func (v *Volume) removeNeedle(key NeedleId) { - n := new(Needle) - n.Id = key - v.deleteNeedle(n) -} - -// fetchNeedle fetches a remote volume needle by vid, id, offset -// The compact 
revision is checked first in case the remote volume -// is compacted and the offset is invalid any more. -func (v *Volume) fetchNeedle(volumeServer string, grpcDialOption grpc.DialOption, needleValue needle.NeedleValue, compactRevision uint16) error { - - return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeSyncData(context.Background(), &volume_server_pb.VolumeSyncDataRequest{ - VolumeId: uint32(v.Id), - Revision: uint32(compactRevision), - Offset: uint32(needleValue.Offset), - Size: uint32(needleValue.Size), - NeedleId: needleValue.Key.String(), - }) - if err != nil { - return err - } - var fileContent []byte - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("read needle %v: %v", needleValue.Key.String(), err) - } - fileContent = append(fileContent, resp.FileContent...) - } - - offset, err := v.AppendBlob(fileContent) - if err != nil { - return fmt.Errorf("Appending volume %d error: %v", v.Id, err) - } - // println("add key", needleValue.Key, "offset", offset, "size", needleValue.Size) - v.nm.Put(needleValue.Key, Offset(offset/NeedlePaddingSize), needleValue.Size) - return nil - }) - -} +} \ No newline at end of file From 7a14cdc90cc673286e501b900ec28b7f186f2cfe Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 25 Mar 2019 23:18:40 -0700 Subject: [PATCH 102/450] refactoring, go fmt --- weed/command/backup.go | 2 +- weed/server/volume_grpc_follow.go | 14 ++++++++++++++ weed/server/volume_grpc_replicate.go | 2 +- weed/server/volume_grpc_sync.go | 25 ------------------------- weed/shell/command_fs_du.go | 6 +++--- weed/storage/volume_follow.go | 17 +++++++++++++++-- weed/storage/volume_sync.go | 18 ------------------ 7 files changed, 34 insertions(+), 50 deletions(-) delete mode 100644 weed/server/volume_grpc_sync.go delete mode 100644 weed/storage/volume_sync.go diff --git a/weed/command/backup.go b/weed/command/backup.go index 9c0bcbc52..48e2eba89 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -98,7 +98,7 @@ func runBackup(cmd *Command, args []string) bool { return true } v.SuperBlock.CompactRevision = uint16(stats.CompactRevision) - v.DataFile().WriteAt(v.SuperBlock.Bytes(),0) + v.DataFile().WriteAt(v.SuperBlock.Bytes(), 0) } if uint64(v.Size()) > stats.TailOffset { diff --git a/weed/server/volume_grpc_follow.go b/weed/server/volume_grpc_follow.go index 6b3330a0d..cc5dcc78e 100644 --- a/weed/server/volume_grpc_follow.go +++ b/weed/server/volume_grpc_follow.go @@ -1,6 +1,7 @@ package weed_server import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/storage/types" "io" @@ -34,6 +35,19 @@ func (vs *VolumeServer) VolumeFollow(req *volume_server_pb.VolumeFollowRequest, } +func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { + + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp := v.GetVolumeSyncStatus() + + return resp, nil + +} + func sendFileContent(datFile *os.File, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeFollowServer) error { var blockSizeLimit = int64(len(buf)) for i := int64(0); i < stopOffset-startOffset; i += blockSizeLimit { diff --git a/weed/server/volume_grpc_replicate.go b/weed/server/volume_grpc_replicate.go index 20a85fd6f..1a31a37f3 
100644 --- a/weed/server/volume_grpc_replicate.go +++ b/weed/server/volume_grpc_replicate.go @@ -113,7 +113,7 @@ func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_se return resp, nil } -func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) (error) { +func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) error { v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) if v == nil { diff --git a/weed/server/volume_grpc_sync.go b/weed/server/volume_grpc_sync.go deleted file mode 100644 index 8b7b5934c..000000000 --- a/weed/server/volume_grpc_sync.go +++ /dev/null @@ -1,25 +0,0 @@ -package weed_server - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" -) - -func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) - if v == nil { - return nil, fmt.Errorf("not found volume id %d", req.VolumeId) - } - - resp := v.GetVolumeSyncStatus() - - glog.V(2).Infof("volume sync status %d", req.VolumeId) - - return resp, nil - -} diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 1206596b0..3fecac9a8 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -42,10 +42,10 @@ func (c *commandFsDu) Do(args []string, commandEnv *commandEnv, writer io.Writer dir, name := filer2.FullPath(path).DirAndName() if strings.HasSuffix(path, "/") { - if path == "/"{ + if path == "/" { dir, name = "/", "" - }else{ - dir, name = path[0 : len(path)-1], "" + } else { + dir, name = path[0:len(path)-1], "" } } diff --git a/weed/storage/volume_follow.go b/weed/storage/volume_follow.go index e1a5fcb83..b8353f9d1 100644 --- a/weed/storage/volume_follow.go +++ b/weed/storage/volume_follow.go @@ -11,6 +11,19 @@ import ( "os" ) +func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse { + var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{} + if stat, err := v.dataFile.Stat(); err == nil { + syncStatus.TailOffset = uint64(stat.Size()) + } + syncStatus.Collection = v.Collection + syncStatus.IdxFileSize = v.nm.IndexFileSize() + syncStatus.CompactRevision = uint32(v.SuperBlock.CompactRevision) + syncStatus.Ttl = v.SuperBlock.Ttl.String() + syncStatus.Replication = v.SuperBlock.ReplicaPlacement.String() + return syncStatus +} + // The volume sync with a master volume via 2 steps: // 1. The slave checks master side to find subscription checkpoint // to setup the replication. @@ -41,7 +54,7 @@ update needle map when receiving new .dat bytes. But seems not necessary now.) 
*/ -func (v *Volume) Follow(volumeServer string, grpcDialOption grpc.DialOption) (error) { +func (v *Volume) Follow(volumeServer string, grpcDialOption grpc.DialOption) error { ctx := context.Background() @@ -88,7 +101,7 @@ func (v *Volume) Follow(volumeServer string, grpcDialOption grpc.DialOption) (er } // add to needle map - return ScanVolumeFileFrom(v.version, v.dataFile, startFromOffset, &VolumeFileScanner4GenIdx{v:v}) + return ScanVolumeFileFrom(v.version, v.dataFile, startFromOffset, &VolumeFileScanner4GenIdx{v: v}) } diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go deleted file mode 100644 index 199561302..000000000 --- a/weed/storage/volume_sync.go +++ /dev/null @@ -1,18 +0,0 @@ -package storage - -import ( - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" -) - -func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse { - var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{} - if stat, err := v.dataFile.Stat(); err == nil { - syncStatus.TailOffset = uint64(stat.Size()) - } - syncStatus.Collection = v.Collection - syncStatus.IdxFileSize = v.nm.IndexFileSize() - syncStatus.CompactRevision = uint32(v.SuperBlock.CompactRevision) - syncStatus.Ttl = v.SuperBlock.Ttl.String() - syncStatus.Replication = v.SuperBlock.ReplicaPlacement.String() - return syncStatus -} \ No newline at end of file From 37ce4c5269a61ada46a99337d1d1f7657a83198b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 26 Mar 2019 12:43:51 -0700 Subject: [PATCH 103/450] weed shell: add fs.ls --- weed/shell/command_fs_ls.go | 141 ++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 weed/shell/command_fs_ls.go diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go new file mode 100644 index 000000000..6ad96f5ac --- /dev/null +++ b/weed/shell/command_fs_ls.go @@ -0,0 +1,141 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "io" + "os" + "os/user" + "strconv" + "strings" +) + +func init() { + commands = append(commands, &commandFsLs{}) +} + +type commandFsLs struct { +} + +func (c *commandFsLs) Name() string { + return "fs.ls" +} + +func (c *commandFsLs) Help() string { + return `[-l] [-a] list all files under a directory + + fs.ls http://:/dir/ + fs.ls http://:/dir/file_name + fs.ls http://:/dir/file_prefix +` +} + +func (c *commandFsLs) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + var isLongFormat, showHidden bool + for _, arg := range args { + switch arg { + case "-a": + showHidden = true + case "-l": + isLongFormat = true + } + } + + filerServer, filerPort, path, err := parseFilerUrl(args[len(args)-1]) + if err != nil { + return err + } + + dir, name := filer2.FullPath(path).DirAndName() + if strings.HasSuffix(path, "/") { + if path == "/" { + dir, name = "/", "" + } else { + dir, name = path[0 : len(path)-1], "" + } + } + + ctx := context.Background() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + return paginateOneDirectory(ctx, writer, client, dir, name, 1000, isLongFormat, showHidden) + + }) + +} + +func paginateOneDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int, isLongFormat, showHidden bool) (err error) { + + entryCount := 0 + paginatedCount := -1 + startFromFileName := "" + + for paginatedCount == -1 || paginatedCount == 
paginateSize { + resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: name, + StartFromFileName: startFromFileName, + InclusiveStartFrom: false, + Limit: uint32(paginateSize), + }) + if listErr != nil { + err = listErr + return + } + + paginatedCount = len(resp.Entries) + + for _, entry := range resp.Entries { + + if !showHidden && strings.HasPrefix(entry.Name, ".") { + continue + } + + entryCount++ + + if isLongFormat { + fileMode := os.FileMode(entry.Attributes.FileMode) + userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName + if userName == "" { + if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { + userName = user.Username + } + } + groupName := "" + if len(groupNames) > 0 { + groupName = groupNames[0] + } + if groupName == "" { + if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { + groupName = group.Name + } + } + + if dir == "/" { + // just for printing + dir = "" + } + fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", + fileMode, len(entry.Chunks), + userName, groupName, + filer2.TotalSize(entry.Chunks), dir, entry.Name) + } else { + fmt.Fprintf(writer, "%s\n", entry.Name) + } + + startFromFileName = entry.Name + + } + } + + if isLongFormat { + fmt.Fprintf(writer, "total %d\n", entryCount) + } + + return + +} From 65757ae2fd41ca74a173e2b6d1716343517e9bbb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 27 Mar 2019 11:41:11 -0700 Subject: [PATCH 104/450] weed filer: set content-type consistent with filer store --- weed/server/filer_server_handlers_read.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 89d47b0b8..63fd2cc39 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -111,13 +111,16 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, for k, v := range resp.Header { w.Header()[k] = v } + if entry.Attr.Mime != "" { + w.Header().Set("Content-Type", entry.Attr.Mime) + } w.WriteHeader(resp.StatusCode) io.Copy(w, resp.Body) } func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - mimeType := entry.Mime + mimeType := entry.Attr.Mime if mimeType == "" { if ext := path.Ext(entry.Name()); ext != "" { mimeType = mime.TypeByExtension(ext) From c7e7b6229f16edb8a0adbcf85d21bed9e44d8109 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 27 Mar 2019 14:25:18 -0700 Subject: [PATCH 105/450] detect mime type --- weed/filesys/filehandle.go | 12 ++++++++++-- weed/server/filer_server_handlers_write.go | 5 +++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 2c2e041e7..e87e0608e 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -8,10 +8,12 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" + "github.com/gabriel-vasile/mimetype" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" "google.golang.org/grpc" - "net/http" + "mime" + "path" "strings" "sync" "time" @@ -154,7 +156,13 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f resp.Size = len(req.Data) if req.Offset == 0 { - fh.contentType = http.DetectContentType(req.Data) + // detect mime type + var 
possibleExt string + fh.contentType, possibleExt = mimetype.Detect(req.Data) + if ext := path.Ext(fh.f.Name); ext != possibleExt { + fh.contentType = mime.TypeByExtension(ext) + } + fh.dirtyMetadata = true } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index f20212cc2..cb30ff4c0 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -5,9 +5,11 @@ import ( "encoding/json" "errors" "io/ioutil" + "mime" "net/http" "net/url" "os" + filenamePath "path" "strconv" "strings" "time" @@ -195,6 +197,9 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { ETag: etag, }}, } + if ext := filenamePath.Ext(path); ext != "" { + entry.Attr.Mime = mime.TypeByExtension(ext) + } // glog.V(4).Infof("saving %s => %+v", path, entry) if db_err := fs.filer.CreateEntry(ctx, entry); db_err != nil { fs.filer.DeleteFileByFileId(fileId) From 53b81fcfcd26965262947423102e0d3f60a960b6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 28 Mar 2019 00:05:04 -0700 Subject: [PATCH 106/450] weed shell: add fs.tree --- weed/shell/command_fs_ls.go | 8 +-- weed/shell/command_fs_tree.go | 132 ++++++++++++++++++++++++++++++++++ 2 files changed, 136 insertions(+), 4 deletions(-) create mode 100644 weed/shell/command_fs_tree.go diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 6ad96f5ac..39f356916 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -24,11 +24,11 @@ func (c *commandFsLs) Name() string { } func (c *commandFsLs) Help() string { - return `[-l] [-a] list all files under a directory + return `list all files under a directory - fs.ls http://:/dir/ - fs.ls http://:/dir/file_name - fs.ls http://:/dir/file_prefix + fs.ls [-l] [-a] http://:/dir/ + fs.ls [-l] [-a] http://:/dir/file_name + fs.ls [-l] [-a] http://:/dir/file_prefix ` } diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go new file mode 100644 index 000000000..1f0f2935f --- /dev/null +++ b/weed/shell/command_fs_tree.go @@ -0,0 +1,132 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "io" + "strings" +) + +func init() { + commands = append(commands, &commandFsTree{}) +} + +type commandFsTree struct { +} + +func (c *commandFsTree) Name() string { + return "fs.tree" +} + +func (c *commandFsTree) Help() string { + return `recursively list all files under a directory + + fs.tree http://:/dir/ +` +} + +func (c *commandFsTree) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + filerServer, filerPort, path, err := parseFilerUrl(args[len(args)-1]) + if err != nil { + return err + } + + dir, name := filer2.FullPath(path).DirAndName() + if strings.HasSuffix(path, "/") { + if path == "/" { + dir, name = "/", "" + } else { + dir, name = path[0 : len(path)-1], "" + } + } + + ctx := context.Background() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + return treeTraverseDirectory(ctx, writer, client, dir, name, 1000, newPrefix(), 0) + + }) + +} +func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int, prefix *Prefix, level int) (err error) { + + paginatedCount := -1 + startFromFileName := "" + + for paginatedCount == -1 || paginatedCount == paginateSize { + resp, listErr := client.ListEntries(ctx, 
&filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: name, + StartFromFileName: startFromFileName, + InclusiveStartFrom: false, + Limit: uint32(paginateSize), + }) + if listErr != nil { + err = listErr + return + } + + paginatedCount = len(resp.Entries) + if paginatedCount > 0 { + prefix.addMarker(level) + } + + for i, entry := range resp.Entries { + + isLast := paginatedCount < paginateSize && i == paginatedCount-1 + fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name + } + err = treeTraverseDirectory(ctx, writer, client, subDir, "", paginateSize, prefix, level+1) + } else { + } + startFromFileName = entry.Name + + } + } + + return + +} + +type Prefix struct { + markers map[int]bool +} + +func newPrefix() *Prefix { + return &Prefix{ + markers: make(map[int]bool), + } +} +func (p *Prefix) addMarker(marker int) { + p.markers[marker] = true +} +func (p *Prefix) removeMarker(marker int) { + delete(p.markers, marker) +} +func (p *Prefix) getPrefix(level int, isLastChild bool) string { + var sb strings.Builder + for i := 0; i < level; i++ { + if _, ok := p.markers[i]; ok { + sb.WriteString("│") + } else { + sb.WriteString(" ") + } + sb.WriteString(" ") + } + if isLastChild { + sb.WriteString("└──") + p.removeMarker(level); + } else { + sb.WriteString("├──") + } + return sb.String() +} From 07775998a9af92e87f02da058c7cc47c3965ed95 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 28 Mar 2019 00:38:30 -0700 Subject: [PATCH 107/450] some comments --- weed/shell/command_fs_tree.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 1f0f2935f..019616627 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -76,7 +76,7 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_p } for i, entry := range resp.Entries { - + // 0.1% wrong prefix here, but fixing it would need to paginate to the next batch first isLast := paginatedCount < paginateSize && i == paginatedCount-1 fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) From 487ca514915fd6db3f05ef5f2d5cd558a42a9e27 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 28 Mar 2019 00:46:15 -0700 Subject: [PATCH 108/450] weed filer.export: fix pagination error --- weed/command/filer_export.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/command/filer_export.go b/weed/command/filer_export.go index 396d0d71f..ed1ee8966 100644 --- a/weed/command/filer_export.go +++ b/weed/command/filer_export.go @@ -158,6 +158,7 @@ func doTraverse(ctx context.Context, stat *statistics, filerStore filer2.FilerSt } else { stat.fileCount++ } + lastEntryName = entry.Name() } if len(entries) < limit { break From f60d383947a18640135066a4b8829941f4475089 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 29 Mar 2019 22:50:14 -0700 Subject: [PATCH 109/450] weed filer.copy: support multiple masters --- weed/command/filer_copy.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index a852ca773..dd763974c 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/wdclient" "github.com/spf13/viper" 
"google.golang.org/grpc" "io/ioutil" @@ -35,6 +36,7 @@ type CopyOptions struct { ttl *string maxMB *int grpcDialOption grpc.DialOption + masterClient *wdclient.MasterClient } func init() { @@ -105,6 +107,10 @@ func runCopy(cmd *Command, args []string) bool { filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + copy.masterClient = wdclient.NewMasterClient(context.Background(), copy.grpcDialOption, "client", strings.Split(*copy.master, ",")) + go copy.masterClient.KeepConnectedToMaster() + copy.masterClient.WaitUntilConnected() + for _, fileOrDir := range fileOrDirs { if !doEachCopy(context.Background(), fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) { return false @@ -170,7 +176,7 @@ func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, if fi.Size() > 0 { // assign a volume - assignResult, err := operation.Assign(*copy.master, grpcDialOption, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(copy.masterClient.GetMaster(), grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, Replication: *copy.replication, Collection: *copy.collection, @@ -247,7 +253,7 @@ func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress stri for i := int64(0); i < int64(chunkCount); i++ { // assign a volume - assignResult, err := operation.Assign(*copy.master, grpcDialOption, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(copy.masterClient.GetMaster(), grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, Replication: *copy.replication, Collection: *copy.collection, From 920b4e56aa76fbf37780363d5b345c2882d311b5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 29 Mar 2019 22:53:35 -0700 Subject: [PATCH 110/450] refactoring --- weed/command/benchmark.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index aa54946a7..6c64c4591 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -40,13 +40,13 @@ type BenchmarkOptions struct { cpuprofile *string maxCpu *int grpcDialOption grpc.DialOption + masterClient *wdclient.MasterClient } var ( - b BenchmarkOptions - sharedBytes []byte - masterClient *wdclient.MasterClient - isSecure bool + b BenchmarkOptions + sharedBytes []byte + isSecure bool ) func init() { @@ -125,9 +125,9 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ",")) - go masterClient.KeepConnectedToMaster() - masterClient.WaitUntilConnected() + b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ",")) + go b.masterClient.KeepConnectedToMaster() + b.masterClient.WaitUntilConnected() if *b.write { benchWrite() @@ -208,7 +208,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { } var jwtAuthorization security.EncodedJwt if isSecure { - jwtAuthorization = operation.LookupJwt(masterClient.GetMaster(), df.fp.Fid) + jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), df.fp.Fid) } if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil { s.completed++ @@ -234,12 +234,12 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { Collection: *b.collection, 
Replication: *b.replication, } - if assignResult, err := operation.Assign(masterClient.GetMaster(), b.grpcDialOption, ar); err == nil { + if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection if !isSecure && assignResult.Auth != "" { isSecure = true } - if _, err := fp.Upload(0, masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil { + if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} @@ -279,7 +279,7 @@ func readFiles(fileIdLineChan chan string, s *stat) { fmt.Printf("reading file %s\n", fid) } start := time.Now() - url, err := masterClient.LookupFileId(fid) + url, err := b.masterClient.LookupFileId(fid) if err != nil { s.failed++ println("!!!! ", fid, " location not found!!!!!") From 97406333a5ecc5b0d2cdaa74ff9901e3100e4bf2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 30 Mar 2019 23:08:29 -0700 Subject: [PATCH 111/450] support atomic renaming for mysql/postgres filer store --- other/java/client/pom.xml | 2 +- .../java/seaweedfs/client/FilerClient.java | 141 +++++---- other/java/client/src/main/proto/filer.proto | 13 + other/java/hdfs/pom.xml | 2 +- .../seaweed/hdfs/SeaweedFileSystemStore.java | 30 +- .../filer2/abstract_sql/abstract_sql_store.go | 44 ++- weed/filer2/cassandra/cassandra_store.go | 10 + weed/filer2/filer.go | 12 + weed/filer2/filerstore.go | 4 + weed/filer2/fullpath.go | 13 +- weed/filer2/leveldb/leveldb_store.go | 10 + weed/filer2/memdb/memdb_store.go | 10 + weed/filer2/redis/universal_redis_store.go | 10 + weed/filesys/dir_rename.go | 106 +------ weed/pb/filer.proto | 13 + weed/pb/filer_pb/filer.pb.go | 276 ++++++++++++------ weed/server/filer_grpc_server_rename.go | 106 +++++++ 17 files changed, 517 insertions(+), 285 deletions(-) create mode 100644 weed/server/filer_grpc_server_rename.go diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 30b9d4d55..67b338c37 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -4,7 +4,7 @@ com.github.chrislusf seaweedfs-client - 1.0.8 + 1.0.9 org.sonatype.oss diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index 63d0d8320..562a36894 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -51,12 +51,26 @@ public class FilerClient { } return createEntry( - parent, - newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build() + parent, + newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build() ); } + public boolean mv(String oldPath, String newPath) { + + Path oldPathObject = Paths.get(oldPath); + String oldParent = oldPathObject.getParent().toString(); + String oldName = oldPathObject.getFileName().toString(); + + Path newPathObject = Paths.get(newPath); + String newParent = newPathObject.getParent().toString(); + String newName = newPathObject.getFileName().toString(); + + return atomicRenameEntry(oldParent, oldName, newParent, newName); + + } + public boolean rm(String path, boolean isRecursive) { Path pathObject = Paths.get(path); @@ -64,10 +78,10 @@ public class FilerClient { String name = pathObject.getFileName().toString(); return 
deleteEntry( - parent, - name, - true, - isRecursive); + parent, + name, + true, + isRecursive); } public boolean touch(String path, int mode) { @@ -84,18 +98,18 @@ public class FilerClient { FilerProto.Entry entry = lookupEntry(parent, name); if (entry == null) { return createEntry( - parent, - newFileEntry(name, mode, uid, gid, userName, groupNames).build() + parent, + newFileEntry(name, mode, uid, gid, userName, groupNames).build() ); } long now = System.currentTimeMillis() / 1000L; FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder() - .setMtime(now) - .setUid(uid) - .setGid(gid) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames)); + .setMtime(now) + .setUid(uid) + .setGid(gid) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames)); return updateEntry(parent, entry.toBuilder().setAttributes(attr).build()); } @@ -105,17 +119,17 @@ public class FilerClient { long now = System.currentTimeMillis() / 1000L; return FilerProto.Entry.newBuilder() - .setName(name) - .setIsDirectory(true) - .setAttributes(FilerProto.FuseAttributes.newBuilder() - .setMtime(now) - .setCrtime(now) - .setUid(uid) - .setGid(gid) - .setFileMode(mode | 1 << 31) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames))); + .setName(name) + .setIsDirectory(true) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setMtime(now) + .setCrtime(now) + .setUid(uid) + .setGid(gid) + .setFileMode(mode | 1 << 31) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames))); } public FilerProto.Entry.Builder newFileEntry(String name, int mode, @@ -124,17 +138,17 @@ public class FilerClient { long now = System.currentTimeMillis() / 1000L; return FilerProto.Entry.newBuilder() - .setName(name) - .setIsDirectory(false) - .setAttributes(FilerProto.FuseAttributes.newBuilder() - .setMtime(now) - .setCrtime(now) - .setUid(uid) - .setGid(gid) - .setFileMode(mode) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames))); + .setName(name) + .setIsDirectory(false) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setMtime(now) + .setCrtime(now) + .setUid(uid) + .setGid(gid) + .setFileMode(mode) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames))); } public List listEntries(String path) { @@ -160,20 +174,20 @@ public class FilerClient { public List listEntries(String path, String entryPrefix, String lastEntryName, int limit) { return filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() - .setDirectory(path) - .setPrefix(entryPrefix) - .setStartFromFileName(lastEntryName) - .setLimit(limit) - .build()).getEntriesList(); + .setDirectory(path) + .setPrefix(entryPrefix) + .setStartFromFileName(lastEntryName) + .setLimit(limit) + .build()).getEntriesList(); } public FilerProto.Entry lookupEntry(String directory, String entryName) { try { return filerGrpcClient.getBlockingStub().lookupDirectoryEntry( - FilerProto.LookupDirectoryEntryRequest.newBuilder() - .setDirectory(directory) - .setName(entryName) - .build()).getEntry(); + FilerProto.LookupDirectoryEntryRequest.newBuilder() + .setDirectory(directory) + .setName(entryName) + .build()).getEntry(); } catch (Exception e) { LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e); return null; @@ -184,9 +198,9 @@ public class FilerClient { public boolean createEntry(String parent, FilerProto.Entry entry) { try { 
filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parent) - .setEntry(entry) - .build()); + .setDirectory(parent) + .setEntry(entry) + .build()); } catch (Exception e) { LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); return false; @@ -197,9 +211,9 @@ public class FilerClient { public boolean updateEntry(String parent, FilerProto.Entry entry) { try { filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder() - .setDirectory(parent) - .setEntry(entry) - .build()); + .setDirectory(parent) + .setEntry(entry) + .build()); } catch (Exception e) { LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); return false; @@ -210,11 +224,11 @@ public class FilerClient { public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive) { try { filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder() - .setDirectory(parent) - .setName(entryName) - .setIsDeleteData(isDeleteFileChunk) - .setIsRecursive(isRecursive) - .build()); + .setDirectory(parent) + .setName(entryName) + .setIsDeleteData(isDeleteFileChunk) + .setIsRecursive(isRecursive) + .build()); } catch (Exception e) { LOG.warn("deleteEntry {}/{}: {}", parent, entryName, e); return false; @@ -222,4 +236,19 @@ public class FilerClient { return true; } + public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) { + try { + filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder() + .setOldDirectory(oldParent) + .setOldName(oldName) + .setNewDirectory(newParent) + .setNewName(newName) + .build()); + } catch (Exception e) { + LOG.warn("atomicRenameEntry {}/{} => {}/{}: {}", oldParent, oldName, newParent, newName, e); + return false; + } + return true; + } + } diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 5cdcb6a97..07c73f1d4 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { + } + rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { } @@ -126,6 +129,16 @@ message DeleteEntryRequest { message DeleteEntryResponse { } +message AtomicRenameEntryRequest { + string old_directory = 1; + string old_name = 2; + string new_directory = 3; + string new_name = 4; +} + +message AtomicRenameEntryResponse { +} + message AssignVolumeRequest { int32 count = 1; string collection = 2; diff --git a/other/java/hdfs/pom.xml b/other/java/hdfs/pom.xml index 18ae192d0..35911d463 100644 --- a/other/java/hdfs/pom.xml +++ b/other/java/hdfs/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.0.8 + 1.0.9 3.1.1 diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index c93a28abc..643467898 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -151,35 +151,7 @@ public class SeaweedFileSystemStore { LOG.warn("rename non-existing source: {}", source); return; } - LOG.warn("rename moveEntry source: {}", source); - moveEntry(source.getParent(), entry, destination); - } - - private boolean 
moveEntry(Path oldParent, FilerProto.Entry entry, Path destination) { - - LOG.debug("moveEntry: {}/{} => {}", oldParent, entry.getName(), destination); - - FilerProto.Entry.Builder newEntry = entry.toBuilder().setName(destination.getName()); - boolean isDirectoryCreated = filerClient.createEntry(getParentDirectory(destination), newEntry.build()); - - if (!isDirectoryCreated) { - return false; - } - - if (entry.getIsDirectory()) { - Path entryPath = new Path(oldParent, entry.getName()); - List entries = filerClient.listEntries(entryPath.toUri().getPath()); - for (FilerProto.Entry ent : entries) { - boolean isSucess = moveEntry(entryPath, ent, new Path(destination, ent.getName())); - if (!isSucess) { - return false; - } - } - } - - return filerClient.deleteEntry( - oldParent.toUri().getPath(), entry.getName(), false, false); - + filerClient.mv(source.toUri().getPath(), destination.toUri().getPath()); } public OutputStream createFile(final Path path, diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 95ce9cb9f..9a3ee51c3 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -19,6 +19,40 @@ type AbstractSqlStore struct { SqlListInclusive string } +type TxOrDB interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { + tx, err := store.DB.BeginTx(ctx, nil) + if err != nil { + return ctx, err + } + + return context.WithValue(ctx, "tx", tx), nil +} +func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Commit() + } + return nil +} +func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Rollback() + } + return nil +} + +func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx + } + return store.DB +} + func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() @@ -27,7 +61,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta) if err != nil { return fmt.Errorf("insert %s: %s", entry.FullPath, err) } @@ -47,7 +81,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir) if err != nil { return fmt.Errorf("update %s: %s", entry.FullPath, err) } @@ -62,7 +96,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() - row := 
store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir) + row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir) var data []byte if err := row.Scan(&data); err != nil { return nil, filer2.ErrNotFound @@ -82,7 +116,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. dir, name := fullpath.DirAndName() - res, err := store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir) if err != nil { return fmt.Errorf("delete %s: %s", fullpath, err) } @@ -102,7 +136,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat sqlText = store.SqlListInclusive } - rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) + rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index e14a9e023..72680b5e1 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -40,6 +40,16 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er return } +func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error){ + return ctx, nil +} +func (store *CassandraStore) CommitTransaction(ctx context.Context) error{ + return nil +} +func (store *CassandraStore) RollbackTransaction(ctx context.Context) error{ + return nil +} + func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 4220e24d3..06c26abb4 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -57,6 +57,18 @@ func (fs *Filer) KeepConnectedToMaster() { fs.MasterClient.KeepConnectedToMaster() } +func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { + return f.store.BeginTransaction(ctx) +} + +func (f *Filer) CommitTransaction(ctx context.Context) error { + return f.store.CommitTransaction(ctx) +} + +func (f *Filer) RollbackTransaction(ctx context.Context) error { + return f.store.RollbackTransaction(ctx) +} + func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { if string(entry.FullPath) == "/" { diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index c10074eb2..0b256e56e 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -17,6 +17,10 @@ type FilerStore interface { FindEntry(context.Context, FullPath) (entry *Entry, err error) DeleteEntry(context.Context, FullPath) (err error) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) + + BeginTransaction(ctx context.Context) (context.Context, error) + CommitTransaction(ctx context.Context) error + RollbackTransaction(ctx context.Context) error } var ErrNotFound = errors.New("filer: no entry is found in filer store") diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go index be6e34431..191e51cf3 100644 --- a/weed/filer2/fullpath.go +++ b/weed/filer2/fullpath.go @@ -8,10 +8,7 @@ import ( type FullPath string func NewFullPath(dir, name string) FullPath { - if strings.HasSuffix(dir, "/") { - return FullPath(dir + name) 
- } - return FullPath(dir + "/" + name) + return FullPath(dir).Child(name) } func (fp FullPath) DirAndName() (string, string) { @@ -29,3 +26,11 @@ func (fp FullPath) Name() string { _, name := filepath.Split(string(fp)) return name } + +func (fp FullPath) Child(name string) FullPath { + dir := string(fp) + if strings.HasSuffix(dir, "/") { + return FullPath(dir + name) + } + return FullPath(dir + "/" + name) +} diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 60de11565..97df9163b 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -46,6 +46,16 @@ func (store *LevelDBStore) initialize(dir string) (err error) { return } +func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error){ + return ctx, nil +} +func (store *LevelDBStore) CommitTransaction(ctx context.Context) error{ + return nil +} +func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error{ + return nil +} + func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { key := genKey(entry.DirAndName()) diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go index d4c906f2d..811c87440 100644 --- a/weed/filer2/memdb/memdb_store.go +++ b/weed/filer2/memdb/memdb_store.go @@ -34,6 +34,16 @@ func (store *MemDbStore) Initialize(configuration util.Configuration) (err error return nil } +func (store *MemDbStore) BeginTransaction(ctx context.Context) (context.Context, error){ + return ctx, nil +} +func (store *MemDbStore) CommitTransaction(ctx context.Context) error{ + return nil +} +func (store *MemDbStore) RollbackTransaction(ctx context.Context) error{ + return nil +} + func (store *MemDbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { // println("inserting", entry.FullPath) store.tree.ReplaceOrInsert(entryItem{entry}) diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index ec78f70e7..72e9ce8b3 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -19,6 +19,16 @@ type UniversalRedisStore struct { Client redis.UniversalClient } +func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error){ + return ctx, nil +} +func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error{ + return nil +} +func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error{ + return nil +} + func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { value, err := entry.EncodeAttributesAndChunks() diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 8c586eb73..7a415ff82 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,13 +2,10 @@ package filesys import ( "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" - "math" - "path/filepath" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { @@ -17,103 +14,20 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector return dir.wfs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - // find existing entry - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: 
dir.Path, - Name: req.OldName, + request := &filer_pb.AtomicRenameEntryRequest{ + OldDirectory: dir.Path, + OldName: req.OldName, + NewDirectory: newDir.Path, + NewName: req.NewName, } - glog.V(4).Infof("find existing directory entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + _, err := client.AtomicRenameEntry(ctx, request) if err != nil { - glog.V(3).Infof("renaming find %s/%s: %v", dir.Path, req.OldName, err) - return fuse.ENOENT + return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) } - entry := resp.Entry + return nil - glog.V(4).Infof("found existing directory entry resp: %+v", resp) - - return moveEntry(ctx, client, dir.Path, entry, newDir.Path, req.NewName) }) } - -func moveEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, oldParent string, entry *filer_pb.Entry, newParent, newName string) error { - if entry.IsDirectory { - currentDirPath := filepath.ToSlash(filepath.Join(oldParent, entry.Name)) - - lastFileName := "" - includeLastFile := false - limit := math.MaxInt32 - for limit > 0 { - request := &filer_pb.ListEntriesRequest{ - Directory: currentDirPath, - StartFromFileName: lastFileName, - InclusiveStartFrom: includeLastFile, - Limit: 1024, - } - glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("list %s: %v", oldParent, err) - return fuse.EIO - } - if len(resp.Entries) == 0 { - break - } - - for _, item := range resp.Entries { - lastFileName = item.Name - err := moveEntry(ctx, client, currentDirPath, item, filepath.ToSlash(filepath.Join(newParent, newName)), item.Name) - if err != nil { - return err - } - limit-- - } - if len(resp.Entries) < 1024 { - break - } - } - - } - - // add to new directory - { - request := &filer_pb.CreateEntryRequest{ - Directory: newParent, - Entry: &filer_pb.Entry{ - Name: newName, - IsDirectory: entry.IsDirectory, - Attributes: entry.Attributes, - Chunks: entry.Chunks, - }, - } - - glog.V(1).Infof("create new entry: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("renaming create %s/%s: %v", newParent, newName, err) - return fuse.EIO - } - } - - // delete old entry - { - request := &filer_pb.DeleteEntryRequest{ - Directory: oldParent, - Name: entry.Name, - IsDeleteData: false, - } - - glog.V(1).Infof("remove old entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(0).Infof("renaming delete %s/%s: %v", oldParent, entry.Name, err) - return fuse.EIO - } - - } - - return nil - -} diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 5cdcb6a97..07c73f1d4 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { + } + rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { } @@ -126,6 +129,16 @@ message DeleteEntryRequest { message DeleteEntryResponse { } +message AtomicRenameEntryRequest { + string old_directory = 1; + string old_name = 2; + string new_directory = 3; + string new_name = 4; +} + +message AtomicRenameEntryResponse { +} + message AssignVolumeRequest { int32 count = 1; string collection = 2; diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 920261538..4f8c915d7 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go 
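Before the regenerated Go stubs below, a minimal sketch of how a standalone Go client could call the new AtomicRenameEntry RPC once filer.proto is recompiled. The filer address, the example paths, and the error handling are illustrative assumptions; only the request field names come from the proto definition above.

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	// assumed filer gRPC endpoint; adjust to the actual deployment
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)

	// rename /dir1/a.txt to /dir2/b.txt in a single server-side call
	_, err = client.AtomicRenameEntry(context.Background(), &filer_pb.AtomicRenameEntryRequest{
		OldDirectory: "/dir1",
		OldName:      "a.txt",
		NewDirectory: "/dir2",
		NewName:      "b.txt",
	})
	if err != nil {
		log.Fatalf("rename: %v", err)
	}
}

With this RPC the rename either completes inside one filer-store transaction or is rolled back, so callers no longer copy and delete entry by entry as the removed FUSE client code did.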
@@ -23,6 +23,8 @@ It has these top-level messages: UpdateEntryResponse DeleteEntryRequest DeleteEntryResponse + AtomicRenameEntryRequest + AtomicRenameEntryResponse AssignVolumeRequest AssignVolumeResponse LookupVolumeRequest @@ -521,6 +523,54 @@ func (m *DeleteEntryResponse) String() string { return proto.CompactT func (*DeleteEntryResponse) ProtoMessage() {} func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +type AtomicRenameEntryRequest struct { + OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"` + OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"` + NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory" json:"new_directory,omitempty"` + NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName" json:"new_name,omitempty"` +} + +func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} } +func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) } +func (*AtomicRenameEntryRequest) ProtoMessage() {} +func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *AtomicRenameEntryRequest) GetOldDirectory() string { + if m != nil { + return m.OldDirectory + } + return "" +} + +func (m *AtomicRenameEntryRequest) GetOldName() string { + if m != nil { + return m.OldName + } + return "" +} + +func (m *AtomicRenameEntryRequest) GetNewDirectory() string { + if m != nil { + return m.NewDirectory + } + return "" +} + +func (m *AtomicRenameEntryRequest) GetNewName() string { + if m != nil { + return m.NewName + } + return "" +} + +type AtomicRenameEntryResponse struct { +} + +func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} } +func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) } +func (*AtomicRenameEntryResponse) ProtoMessage() {} +func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + type AssignVolumeRequest struct { Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` @@ -532,7 +582,7 @@ type AssignVolumeRequest struct { func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *AssignVolumeRequest) GetCount() int32 { if m != nil { @@ -580,7 +630,7 @@ type AssignVolumeResponse struct { func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *AssignVolumeResponse) GetFileId() string { if m != nil { @@ -624,7 +674,7 @@ type LookupVolumeRequest struct { func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } func (*LookupVolumeRequest) 
ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *LookupVolumeRequest) GetVolumeIds() []string { if m != nil { @@ -640,7 +690,7 @@ type Locations struct { func (m *Locations) Reset() { *m = Locations{} } func (m *Locations) String() string { return proto.CompactTextString(m) } func (*Locations) ProtoMessage() {} -func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *Locations) GetLocations() []*Location { if m != nil { @@ -657,7 +707,7 @@ type Location struct { func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *Location) GetUrl() string { if m != nil { @@ -680,7 +730,7 @@ type LookupVolumeResponse struct { func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { if m != nil { @@ -696,7 +746,7 @@ type DeleteCollectionRequest struct { func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *DeleteCollectionRequest) GetCollection() string { if m != nil { @@ -711,7 +761,7 @@ type DeleteCollectionResponse struct { func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } type StatisticsRequest struct { Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` @@ -722,7 +772,7 @@ type StatisticsRequest struct { func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *StatisticsRequest) GetReplication() string { if m != nil { @@ -757,7 +807,7 @@ type StatisticsResponse struct { func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } func (*StatisticsResponse) 
ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *StatisticsResponse) GetReplication() string { if m != nil { @@ -816,6 +866,8 @@ func init() { proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse") proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest") proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse") + proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest") + proto.RegisterType((*AtomicRenameEntryResponse)(nil), "filer_pb.AtomicRenameEntryResponse") proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest") proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse") proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest") @@ -844,6 +896,7 @@ type SeaweedFilerClient interface { CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) + AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) @@ -903,6 +956,15 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq return out, nil } +func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { + out := new(AtomicRenameEntryResponse) + err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { out := new(AssignVolumeResponse) err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...) 
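The next hunk extends the generated SeaweedFilerServer interface with AtomicRenameEntry, so every server implementation must add a matching method before it compiles again. A hedged sketch of just that method shape follows; renameOnlyServer is a hypothetical placeholder, and the real handler (which wraps the move in a filer-store transaction) is added later in this series as filer_grpc_server_rename.go.

package main

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// renameOnlyServer is illustrative only: it shows the signature the
// regenerated interface demands, not a complete SeaweedFilerServer.
type renameOnlyServer struct{}

func (s *renameOnlyServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) {
	fmt.Printf("rename %s/%s => %s/%s\n", req.OldDirectory, req.OldName, req.NewDirectory, req.NewName)
	return &filer_pb.AtomicRenameEntryResponse{}, nil
}

func main() {
	_, _ = (&renameOnlyServer{}).AtomicRenameEntry(context.Background(), &filer_pb.AtomicRenameEntryRequest{
		OldDirectory: "/dir1", OldName: "a.txt", NewDirectory: "/dir2", NewName: "b.txt",
	})
}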
@@ -947,6 +1009,7 @@ type SeaweedFilerServer interface { CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) + AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) @@ -1047,6 +1110,24 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AtomicRenameEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/AtomicRenameEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, req.(*AtomicRenameEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AssignVolumeRequest) if err := dec(in); err != nil { @@ -1143,6 +1224,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "DeleteEntry", Handler: _SeaweedFiler_DeleteEntry_Handler, }, + { + MethodName: "AtomicRenameEntry", + Handler: _SeaweedFiler_AtomicRenameEntry_Handler, + }, { MethodName: "AssignVolume", Handler: _SeaweedFiler_AssignVolume_Handler, @@ -1167,87 +1252,92 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1301 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x8f, 0xd3, 0xc6, - 0x1b, 0xc7, 0x79, 0x23, 0x7e, 0x92, 0xf0, 0xdf, 0x9d, 0xec, 0xbf, 0x58, 0x61, 0x97, 0x06, 0xb7, - 0x54, 0x8b, 0x8a, 0x22, 0x44, 0x7b, 0x80, 0xa2, 0x4a, 0x85, 0x65, 0x91, 0x90, 0x16, 0xa8, 0xbc, - 0x50, 0xa9, 0xea, 0xc1, 0xf2, 0xda, 0x93, 0x30, 0x5a, 0xc7, 0x4e, 0x3d, 0xe3, 0x05, 0xfa, 0x11, - 0x7a, 0xe9, 0xa5, 0xd7, 0x1e, 0x7a, 0xea, 0xb7, 0xe8, 0xa5, 0xdf, 0xa7, 0xf7, 0xde, 0xaa, 0x79, - 0x66, 0xec, 0x8c, 0x63, 0x2f, 0xb4, 0xaa, 0xb8, 0xcd, 0xfc, 0x9e, 0xf7, 0x67, 0x9e, 0x17, 0x1b, - 0x06, 0x73, 0x16, 0xd3, 0x6c, 0xb6, 0xca, 0x52, 0x91, 0x92, 0x3e, 0x5e, 0xfc, 0xd5, 0x89, 0xfb, - 0x0c, 0xae, 0x1c, 0xa5, 0xe9, 0x69, 0xbe, 0x7a, 0xc8, 0x32, 0x1a, 0x8a, 0x34, 0x7b, 0x73, 0x98, - 0x88, 0xec, 0x8d, 0x47, 0xbf, 0xcf, 0x29, 0x17, 0x64, 0x17, 0xec, 0xa8, 0x20, 0x38, 0xd6, 0xd4, - 0xda, 0xb7, 0xbd, 0x35, 0x40, 0x08, 0x74, 0x92, 0x60, 0x49, 0x9d, 0x16, 0x12, 0xf0, 0xec, 0x1e, - 0xc2, 0x6e, 0xb3, 0x42, 0xbe, 0x4a, 0x13, 0x4e, 0xc9, 0x75, 0xe8, 0x52, 0x09, 0xa0, 0xb6, 0xc1, - 0xed, 0xff, 0xcd, 0x0a, 0x57, 0x66, 0x8a, 0x4f, 0x51, 0xdd, 0xdf, 0x2d, 0x20, 0x47, 0x8c, 0x0b, - 0x09, 0x32, 0xca, 0xff, 0x99, 0x3f, 0x1f, 0x40, 0x6f, 0x95, 0xd1, 0x39, 0x7b, 0xad, 0x3d, 0xd2, - 
0x37, 0x72, 0x13, 0xb6, 0xb9, 0x08, 0x32, 0xf1, 0x28, 0x4b, 0x97, 0x8f, 0x58, 0x4c, 0x9f, 0x4a, - 0xa7, 0xdb, 0xc8, 0x52, 0x27, 0x90, 0x19, 0x10, 0x96, 0x84, 0x71, 0xce, 0xd9, 0x19, 0x3d, 0x2e, - 0xa8, 0x4e, 0x67, 0x6a, 0xed, 0xf7, 0xbd, 0x06, 0x0a, 0xd9, 0x81, 0x6e, 0xcc, 0x96, 0x4c, 0x38, - 0xdd, 0xa9, 0xb5, 0x3f, 0xf2, 0xd4, 0xc5, 0xfd, 0x0a, 0xc6, 0x15, 0xff, 0x75, 0xf8, 0x37, 0xe0, - 0x22, 0x55, 0x90, 0x63, 0x4d, 0xdb, 0x4d, 0x09, 0x28, 0xe8, 0xee, 0x2f, 0x2d, 0xe8, 0x22, 0x54, - 0xe6, 0xd9, 0x5a, 0xe7, 0x99, 0x5c, 0x83, 0x21, 0xe3, 0xfe, 0x3a, 0x19, 0x2d, 0xf4, 0x6f, 0xc0, - 0x78, 0x99, 0x77, 0xf2, 0x29, 0xf4, 0xc2, 0x97, 0x79, 0x72, 0xca, 0x9d, 0x36, 0x9a, 0x1a, 0xaf, - 0x4d, 0xc9, 0x60, 0x0f, 0x24, 0xcd, 0xd3, 0x2c, 0xe4, 0x0e, 0x40, 0x20, 0x44, 0xc6, 0x4e, 0x72, - 0x41, 0x39, 0x46, 0x3b, 0xb8, 0xed, 0x18, 0x02, 0x39, 0xa7, 0xf7, 0x4b, 0xba, 0x67, 0xf0, 0x92, - 0xbb, 0xd0, 0xa7, 0xaf, 0x05, 0x4d, 0x22, 0x1a, 0x39, 0x5d, 0x34, 0xb4, 0xb7, 0x11, 0xd3, 0xec, - 0x50, 0xd3, 0x55, 0x84, 0x25, 0xfb, 0xe4, 0x1e, 0x8c, 0x2a, 0x24, 0xb2, 0x05, 0xed, 0x53, 0x5a, - 0xbc, 0xac, 0x3c, 0xca, 0xec, 0x9e, 0x05, 0x71, 0xae, 0x8a, 0x6c, 0xe8, 0xa9, 0xcb, 0x17, 0xad, - 0x3b, 0x96, 0xfb, 0xb3, 0x05, 0xdb, 0x87, 0x67, 0x34, 0x11, 0x4f, 0x53, 0xc1, 0xe6, 0x2c, 0x0c, - 0x04, 0x4b, 0x13, 0x72, 0x13, 0xec, 0x34, 0x8e, 0xfc, 0xb7, 0xd6, 0x58, 0x3f, 0x8d, 0xb5, 0xbd, - 0x9b, 0x60, 0x27, 0xf4, 0x95, 0xe6, 0x6e, 0x9d, 0xc3, 0x9d, 0xd0, 0x57, 0x8a, 0xfb, 0x23, 0x18, - 0x45, 0x34, 0xa6, 0x82, 0xfa, 0x65, 0x5e, 0x65, 0xd2, 0x87, 0x0a, 0xc4, 0x7c, 0x72, 0xf7, 0x57, - 0x0b, 0xec, 0x32, 0xbd, 0xe4, 0x32, 0x5c, 0x94, 0xea, 0x7c, 0x16, 0xe9, 0xa0, 0x7a, 0xf2, 0xfa, - 0x38, 0x92, 0xb5, 0x9a, 0xce, 0xe7, 0x9c, 0x0a, 0x34, 0xdb, 0xf6, 0xf4, 0x4d, 0xbe, 0x35, 0x67, - 0x3f, 0xa8, 0xf2, 0xec, 0x78, 0x78, 0x96, 0x39, 0x58, 0x0a, 0xb6, 0xa4, 0xf8, 0x2c, 0x6d, 0x4f, - 0x5d, 0xc8, 0x18, 0xba, 0xd4, 0x17, 0xc1, 0x02, 0xeb, 0xce, 0xf6, 0x3a, 0xf4, 0x79, 0xb0, 0x20, - 0x1f, 0xc3, 0x25, 0x9e, 0xe6, 0x59, 0x48, 0xfd, 0xc2, 0x6c, 0x0f, 0xa9, 0x43, 0x85, 0x3e, 0x42, - 0xe3, 0xee, 0x9f, 0x2d, 0xb8, 0x54, 0x7d, 0x51, 0x72, 0x05, 0x6c, 0x94, 0x40, 0xe3, 0x16, 0x1a, - 0xc7, 0x29, 0x71, 0x5c, 0x71, 0xa0, 0x65, 0x3a, 0x50, 0x88, 0x2c, 0xd3, 0x48, 0xf9, 0x3b, 0x52, - 0x22, 0x4f, 0xd2, 0x88, 0xca, 0x97, 0xcc, 0x59, 0x84, 0x1e, 0x8f, 0x3c, 0x79, 0x94, 0xc8, 0x82, - 0x45, 0xba, 0x4b, 0xe4, 0x51, 0xe6, 0x20, 0xcc, 0x50, 0x6f, 0x4f, 0xe5, 0x40, 0xdd, 0x64, 0x0e, - 0x96, 0x12, 0xbd, 0xa8, 0x02, 0x93, 0x67, 0x32, 0x85, 0x41, 0x46, 0x57, 0xb1, 0x7e, 0x66, 0xa7, - 0x8f, 0x24, 0x13, 0x22, 0x57, 0x01, 0xc2, 0x34, 0x8e, 0x69, 0x88, 0x0c, 0x36, 0x32, 0x18, 0x88, - 0x7c, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xa9, 0xb5, 0xdf, 0xf5, 0x7a, 0x42, 0xc4, 0xc7, - 0x34, 0x94, 0x71, 0xe4, 0x9c, 0x66, 0x3e, 0xf6, 0xd8, 0x00, 0xe5, 0xfa, 0x12, 0xc0, 0x69, 0xb0, - 0x07, 0xb0, 0xc8, 0xd2, 0x7c, 0xa5, 0xa8, 0xc3, 0x69, 0x5b, 0x8e, 0x1c, 0x44, 0x90, 0x7c, 0x1d, - 0x2e, 0xf1, 0x37, 0xcb, 0x98, 0x25, 0xa7, 0xbe, 0x08, 0xb2, 0x05, 0x15, 0xce, 0x08, 0x15, 0x8c, - 0x34, 0xfa, 0x1c, 0x41, 0xf7, 0x5b, 0x20, 0x07, 0x19, 0x0d, 0x04, 0xfd, 0x17, 0xd3, 0xb5, 0x9c, - 0x94, 0xad, 0xb7, 0x4e, 0xca, 0xff, 0xc3, 0xb8, 0xa2, 0x5a, 0x0d, 0x1a, 0x69, 0xf1, 0xc5, 0x2a, - 0x7a, 0x5f, 0x16, 0x2b, 0xaa, 0xb5, 0xc5, 0x9f, 0x2c, 0x20, 0x0f, 0xb1, 0x13, 0xfe, 0xdb, 0x0a, - 0x91, 0x35, 0x2c, 0x47, 0x9b, 0xea, 0xb4, 0x28, 0x10, 0x81, 0x1e, 0xbe, 0x43, 0xc6, 0x95, 0xfe, - 0x87, 0x81, 0x08, 0xf4, 0x00, 0xcc, 0x68, 0x98, 0x67, 0x72, 0x1e, 0x63, 0x5d, 0xe1, 0x00, 0xf4, - 0x0a, 0x48, 0x3a, 0x5a, 
0x71, 0x48, 0x3b, 0xfa, 0x9b, 0x05, 0xe3, 0xfb, 0x9c, 0xb3, 0x45, 0xf2, - 0x4d, 0x1a, 0xe7, 0x4b, 0x5a, 0x78, 0xba, 0x03, 0xdd, 0x30, 0xcd, 0x13, 0x81, 0x5e, 0x76, 0x3d, - 0x75, 0xd9, 0x28, 0xab, 0x56, 0xad, 0xac, 0x36, 0x0a, 0xb3, 0x5d, 0x2f, 0x4c, 0xa3, 0xf0, 0x3a, - 0x95, 0xc2, 0xfb, 0x10, 0x06, 0x32, 0x3c, 0x3f, 0xa4, 0x89, 0xa0, 0x99, 0xee, 0x63, 0x90, 0xd0, - 0x01, 0x22, 0xee, 0x8f, 0x16, 0xec, 0x54, 0x3d, 0xd5, 0x6b, 0xe4, 0xdc, 0xb1, 0x22, 0xdb, 0x2e, - 0x8b, 0xb5, 0x9b, 0xf2, 0x28, 0x0b, 0x78, 0x95, 0x9f, 0xc4, 0x2c, 0xf4, 0x25, 0x41, 0xb9, 0x67, - 0x2b, 0xe4, 0x45, 0x16, 0xaf, 0x83, 0xee, 0x98, 0x41, 0x13, 0xe8, 0x04, 0xb9, 0x78, 0x59, 0x8c, - 0x16, 0x79, 0x76, 0x3f, 0x87, 0xb1, 0xda, 0xec, 0xd5, 0xac, 0xed, 0x01, 0x9c, 0x21, 0xe0, 0xb3, - 0x48, 0x2d, 0x35, 0xdb, 0xb3, 0x15, 0xf2, 0x38, 0xe2, 0xee, 0x97, 0x60, 0x1f, 0xa5, 0x2a, 0x11, - 0x9c, 0xdc, 0x02, 0x3b, 0x2e, 0x2e, 0x7a, 0xff, 0x91, 0x75, 0x91, 0x15, 0x7c, 0xde, 0x9a, 0xc9, - 0xbd, 0x07, 0xfd, 0x02, 0x2e, 0x62, 0xb3, 0xce, 0x8b, 0xad, 0xb5, 0x11, 0x9b, 0xfb, 0x87, 0x05, - 0x3b, 0x55, 0x97, 0x75, 0xfa, 0x5e, 0xc0, 0xa8, 0x34, 0xe1, 0x2f, 0x83, 0x95, 0xf6, 0xe5, 0x96, - 0xe9, 0x4b, 0x5d, 0xac, 0x74, 0x90, 0x3f, 0x09, 0x56, 0xaa, 0xa4, 0x86, 0xb1, 0x01, 0x4d, 0x9e, - 0xc3, 0x76, 0x8d, 0xa5, 0x61, 0xa5, 0xdd, 0x30, 0x57, 0x5a, 0x65, 0x2d, 0x97, 0xd2, 0xe6, 0x9e, - 0xbb, 0x0b, 0x97, 0x55, 0x15, 0x1f, 0x94, 0x45, 0x57, 0xe4, 0xbe, 0x5a, 0x9b, 0xd6, 0x66, 0x6d, - 0xba, 0x13, 0x70, 0xea, 0xa2, 0xba, 0x0b, 0x16, 0xb0, 0x7d, 0x2c, 0x02, 0xc1, 0xb8, 0x60, 0x61, - 0xf9, 0x7d, 0xb5, 0x51, 0xcc, 0xd6, 0xbb, 0xa6, 0x6c, 0xbd, 0x1d, 0xb6, 0xa0, 0x2d, 0x44, 0x51, - 0x67, 0xf2, 0x28, 0x5f, 0x81, 0x98, 0x96, 0xf4, 0x1b, 0xbc, 0x07, 0x53, 0xb2, 0x1e, 0x44, 0x2a, - 0x82, 0x58, 0x6d, 0xb1, 0x0e, 0x6e, 0x31, 0x1b, 0x11, 0x5c, 0x63, 0x6a, 0xd0, 0x47, 0x8a, 0xda, - 0x55, 0x3b, 0x4e, 0x02, 0x48, 0xdc, 0x03, 0xc0, 0x96, 0x52, 0xdd, 0xd0, 0x53, 0xb2, 0x12, 0x39, - 0x90, 0xc0, 0xed, 0xbf, 0xba, 0x30, 0x3c, 0xa6, 0xc1, 0x2b, 0x4a, 0x23, 0xb9, 0x44, 0x33, 0xb2, - 0x28, 0x6a, 0xab, 0xfa, 0xa1, 0x4b, 0xae, 0x6f, 0x16, 0x51, 0xe3, 0x97, 0xf5, 0xe4, 0x93, 0x77, - 0xb1, 0xe9, 0x67, 0xba, 0x40, 0x8e, 0x60, 0x60, 0x7c, 0x49, 0x92, 0x5d, 0x43, 0xb0, 0xf6, 0x81, - 0x3c, 0xd9, 0x3b, 0x87, 0x6a, 0x6a, 0x33, 0xd6, 0x85, 0xa9, 0xad, 0xbe, 0xa0, 0x4c, 0x6d, 0x4d, - 0x3b, 0x06, 0xb5, 0x19, 0xab, 0xc0, 0xd4, 0x56, 0x5f, 0x3e, 0xa6, 0xb6, 0xa6, 0xfd, 0x81, 0xda, - 0x8c, 0x79, 0x6d, 0x6a, 0xab, 0xef, 0x15, 0x53, 0x5b, 0xd3, 0x90, 0xbf, 0x40, 0x9e, 0xc1, 0xd0, - 0x9c, 0x9d, 0xc4, 0x10, 0x68, 0x98, 0xfe, 0x93, 0xab, 0xe7, 0x91, 0x4d, 0x85, 0xe6, 0x58, 0x30, - 0x15, 0x36, 0x0c, 0x46, 0x53, 0x61, 0xd3, 0x34, 0x71, 0x2f, 0x90, 0xef, 0x60, 0x6b, 0xb3, 0x3d, - 0xc9, 0xb5, 0xcd, 0xb0, 0x6a, 0x5d, 0x3f, 0x71, 0xdf, 0xc6, 0x52, 0x2a, 0x7f, 0x0c, 0xb0, 0xee, - 0x3a, 0x72, 0x65, 0x2d, 0x53, 0xeb, 0xfa, 0xc9, 0x6e, 0x33, 0xb1, 0x50, 0xf5, 0xe0, 0x2a, 0x6c, - 0x71, 0x55, 0xfa, 0x73, 0x3e, 0x0b, 0x63, 0x46, 0x13, 0xf1, 0x00, 0xb0, 0x0b, 0xbe, 0x96, 0xbf, - 0x93, 0x27, 0x3d, 0xfc, 0xab, 0xfc, 0xec, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x6d, 0xf7, - 0x42, 0x64, 0x0e, 0x00, 0x00, + // 1391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x6e, 0xdc, 0x44, + 0x14, 0xae, 0xf7, 0x2f, 0xf1, 0xd9, 0xdd, 0x92, 0x4c, 0x02, 0x75, 0x37, 0x49, 0xd9, 0x3a, 0x14, + 0xa5, 0xa2, 0x8a, 0xaa, 0xc2, 0x45, 0x4b, 0x85, 0x44, 0x9b, 0xa6, 0x52, 0xa5, 0xb4, 0x45, 0x4e, + 0x8b, 0x84, 0x90, 0xb0, 0x1c, 0x7b, 0xb2, 0x1d, 0xc5, 0x6b, 0x2f, 
0x9e, 0x71, 0xd2, 0xf2, 0x08, + 0xdc, 0x70, 0xc3, 0x15, 0x12, 0x17, 0x5c, 0xf1, 0x16, 0xdc, 0xf0, 0x14, 0xbc, 0x04, 0xcf, 0x80, + 0xce, 0xcc, 0xd8, 0x3b, 0x5e, 0x3b, 0x29, 0x08, 0xf5, 0x6e, 0xe6, 0xfc, 0x7e, 0xe7, 0xcc, 0xf9, + 0xb1, 0xa1, 0x7f, 0xcc, 0x62, 0x9a, 0xed, 0xce, 0xb2, 0x54, 0xa4, 0x64, 0x59, 0x5e, 0xfc, 0xd9, + 0x91, 0xfb, 0x1c, 0x36, 0x0e, 0xd2, 0xf4, 0x24, 0x9f, 0x3d, 0x62, 0x19, 0x0d, 0x45, 0x9a, 0xbd, + 0xd9, 0x4f, 0x44, 0xf6, 0xc6, 0xa3, 0xdf, 0xe7, 0x94, 0x0b, 0xb2, 0x09, 0x76, 0x54, 0x30, 0x1c, + 0x6b, 0x6c, 0xed, 0xd8, 0xde, 0x9c, 0x40, 0x08, 0x74, 0x92, 0x60, 0x4a, 0x9d, 0x96, 0x64, 0xc8, + 0xb3, 0xbb, 0x0f, 0x9b, 0xcd, 0x06, 0xf9, 0x2c, 0x4d, 0x38, 0x25, 0x37, 0xa0, 0x4b, 0x91, 0x20, + 0xad, 0xf5, 0xef, 0xbc, 0xb7, 0x5b, 0x40, 0xd9, 0x55, 0x72, 0x8a, 0xeb, 0xfe, 0x61, 0x01, 0x39, + 0x60, 0x5c, 0x20, 0x91, 0x51, 0xfe, 0xef, 0xf0, 0x7c, 0x00, 0xbd, 0x59, 0x46, 0x8f, 0xd9, 0x6b, + 0x8d, 0x48, 0xdf, 0xc8, 0x2d, 0x58, 0xe5, 0x22, 0xc8, 0xc4, 0xe3, 0x2c, 0x9d, 0x3e, 0x66, 0x31, + 0x7d, 0x86, 0xa0, 0xdb, 0x52, 0xa4, 0xce, 0x20, 0xbb, 0x40, 0x58, 0x12, 0xc6, 0x39, 0x67, 0xa7, + 0xf4, 0xb0, 0xe0, 0x3a, 0x9d, 0xb1, 0xb5, 0xb3, 0xec, 0x35, 0x70, 0xc8, 0x3a, 0x74, 0x63, 0x36, + 0x65, 0xc2, 0xe9, 0x8e, 0xad, 0x9d, 0xa1, 0xa7, 0x2e, 0xee, 0x97, 0xb0, 0x56, 0xc1, 0xaf, 0xc3, + 0xbf, 0x09, 0x4b, 0x54, 0x91, 0x1c, 0x6b, 0xdc, 0x6e, 0x4a, 0x40, 0xc1, 0x77, 0x7f, 0x6d, 0x41, + 0x57, 0x92, 0xca, 0x3c, 0x5b, 0xf3, 0x3c, 0x93, 0xeb, 0x30, 0x60, 0xdc, 0x9f, 0x27, 0xa3, 0x25, + 0xf1, 0xf5, 0x19, 0x2f, 0xf3, 0x4e, 0x3e, 0x81, 0x5e, 0xf8, 0x2a, 0x4f, 0x4e, 0xb8, 0xd3, 0x96, + 0xae, 0xd6, 0xe6, 0xae, 0x30, 0xd8, 0x3d, 0xe4, 0x79, 0x5a, 0x84, 0xdc, 0x05, 0x08, 0x84, 0xc8, + 0xd8, 0x51, 0x2e, 0x28, 0x97, 0xd1, 0xf6, 0xef, 0x38, 0x86, 0x42, 0xce, 0xe9, 0x83, 0x92, 0xef, + 0x19, 0xb2, 0xe4, 0x1e, 0x2c, 0xd3, 0xd7, 0x82, 0x26, 0x11, 0x8d, 0x9c, 0xae, 0x74, 0xb4, 0xb5, + 0x10, 0xd3, 0xee, 0xbe, 0xe6, 0xab, 0x08, 0x4b, 0xf1, 0xd1, 0x7d, 0x18, 0x56, 0x58, 0x64, 0x05, + 0xda, 0x27, 0xb4, 0x78, 0x59, 0x3c, 0x62, 0x76, 0x4f, 0x83, 0x38, 0x57, 0x45, 0x36, 0xf0, 0xd4, + 0xe5, 0xf3, 0xd6, 0x5d, 0xcb, 0xfd, 0xd9, 0x82, 0xd5, 0xfd, 0x53, 0x9a, 0x88, 0x67, 0xa9, 0x60, + 0xc7, 0x2c, 0x0c, 0x04, 0x4b, 0x13, 0x72, 0x0b, 0xec, 0x34, 0x8e, 0xfc, 0x0b, 0x6b, 0x6c, 0x39, + 0x8d, 0xb5, 0xbf, 0x5b, 0x60, 0x27, 0xf4, 0x4c, 0x4b, 0xb7, 0xce, 0x91, 0x4e, 0xe8, 0x99, 0x92, + 0xde, 0x86, 0x61, 0x44, 0x63, 0x2a, 0xa8, 0x5f, 0xe6, 0x15, 0x93, 0x3e, 0x50, 0x44, 0x99, 0x4f, + 0xee, 0xfe, 0x66, 0x81, 0x5d, 0xa6, 0x97, 0x5c, 0x81, 0x25, 0x34, 0xe7, 0xb3, 0x48, 0x07, 0xd5, + 0xc3, 0xeb, 0x93, 0x08, 0x6b, 0x35, 0x3d, 0x3e, 0xe6, 0x54, 0x48, 0xb7, 0x6d, 0x4f, 0xdf, 0xf0, + 0xad, 0x39, 0xfb, 0x41, 0x95, 0x67, 0xc7, 0x93, 0x67, 0xcc, 0xc1, 0x54, 0xb0, 0x29, 0x95, 0xcf, + 0xd2, 0xf6, 0xd4, 0x85, 0xac, 0x41, 0x97, 0xfa, 0x22, 0x98, 0xc8, 0xba, 0xb3, 0xbd, 0x0e, 0x7d, + 0x11, 0x4c, 0xc8, 0x47, 0x70, 0x99, 0xa7, 0x79, 0x16, 0x52, 0xbf, 0x70, 0xdb, 0x93, 0xdc, 0x81, + 0xa2, 0x3e, 0x96, 0xce, 0xdd, 0xbf, 0x5b, 0x70, 0xb9, 0xfa, 0xa2, 0x64, 0x03, 0x6c, 0xa9, 0x21, + 0x9d, 0x5b, 0xd2, 0xb9, 0x9c, 0x12, 0x87, 0x15, 0x00, 0x2d, 0x13, 0x40, 0xa1, 0x32, 0x4d, 0x23, + 0x85, 0x77, 0xa8, 0x54, 0x9e, 0xa6, 0x11, 0xc5, 0x97, 0xcc, 0x59, 0x24, 0x11, 0x0f, 0x3d, 0x3c, + 0x22, 0x65, 0xc2, 0x22, 0xdd, 0x25, 0x78, 0xc4, 0x1c, 0x84, 0x99, 0xb4, 0xdb, 0x53, 0x39, 0x50, + 0x37, 0xcc, 0xc1, 0x14, 0xa9, 0x4b, 0x2a, 0x30, 0x3c, 0x93, 0x31, 0xf4, 0x33, 0x3a, 0x8b, 0xf5, + 0x33, 0x3b, 0xcb, 0x92, 0x65, 0x92, 0xc8, 0x35, 0x80, 0x30, 0x8d, 0x63, 0x1a, 0x4a, 0x01, 
0x5b, + 0x0a, 0x18, 0x14, 0x7c, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xb1, 0xb5, 0xd3, 0xf5, 0x7a, + 0x42, 0xc4, 0x87, 0x34, 0xc4, 0x38, 0x72, 0x4e, 0x33, 0x5f, 0xf6, 0x58, 0x5f, 0xea, 0x2d, 0x23, + 0x41, 0x4e, 0x83, 0x2d, 0x80, 0x49, 0x96, 0xe6, 0x33, 0xc5, 0x1d, 0x8c, 0xdb, 0x38, 0x72, 0x24, + 0x45, 0xb2, 0x6f, 0xc0, 0x65, 0xfe, 0x66, 0x1a, 0xb3, 0xe4, 0xc4, 0x17, 0x41, 0x36, 0xa1, 0xc2, + 0x19, 0x4a, 0x03, 0x43, 0x4d, 0x7d, 0x21, 0x89, 0xee, 0x37, 0x40, 0xf6, 0x32, 0x1a, 0x08, 0xfa, + 0x1f, 0xa6, 0x6b, 0x39, 0x29, 0x5b, 0x17, 0x4e, 0xca, 0xf7, 0x61, 0xad, 0x62, 0x5a, 0x0d, 0x1a, + 0xf4, 0xf8, 0x72, 0x16, 0xbd, 0x2b, 0x8f, 0x15, 0xd3, 0xda, 0xe3, 0x4f, 0x16, 0x90, 0x47, 0xb2, + 0x13, 0xfe, 0xdf, 0x0a, 0xc1, 0x1a, 0xc6, 0xd1, 0xa6, 0x3a, 0x2d, 0x0a, 0x44, 0xa0, 0x87, 0xef, + 0x80, 0x71, 0x65, 0xff, 0x51, 0x20, 0x02, 0x3d, 0x00, 0x33, 0x1a, 0xe6, 0x19, 0xce, 0x63, 0x59, + 0x57, 0x72, 0x00, 0x7a, 0x05, 0x09, 0x81, 0x56, 0x00, 0x69, 0xa0, 0xbf, 0x58, 0xe0, 0x3c, 0x10, + 0xe9, 0x94, 0x85, 0x1e, 0x45, 0x87, 0x15, 0xb8, 0xdb, 0x30, 0xc4, 0xf9, 0xb1, 0x08, 0x79, 0x90, + 0xc6, 0xd1, 0x7c, 0xb2, 0x5e, 0x05, 0x1c, 0x21, 0xbe, 0x81, 0x7c, 0x29, 0x8d, 0x23, 0x59, 0x10, + 0xdb, 0x30, 0xc4, 0x89, 0x32, 0xd7, 0x57, 0x7b, 0x66, 0x90, 0xd0, 0xb3, 0x8a, 0x3e, 0x0a, 0x49, + 0xfd, 0x8e, 0xd2, 0x4f, 0xe8, 0x19, 0xea, 0xbb, 0x1b, 0x70, 0xb5, 0x01, 0x9b, 0x46, 0xfe, 0xbb, + 0x05, 0x6b, 0x0f, 0x38, 0x67, 0x93, 0xe4, 0xeb, 0x34, 0xce, 0xa7, 0xb4, 0x00, 0xbd, 0x0e, 0xdd, + 0x30, 0xcd, 0x13, 0x21, 0xc1, 0x76, 0x3d, 0x75, 0x59, 0x68, 0x88, 0x56, 0xad, 0x21, 0x16, 0x5a, + 0xaa, 0x5d, 0x6f, 0x29, 0xa3, 0x65, 0x3a, 0x95, 0x96, 0xf9, 0x10, 0xfa, 0xf8, 0x30, 0x7e, 0x48, + 0x13, 0x41, 0x33, 0x3d, 0x81, 0x00, 0x49, 0x7b, 0x92, 0xe2, 0xfe, 0x68, 0xc1, 0x7a, 0x15, 0xa9, + 0x5e, 0x80, 0xe7, 0x0e, 0x44, 0x1c, 0x18, 0x59, 0xac, 0x61, 0xe2, 0x11, 0x5b, 0x6f, 0x96, 0x1f, + 0xc5, 0x2c, 0xf4, 0x91, 0xa1, 0xe0, 0xd9, 0x8a, 0xf2, 0x32, 0x8b, 0xe7, 0x41, 0x77, 0xcc, 0xa0, + 0x09, 0x74, 0x82, 0x5c, 0xbc, 0x2a, 0x86, 0x22, 0x9e, 0xdd, 0xcf, 0x60, 0x4d, 0x7d, 0x93, 0x54, + 0xb3, 0xb6, 0x05, 0x70, 0x2a, 0x09, 0x3e, 0x8b, 0xd4, 0x3a, 0xb6, 0x3d, 0x5b, 0x51, 0x9e, 0x44, + 0xdc, 0xfd, 0x02, 0xec, 0x83, 0x54, 0x25, 0x82, 0x93, 0xdb, 0x60, 0xc7, 0xc5, 0x45, 0x6f, 0x6e, + 0x32, 0x6f, 0x8f, 0x42, 0xce, 0x9b, 0x0b, 0xb9, 0xf7, 0x61, 0xb9, 0x20, 0x17, 0xb1, 0x59, 0xe7, + 0xc5, 0xd6, 0x5a, 0x88, 0xcd, 0xfd, 0xd3, 0x82, 0xf5, 0x2a, 0x64, 0x9d, 0xbe, 0x97, 0x30, 0x2c, + 0x5d, 0xf8, 0xd3, 0x60, 0xa6, 0xb1, 0xdc, 0x36, 0xb1, 0xd4, 0xd5, 0x4a, 0x80, 0xfc, 0x69, 0x30, + 0x53, 0x25, 0x35, 0x88, 0x0d, 0xd2, 0xe8, 0x05, 0xac, 0xd6, 0x44, 0x1a, 0x96, 0xf1, 0x4d, 0x73, + 0x19, 0x57, 0x3e, 0x28, 0x4a, 0x6d, 0x73, 0x43, 0xdf, 0x83, 0x2b, 0xaa, 0xff, 0xf6, 0xca, 0xa2, + 0x2b, 0x72, 0x5f, 0xad, 0x4d, 0x6b, 0xb1, 0x36, 0xdd, 0x11, 0x38, 0x75, 0x55, 0xdd, 0x05, 0x13, + 0x58, 0x3d, 0x14, 0x81, 0x60, 0x5c, 0xb0, 0xb0, 0xfc, 0x32, 0x5c, 0x28, 0x66, 0xeb, 0x6d, 0xfb, + 0xa1, 0xde, 0x0e, 0x2b, 0xd0, 0x16, 0xa2, 0xa8, 0x33, 0x3c, 0xe2, 0x2b, 0x10, 0xd3, 0x93, 0x7e, + 0x83, 0x77, 0xe0, 0x0a, 0xeb, 0x41, 0xa4, 0x22, 0x88, 0xd5, 0xfe, 0xed, 0xc8, 0xfd, 0x6b, 0x4b, + 0x8a, 0x5c, 0xc0, 0x6a, 0x45, 0x45, 0x8a, 0xdb, 0x55, 0xdb, 0x19, 0x09, 0x92, 0xb9, 0x05, 0x20, + 0x5b, 0x4a, 0x75, 0x43, 0x4f, 0xe9, 0x22, 0x65, 0x0f, 0x09, 0x77, 0xfe, 0xea, 0xc1, 0xe0, 0x90, + 0x06, 0x67, 0x94, 0x46, 0xb8, 0xfe, 0x33, 0x32, 0x29, 0x6a, 0xab, 0xfa, 0x89, 0x4e, 0x6e, 0x2c, + 0x16, 0x51, 0xe3, 0x3f, 0xc1, 0xe8, 0xe3, 0xb7, 0x89, 0xe9, 0x67, 0xba, 0x44, 0x0e, 0xa0, 0x6f, + 0x7c, 0x03, 0x93, 
0x4d, 0x43, 0xb1, 0xf6, 0x69, 0x3f, 0xda, 0x3a, 0x87, 0x6b, 0x5a, 0x33, 0x16, + 0x9d, 0x69, 0xad, 0xbe, 0x5a, 0x4d, 0x6b, 0x4d, 0xdb, 0x51, 0x5a, 0x33, 0x96, 0x98, 0x69, 0xad, + 0xbe, 0x36, 0x4d, 0x6b, 0x4d, 0x9b, 0x4f, 0x5a, 0x33, 0x36, 0x8d, 0x69, 0xad, 0xbe, 0x11, 0x4d, + 0x6b, 0x4d, 0xeb, 0xe9, 0x12, 0xf9, 0x0e, 0x56, 0x6b, 0x3b, 0x80, 0xb8, 0x73, 0xad, 0xf3, 0x96, + 0xd7, 0x68, 0xfb, 0x42, 0x99, 0xd2, 0xfe, 0x73, 0x18, 0x98, 0xb3, 0x99, 0x18, 0x80, 0x1a, 0xb6, + 0xcb, 0xe8, 0xda, 0x79, 0x6c, 0xd3, 0xa0, 0x39, 0x76, 0x4c, 0x83, 0x0d, 0x83, 0xd7, 0x34, 0xd8, + 0x34, 0xad, 0xdc, 0x4b, 0xe4, 0x5b, 0x58, 0x59, 0x6c, 0x7f, 0x72, 0x7d, 0x31, 0x6d, 0xb5, 0xa9, + 0x32, 0x72, 0x2f, 0x12, 0x29, 0x8d, 0x3f, 0x01, 0x98, 0x77, 0x35, 0xd9, 0x98, 0xeb, 0xd4, 0xa6, + 0xca, 0x68, 0xb3, 0x99, 0x59, 0x98, 0x7a, 0x78, 0x0d, 0x56, 0xb8, 0x6a, 0xad, 0x63, 0xbe, 0x1b, + 0xc6, 0x8c, 0x26, 0xe2, 0x21, 0xc8, 0x2e, 0xfb, 0x0a, 0x7f, 0xb4, 0x8f, 0x7a, 0xf2, 0x7f, 0xfb, + 0xd3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x11, 0xc9, 0xda, 0x7e, 0x0f, 0x00, 0x00, } diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go new file mode 100644 index 000000000..600cc3ca7 --- /dev/null +++ b/weed/server/filer_grpc_server_rename.go @@ -0,0 +1,106 @@ +package weed_server + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "path/filepath" +) + +func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { + + ctx, err := fs.filer.BeginTransaction(ctx) + if err != nil { + return nil, err + } + + oldParent := filer2.FullPath(filepath.ToSlash(req.OldDirectory)) + + oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName)) + if err != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err) + } + + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName) + if moveErr != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, err) + } else { + if commitError := fs.filer.CommitTransaction(ctx); commitError != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, err) + } + } + + return &filer_pb.AtomicRenameEntryResponse{}, nil +} + +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) (error) { + if entry.IsDirectory() { + if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName); err != nil { + return err + } + } + return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName) +} + +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) (error) { + + currentDirPath := oldParent.Child(entry.Name()) + newDirPath := newParent.Child(newName) + + glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath) + + lastFileName := "" + includeLastFile := false + for { + + entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) + if err != nil { + return err + } + + println("found", len(entries), "entries under", currentDirPath) + + for _, item := 
range entries { + lastFileName = item.Name() + println("processing", lastFileName) + err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name()) + if err != nil { + return err + } + } + if len(entries) < 1024 { + break + } + } + return nil +} + +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) (error) { + + oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) + + glog.V(1).Infof("moving entry %s => %s", oldPath, newPath) + + // add to new directory + createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{ + FullPath: newPath, + Attr: entry.Attr, + Chunks: entry.Chunks, + }) + if createErr != nil { + return createErr + } + + // delete old entry + deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false) + if deleteErr != nil { + return deleteErr + } + return nil + +} From 78ac2bef3cf5083d9e79e5d1a4e6557a097a26ef Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 30 Mar 2019 23:09:16 -0700 Subject: [PATCH 112/450] go fmt --- weed/filer2/cassandra/cassandra_store.go | 6 +++--- weed/filer2/leveldb/leveldb_store.go | 6 +++--- weed/filer2/memdb/memdb_store.go | 6 +++--- weed/filer2/redis/universal_redis_store.go | 6 +++--- weed/server/filer_grpc_server_rename.go | 6 +++--- weed/shell/command_fs_ls.go | 2 +- weed/shell/command_fs_tree.go | 4 ++-- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index 72680b5e1..627de3055 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -40,13 +40,13 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er return } -func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error){ +func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) { return ctx, nil } -func (store *CassandraStore) CommitTransaction(ctx context.Context) error{ +func (store *CassandraStore) CommitTransaction(ctx context.Context) error { return nil } -func (store *CassandraStore) RollbackTransaction(ctx context.Context) error{ +func (store *CassandraStore) RollbackTransaction(ctx context.Context) error { return nil } diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 97df9163b..5b3a63959 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -46,13 +46,13 @@ func (store *LevelDBStore) initialize(dir string) (err error) { return } -func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error){ +func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error) { return ctx, nil } -func (store *LevelDBStore) CommitTransaction(ctx context.Context) error{ +func (store *LevelDBStore) CommitTransaction(ctx context.Context) error { return nil } -func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error{ +func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error { return nil } diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go index 811c87440..855a13faa 100644 --- a/weed/filer2/memdb/memdb_store.go +++ b/weed/filer2/memdb/memdb_store.go @@ -34,13 +34,13 @@ func (store *MemDbStore) Initialize(configuration util.Configuration) (err error return nil } -func (store *MemDbStore) BeginTransaction(ctx context.Context) 
(context.Context, error){ +func (store *MemDbStore) BeginTransaction(ctx context.Context) (context.Context, error) { return ctx, nil } -func (store *MemDbStore) CommitTransaction(ctx context.Context) error{ +func (store *MemDbStore) CommitTransaction(ctx context.Context) error { return nil } -func (store *MemDbStore) RollbackTransaction(ctx context.Context) error{ +func (store *MemDbStore) RollbackTransaction(ctx context.Context) error { return nil } diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index 72e9ce8b3..ce41d4d70 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -19,13 +19,13 @@ type UniversalRedisStore struct { Client redis.UniversalClient } -func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error){ +func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error) { return ctx, nil } -func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error{ +func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error { return nil } -func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error{ +func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error { return nil } diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index 600cc3ca7..5d5af506d 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -38,7 +38,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom return &filer_pb.AtomicRenameEntryResponse{}, nil } -func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) (error) { +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) error { if entry.IsDirectory() { if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName); err != nil { return err @@ -47,7 +47,7 @@ func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName) } -func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) (error) { +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) error { currentDirPath := oldParent.Child(entry.Name()) newDirPath := newParent.Child(newName) @@ -80,7 +80,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer return nil } -func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) (error) { +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) error { oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 39f356916..b94f24694 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -54,7 +54,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *commandEnv, writer io.Writer if path == "/" { dir, name = "/", 
"" } else { - dir, name = path[0 : len(path)-1], "" + dir, name = path[0:len(path)-1], "" } } diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 019616627..805b17d2a 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -39,7 +39,7 @@ func (c *commandFsTree) Do(args []string, commandEnv *commandEnv, writer io.Writ if path == "/" { dir, name = "/", "" } else { - dir, name = path[0 : len(path)-1], "" + dir, name = path[0:len(path)-1], "" } } @@ -124,7 +124,7 @@ func (p *Prefix) getPrefix(level int, isLastChild bool) string { } if isLastChild { sb.WriteString("└──") - p.removeMarker(level); + p.removeMarker(level) } else { sb.WriteString("├──") } From 8c823abe1f5d95eacb3bb6798a924ac297238fb9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 30 Mar 2019 23:22:41 -0700 Subject: [PATCH 113/450] 1.28 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index ee6125a8c..0a4980e6f 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,5 @@ package util const ( - VERSION = "1.27" + VERSION = "1.28" ) From 189c890715d5090ef24d0936112b6cf64fc96bc5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 31 Mar 2019 08:10:47 -0700 Subject: [PATCH 114/450] weed replicate: replicate atomic rename to other systems --- weed/server/filer_grpc_server_rename.go | 35 ++++++++++++++++++------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index 5d5af506d..c0edd559b 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -24,7 +24,8 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err) } - moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName) + var events MoveEvents + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events) if moveErr != nil { fs.filer.RollbackTransaction(ctx) return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, err) @@ -35,19 +36,26 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom } } + for _, entry := range events.newEntries { + fs.filer.NotifyUpdateEvent(nil, entry, false) + } + for _, entry := range events.oldEntries { + fs.filer.NotifyUpdateEvent(entry, nil, false) + } + return &filer_pb.AtomicRenameEntryResponse{}, nil } -func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) error { +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { if entry.IsDirectory() { - if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName); err != nil { + if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil { return err } } - return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName) + return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events) } -func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) error { +func (fs 
*FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { currentDirPath := oldParent.Child(entry.Name()) newDirPath := newParent.Child(newName) @@ -68,7 +76,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer for _, item := range entries { lastFileName = item.Name() println("processing", lastFileName) - err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name()) + err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events) if err != nil { return err } @@ -80,18 +88,19 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer return nil } -func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string) error { +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) (error) { oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) glog.V(1).Infof("moving entry %s => %s", oldPath, newPath) // add to new directory - createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{ + newEntry := &filer2.Entry{ FullPath: newPath, Attr: entry.Attr, Chunks: entry.Chunks, - }) + } + createErr := fs.filer.CreateEntry(ctx, newEntry) if createErr != nil { return createErr } @@ -101,6 +110,14 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP if deleteErr != nil { return deleteErr } + + events.oldEntries = append(events.oldEntries, entry) + events.newEntries = append(events.newEntries, newEntry) return nil } + +type MoveEvents struct { + oldEntries []*filer2.Entry + newEntries []*filer2.Entry +} From c5a3ff1c3599763ff9e0aaa860fb2d71a8ece55c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 31 Mar 2019 11:10:19 -0700 Subject: [PATCH 115/450] weed s3: multipart upload goes to the right bucket fix https://github.com/chrislusf/seaweedfs/issues/908 --- weed/s3api/s3api_object_multipart_handlers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 6643cb105..47f83a264 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -200,8 +200,8 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ dataReader = newSignV4ChunkedReader(r) } - uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part", - s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1) + uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) From b4c276263fe1073f14d42fe49980f3aaa5c762f5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 1 Apr 2019 11:03:04 -0700 Subject: [PATCH 116/450] weed filer: mysql/postgres use ReadCommitted transaction isolation level for atomic rename --- weed/filer2/abstract_sql/abstract_sql_store.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 9a3ee51c3..3e8554957 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -26,7 +26,10 @@ type 
TxOrDB interface { } func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { - tx, err := store.DB.BeginTx(ctx, nil) + tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{ + Isolation: sql.LevelReadCommitted, + ReadOnly: false, + }) if err != nil { return ctx, err } From 2a52e70a41cd1a196a43fe196960d9ed9d25f43d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 1 Apr 2019 12:37:54 -0700 Subject: [PATCH 117/450] weed filer: increase default mysql/postgres dir or name length to 65535 --- weed/command/scaffold.go | 10 +++++----- weed/filer2/postgres/README.txt | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index d72bd6f2f..b21641f6b 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -72,9 +72,9 @@ dir = "." # directory to store level db files [mysql] # CREATE TABLE IF NOT EXISTS filemeta ( -# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', -# name VARCHAR(1000) COMMENT 'directory or file name', -# directory VARCHAR(4096) COMMENT 'full path to parent directory', +# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', +# name VARCHAR(65535) COMMENT 'directory or file name', +# directory VARCHAR(65535) COMMENT 'full path to parent directory', # meta BLOB, # PRIMARY KEY (dirhash, name) # ) DEFAULT CHARSET=utf8; @@ -90,8 +90,8 @@ connection_max_open = 100 [postgres] # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT, -# name VARCHAR(1000), -# directory VARCHAR(4096), +# name VARCHAR(65535), +# directory VARCHAR(65535), # meta bytea, # PRIMARY KEY (dirhash, name) # ); diff --git a/weed/filer2/postgres/README.txt b/weed/filer2/postgres/README.txt index ef2ef683b..cb0c99c63 100644 --- a/weed/filer2/postgres/README.txt +++ b/weed/filer2/postgres/README.txt @@ -9,8 +9,8 @@ $PGHOME/bin/psql --username=postgres --password seaweedfs CREATE TABLE IF NOT EXISTS filemeta ( dirhash BIGINT, - name VARCHAR(1000), - directory VARCHAR(4096), + name VARCHAR(65535), + directory VARCHAR(65535), meta bytea, PRIMARY KEY (dirhash, name) ); From 78b9db34d5a3b491221a8ad6f4fd7eab0498e3f9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 1 Apr 2019 16:13:29 -0700 Subject: [PATCH 118/450] weed s3: generate time format acceptable by aws cli s3 fix https://github.com/chrislusf/seaweedfs/issues/911 --- weed/s3api/custom_types.go | 3 +++ weed/s3api/s3api_objects_list_handlers.go | 6 +++--- weed/s3api/s3api_xsd_generated.go | 4 ++-- 3 files changed, 8 insertions(+), 5 deletions(-) create mode 100644 weed/s3api/custom_types.go diff --git a/weed/s3api/custom_types.go b/weed/s3api/custom_types.go new file mode 100644 index 000000000..569dfc3ac --- /dev/null +++ b/weed/s3api/custom_types.go @@ -0,0 +1,3 @@ +package s3api + +const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00" diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index a685802d2..fd20ebe3c 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -131,8 +131,8 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr ETag: "\"" + filer2.ETag(entry.Chunks) + "\"", Size: int64(filer2.TotalSize(entry.Chunks)), Owner: CanonicalUser{ - ID: "bcaf161ca5fb16fd081034f", - DisplayName: "webfile", + ID: fmt.Sprintf("%x", entry.Attributes.Uid), + DisplayName: entry.Attributes.UserName, }, StorageClass: "STANDARD", }) @@ -151,7 +151,7 @@ func (s3a *S3ApiServer) 
listFilerEntries(ctx context.Context, bucket, originalPr CommonPrefixes: commonPrefixes, } - glog.V(4).Infof("read directory: %v, found: %v", request, counter) + glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, response) return nil }) diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go index e678ecf0d..573c09ede 100644 --- a/weed/s3api/s3api_xsd_generated.go +++ b/weed/s3api/s3api_xsd_generated.go @@ -966,10 +966,10 @@ func (b xsdBase64Binary) MarshalText() ([]byte, error) { type xsdDateTime time.Time func (t *xsdDateTime) UnmarshalText(text []byte) error { - return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999") + return _unmarshalTime(text, (*time.Time)(t), s3TimeFormat) } func (t xsdDateTime) MarshalText() ([]byte, error) { - return []byte((time.Time)(t).Format("2006-01-02T15:04:05.999999999")), nil + return []byte((time.Time)(t).Format(s3TimeFormat)), nil } func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if (time.Time)(t).IsZero() { From beaa2bd71aea9df7fd6d92c0a7da7d97015be7ef Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 1 Apr 2019 17:03:04 -0700 Subject: [PATCH 119/450] weed filer: properly delete cached directory entry fix https://github.com/chrislusf/seaweedfs/issues/910 --- weed/filer2/filer.go | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 06c26abb4..faf9b9bca 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -217,29 +217,32 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecurs if err != nil { return fmt.Errorf("list folder %s: %v", p, err) } + if len(entries) == 0 { break - } else { - if isRecursive { - for _, sub := range entries { - lastFileName = sub.Name() - f.DeleteEntryMetaAndData(ctx, sub.FullPath, isRecursive, shouldDeleteChunks) - limit-- - if limit <= 0 { - break - } + } + + if isRecursive { + for _, sub := range entries { + lastFileName = sub.Name() + err = f.DeleteEntryMetaAndData(ctx, sub.FullPath, isRecursive, shouldDeleteChunks) + if err != nil { + return err } - } else { - if len(entries) > 0 { - return fmt.Errorf("folder %s is not empty", p) + limit-- + if limit <= 0 { + break } } - f.cacheDelDirectory(string(p)) - if len(entries) < 1024 { - break - } + } + + if len(entries) < 1024 { + break } } + + f.cacheDelDirectory(string(p)) + } if shouldDeleteChunks { @@ -264,6 +267,11 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileN } func (f *Filer) cacheDelDirectory(dirpath string) { + + if dirpath == "/" { + return + } + if f.directoryCache == nil { return } @@ -272,6 +280,7 @@ func (f *Filer) cacheDelDirectory(dirpath string) { } func (f *Filer) cacheGetDirectory(dirpath string) *Entry { + if f.directoryCache == nil { return nil } From 20dcb44077bcb8164b8351ee506af8385e8fd6ef Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 1 Apr 2019 23:59:31 -0700 Subject: [PATCH 120/450] fix tests --- weed/s3api/s3api_objects_list_handlers_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/s3api/s3api_objects_list_handlers_test.go b/weed/s3api/s3api_objects_list_handlers_test.go index 9feb25920..7b87b32fb 100644 --- a/weed/s3api/s3api_objects_list_handlers_test.go +++ b/weed/s3api/s3api_objects_list_handlers_test.go @@ -10,7 +10,7 @@ func TestListObjectsHandler(t *testing.T) { // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html 
expected := ` -test_container1000false1.zip"4397da7a7649e8085de9916c240e8166"123456765a011niqo39cdf8ec533ec3d1ccaafsa932STANDARD2011-04-09T12:34:49` +test_container1000false1.zip"4397da7a7649e8085de9916c240e8166"123456765a011niqo39cdf8ec533ec3d1ccaafsa932STANDARD2011-04-09T12:34:49Z` response := ListBucketResult{ Name: "test_container", From 715a38da1e4fce05631f230ccf09ce92c99a4fd4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 3 Apr 2019 00:20:00 -0700 Subject: [PATCH 121/450] weed shell: add fs.cd, fs.pwd to change to a directory and print current directory --- weed/command/shell.go | 4 ++ weed/shell/command_fs_cd.go | 99 ++++++++++++++++++++++++++++++++++++ weed/shell/command_fs_du.go | 25 +-------- weed/shell/command_fs_ls.go | 16 +++++- weed/shell/command_fs_pwd.go | 32 ++++++++++++ weed/shell/commands.go | 38 ++++++++++++++ 6 files changed, 189 insertions(+), 25 deletions(-) create mode 100644 weed/shell/command_fs_cd.go create mode 100644 weed/shell/command_fs_pwd.go diff --git a/weed/command/shell.go b/weed/command/shell.go index 1c3ce5f10..3216d5d48 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -31,6 +31,10 @@ func runShell(command *Command, args []string) bool { weed_server.LoadConfiguration("security", false) shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + shellOptions.FilerHost = "localhost" + shellOptions.FilerPort = 8888 + shellOptions.Directory = "" + shell.RunShell(shellOptions) return true diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go new file mode 100644 index 000000000..13208a3f8 --- /dev/null +++ b/weed/shell/command_fs_cd.go @@ -0,0 +1,99 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "io" + "strings" +) + +func init() { + commands = append(commands, &commandFsCd{}) +} + +type commandFsCd struct { +} + +func (c *commandFsCd) Name() string { + return "fs.cd" +} + +func (c *commandFsCd) Help() string { + return `change directory to http://:/dir/ + + The full path can be too long to type. 
For example, + fs.ls http://:/some/path/to/file_name + + can be simplified as + + fs.cd http://:/some/path + fs.ls to/file_name +` +} + +func (c *commandFsCd) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + input := "" + if len(args) > 0 { + input = args[len(args)-1] + } + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) + if err != nil { + return err + } + + dir, name := filer2.FullPath(path).DirAndName() + if strings.HasSuffix(path, "/") { + if path == "/" { + dir, name = "/", "" + } else { + dir, name = filer2.FullPath(path[0:len(path)-1]).DirAndName() + } + } + + ctx := context.Background() + + err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: name, + StartFromFileName: name, + InclusiveStartFrom: true, + Limit: 1, + }) + if listErr != nil { + return listErr + } + + if path == "" || path == "/" { + return nil + } + + if len(resp.Entries) == 0 { + return fmt.Errorf("entry not found") + } + + if resp.Entries[0].Name != name { + println("path", path, "dir", dir, "name", name, "found", resp.Entries[0].Name) + return fmt.Errorf("not a valid directory, found %s", resp.Entries[0].Name) + } + + if !resp.Entries[0].IsDirectory { + return fmt.Errorf("not a directory") + } + + return nil + }) + + if err == nil { + commandEnv.option.FilerHost = filerServer + commandEnv.option.FilerPort = filerPort + commandEnv.option.Directory = path + } + + return err +} diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 3fecac9a8..98e2eebd1 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -8,8 +8,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" "io" - "net/url" - "strconv" "strings" ) @@ -35,7 +33,7 @@ func (c *commandFsDu) Help() string { func (c *commandFsDu) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := parseFilerUrl(args[0]) + filerServer, filerPort, path, err := commandEnv.parseUrl(args[0]) if err != nil { return err } @@ -45,7 +43,7 @@ func (c *commandFsDu) Do(args []string, commandEnv *commandEnv, writer io.Writer if path == "/" { dir, name = "/", "" } else { - dir, name = path[0:len(path)-1], "" + dir, name = path[0 : len(path)-1], "" } } @@ -112,25 +110,6 @@ func paginateDirectory(ctx context.Context, writer io.Writer, client filer_pb.Se } -func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { - if strings.HasPrefix(entryPath, "http") { - var u *url.URL - u, err = url.Parse(entryPath) - if err != nil { - return - } - filerServer = u.Hostname() - portString := u.Port() - if portString != "" { - filerPort, err = strconv.ParseInt(portString, 10, 32) - } - path = u.Path - } else { - err = fmt.Errorf("path should have full url http://:/path/to/dirOrFile : %s", entryPath) - } - return -} - func (env *commandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index b94f24694..7b8d1d0cc 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -44,17 +44,29 @@ func (c *commandFsLs) Do(args []string, commandEnv *commandEnv, writer io.Writer } } - filerServer, filerPort, 
path, err := parseFilerUrl(args[len(args)-1]) + input := "" + if len(args) > 0 { + input = args[len(args)-1] + if strings.HasPrefix(input, "-") { + input = "" + } + } + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) if err != nil { return err } + if input == "" && !strings.HasSuffix(path, "/") { + path = path + "/" + } dir, name := filer2.FullPath(path).DirAndName() + // println("path", path, "dir", dir, "name", name) if strings.HasSuffix(path, "/") { if path == "/" { dir, name = "/", "" } else { - dir, name = path[0:len(path)-1], "" + dir, name = path[0 : len(path)-1], "" } } diff --git a/weed/shell/command_fs_pwd.go b/weed/shell/command_fs_pwd.go new file mode 100644 index 000000000..0b0a7f176 --- /dev/null +++ b/weed/shell/command_fs_pwd.go @@ -0,0 +1,32 @@ +package shell + +import ( + "fmt" + "io" +) + +func init() { + commands = append(commands, &commandFsPwd{}) +} + +type commandFsPwd struct { +} + +func (c *commandFsPwd) Name() string { + return "fs.pwd" +} + +func (c *commandFsPwd) Help() string { + return `print out current directory` +} + +func (c *commandFsPwd) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + fmt.Fprintf(writer, "http://%s:%d%s\n", + commandEnv.option.FilerHost, + commandEnv.option.FilerPort, + commandEnv.option.Directory, + ) + + return nil +} diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 280900c80..2a262d913 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -1,14 +1,23 @@ package shell import ( + "fmt" "github.com/chrislusf/seaweedfs/weed/wdclient" "google.golang.org/grpc" "io" + "net/url" + "path/filepath" + "strconv" + "strings" ) type ShellOptions struct { Masters *string GrpcDialOption grpc.DialOption + // shell transient context + FilerHost string + FilerPort int64 + Directory string } type commandEnv struct { @@ -26,3 +35,32 @@ type command interface { var ( commands = []command{} ) + +func (ce *commandEnv) parseUrl(input string) (filerServer string, filerPort int64, path string, err error) { + if strings.HasPrefix(input, "http") { + return parseFilerUrl(input) + } + if !strings.HasPrefix(input, "/") { + input = filepath.ToSlash(filepath.Join(ce.option.Directory, input)) + } + return ce.option.FilerHost, ce.option.FilerPort, input, err +} + +func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { + if strings.HasPrefix(entryPath, "http") { + var u *url.URL + u, err = url.Parse(entryPath) + if err != nil { + return + } + filerServer = u.Hostname() + portString := u.Port() + if portString != "" { + filerPort, err = strconv.ParseInt(portString, 10, 32) + } + path = u.Path + } else { + err = fmt.Errorf("path should have full url http://:/path/to/dirOrFile : %s", entryPath) + } + return +} From c0c9a8bad51d448042a1251c5daa73aecf79109a Mon Sep 17 00:00:00 2001 From: Jonathan Amsterdam Date: Thu, 4 Apr 2019 17:22:45 -0400 Subject: [PATCH 122/450] replication: add GoCDK PubSub support --- .../sub/notification_gocdk_pub_sub.go | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 weed/replication/sub/notification_gocdk_pub_sub.go diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go new file mode 100644 index 000000000..c8b16e308 --- /dev/null +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -0,0 +1,50 @@ +package sub + +import ( + "context" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" 
+ "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + _ "gocloud.dev/pubsub/azuresb" + _ "gocloud.dev/pubsub/gcppubsub" + _ "gocloud.dev/pubsub/natspubsub" + _ "gocloud.dev/pubsub/rabbitpubsub" +) + +func init() { + NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{}) +} + +type GoCDKPubSubInput struct { + sub *pubsub.Subscription +} + +func (k *GoCDKPubSubInput) GetName() string { + return "gocdk_pub_sub" +} + +func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error { + subURL := config.GetString("sub_url") + glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", subURL) + sub, err := pubsub.OpenSubscription(context.Background(), subURL) + if err != nil { + return err + } + k.sub = sub + return nil +} + +func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { + msg, err := k.sub.Receive(context.Background()) + key = msg.Metadata["key"] + message = &filer_pb.EventNotification{} + err = proto.Unmarshal(msg.Body, message) + if err != nil { + return "", nil, err + } + return key, message, nil +} From 766396d249652c1b29771fa1fce65250f9707d1d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 4 Apr 2019 19:27:00 -0700 Subject: [PATCH 123/450] weed master: atomic volume counting possible fix for https://github.com/chrislusf/seaweedfs/issues/913 --- weed/server/master_grpc_server.go | 2 +- weed/server/master_server_handlers_admin.go | 4 +- weed/topology/node.go | 45 +++++++++++---------- weed/topology/rack.go | 2 +- weed/topology/volume_growth.go | 8 ++-- 5 files changed, 31 insertions(+), 30 deletions(-) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 4c8ff5700..4ae2db030 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -63,7 +63,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ rack := dc.GetOrCreateRack(rackName) dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), heartbeat.PublicUrl, - int(heartbeat.MaxVolumeCount)) + int64(heartbeat.MaxVolumeCount)) glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) if err := stream.Send(&master_pb.HeartbeatResponse{ VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024, diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 95e55a497..4f0195084 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -68,8 +68,8 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request } if err == nil { if count, err = strconv.Atoi(r.FormValue("count")); err == nil { - if ms.Topo.FreeSpace() < count*option.ReplicaPlacement.GetCopyCount() { - err = errors.New("Only " + strconv.Itoa(ms.Topo.FreeSpace()) + " volumes left! 
Not enough for " + strconv.Itoa(count*option.ReplicaPlacement.GetCopyCount())) + if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) { + err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount()) } else { count, err = ms.vg.GrowByCountAndType(ms.grpcDialOpiton, count, option, ms.Topo) } diff --git a/weed/topology/node.go b/weed/topology/node.go index b7d2f79ec..db70c9734 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -5,6 +5,7 @@ import ( "math/rand" "strings" "sync" + "sync/atomic" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" @@ -14,16 +15,16 @@ type NodeId string type Node interface { Id() NodeId String() string - FreeSpace() int - ReserveOneVolume(r int) (*DataNode, error) - UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int) - UpAdjustVolumeCountDelta(volumeCountDelta int) - UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int) + FreeSpace() int64 + ReserveOneVolume(r int64) (*DataNode, error) + UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) + UpAdjustVolumeCountDelta(volumeCountDelta int64) + UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) UpAdjustMaxVolumeId(vid storage.VolumeId) - GetVolumeCount() int - GetActiveVolumeCount() int - GetMaxVolumeCount() int + GetVolumeCount() int64 + GetActiveVolumeCount() int64 + GetMaxVolumeCount() int64 GetMaxVolumeId() storage.VolumeId SetParent(Node) LinkChildNode(node Node) @@ -40,9 +41,9 @@ type Node interface { } type NodeImpl struct { id NodeId - volumeCount int - activeVolumeCount int - maxVolumeCount int + volumeCount int64 + activeVolumeCount int64 + maxVolumeCount int64 parent Node sync.RWMutex // lock children children map[NodeId]Node @@ -126,7 +127,7 @@ func (n *NodeImpl) String() string { func (n *NodeImpl) Id() NodeId { return n.id } -func (n *NodeImpl) FreeSpace() int { +func (n *NodeImpl) FreeSpace() int64 { return n.maxVolumeCount - n.volumeCount } func (n *NodeImpl) SetParent(node Node) { @@ -146,7 +147,7 @@ func (n *NodeImpl) Parent() Node { func (n *NodeImpl) GetValue() interface{} { return n.value } -func (n *NodeImpl) ReserveOneVolume(r int) (assignedNode *DataNode, err error) { +func (n *NodeImpl) ReserveOneVolume(r int64) (assignedNode *DataNode, err error) { n.RLock() defer n.RUnlock() for _, node := range n.children { @@ -171,20 +172,20 @@ func (n *NodeImpl) ReserveOneVolume(r int) (assignedNode *DataNode, err error) { return nil, errors.New("No free volume slot found!") } -func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int) { //can be negative - n.maxVolumeCount += maxVolumeCountDelta +func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative + atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta) if n.parent != nil { n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta) } } -func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int) { //can be negative - n.volumeCount += volumeCountDelta +func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative + atomic.AddInt64(&n.volumeCount, volumeCountDelta) if n.parent != nil { n.parent.UpAdjustVolumeCountDelta(volumeCountDelta) } } -func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int) { //can be negative - n.activeVolumeCount += activeVolumeCountDelta +func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) { //can be negative + 
atomic.AddInt64(&n.activeVolumeCount, activeVolumeCountDelta) if n.parent != nil { n.parent.UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta) } @@ -200,13 +201,13 @@ func (n *NodeImpl) UpAdjustMaxVolumeId(vid storage.VolumeId) { //can be negative func (n *NodeImpl) GetMaxVolumeId() storage.VolumeId { return n.maxVolumeId } -func (n *NodeImpl) GetVolumeCount() int { +func (n *NodeImpl) GetVolumeCount() int64 { return n.volumeCount } -func (n *NodeImpl) GetActiveVolumeCount() int { +func (n *NodeImpl) GetActiveVolumeCount() int64 { return n.activeVolumeCount } -func (n *NodeImpl) GetMaxVolumeCount() int { +func (n *NodeImpl) GetMaxVolumeCount() int64 { return n.maxVolumeCount } diff --git a/weed/topology/rack.go b/weed/topology/rack.go index f8f8ce34a..932c1a804 100644 --- a/weed/topology/rack.go +++ b/weed/topology/rack.go @@ -28,7 +28,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode { } return nil } -func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int) *DataNode { +func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64) *DataNode { for _, c := range r.Children() { dn := c.(*DataNode) if dn.MatchLocation(ip, port) { diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index ef39a1c01..514033ca1 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -105,7 +105,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum if len(node.Children()) < rp.DiffRackCount+1 { return fmt.Errorf("Only has %d racks, not enough for %d.", len(node.Children()), rp.DiffRackCount+1) } - if node.FreeSpace() < rp.DiffRackCount+rp.SameRackCount+1 { + if node.FreeSpace() < int64(rp.DiffRackCount+rp.SameRackCount+1) { return fmt.Errorf("Free:%d < Expected:%d", node.FreeSpace(), rp.DiffRackCount+rp.SameRackCount+1) } possibleRacksCount := 0 @@ -134,7 +134,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) { return fmt.Errorf("Not matching preferred rack:%s", option.Rack) } - if node.FreeSpace() < rp.SameRackCount+1 { + if node.FreeSpace() < int64(rp.SameRackCount+1) { return fmt.Errorf("Free:%d < Expected:%d", node.FreeSpace(), rp.SameRackCount+1) } if len(node.Children()) < rp.SameRackCount+1 { @@ -175,7 +175,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum servers = append(servers, server.(*DataNode)) } for _, rack := range otherRacks { - r := rand.Intn(rack.FreeSpace()) + r := rand.Int63n(rack.FreeSpace()) if server, e := rack.ReserveOneVolume(r); e == nil { servers = append(servers, server) } else { @@ -183,7 +183,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum } } for _, datacenter := range otherDataCenters { - r := rand.Intn(datacenter.FreeSpace()) + r := rand.Int63n(datacenter.FreeSpace()) if server, e := datacenter.ReserveOneVolume(r); e == nil { servers = append(servers, server) } else { From af37b374cbe915f033058abad98d648ccce70d5c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 4 Apr 2019 19:27:51 -0700 Subject: [PATCH 124/450] weed shell: fs.cd change current directory --- weed/command/shell.go | 2 +- weed/shell/command_fs_cd.go | 54 +++++------------------------------ weed/shell/command_fs_du.go | 16 ++++------- weed/shell/command_fs_ls.go | 38 ++++++++++-------------- weed/shell/command_fs_tree.go | 9 +----- weed/shell/commands.go | 54 
+++++++++++++++++++++++++++++++++++ 6 files changed, 84 insertions(+), 89 deletions(-) diff --git a/weed/command/shell.go b/weed/command/shell.go index 3216d5d48..95b62f0b5 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -33,7 +33,7 @@ func runShell(command *Command, args []string) bool { shellOptions.FilerHost = "localhost" shellOptions.FilerPort = 8888 - shellOptions.Directory = "" + shellOptions.Directory = "/" shell.RunShell(shellOptions) diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go index 13208a3f8..f14350f02 100644 --- a/weed/shell/command_fs_cd.go +++ b/weed/shell/command_fs_cd.go @@ -2,11 +2,7 @@ package shell import ( "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" - "strings" ) func init() { @@ -35,59 +31,23 @@ func (c *commandFsCd) Help() string { func (c *commandFsCd) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { - input := "" - if len(args) > 0 { - input = args[len(args)-1] - } + input := findInputDirectory(args) filerServer, filerPort, path, err := commandEnv.parseUrl(input) if err != nil { return err } - dir, name := filer2.FullPath(path).DirAndName() - if strings.HasSuffix(path, "/") { - if path == "/" { - dir, name = "/", "" - } else { - dir, name = filer2.FullPath(path[0:len(path)-1]).DirAndName() - } + if path == "/" { + commandEnv.option.FilerHost = filerServer + commandEnv.option.FilerPort = filerPort + commandEnv.option.Directory = "/" + return nil } ctx := context.Background() - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - - resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: name, - StartFromFileName: name, - InclusiveStartFrom: true, - Limit: 1, - }) - if listErr != nil { - return listErr - } - - if path == "" || path == "/" { - return nil - } - - if len(resp.Entries) == 0 { - return fmt.Errorf("entry not found") - } - - if resp.Entries[0].Name != name { - println("path", path, "dir", dir, "name", name, "found", resp.Entries[0].Name) - return fmt.Errorf("not a valid directory, found %s", resp.Entries[0].Name) - } - - if !resp.Entries[0].IsDirectory { - return fmt.Errorf("not a directory") - } - - return nil - }) + err = commandEnv.checkDirectory(ctx, filerServer, filerPort, path) if err == nil { commandEnv.option.FilerHost = filerServer diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 98e2eebd1..f305cabdc 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -8,7 +8,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" "io" - "strings" ) func init() { @@ -33,21 +32,18 @@ func (c *commandFsDu) Help() string { func (c *commandFsDu) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := commandEnv.parseUrl(args[0]) + filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - dir, name := filer2.FullPath(path).DirAndName() - if strings.HasSuffix(path, "/") { - if path == "/" { - dir, name = "/", "" - } else { - dir, name = path[0 : len(path)-1], "" - } + ctx := context.Background() + + if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + path = path + "/" } - ctx := context.Background() + dir, name := filer2.FullPath(path).DirAndName() return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client 
filer_pb.SeaweedFilerClient) error { diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 7b8d1d0cc..93b86fa9f 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -36,41 +36,33 @@ func (c *commandFsLs) Do(args []string, commandEnv *commandEnv, writer io.Writer var isLongFormat, showHidden bool for _, arg := range args { - switch arg { - case "-a": - showHidden = true - case "-l": - isLongFormat = true + if !strings.HasPrefix(arg, "-") { + break + } + for _, t := range arg { + switch t { + case 'a': + showHidden = true + case 'l': + isLongFormat = true + } } } - input := "" - if len(args) > 0 { - input = args[len(args)-1] - if strings.HasPrefix(input, "-") { - input = "" - } - } + input := findInputDirectory(args) filerServer, filerPort, path, err := commandEnv.parseUrl(input) if err != nil { return err } - if input == "" && !strings.HasSuffix(path, "/") { + + ctx := context.Background() + + if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { path = path + "/" } dir, name := filer2.FullPath(path).DirAndName() - // println("path", path, "dir", dir, "name", name) - if strings.HasSuffix(path, "/") { - if path == "/" { - dir, name = "/", "" - } else { - dir, name = path[0 : len(path)-1], "" - } - } - - ctx := context.Background() return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 805b17d2a..5bc3c57b4 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -29,19 +29,12 @@ func (c *commandFsTree) Help() string { func (c *commandFsTree) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := parseFilerUrl(args[len(args)-1]) + filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } dir, name := filer2.FullPath(path).DirAndName() - if strings.HasSuffix(path, "/") { - if path == "/" { - dir, name = "/", "" - } else { - dir, name = path[0:len(path)-1], "" - } - } ctx := context.Background() diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 2a262d913..50b70498d 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -1,7 +1,10 @@ package shell import ( + "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/wdclient" "google.golang.org/grpc" "io" @@ -46,6 +49,46 @@ func (ce *commandEnv) parseUrl(input string) (filerServer string, filerPort int6 return ce.option.FilerHost, ce.option.FilerPort, input, err } +func (ce *commandEnv) isDirectory(ctx context.Context, filerServer string, filerPort int64, path string) bool { + + return ce.checkDirectory(ctx,filerServer,filerPort,path) == nil + +} + +func (ce *commandEnv) checkDirectory(ctx context.Context, filerServer string, filerPort int64, path string) error { + + dir, name := filer2.FullPath(path).DirAndName() + + return ce.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: name, + StartFromFileName: name, + InclusiveStartFrom: true, + Limit: 1, + }) + if listErr != nil { + return listErr + } + + if len(resp.Entries) == 0 { + return fmt.Errorf("entry not found") + } + + if resp.Entries[0].Name != name { + return fmt.Errorf("not a valid directory, found 
%s", resp.Entries[0].Name) + } + + if !resp.Entries[0].IsDirectory { + return fmt.Errorf("not a directory") + } + + return nil + }) + +} + func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { if strings.HasPrefix(entryPath, "http") { var u *url.URL @@ -64,3 +107,14 @@ func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path } return } + +func findInputDirectory(args []string) (input string) { + input = "." + if len(args) > 0 { + input = args[len(args)-1] + if strings.HasPrefix(input, "-") { + input = "." + } + } + return input +} From 9cc73f4a9a953665c0ffa3cb85755381d98956e0 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 4 Apr 2019 19:34:15 -0700 Subject: [PATCH 125/450] fix test --- weed/topology/topology_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/weed/topology/topology_test.go b/weed/topology/topology_test.go index 07dc9c67b..a8bdec902 100644 --- a/weed/topology/topology_test.go +++ b/weed/topology/topology_test.go @@ -47,8 +47,8 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { topo.SyncDataNodeRegistration(volumeMessages, dn) - assert(t, "activeVolumeCount1", topo.activeVolumeCount, volumeCount) - assert(t, "volumeCount", topo.volumeCount, volumeCount) + assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount) + assert(t, "volumeCount", int(topo.volumeCount), volumeCount) } { @@ -71,13 +71,13 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) { } topo.SyncDataNodeRegistration(volumeMessages, dn) - assert(t, "activeVolumeCount1", topo.activeVolumeCount, volumeCount) - assert(t, "volumeCount", topo.volumeCount, volumeCount) + assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount) + assert(t, "volumeCount", int(topo.volumeCount), volumeCount) } topo.UnRegisterDataNode(dn) - assert(t, "activeVolumeCount2", topo.activeVolumeCount, 0) + assert(t, "activeVolumeCount2", int(topo.activeVolumeCount), 0) } From 0bd7ced7c248d76c0e0c87728ebca5308e7d51c6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 4 Apr 2019 20:11:59 -0700 Subject: [PATCH 126/450] fix test --- weed/topology/volume_growth_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index f983df1ec..1963cb928 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -101,7 +101,7 @@ func setup(topologyLayout string) *Topology { Version: storage.CurrentVersion} server.AddOrUpdateVolume(vi) } - server.UpAdjustMaxVolumeCountDelta(int(serverMap["limit"].(float64))) + server.UpAdjustMaxVolumeCountDelta(int64(serverMap["limit"].(float64))) } } } From c94823f52c7dbcbc2f9ffc887bcafe0f29ed572d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 4 Apr 2019 20:28:40 -0700 Subject: [PATCH 127/450] set default http idle connection per host possible fix https://github.com/chrislusf/seaweedfs/issues/915 --- weed/util/grpc_client_server.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index e5993aeab..5c08538dc 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -3,6 +3,7 @@ package util import ( "context" "fmt" + "net/http" "strconv" "strings" "sync" @@ -18,6 +19,10 @@ var ( grpcClientsLock sync.Mutex ) +func init(){ + http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 100 +} + func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { var 
options []grpc.ServerOption
	options = append(options, grpc.KeepaliveParams(keepalive.ServerParameters{

From 4f714cef22cc738d550a338c8009a76db8d55cfc Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Thu, 4 Apr 2019 23:14:32 -0700
Subject: [PATCH 128/450] default weed filer.copy and weed upload chunk size to 32MB

---
 weed/command/filer_copy.go | 2 +-
 weed/command/upload.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index dd763974c..6750617db 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -47,7 +47,7 @@ func init() {
 	copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
 	copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
 	copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
-	copy.maxMB = cmdCopy.Flag.Int("maxMB", 0, "split files larger than the limit")
+	copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
 	copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000")
 }
diff --git a/weed/command/upload.go b/weed/command/upload.go
index 80fc635c1..1271725ba 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -37,7 +37,7 @@ func init() {
 	upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
 	upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
 	upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
-	upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit")
+	upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
 }
 var cmdUpload = &Command{

From 921231268b182620aa004fb49c7e318225b0b3bd Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Fri, 5 Apr 2019 00:04:00 -0700
Subject: [PATCH 129/450] weed filer.copy: skip files that can not be opened such as socket

---
 weed/command/filer_copy.go | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 6750617db..bd4c7d166 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -1,25 +1,24 @@
 package command
 import (
+	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"github.com/spf13/viper"
 	"google.golang.org/grpc"
+	"io"
 	"io/ioutil"
+	"net/http"
 	"net/url"
 	"os"
 	"path/filepath"
-	"strings"
-
-	"context"
-	"github.com/chrislusf/seaweedfs/weed/operation"
-	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	"github.com/chrislusf/seaweedfs/weed/util"
-	"io"
-	"net/http"
 	"strconv"
+	"strings"
 	"time"
 )
@@ -123,6 +122,10 @@ func doEachCopy(ctx context.Context, fileOrDir string, filerAddress, filerGrpcAd
 	f, err := os.Open(fileOrDir)
 	if err != nil {
 		fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err)
+		if _, ok := err.(*os.PathError); ok {
+			fmt.Printf("skipping %s\n", fileOrDir)
+			return true
+		}
 		return false
 	}
 	defer f.Close()

From 300b39b5af583533abf60b763ebcd421ed0fc690 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Fri, 5 Apr 2019 01:09:06 -0700
Subject: [PATCH 130/450] weed filer.copy: use cached filer grpc connection

another attempt for https://github.com/chrislusf/seaweedfs/issues/915
---
 weed/command/filer_copy.go | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index bd4c7d166..b34ab55d1 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -343,13 +343,9 @@ func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
-	grpcConnection, err := util.GrpcDial(ctx, filerAddress, grpcDialOption)
-	if err != nil {
-		return fmt.Errorf("fail to dial %s: %v", filerAddress, err)
-	}
-	defer grpcConnection.Close()
+	return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error {
+		client := filer_pb.NewSeaweedFilerClient(clientConn)
+		return fn(client)
+	}, filerAddress, grpcDialOption)
-	client := filer_pb.NewSeaweedFilerClient(grpcConnection)
-
-	return fn(client)
 }

From a6a5d804014f5bbcf339fd45a94141f0f8c06232 Mon Sep 17 00:00:00 2001
From: Jonathan Amsterdam
Date: Fri, 5 Apr 2019 08:13:32 -0400
Subject: [PATCH 131/450] fix typo

---
 weed/replication/sub/notification_gocdk_pub_sub.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go
index c8b16e308..9c76e6918 100644
--- a/weed/replication/sub/notification_gocdk_pub_sub.go
+++ b/weed/replication/sub/notification_gocdk_pub_sub.go
@@ -29,7 +29,7 @@ func (k *GoCDKPubSubInput) GetName() string {
 func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error {
 	subURL := config.GetString("sub_url")
-	glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", subURL)
+	glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
 	sub, err := pubsub.OpenSubscription(context.Background(), subURL)
 	if err != nil {
 		return err

From 72920efc20fd758048f21a82d621a6d7bbc08066 Mon Sep 17 00:00:00 2001
From: Jonathan Amsterdam
Date: Fri, 5 Apr 2019 13:43:38 -0400
Subject: [PATCH 132/450] added entry to scaffold

---
 weed/command/scaffold.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index b21641f6b..106c2dace 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -190,6 +190,7 @@ enabled = false
 # The exchange must have already been created by some other means, like
 # the RabbitMQ management plugin.
topic_url = "rabbit://myexchange" +sub_url = "rabbit://myqueue" ` REPLICATION_TOML_EXAMPLE = ` From cd6d35aa52263bedba7280e6a32e98080f9472db Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 5 Apr 2019 11:40:54 -0700 Subject: [PATCH 133/450] weed shell: fs.tree improvements --- weed/shell/command_fs_tree.go | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 5bc3c57b4..289470749 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -40,15 +40,22 @@ func (c *commandFsTree) Do(args []string, commandEnv *commandEnv, writer io.Writ return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - return treeTraverseDirectory(ctx, writer, client, dir, name, 1000, newPrefix(), 0) + dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, dir, name, newPrefix(), -1) + + if terr == nil { + fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) + } + + return terr }) } -func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int, prefix *Prefix, level int) (err error) { +func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { paginatedCount := -1 startFromFileName := "" + paginateSize := 1000 for paginatedCount == -1 || paginatedCount == paginateSize { resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ @@ -69,17 +76,29 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_p } for i, entry := range resp.Entries { + + if level < 0 { + if entry.Name != name { + break + } + } + // 0.1% wrong prefix here, but fixing it would need to paginate to the next batch first isLast := paginatedCount < paginateSize && i == paginatedCount-1 fmt.Fprintf(writer, "%s%s\n", prefix.getPrefix(level, isLast), entry.Name) if entry.IsDirectory { + directoryCount++ subDir := fmt.Sprintf("%s/%s", dir, entry.Name) if dir == "/" { subDir = "/" + entry.Name } - err = treeTraverseDirectory(ctx, writer, client, subDir, "", paginateSize, prefix, level+1) + dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, client, subDir, "", prefix, level+1) + directoryCount += dirCount + fileCount += fCount + err = terr } else { + fileCount++ } startFromFileName = entry.Name @@ -107,6 +126,9 @@ func (p *Prefix) removeMarker(marker int) { } func (p *Prefix) getPrefix(level int, isLastChild bool) string { var sb strings.Builder + if level < 0 { + return "" + } for i := 0; i < level; i++ { if _, ok := p.markers[i]; ok { sb.WriteString("│") From 5808caa2f55b1e9c76c5fa50576db895a499892c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 5 Apr 2019 20:26:52 -0700 Subject: [PATCH 134/450] use cached grpc client --- weed/wdclient/masterclient.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 794471f7b..7a0bc9181 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -106,15 +106,11 @@ func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.Di return fmt.Errorf("failed to parse master grpc %v", master) } - grpcConnection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption) - if err != nil { - return fmt.Errorf("fail to 
dial %s: %v", master, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := master_pb.NewSeaweedClient(grpcConnection) + return fn(ctx, client) + }, masterGrpcAddress, grpcDialOption) - client := master_pb.NewSeaweedClient(grpcConnection) - - return fn(ctx, client) } func (mc *MasterClient) WithClient(ctx context.Context, fn func(client master_pb.SeaweedClient) error) error { From c789b496d82bb1ff811dd8682c7ebde3d7725a03 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 5 Apr 2019 20:31:58 -0700 Subject: [PATCH 135/450] use cached grpc client --- weed/replication/sink/filersink/fetch_write.go | 13 +++++-------- weed/replication/source/filer_source.go | 12 ++++-------- weed/s3api/s3api_handlers.go | 13 +++++-------- 3 files changed, 14 insertions(+), 24 deletions(-) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 0f3473ff2..d24770e3d 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -3,6 +3,7 @@ package filersink import ( "context" "fmt" + "google.golang.org/grpc" "strings" "sync" @@ -105,15 +106,11 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(ctx, fs.grpcAddress, fs.grpcDialOption) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.grpcAddress, fs.grpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) } func volumeId(fileId string) string { diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index 3ab6c7261..d7b5ebc4d 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -91,15 +91,11 @@ func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename stri func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(ctx, fs.grpcAddress, grpcDialOption) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.grpcAddress, fs.grpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) } func volumeId(fileId string) string { diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 5a63648ca..127be07e3 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -9,6 +9,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" "net/http" "net/url" "time" @@ -38,15 +39,11 @@ func encodeResponse(response interface{}) []byte { func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(ctx, 
s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) } // If none of the http routes match respond with MethodNotAllowed From 24c020104aedbf48e568eaebd79d35d7125b55ce Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 5 Apr 2019 23:35:30 -0700 Subject: [PATCH 136/450] weed filer.copy: parallelize the copying to increase throughput improvement https://github.com/chrislusf/seaweedfs/issues/915 --- weed/command/filer_copy.go | 202 ++++++++++++++++++++++++------------- 1 file changed, 130 insertions(+), 72 deletions(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index b34ab55d1..c5baff5e4 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -19,11 +19,13 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" ) var ( - copy CopyOptions + copy CopyOptions + waitGroup sync.WaitGroup ) type CopyOptions struct { @@ -36,6 +38,7 @@ type CopyOptions struct { maxMB *int grpcDialOption grpc.DialOption masterClient *wdclient.MasterClient + concurrency *int } func init() { @@ -48,6 +51,7 @@ func init() { copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit") copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000") + copy.concurrency = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines") } var cmdCopy = &Command{ @@ -110,65 +114,125 @@ func runCopy(cmd *Command, args []string) bool { go copy.masterClient.KeepConnectedToMaster() copy.masterClient.WaitUntilConnected() - for _, fileOrDir := range fileOrDirs { - if !doEachCopy(context.Background(), fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) { - return false + fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrency) + + ctx := context.Background() + + go func() { + defer close(fileCopyTaskChan) + for _, fileOrDir := range fileOrDirs { + if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil { + fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err) + break + } } + }() + for i := 0; i < *copy.concurrency; i++ { + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + worker := FileCopyWorker{ + options: ©, + filerHost: filerUrl.Host, + filerGrpcAddress: filerGrpcAddress, + } + if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil { + fmt.Fprintf(os.Stderr, "copy file error: %v\n", err) + return + } + }() } + waitGroup.Wait() + return true } -func doEachCopy(ctx context.Context, fileOrDir string, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, path string) bool { - f, err := os.Open(fileOrDir) - if err != nil { - fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err) - if _, ok := err.(*os.PathError); ok { - fmt.Printf("skipping %s\n", fileOrDir) - return true - } - return false - } - defer f.Close() +func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan FileCopyTask) error { - fi, err := f.Stat() + fi, err := os.Stat(fileOrDir) if err != nil { - fmt.Printf("Failed to get stat 
for file %s: %v\n", fileOrDir, err) - return false + fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err) + return nil } mode := fi.Mode() if mode.IsDir() { files, _ := ioutil.ReadDir(fileOrDir) for _, subFileOrDir := range files { - if !doEachCopy(ctx, fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, grpcDialOption, path+fi.Name()+"/") { - return false + if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil { + return err } } - return true + return nil } + fileCopyTaskChan <- FileCopyTask{ + sourceLocation: fileOrDir, + destinationUrlPath: destPath, + fileSize: fi.Size(), + fileMode: fi.Mode(), + } + + return nil +} + +type FileCopyWorker struct { + options *CopyOptions + filerHost string + filerGrpcAddress string +} + +func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error { + for task := range fileCopyTaskChan { + if err := worker.doEachCopy(ctx, task); err != nil { + return err + } + } + return nil +} + +type FileCopyTask struct { + sourceLocation string + destinationUrlPath string + fileSize int64 + fileMode os.FileMode +} + +func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error { + + f, err := os.Open(task.sourceLocation) + if err != nil { + fmt.Printf("Failed to open file %s: %v\n", task.sourceLocation, err) + if _, ok := err.(*os.PathError); ok { + fmt.Printf("skipping %s\n", task.sourceLocation) + return nil + } + return err + } + defer f.Close() + // this is a regular file - if *copy.include != "" { - if ok, _ := filepath.Match(*copy.include, filepath.Base(fileOrDir)); !ok { - return true + if *worker.options.include != "" { + if ok, _ := filepath.Match(*worker.options.include, filepath.Base(task.sourceLocation)); !ok { + return nil } } // find the chunk count - chunkSize := int64(*copy.maxMB * 1024 * 1024) + chunkSize := int64(*worker.options.maxMB * 1024 * 1024) chunkCount := 1 - if chunkSize > 0 && fi.Size() > chunkSize { - chunkCount = int(fi.Size()/chunkSize) + 1 + if chunkSize > 0 && task.fileSize > chunkSize { + chunkCount = int(task.fileSize/chunkSize) + 1 } if chunkCount == 1 { - return uploadFileAsOne(ctx, filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi) + return worker.uploadFileAsOne(ctx, task, f) } - return uploadFileInChunks(ctx, filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi, chunkCount, chunkSize) + return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize) } -func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo) bool { +func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error { // upload the file content fileName := filepath.Base(f.Name()) @@ -176,29 +240,27 @@ func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, var chunks []*filer_pb.FileChunk - if fi.Size() > 0 { + if task.fileSize > 0 { // assign a volume - assignResult, err := operation.Assign(copy.masterClient.GetMaster(), grpcDialOption, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, - Replication: *copy.replication, - Collection: *copy.collection, - Ttl: *copy.ttl, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + Ttl: 
*worker.options.ttl, }) if err != nil { - fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) + fmt.Printf("Failed to assign from %s: %v\n", *worker.options.master, err) } targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid uploadResult, err := operation.Upload(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth) if err != nil { - fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) - return false + return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } if uploadResult.Error != "" { - fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) - return false + return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) } fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) @@ -210,12 +272,12 @@ func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, ETag: uploadResult.ETag, }) - fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) + fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) } - if err := withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ - Directory: urlFolder, + Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ Name: fileName, Attributes: &filer_pb.FuseAttributes{ @@ -223,12 +285,12 @@ func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, Mtime: time.Now().Unix(), Gid: uint32(os.Getgid()), Uid: uint32(os.Getuid()), - FileSize: uint64(fi.Size()), - FileMode: uint32(fi.Mode()), + FileSize: uint64(task.fileSize), + FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *copy.replication, - Collection: *copy.collection, - TtlSec: int32(util.ParseInt(*copy.ttl, 0)), + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), }, Chunks: chunks, }, @@ -239,14 +301,13 @@ func uploadFileAsOne(ctx context.Context, filerAddress, filerGrpcAddress string, } return nil }); err != nil { - fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) - return false + return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err) } - return true + return nil } -func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { +func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error { fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) @@ -256,14 +317,14 @@ func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress stri for i := int64(0); i < int64(chunkCount); i++ { // assign a volume - assignResult, err := operation.Assign(copy.masterClient.GetMaster(), grpcDialOption, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, - Replication: *copy.replication, - Collection: *copy.collection, - Ttl: *copy.ttl, + Replication: 
*worker.options.replication, + Collection: *worker.options.collection, + Ttl: *worker.options.ttl, }) if err != nil { - fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) + fmt.Printf("Failed to assign from %s: %v\n", *worker.options.master, err) } targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid @@ -273,12 +334,10 @@ func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress stri io.LimitReader(f, chunkSize), false, "application/octet-stream", nil, assignResult.Auth) if err != nil { - fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) - return false + return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } if uploadResult.Error != "" { - fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) - return false + return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) } chunks = append(chunks, &filer_pb.FileChunk{ FileId: assignResult.Fid, @@ -290,9 +349,9 @@ func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress stri fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) } - if err := withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ - Directory: urlFolder, + Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ Name: fileName, Attributes: &filer_pb.FuseAttributes{ @@ -300,12 +359,12 @@ func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress stri Mtime: time.Now().Unix(), Gid: uint32(os.Getgid()), Uid: uint32(os.Getuid()), - FileSize: uint64(fi.Size()), - FileMode: uint32(fi.Mode()), + FileSize: uint64(task.fileSize), + FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *copy.replication, - Collection: *copy.collection, - TtlSec: int32(util.ParseInt(*copy.ttl, 0)), + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), }, Chunks: chunks, }, @@ -316,13 +375,12 @@ func uploadFileInChunks(ctx context.Context, filerAddress, filerGrpcAddress stri } return nil }); err != nil { - fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) - return false + return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err) } - fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) + fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) - return true + return nil } func detectMimeType(f *os.File) string { From b2d92a29921775c8e9411fc31950e92656d328b9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 5 Apr 2019 23:42:36 -0700 Subject: [PATCH 137/450] weed shell: fs.tree fix directory tree listing --- weed/shell/command_fs_tree.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 289470749..f1ffc9e4b 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -77,7 +77,7 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, client filer_p for i, entry := range resp.Entries { - if level < 0 { + if level < 0 && name != 
"" { if entry.Name != name { break } From 94302935b2d2929f077c96b51de4287f8b63d816 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 00:10:52 -0700 Subject: [PATCH 138/450] weed volume: only store compressed data if compression is effective --- weed/storage/needle_parse_multipart.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/weed/storage/needle_parse_multipart.go b/weed/storage/needle_parse_multipart.go index e8d57ee38..3dba81fcf 100644 --- a/weed/storage/needle_parse_multipart.go +++ b/weed/storage/needle_parse_multipart.go @@ -88,10 +88,12 @@ func parseMultipart(r *http.Request) ( } isGzipped = true } else if operation.IsGzippable(ext, mtype, data) { - if data, e = operation.GzipData(data); e != nil { - return + if compressedData, err := operation.GzipData(data); err == nil { + if len(data) > len(compressedData) { + data = compressedData + isGzipped = true + } } - isGzipped = true } } From de7626bd225cea9483bc097f8f704f30dc847369 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 09:25:29 -0700 Subject: [PATCH 139/450] weed shell: volume.list add summary statistics --- weed/shell/command_volume_list.go | 65 ++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 10 deletions(-) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index f3f843d58..41e6a5072 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -42,30 +42,75 @@ func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io. return nil } -func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo) { +func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo) statistics{ fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + var s statistics for _, dc := range t.DataCenterInfos { - writeDataCenterInfo(writer, dc) + s = s.plus(writeDataCenterInfo(writer, dc)) } + fmt.Fprintf(writer, "%+v \n", s) + return s } -func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) { +func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics{ fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + var s statistics for _, r := range t.RackInfos { - writeRackInfo(writer, r) + s = s.plus(writeRackInfo(writer, r)) } + fmt.Fprintf(writer, " DataCenter %s %+v \n", t.Id, s) + return s } -func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) { +func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics{ fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + var s statistics for _, dn := range t.DataNodeInfos { - writeDataNodeInfo(writer, dn) + s = s.plus(writeDataNodeInfo(writer, dn)) } + fmt.Fprintf(writer, " Rack %s %+v \n", t.Id, s) + return s } -func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) { +func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics{ fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) + var s statistics for _, vi := range t.VolumeInfos { - writeVolumeInformationMessage(writer, vi) + s = s.plus(writeVolumeInformationMessage(writer, vi)) + } + fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s) + return s +} 
+func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics { + fmt.Fprintf(writer, " volume %+v \n", t) + return newStatiscis(t) +} + +type statistics struct { + Size uint64 + FileCount uint64 + DeletedFileCount uint64 + DeletedBytes uint64 +} + +func newStatiscis(t *master_pb.VolumeInformationMessage) statistics { + return statistics{ + Size: t.Size, + FileCount: t.FileCount, + DeletedFileCount: t.DeleteCount, + DeletedBytes: t.DeletedByteCount, } } -func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) { - fmt.Fprintf(writer, " volume %+v \n", t) + +func (s statistics) plus(t statistics) statistics { + return statistics{ + Size: s.Size + t.Size, + FileCount: s.FileCount + t.FileCount, + DeletedFileCount: s.DeletedFileCount + t.DeletedFileCount, + DeletedBytes: s.DeletedBytes + t.DeletedBytes, + } +} + +func (s statistics) String() string { + if s.DeletedFileCount>0 { + return fmt.Sprintf("total size:%d file_count:%d deleted_file:%d deleted_bytes:%d", s.Size, s.FileCount, s.DeletedFileCount, s.DeletedBytes) + } + return fmt.Sprintf("total size:%d file_count:%d", s.Size, s.FileCount) } From 868913aa04f86e9bcf6a1ad50f1c3879841c0d6c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:12:35 -0700 Subject: [PATCH 140/450] printout statistics --- weed/shell/command_volume_list.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index 41e6a5072..b3dca0d0b 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -42,7 +42,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *commandEnv, writer io. return nil } -func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo) statistics{ +func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo) statistics { fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) var s statistics for _, dc := range t.DataCenterInfos { @@ -51,7 +51,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo) statistics{ fmt.Fprintf(writer, "%+v \n", s) return s } -func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics{ +func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics { fmt.Fprintf(writer, " DataCenter %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) var s statistics for _, r := range t.RackInfos { @@ -60,7 +60,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti fmt.Fprintf(writer, " DataCenter %s %+v \n", t.Id, s) return s } -func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics{ +func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics { fmt.Fprintf(writer, " Rack %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) var s statistics for _, dn := range t.DataNodeInfos { @@ -69,7 +69,7 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics{ fmt.Fprintf(writer, " Rack %s %+v \n", t.Id, s) return s } -func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics{ +func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics { fmt.Fprintf(writer, " DataNode %s volume:%d/%d active:%d free:%d\n", t.Id, t.VolumeCount, 
t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount) var s statistics for _, vi := range t.VolumeInfos { @@ -109,7 +109,7 @@ func (s statistics) plus(t statistics) statistics { } func (s statistics) String() string { - if s.DeletedFileCount>0 { + if s.DeletedFileCount > 0 { return fmt.Sprintf("total size:%d file_count:%d deleted_file:%d deleted_bytes:%d", s.Size, s.FileCount, s.DeletedFileCount, s.DeletedBytes) } return fmt.Sprintf("total size:%d file_count:%d", s.Size, s.FileCount) From 7e95dcc99e3d6675032bb4323ab4ec0819fcc7b9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:12:43 -0700 Subject: [PATCH 141/450] trying to fix travis problem --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e1a1fa31c..5d50bb162 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ before_install: - export PATH=/home/travis/gopath/bin:$PATH install: -- go get ./weed/... +- go get -u ./weed/... script: - go test ./weed/... From 27850c3a8b61bd734ee05adf4cfccaefa2411f52 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:13:53 -0700 Subject: [PATCH 142/450] add stop to clean up gocache --- .travis.yml | 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5d50bb162..914f46aee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,19 +1,20 @@ sudo: false language: go go: -- 1.10.x -- 1.11.x -- 1.12.x -- tip + - 1.10.x + - 1.11.x + - 1.12.x + - tip before_install: -- export PATH=/home/travis/gopath/bin:$PATH + - export PATH=/home/travis/gopath/bin:$PATH install: -- go get -u ./weed/... + - rm -rf `go env GOCACHE` + - go get -u ./weed/... script: -- go test ./weed/... + - go test ./weed/... before_deploy: - make release @@ -23,22 +24,22 @@ deploy: api_key: secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI= file: - - build/linux_arm.tar.gz - - build/linux_arm64.tar.gz - - build/linux_386.tar.gz - - build/linux_amd64.tar.gz - - build/darwin_amd64.tar.gz - - build/windows_386.zip - - build/windows_amd64.zip - - build/freebsd_arm.tar.gz - - build/freebsd_amd64.tar.gz - - build/freebsd_386.tar.gz - - build/netbsd_arm.tar.gz - - build/netbsd_amd64.tar.gz - - build/netbsd_386.tar.gz - - build/openbsd_arm.tar.gz - - build/openbsd_amd64.tar.gz - - build/openbsd_386.tar.gz + - build/linux_arm.tar.gz + - build/linux_arm64.tar.gz + - build/linux_386.tar.gz + - build/linux_amd64.tar.gz + - build/darwin_amd64.tar.gz + - build/windows_386.zip + - build/windows_amd64.zip + - build/freebsd_arm.tar.gz + - build/freebsd_amd64.tar.gz + - build/freebsd_386.tar.gz + - build/netbsd_arm.tar.gz + - build/netbsd_amd64.tar.gz + - build/netbsd_386.tar.gz + - build/openbsd_arm.tar.gz + - build/openbsd_amd64.tar.gz + - build/openbsd_386.tar.gz on: tags: true repo: chrislusf/seaweedfs From efa881f557f332f1cc902051ecce3d2156d39b27 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:22:09 -0700 Subject: [PATCH 143/450] clean cache --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 914f46aee..fc27af5e3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,9 @@ before_install: - export PATH=/home/travis/gopath/bin:$PATH install: + - go env GOCACHE - rm -rf `go env GOCACHE` + - rm -rf /home/travis/.gimme - go get -u ./weed/... 
script: From b13c2be3320da577f8d43d93d145fe5b0fb595f8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:27:59 -0700 Subject: [PATCH 144/450] travis tests --- .travis.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index fc27af5e3..461dc19dd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,9 +10,7 @@ before_install: - export PATH=/home/travis/gopath/bin:$PATH install: - - go env GOCACHE - - rm -rf `go env GOCACHE` - - rm -rf /home/travis/.gimme + - echo `go env GOCACHE` - go get -u ./weed/... script: From a98a6263e22e526c4fc91aa85f0cc88378c99e1a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:30:04 -0700 Subject: [PATCH 145/450] travis disable GOCACHE --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 461dc19dd..416654ee7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,6 +10,7 @@ before_install: - export PATH=/home/travis/gopath/bin:$PATH install: + - export GOCACHE="off" - echo `go env GOCACHE` - go get -u ./weed/... From e72469ea214245305e2b4441567b1221bfc47c7e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:52:55 -0700 Subject: [PATCH 146/450] disable cgo --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 416654ee7..6d79a5e76 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,7 +11,8 @@ before_install: install: - export GOCACHE="off" - - echo `go env GOCACHE` + - export CGO=0 + - go env - go get -u ./weed/... script: From 596e920f1f112b1af468788fccc9b358da9deffe Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 11:53:56 -0700 Subject: [PATCH 147/450] disable cgo --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 6d79a5e76..0dc393409 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,7 +11,7 @@ before_install: install: - export GOCACHE="off" - - export CGO=0 + - export CGO_ENABLED="0" - go env - go get -u ./weed/... From d942aa808aa6e4a6729b2547fba6e0d713dc5ee5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 12:01:02 -0700 Subject: [PATCH 148/450] travis: enable go cache --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 0dc393409..f6dc49c31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,6 @@ before_install: - export PATH=/home/travis/gopath/bin:$PATH install: - - export GOCACHE="off" - export CGO_ENABLED="0" - go env - go get -u ./weed/... 
From 02191f6156d2f2087a1a37465ec7c9937ca4fff2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 12:05:19 -0700 Subject: [PATCH 149/450] travis: temporarily disable go tip --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index f6dc49c31..612f643e9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,7 @@ go: - 1.10.x - 1.11.x - 1.12.x - - tip + # - tip before_install: - export PATH=/home/travis/gopath/bin:$PATH @@ -44,4 +44,4 @@ deploy: on: tags: true repo: chrislusf/seaweedfs - go: tip + go: 1.12.x From 174bf1e8b2ecfc23e375dcbbf3fca28d73ed172f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 6 Apr 2019 14:14:28 -0700 Subject: [PATCH 150/450] more efficient client side gzip compression --- weed/operation/compress.go | 81 ++++++++++++++++++-------------- weed/operation/upload_content.go | 19 +++++++- 2 files changed, 64 insertions(+), 36 deletions(-) diff --git a/weed/operation/compress.go b/weed/operation/compress.go index fedc877dd..a28fb33ec 100644 --- a/weed/operation/compress.go +++ b/weed/operation/compress.go @@ -16,40 +16,9 @@ import ( */ func IsGzippable(ext, mtype string, data []byte) bool { - // text - if strings.HasPrefix(mtype, "text/") { - return true - } - - // images - switch ext { - case ".svg", ".bmp": - return true - } - if strings.HasPrefix(mtype, "image/") { - return false - } - - // by file name extension - switch ext { - case ".zip", ".rar", ".gz", ".bz2", ".xz": - return false - case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json": - return true - case ".php", ".java", ".go", ".rb", ".c", ".cpp", ".h", ".hpp": - return true - case ".png", ".jpg", ".jpeg": - return false - } - - // by mime type - if strings.HasPrefix(mtype, "application/") { - if strings.HasSuffix(mtype, "xml") { - return true - } - if strings.HasSuffix(mtype, "script") { - return true - } + shouldBeZipped, iAmSure := IsGzippableFileType(ext, mtype) + if iAmSure { + return shouldBeZipped } isMostlyText := util.IsText(data) @@ -57,6 +26,50 @@ func IsGzippable(ext, mtype string, data []byte) bool { return isMostlyText } +/* +* Default more not to gzip since gzip can be done on client side. 
+ */ +func IsGzippableFileType(ext, mtype string) (shouldBeZipped, iAmSure bool) { + + // text + if strings.HasPrefix(mtype, "text/") { + return true, true + } + + // images + switch ext { + case ".svg", ".bmp": + return true, true + } + if strings.HasPrefix(mtype, "image/") { + return false, true + } + + // by file name extension + switch ext { + case ".zip", ".rar", ".gz", ".bz2", ".xz": + return false, true + case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json": + return true, true + case ".php", ".java", ".go", ".rb", ".c", ".cpp", ".h", ".hpp": + return true, true + case ".png", ".jpg", ".jpeg": + return false, true + } + + // by mime type + if strings.HasPrefix(mtype, "application/") { + if strings.HasSuffix(mtype, "xml") { + return true, true + } + if strings.HasSuffix(mtype, "script") { + return true, true + } + } + + return false, false +} + func GzipData(input []byte) ([]byte, error) { buf := new(bytes.Buffer) w, _ := gzip.NewWriterLevel(buf, flate.BestCompression) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 2276c67b7..6bd299826 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,6 +2,7 @@ package operation import ( "bytes" + "compress/gzip" "encoding/json" "errors" "fmt" @@ -39,10 +40,24 @@ var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") // Upload sends a POST request to a volume server to upload the content func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { + contentIsGzipped := isGzipped + shouldGzipNow := false + if !isGzipped { + if shouldBeZipped, iAmSure := IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped { + shouldGzipNow = true + contentIsGzipped = true + } + } return upload_content(uploadUrl, func(w io.Writer) (err error) { - _, err = io.Copy(w, reader) + if shouldGzipNow { + gzWriter := gzip.NewWriter(w) + _, err = io.Copy(gzWriter, reader) + gzWriter.Close() + } else { + _, err = io.Copy(w, reader) + } return - }, filename, isGzipped, mtype, pairMap, jwt) + }, filename, contentIsGzipped, mtype, pairMap, jwt) } func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { body_buf := bytes.NewBufferString("") From c731d9be6d28a661037bbd03c0762d0819f6fb7a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 7 Apr 2019 09:13:24 -0700 Subject: [PATCH 151/450] weed filer.copy: add option to profile the execution --- weed/command/filer_copy.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index c5baff5e4..18b641ae5 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -114,6 +114,10 @@ func runCopy(cmd *Command, args []string) bool { go copy.masterClient.KeepConnectedToMaster() copy.masterClient.WaitUntilConnected() + if *cmdCopy.IsDebug { + util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") + } + fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrency) ctx := context.Background() From a32abda1a3fb696687739805faa1f5444d86c5fa Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 7 Apr 2019 11:01:28 -0700 Subject: [PATCH 152/450] adjust compression to optimize for speed --- weed/operation/compress.go | 2 +- weed/operation/upload_content.go | 3 ++- 2 files changed, 3 
insertions(+), 2 deletions(-) diff --git a/weed/operation/compress.go b/weed/operation/compress.go index a28fb33ec..7190eeeb2 100644 --- a/weed/operation/compress.go +++ b/weed/operation/compress.go @@ -72,7 +72,7 @@ func IsGzippableFileType(ext, mtype string) (shouldBeZipped, iAmSure bool) { func GzipData(input []byte) ([]byte, error) { buf := new(bytes.Buffer) - w, _ := gzip.NewWriterLevel(buf, flate.BestCompression) + w, _ := gzip.NewWriterLevel(buf, flate.BestSpeed) if _, err := w.Write(input); err != nil { glog.V(2).Infoln("error compressing data:", err) return nil, err diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 6bd299826..dcab1a0ae 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,6 +2,7 @@ package operation import ( "bytes" + "compress/flate" "compress/gzip" "encoding/json" "errors" @@ -50,7 +51,7 @@ func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, } return upload_content(uploadUrl, func(w io.Writer) (err error) { if shouldGzipNow { - gzWriter := gzip.NewWriter(w) + gzWriter, _ := gzip.NewWriterLevel(w, flate.BestSpeed) _, err = io.Copy(gzWriter, reader) gzWriter.Close() } else { From d14b614407a96e3d81bc655e5164d202c7e3b959 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 7 Apr 2019 11:31:50 -0700 Subject: [PATCH 153/450] weed filer.copy: use existing file owner and gropu id --- weed/command/filer_copy.go | 14 ++++++++++---- weed/util/file_util_non_posix.go | 12 ++++++++++++ weed/util/file_util_posix.go | 11 +++++++++++ 3 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 weed/util/file_util_non_posix.go create mode 100644 weed/util/file_util_posix.go diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 18b641ae5..777e52ab6 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -170,11 +170,15 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi return nil } + uid, gid := util.GetFileUidGid(fi) + fileCopyTaskChan <- FileCopyTask{ sourceLocation: fileOrDir, destinationUrlPath: destPath, fileSize: fi.Size(), fileMode: fi.Mode(), + uid: uid, + gid: gid, } return nil @@ -200,6 +204,8 @@ type FileCopyTask struct { destinationUrlPath string fileSize int64 fileMode os.FileMode + uid uint32 + gid uint32 } func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error { @@ -287,8 +293,8 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy Attributes: &filer_pb.FuseAttributes{ Crtime: time.Now().Unix(), Mtime: time.Now().Unix(), - Gid: uint32(os.Getgid()), - Uid: uint32(os.Getuid()), + Gid: task.gid, + Uid: task.uid, FileSize: uint64(task.fileSize), FileMode: uint32(task.fileMode), Mime: mimeType, @@ -361,8 +367,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC Attributes: &filer_pb.FuseAttributes{ Crtime: time.Now().Unix(), Mtime: time.Now().Unix(), - Gid: uint32(os.Getgid()), - Uid: uint32(os.Getuid()), + Gid: task.gid, + Uid: task.uid, FileSize: uint64(task.fileSize), FileMode: uint32(task.fileMode), Mime: mimeType, diff --git a/weed/util/file_util_non_posix.go b/weed/util/file_util_non_posix.go new file mode 100644 index 000000000..ffcfef6d5 --- /dev/null +++ b/weed/util/file_util_non_posix.go @@ -0,0 +1,12 @@ +// +build linux darwin freebsd netbsd openbsd plan9 solaris zos + +package util + +import ( + "os" + "syscall" +) + +func GetFileUidGid(fi os.FileInfo) (uid, gid uint32) { + return 
fi.Sys().(*syscall.Stat_t).Uid, fi.Sys().(*syscall.Stat_t).Gid +} diff --git a/weed/util/file_util_posix.go b/weed/util/file_util_posix.go new file mode 100644 index 000000000..22ca60b3b --- /dev/null +++ b/weed/util/file_util_posix.go @@ -0,0 +1,11 @@ +// +build windows + +package util + +import ( + "os" +) + +func GetFileUidGid(fi os.FileInfo) (uid, gid uint32) { + return 0, 0 +} From 000ee725fc473560d8e84046598ad469c52e432d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 8 Apr 2019 19:40:56 -0700 Subject: [PATCH 154/450] refactor Offset into a struct of bytes --- weed/command/export.go | 2 +- weed/command/fix.go | 4 +- weed/server/volume_grpc_follow.go | 3 +- weed/storage/needle/btree_map.go | 4 +- weed/storage/needle/compact_map_perf_test.go | 2 +- weed/storage/needle/compact_map_test.go | 49 ++++++++++---------- weed/storage/needle_map_boltdb.go | 2 +- weed/storage/needle_map_leveldb.go | 2 +- weed/storage/needle_map_memory.go | 4 +- weed/storage/types/needle_types.go | 46 ++++++++++++++++-- weed/storage/volume_checking.go | 4 +- weed/storage/volume_follow.go | 28 +++++------ weed/storage/volume_read_write.go | 16 +++---- weed/storage/volume_vacuum.go | 18 +++---- weed/tools/read_index.go | 29 ------------ 15 files changed, 111 insertions(+), 102 deletions(-) delete mode 100644 weed/tools/read_index.go diff --git a/weed/command/export.go b/weed/command/export.go index cdced5936..47abc2929 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -107,7 +107,7 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *storage.Needle, offset i nv, ok := needleMap.Get(n.Id) glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv) - if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && int64(nv.Offset)*types.NeedlePaddingSize == offset { + if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset { if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) { glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", n.LastModified, newerThanUnix) diff --git a/weed/command/fix.go b/weed/command/fix.go index 42ae23a3c..2536d774f 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -45,11 +45,11 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *storage.Needle, offset int64) error { glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) if n.Size > 0 && n.Size != types.TombstoneFileSize { - pe := scanner.nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size) + pe := scanner.nm.Put(n.Id, types.ToOffset(offset), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { glog.V(2).Infof("skipping deleted file ...") - return scanner.nm.Delete(n.Id, types.Offset(offset/types.NeedlePaddingSize)) + return scanner.nm.Delete(n.Id, types.ToOffset(offset)) } return nil } diff --git a/weed/server/volume_grpc_follow.go b/weed/server/volume_grpc_follow.go index cc5dcc78e..c3ce774c0 100644 --- a/weed/server/volume_grpc_follow.go +++ b/weed/server/volume_grpc_follow.go @@ -3,7 +3,6 @@ package weed_server import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/storage/types" "io" "os" @@ -28,7 +27,7 @@ func (vs *VolumeServer) VolumeFollow(req *volume_server_pb.VolumeFollowRequest, return nil } - startOffset := 
int64(foundOffset) * int64(types.NeedlePaddingSize) + startOffset := foundOffset.ToAcutalOffset() buf := make([]byte, 1024*1024*2) return sendFileContent(v.DataFile(), buf, startOffset, stopOffset, stream) diff --git a/weed/storage/needle/btree_map.go b/weed/storage/needle/btree_map.go index d688b802e..aed940f0e 100644 --- a/weed/storage/needle/btree_map.go +++ b/weed/storage/needle/btree_map.go @@ -26,7 +26,7 @@ func (cm *BtreeMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Off } func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) { - found := cm.tree.Delete(NeedleValue{key, 0, 0}) + found := cm.tree.Delete(NeedleValue{key, Offset{}, 0}) if found != nil { old := found.(NeedleValue) return old.Size @@ -34,7 +34,7 @@ func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) { return } func (cm *BtreeMap) Get(key NeedleId) (*NeedleValue, bool) { - found := cm.tree.Get(NeedleValue{key, 0, 0}) + found := cm.tree.Get(NeedleValue{key, Offset{}, 0}) if found != nil { old := found.(NeedleValue) return &old, true diff --git a/weed/storage/needle/compact_map_perf_test.go b/weed/storage/needle/compact_map_perf_test.go index 908da968f..3f6fe548b 100644 --- a/weed/storage/needle/compact_map_perf_test.go +++ b/weed/storage/needle/compact_map_perf_test.go @@ -62,7 +62,7 @@ func loadNewNeedleMap(file *os.File) (*CompactMap, uint64) { offset := BytesToOffset(bytes[i+NeedleIdSize : i+NeedleIdSize+OffsetSize]) size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize]) - if offset > 0 { + if !offset.IsZero() { m.Set(NeedleId(key), offset, size) } else { m.Delete(key) diff --git a/weed/storage/needle/compact_map_test.go b/weed/storage/needle/compact_map_test.go index 73231053e..374b9ff4d 100644 --- a/weed/storage/needle/compact_map_test.go +++ b/weed/storage/needle/compact_map_test.go @@ -1,22 +1,23 @@ package needle import ( + "fmt" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" "testing" ) func TestOverflow2(t *testing.T) { m := NewCompactMap() - m.Set(NeedleId(150088), 8, 3000073) - m.Set(NeedleId(150073), 8, 3000073) - m.Set(NeedleId(150089), 8, 3000073) - m.Set(NeedleId(150076), 8, 3000073) - m.Set(NeedleId(150124), 8, 3000073) - m.Set(NeedleId(150137), 8, 3000073) - m.Set(NeedleId(150147), 8, 3000073) - m.Set(NeedleId(150145), 8, 3000073) - m.Set(NeedleId(150158), 8, 3000073) - m.Set(NeedleId(150162), 8, 3000073) + m.Set(NeedleId(150088), ToOffset(8), 3000073) + m.Set(NeedleId(150073), ToOffset(8), 3000073) + m.Set(NeedleId(150089), ToOffset(8), 3000073) + m.Set(NeedleId(150076), ToOffset(8), 3000073) + m.Set(NeedleId(150124), ToOffset(8), 3000073) + m.Set(NeedleId(150137), ToOffset(8), 3000073) + m.Set(NeedleId(150147), ToOffset(8), 3000073) + m.Set(NeedleId(150145), ToOffset(8), 3000073) + m.Set(NeedleId(150158), ToOffset(8), 3000073) + m.Set(NeedleId(150162), ToOffset(8), 3000073) m.Visit(func(value NeedleValue) error { println("needle key:", value.Key) @@ -26,13 +27,13 @@ func TestOverflow2(t *testing.T) { func TestIssue52(t *testing.T) { m := NewCompactMap() - m.Set(NeedleId(10002), 10002, 10002) + m.Set(NeedleId(10002), ToOffset(10002), 10002) if element, ok := m.Get(NeedleId(10002)); ok { - println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size) + fmt.Printf("key %d ok %v %d, %v, %d\n", 10002, ok, element.Key, element.Offset, element.Size) } - m.Set(NeedleId(10001), 10001, 10001) + m.Set(NeedleId(10001), ToOffset(10001), 10001) if element, ok := m.Get(NeedleId(10002)); ok { - println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size) + fmt.Printf("key %d ok %v %d, %v, %d\n", 10002, ok, element.Key, element.Offset, element.Size) } else { t.Fatal("key 10002 missing after setting 10001") } @@ -41,7 +42,7 @@ func TestIssue52(t *testing.T) { func TestCompactMap(t *testing.T) { m := NewCompactMap() for i := uint32(0); i < 100*batch; i += 2 { - m.Set(NeedleId(i), Offset(i), i) + m.Set(NeedleId(i), ToOffset(int64(i)), i) } for i := uint32(0); i < 100*batch; i += 37 { @@ -49,7 +50,7 @@ func TestCompactMap(t *testing.T) { } for i := uint32(0); i < 10*batch; i += 3 { - m.Set(NeedleId(i), Offset(i+11), i+5) + m.Set(NeedleId(i), ToOffset(int64(i+11)), i+5) } // for i := uint32(0); i < 100; i++ { @@ -99,17 +100,17 @@ func TestCompactMap(t *testing.T) { func TestOverflow(t *testing.T) { o := Overflow(make([]SectionalNeedleValue, 0)) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: 12, Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 2, Offset: 12, Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: 12, Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: 12, Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 5, Offset: 12, Size: 12}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: ToOffset(12), Size: 12}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 2, Offset: ToOffset(12), Size: 12}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: ToOffset(12), Size: 12}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: ToOffset(12), Size: 12}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 5, Offset: ToOffset(12), Size: 12}) if o[2].Key != 3 { t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key) } - o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: 24, Size: 24}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: ToOffset(24), Size: 24}) if o[2].Key 
!= 3 { t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key) @@ -142,13 +143,13 @@ func TestOverflow(t *testing.T) { } println() - o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: 44, Size: 44}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: ToOffset(44), Size: 44}) for i, x := range o { println("overflow[", i, "]:", x.Key) } println() - o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: 11, Size: 11}) + o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: ToOffset(11), Size: 11}) for i, x := range o { println("overflow[", i, "]:", x.Key) diff --git a/weed/storage/needle_map_boltdb.go b/weed/storage/needle_map_boltdb.go index a24c55a32..e2e4d22f7 100644 --- a/weed/storage/needle_map_boltdb.go +++ b/weed/storage/needle_map_boltdb.go @@ -68,7 +68,7 @@ func generateBoltDbFile(dbFileName string, indexFile *os.File) error { } defer db.Close() return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error { - if offset > 0 && size != TombstoneFileSize { + if !offset.IsZero() && size != TombstoneFileSize { boltDbWrite(db, key, offset, size) } else { boltDbDelete(db, key) diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index 77d29bd87..939e0cac4 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -63,7 +63,7 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error { } defer db.Close() return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error { - if offset > 0 && size != TombstoneFileSize { + if !offset.IsZero() && size != TombstoneFileSize { levelDbWrite(db, key, offset, size) } else { levelDbDelete(db, key) diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index fa5576c2b..ad3bd3f7a 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -50,12 +50,12 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { if key > nm.MaximumFileKey { nm.MaximumFileKey = key } - if offset > 0 && size != TombstoneFileSize { + if !offset.IsZero() && size != TombstoneFileSize { nm.FileCounter++ nm.FileByteCounter = nm.FileByteCounter + uint64(size) oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size) // glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize) - if oldOffset > 0 && oldSize != TombstoneFileSize { + if !oldOffset.IsZero() && oldSize != TombstoneFileSize { nm.DeletionCounter++ nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize) } diff --git a/weed/storage/types/needle_types.go b/weed/storage/types/needle_types.go index ce4e601e4..d23666357 100644 --- a/weed/storage/types/needle_types.go +++ b/weed/storage/types/needle_types.go @@ -7,7 +7,15 @@ import ( "strconv" ) -type Offset uint32 +type Offset struct { + // b5 byte // unused + // b4 byte // unused + b3 byte + b2 byte + b1 byte + b0 byte // the smaller byte +} + type Cookie uint32 const ( @@ -41,13 +49,43 @@ func ParseCookie(cookieString string) (Cookie, error) { } func OffsetToBytes(bytes []byte, offset Offset) { - util.Uint32toBytes(bytes, uint32(offset)) + bytes[3] = offset.b0 + bytes[2] = offset.b1 + bytes[1] = offset.b2 + bytes[0] = offset.b3 } func Uint32ToOffset(offset uint32) Offset { - return Offset(offset) + return Offset{ + b0: byte(offset), + b1: byte(offset >> 8), + b2: byte(offset >> 16), + b3: byte(offset >> 24), + } } func BytesToOffset(bytes []byte) Offset { - return Offset(util.BytesToUint32(bytes[0:4])) + 
return Offset{ + b0: bytes[3], + b1: bytes[2], + b2: bytes[1], + b3: bytes[0], + } +} + +func (offset Offset) IsZero() bool { + return offset.b0 == 0 && offset.b1 == 0 && offset.b2 == 0 && offset.b3 == 0 +} + +func ToOffset(offset int64) Offset { + smaller := uint32(offset / int64(NeedlePaddingSize)) + return Uint32ToOffset(smaller) +} + +func (offset Offset) ToAcutalOffset() (actualOffset int64) { + return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24) * int64(NeedlePaddingSize) +} + +func (offset Offset) String() string { + return fmt.Sprintf("%d", int64(offset.b0)+int64(offset.b1)<<8+int64(offset.b2)<<16+int64(offset.b3)<<24) } diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index 12c282be9..1ac73d3d3 100644 --- a/weed/storage/volume_checking.go +++ b/weed/storage/volume_checking.go @@ -26,10 +26,10 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error { return fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e) } key, offset, size := IdxFileEntry(lastIdxEntry) - if offset == 0 || size == TombstoneFileSize { + if offset.IsZero() || size == TombstoneFileSize { return nil } - if e = verifyNeedleIntegrity(v.dataFile, v.Version(), int64(offset)*NeedlePaddingSize, key, size); e != nil { + if e = verifyNeedleIntegrity(v.dataFile, v.Version(), offset.ToAcutalOffset(), key, size); e != nil { return fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e) } diff --git a/weed/storage/volume_follow.go b/weed/storage/volume_follow.go index b8353f9d1..8512ec932 100644 --- a/weed/storage/volume_follow.go +++ b/weed/storage/volume_follow.go @@ -110,7 +110,7 @@ func (v *Volume) findLastAppendAtNs() (uint64, error) { if err != nil { return 0, err } - if offset == 0 { + if offset.IsZero() { return 0, nil } return v.readAppendAtNs(offset) @@ -119,26 +119,26 @@ func (v *Volume) findLastAppendAtNs() (uint64, error) { func (v *Volume) locateLastAppendEntry() (Offset, error) { indexFile, e := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644) if e != nil { - return 0, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e) + return Offset{}, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e) } defer indexFile.Close() fi, err := indexFile.Stat() if err != nil { - return 0, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err) + return Offset{}, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err) } fileSize := fi.Size() if fileSize%NeedleEntrySize != 0 { - return 0, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize) + return Offset{}, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize) } if fileSize == 0 { - return 0, nil + return Offset{}, nil } bytes := make([]byte, NeedleEntrySize) n, e := indexFile.ReadAt(bytes, fileSize-NeedleEntrySize) if n != NeedleEntrySize { - return 0, fmt.Errorf("file %s read error: %v", indexFile.Name(), e) + return Offset{}, fmt.Errorf("file %s read error: %v", indexFile.Name(), e) } _, offset, _ := IdxFileEntry(bytes) @@ -147,13 +147,13 @@ func (v *Volume) locateLastAppendEntry() (Offset, error) { func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) { - n, bodyLength, err := ReadNeedleHeader(v.dataFile, v.SuperBlock.version, int64(offset)*NeedlePaddingSize) + n, bodyLength, err := ReadNeedleHeader(v.dataFile, v.SuperBlock.version, offset.ToAcutalOffset()) if err != nil { return 0, fmt.Errorf("ReadNeedleHeader: %v", err) } - err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, 
int64(offset)*NeedlePaddingSize+int64(NeedleEntrySize), bodyLength) + err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, offset.ToAcutalOffset()+int64(NeedleEntrySize), bodyLength) if err != nil { - return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", int64(offset)*NeedlePaddingSize, bodyLength, err) + return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err) } return n.AppendAtNs, nil @@ -189,7 +189,7 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast m := (l + h) / 2 if m == entryCount { - return 0, true, nil + return Offset{}, true, nil } // read the appendAtNs for entry m @@ -214,7 +214,7 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast } if l == entryCount { - return 0, true, nil + return Offset{}, true, nil } offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, l) @@ -226,7 +226,7 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast // bytes is of size NeedleEntrySize func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m int64) (Offset, error) { if _, readErr := indexFile.ReadAt(bytes, m*NeedleEntrySize); readErr != nil && readErr != io.EOF { - return 0, readErr + return Offset{}, readErr } _, offset, _ := IdxFileEntry(bytes) return offset, nil @@ -247,7 +247,7 @@ func (scanner *VolumeFileScanner4GenIdx) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4GenIdx) VisitNeedle(n *Needle, offset int64) error { if n.Size > 0 && n.Size != TombstoneFileSize { - return scanner.v.nm.Put(n.Id, Offset(offset/NeedlePaddingSize), n.Size) + return scanner.v.nm.Put(n.Id, ToOffset(offset), n.Size) } - return scanner.v.nm.Delete(n.Id, Offset(offset/NeedlePaddingSize)) + return scanner.v.nm.Delete(n.Id, ToOffset(offset)) } diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go index 5366a547d..93f4ed1c1 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_read_write.go @@ -21,9 +21,9 @@ func (v *Volume) isFileUnchanged(n *Needle) bool { return false } nv, ok := v.nm.Get(n.Id) - if ok && nv.Offset > 0 { + if ok && !nv.Offset.IsZero() { oldNeedle := new(Needle) - err := oldNeedle.ReadData(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version()) + err := oldNeedle.ReadData(v.dataFile, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) if err != nil { glog.V(0).Infof("Failed to check updated file %v", err) return false @@ -96,8 +96,8 @@ func (v *Volume) writeNeedle(n *Needle) (offset uint64, size uint32, err error) } nv, ok := v.nm.Get(n.Id) - if !ok || uint64(nv.Offset)*NeedlePaddingSize < offset { - if err = v.nm.Put(n.Id, Offset(offset/NeedlePaddingSize), n.Size); err != nil { + if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset { + if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil { glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err) } } @@ -124,7 +124,7 @@ func (v *Volume) deleteNeedle(n *Needle) (uint32, error) { if err != nil { return size, err } - if err = v.nm.Delete(n.Id, Offset(offset/NeedlePaddingSize)); err != nil { + if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil { return size, err } return size, err @@ -135,10 +135,10 @@ func (v *Volume) deleteNeedle(n *Needle) (uint32, error) { // read fills in Needle content by looking up n.Id from NeedleMapper func (v *Volume) readNeedle(n *Needle) (int, error) { nv, ok := v.nm.Get(n.Id) - if !ok || nv.Offset == 0 { + if !ok || 
nv.Offset.IsZero() { v.compactingWg.Wait() nv, ok = v.nm.Get(n.Id) - if !ok || nv.Offset == 0 { + if !ok || nv.Offset.IsZero() { return -1, ErrorNotFound } } @@ -148,7 +148,7 @@ func (v *Volume) readNeedle(n *Needle) (int, error) { if nv.Size == 0 { return 0, nil } - err := n.ReadData(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version()) + err := n.ReadData(v.dataFile, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) if err != nil { return 0, err } diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index b575277cd..b550edb80 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -201,13 +201,13 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI } //updated needle - if increIdxEntry.offset != 0 && increIdxEntry.size != 0 && increIdxEntry.size != TombstoneFileSize { + if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size != TombstoneFileSize { //even the needle cache in memory is hit, the need_bytes is correct - glog.V(4).Infof("file %d offset %d size %d", key, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size) + glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size) var needleBytes []byte - needleBytes, err = ReadNeedleBlob(oldDatFile, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size, v.Version()) + needleBytes, err = ReadNeedleBlob(oldDatFile, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version()) if err != nil { - return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size, err) + return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, err) } dst.Write(needleBytes) util.Uint32toBytes(idxEntryBytes[8:12], uint32(offset/NeedlePaddingSize)) @@ -261,8 +261,8 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *Needle, offset int64) er } nv, ok := scanner.v.nm.Get(n.Id) glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) - if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 && nv.Size != TombstoneFileSize { - if err := scanner.nm.Put(n.Id, Offset(scanner.newOffset/NeedlePaddingSize), n.Size); err != nil { + if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size != TombstoneFileSize { + if err := scanner.nm.Put(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } if _, _, _, err := n.Append(scanner.dst, scanner.v.Version()); err != nil { @@ -325,7 +325,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { newOffset := int64(v.SuperBlock.BlockSize()) WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error { - if offset == 0 || size == TombstoneFileSize { + if offset.IsZero() || size == TombstoneFileSize { return nil } @@ -335,7 +335,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { } n := new(Needle) - err := n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version()) + err := n.ReadData(v.dataFile, offset.ToAcutalOffset(), size, v.Version()) if err != nil { return nil } @@ -346,7 +346,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) if 
nv.Offset == offset && nv.Size > 0 { - if err = nm.Put(n.Id, Offset(newOffset/NeedlePaddingSize), n.Size); err != nil { + if err = nm.Put(n.Id, ToOffset(newOffset), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } if _, _, _, err = n.Append(dst, v.Version()); err != nil { diff --git a/weed/tools/read_index.go b/weed/tools/read_index.go deleted file mode 100644 index d53f489ea..000000000 --- a/weed/tools/read_index.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - - "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/chrislusf/seaweedfs/weed/storage/types" -) - -var ( - indexFileName = flag.String("file", "", ".idx file to analyze") -) - -func main() { - flag.Parse() - indexFile, err := os.OpenFile(*indexFileName, os.O_RDONLY, 0644) - if err != nil { - log.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() - - storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { - fmt.Printf("key %d, offset %d, size %d, nextOffset %d\n", key, offset*8, size, int64(offset)*types.NeedlePaddingSize+int64(size)) - return nil - }) -} From 7a4b234ea1b6be8ef47365a60b163dbf96c85e45 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 8 Apr 2019 21:44:06 -0700 Subject: [PATCH 155/450] divide offset into higher and lower sections --- weed/storage/needle/compact_map.go | 159 ++++++++++++++---------- weed/storage/needle/compact_map_test.go | 48 +++---- weed/storage/types/needle_types.go | 36 ++++-- 3 files changed, 139 insertions(+), 104 deletions(-) diff --git a/weed/storage/needle/compact_map.go b/weed/storage/needle/compact_map.go index bca698407..cb0bf2e51 100644 --- a/weed/storage/needle/compact_map.go +++ b/weed/storage/needle/compact_map.go @@ -15,27 +15,36 @@ type SectionalNeedleId uint32 const SectionalNeedleIdLimit = 1<<32 - 1 type SectionalNeedleValue struct { - Key SectionalNeedleId - Offset Offset `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G - Size uint32 `comment:"Size of the data portion"` + Key SectionalNeedleId + OffsetLower OffsetLower `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G + Size uint32 `comment:"Size of the data portion"` +} + +type SectionalNeedleValueExtra struct { + OffsetHigher OffsetHigher } type CompactSection struct { sync.RWMutex - values []SectionalNeedleValue - overflow Overflow - start NeedleId - end NeedleId - counter int + values []SectionalNeedleValue + valuesExtra []SectionalNeedleValueExtra + overflow Overflow + overflowExtra OverflowExtra + start NeedleId + end NeedleId + counter int } type Overflow []SectionalNeedleValue +type OverflowExtra []SectionalNeedleValueExtra func NewCompactSection(start NeedleId) *CompactSection { return &CompactSection{ - values: make([]SectionalNeedleValue, batch), - overflow: Overflow(make([]SectionalNeedleValue, 0)), - start: start, + values: make([]SectionalNeedleValue, batch), + valuesExtra: make([]SectionalNeedleValueExtra, batch), + overflow: Overflow(make([]SectionalNeedleValue, 0)), + overflowExtra: OverflowExtra(make([]SectionalNeedleValueExtra, 0)), + start: start, } } @@ -47,21 +56,21 @@ func (cs *CompactSection) Set(key NeedleId, offset Offset, size uint32) (oldOffs } skey := SectionalNeedleId(key - cs.start) if i := cs.binarySearchValues(skey); i >= 0 { - oldOffset, oldSize = cs.values[i].Offset, cs.values[i].Size + oldOffset.OffsetHigher, oldOffset.OffsetLower, oldSize = cs.valuesExtra[i].OffsetHigher, cs.values[i].OffsetLower, 
cs.values[i].Size //println("key", key, "old size", ret) - cs.values[i].Offset, cs.values[i].Size = offset, size + cs.valuesExtra[i].OffsetHigher, cs.values[i].OffsetLower, cs.values[i].Size = offset.OffsetHigher, offset.OffsetLower, size } else { needOverflow := cs.counter >= batch needOverflow = needOverflow || cs.counter > 0 && cs.values[cs.counter-1].Key > skey if needOverflow { //println("start", cs.start, "counter", cs.counter, "key", key) - if oldValue, found := cs.overflow.findOverflowEntry(skey); found { - oldOffset, oldSize = oldValue.Offset, oldValue.Size + if oldValueExtra, oldValue, found := cs.findOverflowEntry(skey); found { + oldOffset.OffsetHigher, oldOffset.OffsetLower, oldSize = oldValueExtra.OffsetHigher, oldValue.OffsetLower, oldValue.Size } - cs.overflow = cs.overflow.setOverflowEntry(SectionalNeedleValue{Key: skey, Offset: offset, Size: size}) + cs.setOverflowEntry(skey, offset, size) } else { p := &cs.values[cs.counter] - p.Key, p.Offset, p.Size = skey, offset, size + p.Key, cs.valuesExtra[cs.counter].OffsetHigher, p.OffsetLower, p.Size = skey, offset.OffsetHigher, offset.OffsetLower, size //println("added index", cs.counter, "key", key, cs.values[cs.counter].Key) cs.counter++ } @@ -70,6 +79,50 @@ func (cs *CompactSection) Set(key NeedleId, offset Offset, size uint32) (oldOffs return } +func (cs *CompactSection) setOverflowEntry(skey SectionalNeedleId, offset Offset, size uint32) { + needleValue := SectionalNeedleValue{Key: skey, OffsetLower: offset.OffsetLower, Size: size} + needleValueExtra := SectionalNeedleValueExtra{OffsetHigher: OffsetHigher{}} + insertCandidate := sort.Search(len(cs.overflow), func(i int) bool { + return cs.overflow[i].Key >= needleValue.Key + }) + if insertCandidate != len(cs.overflow) && cs.overflow[insertCandidate].Key == needleValue.Key { + cs.overflow[insertCandidate] = needleValue + } else { + cs.overflow = append(cs.overflow, needleValue) + cs.overflowExtra = append(cs.overflowExtra, needleValueExtra) + for i := len(cs.overflow) - 1; i > insertCandidate; i-- { + cs.overflow[i] = cs.overflow[i-1] + cs.overflowExtra[i] = cs.overflowExtra[i-1] + } + cs.overflow[insertCandidate] = needleValue + } +} + +func (cs *CompactSection) findOverflowEntry(key SectionalNeedleId) (nve SectionalNeedleValueExtra, nv SectionalNeedleValue, found bool) { + foundCandidate := sort.Search(len(cs.overflow), func(i int) bool { + return cs.overflow[i].Key >= key + }) + if foundCandidate != len(cs.overflow) && cs.overflow[foundCandidate].Key == key { + return cs.overflowExtra[foundCandidate], cs.overflow[foundCandidate], true + } + return nve, nv, false +} + +func (cs *CompactSection) deleteOverflowEntry(key SectionalNeedleId) { + length := len(cs.overflow) + deleteCandidate := sort.Search(length, func(i int) bool { + return cs.overflow[i].Key >= key + }) + if deleteCandidate != length && cs.overflow[deleteCandidate].Key == key { + for i := deleteCandidate; i < length-1; i++ { + cs.overflow[i] = cs.overflow[i+1] + cs.overflowExtra[i] = cs.overflowExtra[i+1] + } + cs.overflow = cs.overflow[0 : length-1] + cs.overflowExtra = cs.overflowExtra[0 : length-1] + } +} + //return old entry size func (cs *CompactSection) Delete(key NeedleId) uint32 { skey := SectionalNeedleId(key - cs.start) @@ -81,8 +134,8 @@ func (cs *CompactSection) Delete(key NeedleId) uint32 { cs.values[i].Size = TombstoneFileSize } } - if v, found := cs.overflow.findOverflowEntry(skey); found { - cs.overflow = cs.overflow.deleteOverflowEntry(skey) + if _, v, found := cs.findOverflowEntry(skey); found 
{ + cs.deleteOverflowEntry(skey) ret = v.Size } cs.Unlock() @@ -91,14 +144,14 @@ func (cs *CompactSection) Delete(key NeedleId) uint32 { func (cs *CompactSection) Get(key NeedleId) (*NeedleValue, bool) { cs.RLock() skey := SectionalNeedleId(key - cs.start) - if v, ok := cs.overflow.findOverflowEntry(skey); ok { + if ve, v, ok := cs.findOverflowEntry(skey); ok { cs.RUnlock() - nv := v.toNeedleValue(cs) + nv := toNeedleValue(ve, v, cs) return &nv, true } if i := cs.binarySearchValues(skey); i >= 0 { cs.RUnlock() - nv := cs.values[i].toNeedleValue(cs) + nv := toNeedleValue(cs.valuesExtra[i], cs.values[i], cs) return &nv, true } cs.RUnlock() @@ -194,8 +247,8 @@ func (cm *CompactMap) binarySearchCompactSection(key NeedleId) int { func (cm *CompactMap) Visit(visit func(NeedleValue) error) error { for _, cs := range cm.list { cs.RLock() - for _, v := range cs.overflow { - if err := visit(v.toNeedleValue(cs)); err != nil { + for i, v := range cs.overflow { + if err := visit(toNeedleValue(cs.overflowExtra[i], v, cs)); err != nil { cs.RUnlock() return err } @@ -204,8 +257,8 @@ func (cm *CompactMap) Visit(visit func(NeedleValue) error) error { if i >= cs.counter { break } - if _, found := cs.overflow.findOverflowEntry(v.Key); !found { - if err := visit(v.toNeedleValue(cs)); err != nil { + if _, _, found := cs.findOverflowEntry(v.Key); !found { + if err := visit(toNeedleValue(cs.valuesExtra[i], v, cs)); err != nil { cs.RUnlock() return err } @@ -216,50 +269,20 @@ func (cm *CompactMap) Visit(visit func(NeedleValue) error) error { return nil } -func (o Overflow) deleteOverflowEntry(key SectionalNeedleId) Overflow { - length := len(o) - deleteCandidate := sort.Search(length, func(i int) bool { - return o[i].Key >= key - }) - if deleteCandidate != length && o[deleteCandidate].Key == key { - for i := deleteCandidate; i < length-1; i++ { - o[i] = o[i+1] - } - o = o[0 : length-1] +func toNeedleValue(snve SectionalNeedleValueExtra, snv SectionalNeedleValue, cs *CompactSection) NeedleValue { + offset := Offset{ + OffsetHigher: snve.OffsetHigher, + OffsetLower: snv.OffsetLower, } - return o + return NeedleValue{Key: NeedleId(snv.Key) + cs.start, Offset: offset, Size: snv.Size} } -func (o Overflow) setOverflowEntry(needleValue SectionalNeedleValue) Overflow { - insertCandidate := sort.Search(len(o), func(i int) bool { - return o[i].Key >= needleValue.Key - }) - if insertCandidate != len(o) && o[insertCandidate].Key == needleValue.Key { - o[insertCandidate] = needleValue - } else { - o = append(o, needleValue) - for i := len(o) - 1; i > insertCandidate; i-- { - o[i] = o[i-1] - } - o[insertCandidate] = needleValue +func (nv NeedleValue) toSectionalNeedleValue(cs *CompactSection) (SectionalNeedleValue, SectionalNeedleValueExtra) { + return SectionalNeedleValue{ + SectionalNeedleId(nv.Key - cs.start), + nv.Offset.OffsetLower, + nv.Size, + }, SectionalNeedleValueExtra{ + nv.Offset.OffsetHigher, } - return o -} - -func (o Overflow) findOverflowEntry(key SectionalNeedleId) (nv SectionalNeedleValue, found bool) { - foundCandidate := sort.Search(len(o), func(i int) bool { - return o[i].Key >= key - }) - if foundCandidate != len(o) && o[foundCandidate].Key == key { - return o[foundCandidate], true - } - return nv, false -} - -func (snv SectionalNeedleValue) toNeedleValue(cs *CompactSection) NeedleValue { - return NeedleValue{NeedleId(snv.Key) + cs.start, snv.Offset, snv.Size} -} - -func (nv NeedleValue) toSectionalNeedleValue(cs *CompactSection) SectionalNeedleValue { - return SectionalNeedleValue{SectionalNeedleId(nv.Key 
- cs.start), nv.Offset, nv.Size} } diff --git a/weed/storage/needle/compact_map_test.go b/weed/storage/needle/compact_map_test.go index 374b9ff4d..b9586ab54 100644 --- a/weed/storage/needle/compact_map_test.go +++ b/weed/storage/needle/compact_map_test.go @@ -98,60 +98,60 @@ func TestCompactMap(t *testing.T) { } func TestOverflow(t *testing.T) { - o := Overflow(make([]SectionalNeedleValue, 0)) + cs := NewCompactSection(1) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: ToOffset(12), Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 2, Offset: ToOffset(12), Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: ToOffset(12), Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: ToOffset(12), Size: 12}) - o = o.setOverflowEntry(SectionalNeedleValue{Key: 5, Offset: ToOffset(12), Size: 12}) + cs.setOverflowEntry(1, ToOffset(12), 12) + cs.setOverflowEntry(2, ToOffset(12), 12) + cs.setOverflowEntry(3, ToOffset(12), 12) + cs.setOverflowEntry(4, ToOffset(12), 12) + cs.setOverflowEntry(5, ToOffset(12), 12) - if o[2].Key != 3 { - t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key) + if cs.overflow[2].Key != 3 { + t.Fatalf("expecting o[2] has key 3: %+v", cs.overflow[2].Key) } - o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: ToOffset(24), Size: 24}) + cs.setOverflowEntry(3, ToOffset(24), 24) - if o[2].Key != 3 { - t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key) + if cs.overflow[2].Key != 3 { + t.Fatalf("expecting o[2] has key 3: %+v", cs.overflow[2].Key) } - if o[2].Size != 24 { - t.Fatalf("expecting o[2] has size 24: %+v", o[2].Size) + if cs.overflow[2].Size != 24 { + t.Fatalf("expecting o[2] has size 24: %+v", cs.overflow[2].Size) } - o = o.deleteOverflowEntry(4) + cs.deleteOverflowEntry(4) - if len(o) != 4 { - t.Fatalf("expecting 4 entries now: %+v", o) + if len(cs.overflow) != 4 { + t.Fatalf("expecting 4 entries now: %+v", cs.overflow) } - x, _ := o.findOverflowEntry(5) + _, x, _ := cs.findOverflowEntry(5) if x.Key != 5 { t.Fatalf("expecting entry 5 now: %+v", x) } - for i, x := range o { + for i, x := range cs.overflow { println("overflow[", i, "]:", x.Key) } println() - o = o.deleteOverflowEntry(1) + cs.deleteOverflowEntry(1) - for i, x := range o { + for i, x := range cs.overflow { println("overflow[", i, "]:", x.Key) } println() - o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: ToOffset(44), Size: 44}) - for i, x := range o { + cs.setOverflowEntry(4, ToOffset(44), 44) + for i, x := range cs.overflow { println("overflow[", i, "]:", x.Key) } println() - o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: ToOffset(11), Size: 11}) + cs.setOverflowEntry(1, ToOffset(11), 11) - for i, x := range o { + for i, x := range cs.overflow { println("overflow[", i, "]:", x.Key) } println() diff --git a/weed/storage/types/needle_types.go b/weed/storage/types/needle_types.go index d23666357..86eae03ac 100644 --- a/weed/storage/types/needle_types.go +++ b/weed/storage/types/needle_types.go @@ -8,8 +8,15 @@ import ( ) type Offset struct { - // b5 byte // unused - // b4 byte // unused + OffsetHigher + OffsetLower +} + +type OffsetHigher struct { + // b4 byte +} + +type OffsetLower struct { b3 byte b2 byte b1 byte @@ -19,9 +26,9 @@ type Offset struct { type Cookie uint32 const ( - OffsetSize = 4 + OffsetSize = 4 // + 1 SizeSize = 4 // uint32 size - NeedleEntrySize = NeedleIdSize + OffsetSize + SizeSize + NeedleEntrySize = CookieSize + NeedleIdSize + SizeSize TimestampSize = 8 // int64 size NeedlePaddingSize = 8 
MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 @@ -55,21 +62,26 @@ func OffsetToBytes(bytes []byte, offset Offset) { bytes[0] = offset.b3 } +// only for testing, will be removed later. func Uint32ToOffset(offset uint32) Offset { return Offset{ - b0: byte(offset), - b1: byte(offset >> 8), - b2: byte(offset >> 16), - b3: byte(offset >> 24), + OffsetLower: OffsetLower{ + b0: byte(offset), + b1: byte(offset >> 8), + b2: byte(offset >> 16), + b3: byte(offset >> 24), + }, } } func BytesToOffset(bytes []byte) Offset { return Offset{ - b0: bytes[3], - b1: bytes[2], - b2: bytes[1], - b3: bytes[0], + OffsetLower: OffsetLower{ + b0: bytes[3], + b1: bytes[2], + b2: bytes[1], + b3: bytes[0], + }, } } From 784c5bb73a71bad615e500e4254c11e736b99a41 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 8 Apr 2019 22:01:29 -0700 Subject: [PATCH 156/450] add build option support 5-byte offset --- weed/storage/types/needle_types.go | 53 ------------------- weed/storage/types/offset_4bytes.go | 63 +++++++++++++++++++++++ weed/storage/types/offset_5bytes.go | 80 +++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+), 53 deletions(-) create mode 100644 weed/storage/types/offset_4bytes.go create mode 100644 weed/storage/types/offset_5bytes.go diff --git a/weed/storage/types/needle_types.go b/weed/storage/types/needle_types.go index 86eae03ac..b591dd7c3 100644 --- a/weed/storage/types/needle_types.go +++ b/weed/storage/types/needle_types.go @@ -12,10 +12,6 @@ type Offset struct { OffsetLower } -type OffsetHigher struct { - // b4 byte -} - type OffsetLower struct { b3 byte b2 byte @@ -26,12 +22,10 @@ type OffsetLower struct { type Cookie uint32 const ( - OffsetSize = 4 // + 1 SizeSize = 4 // uint32 size NeedleEntrySize = CookieSize + NeedleIdSize + SizeSize TimestampSize = 8 // int64 size NeedlePaddingSize = 8 - MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 TombstoneFileSize = math.MaxUint32 CookieSize = 4 ) @@ -54,50 +48,3 @@ func ParseCookie(cookieString string) (Cookie, error) { } return Cookie(cookie), nil } - -func OffsetToBytes(bytes []byte, offset Offset) { - bytes[3] = offset.b0 - bytes[2] = offset.b1 - bytes[1] = offset.b2 - bytes[0] = offset.b3 -} - -// only for testing, will be removed later. 
-func Uint32ToOffset(offset uint32) Offset { - return Offset{ - OffsetLower: OffsetLower{ - b0: byte(offset), - b1: byte(offset >> 8), - b2: byte(offset >> 16), - b3: byte(offset >> 24), - }, - } -} - -func BytesToOffset(bytes []byte) Offset { - return Offset{ - OffsetLower: OffsetLower{ - b0: bytes[3], - b1: bytes[2], - b2: bytes[1], - b3: bytes[0], - }, - } -} - -func (offset Offset) IsZero() bool { - return offset.b0 == 0 && offset.b1 == 0 && offset.b2 == 0 && offset.b3 == 0 -} - -func ToOffset(offset int64) Offset { - smaller := uint32(offset / int64(NeedlePaddingSize)) - return Uint32ToOffset(smaller) -} - -func (offset Offset) ToAcutalOffset() (actualOffset int64) { - return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24) * int64(NeedlePaddingSize) -} - -func (offset Offset) String() string { - return fmt.Sprintf("%d", int64(offset.b0)+int64(offset.b1)<<8+int64(offset.b2)<<16+int64(offset.b3)<<24) -} diff --git a/weed/storage/types/offset_4bytes.go b/weed/storage/types/offset_4bytes.go new file mode 100644 index 000000000..9acd069d3 --- /dev/null +++ b/weed/storage/types/offset_4bytes.go @@ -0,0 +1,63 @@ +// +build !5BytesOffset + +package types + +import ( + "fmt" +) + +type OffsetHigher struct { + // b4 byte +} + +const ( + OffsetSize = 4 + MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 // 32GB +) + +func OffsetToBytes(bytes []byte, offset Offset) { + bytes[3] = offset.b0 + bytes[2] = offset.b1 + bytes[1] = offset.b2 + bytes[0] = offset.b3 +} + +// only for testing, will be removed later. +func Uint32ToOffset(offset uint32) Offset { + return Offset{ + OffsetLower: OffsetLower{ + b0: byte(offset), + b1: byte(offset >> 8), + b2: byte(offset >> 16), + b3: byte(offset >> 24), + }, + } +} + +func BytesToOffset(bytes []byte) Offset { + return Offset{ + OffsetLower: OffsetLower{ + b0: bytes[3], + b1: bytes[2], + b2: bytes[1], + b3: bytes[0], + }, + } +} + +func (offset Offset) IsZero() bool { + return offset.b0 == 0 && offset.b1 == 0 && offset.b2 == 0 && offset.b3 == 0 +} + +func ToOffset(offset int64) Offset { + smaller := uint32(offset / int64(NeedlePaddingSize)) + return Uint32ToOffset(smaller) +} + +func (offset Offset) ToAcutalOffset() (actualOffset int64) { + return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24) * int64(NeedlePaddingSize) +} + +func (offset Offset) String() string { + return fmt.Sprintf("%d", int64(offset.b0)+int64(offset.b1)<<8+int64(offset.b2)<<16+int64(offset.b3)<<24) +} diff --git a/weed/storage/types/offset_5bytes.go b/weed/storage/types/offset_5bytes.go new file mode 100644 index 000000000..f57e4f6d4 --- /dev/null +++ b/weed/storage/types/offset_5bytes.go @@ -0,0 +1,80 @@ +// +build 5BytesOffset + +package types + +import ( + "fmt" +) + +type OffsetHigher struct { + b4 byte +} + +const ( + OffsetSize = 4 + 1 + MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 * 256 /* 256 is from the extra byte */ // 8TB +) + +func OffsetToBytes(bytes []byte, offset Offset) { + bytes[4] = offset.b4 + bytes[3] = offset.b0 + bytes[2] = offset.b1 + bytes[1] = offset.b2 + bytes[0] = offset.b3 +} + +// only for testing, will be removed later. 
+func Uint32ToOffset(offset uint32) Offset { + return Offset{ + OffsetHigher: OffsetHigher{ + b4: byte(offset >> 32), + }, + OffsetLower: OffsetLower{ + b0: byte(offset), + b1: byte(offset >> 8), + b2: byte(offset >> 16), + b3: byte(offset >> 24), + }, + } +} + +func BytesToOffset(bytes []byte) Offset { + return Offset{ + OffsetHigher: OffsetHigher{ + b4: bytes[4], + }, + OffsetLower: OffsetLower{ + b0: bytes[3], + b1: bytes[2], + b2: bytes[1], + b3: bytes[0], + }, + } +} + +func (offset Offset) IsZero() bool { + return offset.b0 == 0 && offset.b1 == 0 && offset.b2 == 0 && offset.b3 == 0 && offset.b4 == 0 +} + +func ToOffset(offset int64) Offset { + smaller := offset / int64(NeedlePaddingSize) + return Offset{ + OffsetHigher: OffsetHigher{ + b4: byte(smaller >> 32), + }, + OffsetLower: OffsetLower{ + b0: byte(smaller), + b1: byte(smaller >> 8), + b2: byte(smaller >> 16), + b3: byte(smaller >> 24), + }, + } +} + +func (offset Offset) ToAcutalOffset() (actualOffset int64) { + return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24 + int64(offset.b4)<<32) * int64(NeedlePaddingSize) +} + +func (offset Offset) String() string { + return fmt.Sprintf("%d", int64(offset.b0)+int64(offset.b1)<<8+int64(offset.b2)<<16+int64(offset.b3)<<24+int64(offset.b4)<<32) +} From 88ba08cb9e8e15e34e481143a6beec1390dc4874 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 8 Apr 2019 22:32:42 -0700 Subject: [PATCH 157/450] add large disk support --- .travis.yml | 3 +++ Makefile | 17 ++++++++++++++++- weed/command/master.go | 2 +- weed/command/server.go | 2 +- weed/util/constants.go | 8 ++++++-- weed/util/constants_4bytes.go | 8 ++++++++ weed/util/constants_5bytes.go | 8 ++++++++ 7 files changed, 43 insertions(+), 5 deletions(-) create mode 100644 weed/util/constants_4bytes.go create mode 100644 weed/util/constants_5bytes.go diff --git a/.travis.yml b/.travis.yml index 612f643e9..b42847e8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,9 +29,12 @@ deploy: - build/linux_arm64.tar.gz - build/linux_386.tar.gz - build/linux_amd64.tar.gz + - build/linux_amd64_large_disk.tar.gz - build/darwin_amd64.tar.gz + - build/darwin_amd64_large_disk.tar.gz - build/windows_386.zip - build/windows_amd64.zip + - build/windows_amd64_large_disk.zip - build/freebsd_arm.tar.gz - build/freebsd_amd64.tar.gz - build/freebsd_386.tar.gz diff --git a/Makefile b/Makefile index 9357b2a03..cce9d586d 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,9 @@ build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -stat tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3) zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3) +build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR) +tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3) +zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3) all: build @@ -32,9 +35,21 @@ linux: deps mkdir -p linux GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR) -release: deps windows_build darwin_build linux_build bsd_build +release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build ##### LINUX BUILDS ##### +5_byte_linux_build: + $(call build_large,linux,amd64,) + $(call tar_large,linux,amd64) + +5_byte_darwin_build: + $(call build_large,darwin,amd64,) + $(call 
tar_large,darwin,amd64) + +5_byte_windows_build: + $(call build_large,windows,amd64,.exe) + $(call zip_large,windows,amd64,.exe) + linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz build/linux_386.tar.gz: $(sources) diff --git a/weed/command/master.go b/weed/command/master.go index 15d1171e0..cd5704c3f 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -70,7 +70,7 @@ func runMaster(cmd *Command, args []string) bool { if *masterWhiteListOption != "" { masterWhiteList = strings.Split(*masterWhiteListOption, ",") } - if *volumeSizeLimitMB > 30*1000 { + if *volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { glog.Fatalf("volumeSizeLimitMB should be smaller than 30000") } diff --git a/weed/command/server.go b/weed/command/server.go index d88ded0ee..1638a7218 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -140,7 +140,7 @@ func runServer(cmd *Command, args []string) bool { folders := strings.Split(*volumeDataFolders, ",") - if *masterVolumeSizeLimitMB > 30*1000 { + if *masterVolumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000") } diff --git a/weed/util/constants.go b/weed/util/constants.go index 0a4980e6f..1ed60b26f 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,9 @@ package util -const ( - VERSION = "1.28" +import ( + "fmt" +) + +var ( + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 28) ) diff --git a/weed/util/constants_4bytes.go b/weed/util/constants_4bytes.go new file mode 100644 index 000000000..a29d9d3b0 --- /dev/null +++ b/weed/util/constants_4bytes.go @@ -0,0 +1,8 @@ +// +build !5BytesOffset + +package util + +const ( + sizeLimit = "30GB" + VolumeSizeLimitGB = 30 +) diff --git a/weed/util/constants_5bytes.go b/weed/util/constants_5bytes.go new file mode 100644 index 000000000..91ce4066f --- /dev/null +++ b/weed/util/constants_5bytes.go @@ -0,0 +1,8 @@ +// +build 5BytesOffset + +package util + +const ( + sizeLimit = "8000GB" + VolumeSizeLimitGB = 8000 +) From 6d4c44a85c78adc32a4cf5ca4e2e810265cc5461 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 8 Apr 2019 22:41:59 -0700 Subject: [PATCH 158/450] 1.29 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 1ed60b26f..aa8399523 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 28) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 29) ) From a55e708d36c31973a7d117f8ef1d4f36cd382801 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 9 Apr 2019 00:18:32 -0700 Subject: [PATCH 159/450] weed volume: disable block cache on leveldb saving 8MB for each volume, but at the cost of slower (offset,size) lookup --- weed/filer2/leveldb/leveldb_store.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 5b3a63959..06398d48e 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/syndtr/goleveldb/leveldb/opt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -39,7 +40,9 @@ func (store *LevelDBStore) initialize(dir string) (err error) { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } - if store.db, err = leveldb.OpenFile(dir, 
nil); err != nil { + if store.db, err = leveldb.OpenFile(dir, &opt.Options{ + BlockCacheCapacity: -1, // default value is 8MiB + }); err != nil { glog.Infof("filer store open dir %s: %v", dir, err) return } From 35aba35c07e75ba5849a32e8382e92aafd076765 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 9 Apr 2019 00:22:40 -0700 Subject: [PATCH 160/450] Revert "weed volume: disable block cache on leveldb" This reverts commit a55e708d36c31973a7d117f8ef1d4f36cd382801. --- weed/filer2/leveldb/leveldb_store.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 06398d48e..5b3a63959 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "github.com/syndtr/goleveldb/leveldb/opt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -40,9 +39,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } - if store.db, err = leveldb.OpenFile(dir, &opt.Options{ - BlockCacheCapacity: -1, // default value is 8MiB - }); err != nil { + if store.db, err = leveldb.OpenFile(dir, nil); err != nil { glog.Infof("filer store open dir %s: %v", dir, err) return } From 9924fa3b1a291ac6efe856619cfe98e21900ae87 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 9 Apr 2019 00:24:32 -0700 Subject: [PATCH 161/450] weed volume: disable block cache on leveldb saving 8MB for each volume, but at the cost of slower lookup --- weed/storage/needle_map_leveldb.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index 939e0cac4..c5c4f22eb 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "github.com/syndtr/goleveldb/leveldb/opt" "os" "path/filepath" @@ -27,7 +28,9 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File) (m *LevelDbNeedl glog.V(0).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) } glog.V(1).Infof("Opening %s...", dbFileName) - if m.db, err = leveldb.OpenFile(dbFileName, nil); err != nil { + if m.db, err = leveldb.OpenFile(dbFileName, &opt.Options{ + BlockCacheCapacity: -1, // default value is 8MiB + }); err != nil { return } glog.V(1).Infof("Loading %s...", indexFile.Name()) From 8a5ce16e967974e9a22f66fd64c991944d2206e2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 9 Apr 2019 09:42:06 -0700 Subject: [PATCH 162/450] weed volume: remove boltdb, btree options, add options for leveldb medium, large --- weed/command/server.go | 2 +- weed/command/volume.go | 8 +- weed/storage/needle_map.go | 6 +- weed/storage/needle_map_boltdb.go | 185 ----------------------------- weed/storage/needle_map_leveldb.go | 7 +- weed/storage/volume_loading.go | 31 +++-- 6 files changed, 34 insertions(+), 205 deletions(-) delete mode 100644 weed/storage/needle_map_boltdb.go diff --git a/weed/command/server.go b/weed/command/server.go index 1638a7218..a56944b48 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -90,7 +90,7 @@ func init() { serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") - serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", 
"Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") + serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") diff --git a/weed/command/volume.go b/weed/command/volume.go index 2ee6bb11a..88aee95ae 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -58,7 +58,7 @@ func init() { v.maxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name") v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name") - v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") + v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") @@ -142,10 +142,10 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v switch *v.indexType { case "leveldb": volumeNeedleMapKind = storage.NeedleMapLevelDb - case "boltdb": + case "leveldbMedium": + volumeNeedleMapKind = storage.NeedleMapBoltDb + case "leveldbLarge": volumeNeedleMapKind = storage.NeedleMapBoltDb - case "btree": - volumeNeedleMapKind = storage.NeedleMapBtree } masters := *v.masters diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go index 6d815679b..ee31b06f6 100644 --- a/weed/storage/needle_map.go +++ b/weed/storage/needle_map.go @@ -14,8 +14,10 @@ import ( type NeedleMapType int const ( - NeedleMapInMemory NeedleMapType = iota - NeedleMapLevelDb + NeedleMapInMemory NeedleMapType = iota + NeedleMapLevelDb // small memory footprint, 4MB total, 1 write buffer, 3 block buffer + NeedleMapLevelDbMedium // medium memory footprint, 8MB total, 3 write buffer, 5 block buffer + NeedleMapLevelDbLarge // large memory footprint, 12MB total, 4write buffer, 8 block buffer NeedleMapBoltDb NeedleMapBtree ) diff --git a/weed/storage/needle_map_boltdb.go b/weed/storage/needle_map_boltdb.go deleted file mode 100644 index e2e4d22f7..000000000 --- a/weed/storage/needle_map_boltdb.go +++ /dev/null @@ -1,185 +0,0 @@ -package storage - -import ( - "fmt" - "os" - - "github.com/boltdb/bolt" - - "errors" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage/needle" - . 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" -) - -type BoltDbNeedleMap struct { - dbFileName string - db *bolt.DB - baseNeedleMapper -} - -var boltdbBucket = []byte("weed") - -var NotFound = errors.New("not found") - -func NewBoltDbNeedleMap(dbFileName string, indexFile *os.File) (m *BoltDbNeedleMap, err error) { - m = &BoltDbNeedleMap{dbFileName: dbFileName} - m.indexFile = indexFile - if !isBoltDbFresh(dbFileName, indexFile) { - glog.V(0).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) - generateBoltDbFile(dbFileName, indexFile) - glog.V(0).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) - } - glog.V(1).Infof("Opening %s...", dbFileName) - if m.db, err = bolt.Open(dbFileName, 0644, nil); err != nil { - return - } - glog.V(1).Infof("Loading %s...", indexFile.Name()) - mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) - if indexLoadError != nil { - return nil, indexLoadError - } - m.mapMetric = *mm - return -} - -func isBoltDbFresh(dbFileName string, indexFile *os.File) bool { - // normally we always write to index file first - dbLogFile, err := os.Open(dbFileName) - if err != nil { - return false - } - defer dbLogFile.Close() - dbStat, dbStatErr := dbLogFile.Stat() - indexStat, indexStatErr := indexFile.Stat() - if dbStatErr != nil || indexStatErr != nil { - glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) - return false - } - - return dbStat.ModTime().After(indexStat.ModTime()) -} - -func generateBoltDbFile(dbFileName string, indexFile *os.File) error { - db, err := bolt.Open(dbFileName, 0644, nil) - if err != nil { - return err - } - defer db.Close() - return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error { - if !offset.IsZero() && size != TombstoneFileSize { - boltDbWrite(db, key, offset, size) - } else { - boltDbDelete(db, key) - } - return nil - }) -} - -func (m *BoltDbNeedleMap) Get(key NeedleId) (element *needle.NeedleValue, ok bool) { - var offset Offset - var size uint32 - bytes := make([]byte, NeedleIdSize) - NeedleIdToBytes(bytes, key) - err := m.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(boltdbBucket) - if bucket == nil { - return fmt.Errorf("Bucket %q not found!", boltdbBucket) - } - - data := bucket.Get(bytes) - - if len(data) == 0 { - return NotFound - } - - if len(data) != OffsetSize+SizeSize { - glog.V(0).Infof("key:%v has wrong data length: %d", key, len(data)) - return fmt.Errorf("key:%v has wrong data length: %d", key, len(data)) - } - - offset = BytesToOffset(data[0:OffsetSize]) - size = util.BytesToUint32(data[OffsetSize : OffsetSize+SizeSize]) - - return nil - }) - - if err != nil { - return nil, false - } - return &needle.NeedleValue{Key: key, Offset: offset, Size: size}, true -} - -func (m *BoltDbNeedleMap) Put(key NeedleId, offset Offset, size uint32) error { - var oldSize uint32 - if oldNeedle, ok := m.Get(key); ok { - oldSize = oldNeedle.Size - } - m.logPut(key, oldSize, size) - // write to index file first - if err := m.appendToIndexFile(key, offset, size); err != nil { - return fmt.Errorf("cannot write to indexfile %s: %v", m.indexFile.Name(), err) - } - return boltDbWrite(m.db, key, offset, size) -} - -func boltDbWrite(db *bolt.DB, - key NeedleId, offset Offset, size uint32) error { - - bytes := make([]byte, NeedleIdSize+OffsetSize+SizeSize) - NeedleIdToBytes(bytes[0:NeedleIdSize], key) - OffsetToBytes(bytes[NeedleIdSize:NeedleIdSize+OffsetSize], offset) - 
util.Uint32toBytes(bytes[NeedleIdSize+OffsetSize:NeedleIdSize+OffsetSize+SizeSize], size) - - return db.Update(func(tx *bolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(boltdbBucket) - if err != nil { - return err - } - - err = bucket.Put(bytes[0:NeedleIdSize], bytes[NeedleIdSize:NeedleIdSize+OffsetSize+SizeSize]) - if err != nil { - return err - } - return nil - }) -} -func boltDbDelete(db *bolt.DB, key NeedleId) error { - bytes := make([]byte, NeedleIdSize) - NeedleIdToBytes(bytes, key) - return db.Update(func(tx *bolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(boltdbBucket) - if err != nil { - return err - } - - err = bucket.Delete(bytes) - if err != nil { - return err - } - return nil - }) -} - -func (m *BoltDbNeedleMap) Delete(key NeedleId, offset Offset) error { - if oldNeedle, ok := m.Get(key); ok { - m.logDelete(oldNeedle.Size) - } - // write to index file first - if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { - return err - } - return boltDbDelete(m.db, key) -} - -func (m *BoltDbNeedleMap) Close() { - m.indexFile.Close() - m.db.Close() -} - -func (m *BoltDbNeedleMap) Destroy() error { - m.Close() - os.Remove(m.indexFile.Name()) - return os.Remove(m.dbFileName) -} diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index c5c4f22eb..4d5280938 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -19,7 +19,7 @@ type LevelDbNeedleMap struct { baseNeedleMapper } -func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File) (m *LevelDbNeedleMap, err error) { +func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Options) (m *LevelDbNeedleMap, err error) { m = &LevelDbNeedleMap{dbFileName: dbFileName} m.indexFile = indexFile if !isLevelDbFresh(dbFileName, indexFile) { @@ -28,9 +28,8 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File) (m *LevelDbNeedl glog.V(0).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) } glog.V(1).Infof("Opening %s...", dbFileName) - if m.db, err = leveldb.OpenFile(dbFileName, &opt.Options{ - BlockCacheCapacity: -1, // default value is 8MiB - }); err != nil { + + if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil { return } glog.V(1).Infof("Loading %s...", indexFile.Name()) diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index 37a6e07b2..14013b302 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "github.com/syndtr/goleveldb/leveldb/opt" "os" "time" @@ -82,18 +83,30 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind } case NeedleMapLevelDb: glog.V(0).Infoln("loading leveldb", fileName+".ldb") - if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile); e != nil { + opts := &opt.Options{ + BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB + } + if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) } - case NeedleMapBoltDb: - glog.V(0).Infoln("loading boltdb", fileName+".bdb") - if v.nm, e = NewBoltDbNeedleMap(fileName+".bdb", indexFile); e != nil { - glog.V(0).Infof("loading boltdb %s error: %v", fileName+".bdb", e) + case NeedleMapLevelDbMedium: + glog.V(0).Infoln("loading leveldb medium", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 4 * 1024 * 1024, // 
default value is 8MiB + WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB } - case NeedleMapBtree: - glog.V(0).Infoln("loading index", fileName+".idx", "to btree readonly", v.readOnly) - if v.nm, e = LoadBtreeNeedleMap(indexFile); e != nil { - glog.V(0).Infof("loading index %s to btree error: %v", fileName+".idx", e) + if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) + } + case NeedleMapLevelDbLarge: + glog.V(0).Infoln("loading leveldb large", fileName+".ldb") + opts := &opt.Options{ + BlockCacheCapacity: 8 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 4 * 1024 * 1024, // default value is 4MiB + } + if v.nm, e = NewLevelDbNeedleMap(fileName+".ldb", indexFile, opts); e != nil { + glog.V(0).Infof("loading leveldb %s error: %v", fileName+".ldb", e) } } } From 3555628ad5e4aa5530183eebfdb99d7d94d193c9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 9 Apr 2019 10:08:59 -0700 Subject: [PATCH 163/450] weed volume: fix bug, remove boltdb, btree --- weed/command/volume.go | 4 ++-- weed/storage/needle_map.go | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/weed/command/volume.go b/weed/command/volume.go index 88aee95ae..b87555456 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -143,9 +143,9 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v case "leveldb": volumeNeedleMapKind = storage.NeedleMapLevelDb case "leveldbMedium": - volumeNeedleMapKind = storage.NeedleMapBoltDb + volumeNeedleMapKind = storage.NeedleMapLevelDbMedium case "leveldbLarge": - volumeNeedleMapKind = storage.NeedleMapBoltDb + volumeNeedleMapKind = storage.NeedleMapLevelDbLarge } masters := *v.masters diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go index ee31b06f6..92fc06aae 100644 --- a/weed/storage/needle_map.go +++ b/weed/storage/needle_map.go @@ -18,8 +18,6 @@ const ( NeedleMapLevelDb // small memory footprint, 4MB total, 1 write buffer, 3 block buffer NeedleMapLevelDbMedium // medium memory footprint, 8MB total, 3 write buffer, 5 block buffer NeedleMapLevelDbLarge // large memory footprint, 12MB total, 4write buffer, 8 block buffer - NeedleMapBoltDb - NeedleMapBtree ) type NeedleMapper interface { From 3895c868121d747392300a27158dd438b9b53c21 Mon Sep 17 00:00:00 2001 From: Zicklag Date: Tue, 9 Apr 2019 15:57:38 +0000 Subject: [PATCH 164/450] Add cronjob Mode For The Docker Container fixes #907 --- docker/Dockerfile | 11 +++++++++++ docker/entrypoint.sh | 8 ++++++++ docker/seaweedfs-compose.yml | 10 ++++++++++ 3 files changed, 29 insertions(+) diff --git a/docker/Dockerfile b/docker/Dockerfile index 75efc7ec0..f3afa88fd 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,9 +1,20 @@ FROM frolvlad/alpine-glibc +# Supercronic install settings +ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \ + SUPERCRONIC=supercronic-linux-amd64 \ + SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea + +# Install SeaweedFS and Supercronic ( for cron job mode ) # Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format" RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 
'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \ tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \ + curl -fsSLO "$SUPERCRONIC_URL" && \ + echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ + chmod +x "$SUPERCRONIC" && \ + mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ + ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \ apk del build-dependencies && \ rm -rf /tmp/* diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 660e51766..4e0d659ae 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -44,6 +44,14 @@ case "$1" in exec /usr/bin/weed $@ $ARGS ;; + 'cronjob') + MASTER=${WEED_MASTER-localhost:9333} + CRON_SCHEDULE=${CRON_SCHEDULE-*/5 * * * * *} + echo "$CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab + echo "Running Crontab:" + cat /crontab + exec supercronic /crontab + ;; *) exec /usr/bin/weed $@ ;; diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 05ed0e69e..d66b921bb 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -26,6 +26,16 @@ services: depends_on: - master - volume + cronjob: + image: chrislusf/seaweedfs # use a remote image + command: 'cronjob' + environment: + # Run re-replication every 2 minutes + CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *' + WEED_MASTER: master:9333 # Default: localhost:9333 + depends_on: + - master + - volume s3: image: chrislusf/seaweedfs # use a remote image ports: From c1a0403da2b0fb6f4eebc48d8dc621435c6e0b12 Mon Sep 17 00:00:00 2001 From: stlpmo-jn Date: Wed, 10 Apr 2019 19:41:55 +0800 Subject: [PATCH 165/450] repair the error replications of the volume --- weed/pb/volume_server.proto | 1 + weed/pb/volume_server_pb/volume_server.pb.go | 8 + weed/server/volume_grpc_replicate.go | 66 ++++- weed/storage/volume.go | 19 ++ weed/topology/replication_health_checker.go | 297 +++++++++++++++++++ 5 files changed, 380 insertions(+), 11 deletions(-) create mode 100644 weed/topology/replication_health_checker.go diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 3b5b36a21..1cef07dce 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -171,6 +171,7 @@ message ReadVolumeFileStatusResponse { uint64 idx_file_size = 3; uint64 dat_file_timestamp = 4; uint64 dat_file_size = 5; + uint64 file_count = 6; } message DiskStatus { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 0f3b47ee0..81cae93c6 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -656,6 +656,7 @@ type ReadVolumeFileStatusResponse struct { IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"` DatFileTimestamp uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp,json=datFileTimestamp" json:"dat_file_timestamp,omitempty"` DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize" json:"dat_file_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` } func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } @@ -698,6 +699,13 @@ func (m *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { return 0 } +func (m *ReadVolumeFileStatusResponse) GetFileCount() uint64 { + if m != nil { + return m.FileCount + } + return 0 +} + type DiskStatus struct { Dir string 
`protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` diff --git a/weed/server/volume_grpc_replicate.go b/weed/server/volume_grpc_replicate.go index 1a31a37f3..c991a496e 100644 --- a/weed/server/volume_grpc_replicate.go +++ b/weed/server/volume_grpc_replicate.go @@ -3,6 +3,7 @@ package weed_server import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" @@ -34,22 +35,28 @@ func (vs *VolumeServer) ReplicateVolume(ctx context.Context, req *volume_server_ // send .idx file // send .dat file // confirm size and timestamp - + var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse + datFileName := volumeFileName + ".dat" + idxFileName := volumeFileName + ".idx" err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - - // TODO read file sizes before copying - client.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{}) + var err error + volFileInfoResp, err = client.ReadVolumeFileStatus(ctx, + &volume_server_pb.ReadVolumeFileStatusRequest{ + VolumeId: req.VolumeId, + }) + if nil != err { + return fmt.Errorf("read volume file status failed, %v", err) + } copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ VolumeId: req.VolumeId, IsIdxFile: true, }) - if err != nil { return fmt.Errorf("failed to start copying volume %d idx file: %v", req.VolumeId, err) } - err = writeToFile(copyFileClient, volumeFileName+".idx") + err = writeToFile(copyFileClient, idxFileName) if err != nil { return fmt.Errorf("failed to copy volume %d idx file: %v", req.VolumeId, err) } @@ -58,24 +65,26 @@ func (vs *VolumeServer) ReplicateVolume(ctx context.Context, req *volume_server_ VolumeId: req.VolumeId, IsDatFile: true, }) - if err != nil { return fmt.Errorf("failed to start copying volume %d dat file: %v", req.VolumeId, err) } - err = writeToFile(copyFileClient, volumeFileName+".dat") + err = writeToFile(copyFileClient, datFileName) if err != nil { return fmt.Errorf("failed to copy volume %d dat file: %v", req.VolumeId, err) } return nil }) - if err != nil { + os.Remove(idxFileName) + os.Remove(datFileName) return nil, err } - // TODO: check the timestamp and size + if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16 + return nil, err + } // mount the volume err = vs.store.MountVolume(storage.VolumeId(req.VolumeId)) @@ -84,11 +93,35 @@ func (vs *VolumeServer) ReplicateVolume(ctx context.Context, req *volume_server_ } return &volume_server_pb.ReplicateVolumeResponse{}, err +} +/** + only check the the differ of the file size + todo: maybe should check the received count and deleted count of the volume + */ +func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) error { + stat, err := os.Stat(idxFileName) + if err != nil { + return fmt.Errorf("get idx file info failed, %v", err) + } + if originFileInf.IdxFileSize != uint64(stat.Size()) { + return fmt.Errorf("the idx file size [%v] is not same as origin file size [%v]", + stat.Size(), originFileInf.IdxFileSize) + } + + stat, err = os.Stat(datFileName) + if err != nil { + return fmt.Errorf("get dat file info failed, %v", err) + } + if originFileInf.DatFileSize != uint64(stat.Size()) { + return 
fmt.Errorf("the dat file size [%v] is not same as origin file size [%v]", + stat.Size(), originFileInf.DatFileSize) + } + return nil } func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error { - println("writing to ", fileName) + glog.V(4).Infof("writing to ", fileName) dst, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return nil @@ -110,6 +143,17 @@ func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName s func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_server_pb.ReadVolumeFileStatusRequest) (*volume_server_pb.ReadVolumeFileStatusResponse, error) { resp := &volume_server_pb.ReadVolumeFileStatusResponse{} + v := vs.store.GetVolume(storage.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp.VolumeId = req.VolumeId + resp.DatFileSize = v.DataFileSize() + resp.IdxFileSize = v.IndexFileSize() + resp.DatFileTimestamp = v.LastModifiedTime() + resp.IdxFileTimestamp = v.LastModifiedTime() + resp.FileCount = uint64(v.FileCount()) return resp, nil } diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 807fefa38..280963c2c 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -79,6 +79,25 @@ func (v *Volume) Size() int64 { return 0 // -1 causes integer overflow and the volume to become unwritable. } +func (v *Volume)IndexFileSize() uint64 { + return v.nm.IndexFileSize() +} + +func (v *Volume)DataFileSize() uint64 { + return uint64(v.Size()) +} + +/** +unix time in seconds + */ +func (v *Volume)LastModifiedTime() uint64 { + return v.lastModifiedTime +} + +func (v *Volume)FileCount() uint { + return uint(v.nm.FileCount()) +} + // Close cleanly shuts down this volume func (v *Volume) Close() { v.dataFileAccessLock.Lock() diff --git a/weed/topology/replication_health_checker.go b/weed/topology/replication_health_checker.go new file mode 100644 index 000000000..947e7d45c --- /dev/null +++ b/weed/topology/replication_health_checker.go @@ -0,0 +1,297 @@ +package topology + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "google.golang.org/grpc" + "sort" + "strings" + "sync" +) + +/** + check the replication health + */ +func (t *Topology) RepairUnhealthyReplicationInLayout(grpcDialOption grpc.DialOption, layout *VolumeLayout, eVid storage.VolumeId) error { + ctx := context.Background() + locations, exist := layout.vid2location[eVid] + if !exist { + retErr := fmt.Errorf("the volume:%v has no locations", eVid) + glog.V(0).Infof(retErr.Error()) + return retErr + } + + //glog.V(5).Infof("volume:%v, locations:%v", eVid, locations.list) + fileStat, err := getReplicationInfo(grpcDialOption, ctx, eVid, locations) + if err != nil { + glog.Errorf("get replication status failed, %v", err) + return err + } + + if isSameVolumeReplications(fileStat, layout.volumeSizeLimit) { + glog.V(0).Infof("the volume:%v has %d same replication, need not repair", eVid, len(fileStat)) + return nil + } + + // compact all the replications of volume + { + glog.V(4).Infof("begin compact all the replications of volume:%v", eVid) + allUrls := make([]string, 0, len(fileStat)) + for _, fs := range fileStat { + allUrls = append(allUrls, fs.url) + } + + if tryBatchCompactVolume(ctx, grpcDialOption, eVid, allUrls) == false { + err := 
fmt.Errorf("compact all the replications of volume:%v", eVid) + glog.Error(err.Error()) + return err + } + glog.V(4).Infof("success compact all the replications of volume:%v", eVid) + } + + // get replication status again + fileStat, err = getReplicationInfo(grpcDialOption, ctx, eVid, locations) + if err != nil { + return err + } + + okUrls, errUrls := filterErrorReplication(fileStat) + if len(errUrls) == 0 { + return nil // they are the same + } + + if len(okUrls) == 0 { + return fmt.Errorf("no correct volume replications, that's impossible") + } + + glog.V(4).Infof("need repair replication : %v", errUrls) + if len(locations.list) <= 0 { + return fmt.Errorf("that's impossible, the locatins of volume:%v is empty", eVid) + } + for _, url := range errUrls { + vInfo := locations.list[0].volumes[eVid] + err = syncReplication(grpcDialOption, okUrls[0], url, vInfo) + if nil != err { + glog.Error(err) + return err + } + } + return nil +} + +type FileStatus struct { + url string + fileStat *volume_server_pb.ReadVolumeFileStatusResponse +} + +func getReplicationInfo(grpcDialOption grpc.DialOption, ctx context.Context, vid storage.VolumeId, locs *VolumeLocationList) (fs []FileStatus, err error) { + type ResponsePair struct { + url string + status *volume_server_pb.ReadVolumeFileStatusResponse + err error + } + + var wg sync.WaitGroup + resultChan := make(chan ResponsePair, len(locs.list)) + wg.Add(len(locs.list)) + getFileStatFunc := func(url string, volumeId storage.VolumeId) { + defer wg.Done() + glog.V(4).Infof("volumeId:%v, location:%v", volumeId, url) + err := operation.WithVolumeServerClient(url, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + req := &volume_server_pb.ReadVolumeFileStatusRequest{ + VolumeId: uint32(volumeId), + } + respTmp, err := client.ReadVolumeFileStatus(ctx, req) + resultChan <- ResponsePair{ + url: url, + status: respTmp, + err: err, + } + return nil + }) + if nil != err { + glog.Error(err) + } + } + for _, node := range locs.list { + go getFileStatFunc(node.Url(), vid) + } + + go func() { // close channel + wg.Wait() + close(resultChan) + }() + + var errs []string + for result := range resultChan { + if result.err == nil { + fs = append(fs, FileStatus{ + url: result.url, + fileStat: result.status, + }) + continue + } + tmp := fmt.Sprintf("url : %s, error : %v", result.url, result.err) + errs = append(errs, tmp) + } + + if len(fs) == len(locs.list) { + return fs, nil + } + err = fmt.Errorf("get volume[%v] replication status failed, err : %s", vid, strings.Join(errs, "; ")) + return nil, err +} + +/** + : + the file count is the total count of the volume received from user clients +todo: this policy is not perfected or not rigorous, need fix + */ +func filterErrorReplication(fileStat []FileStatus) (okUrls, errUrls []string) { + sort.Slice(fileStat, func(i, j int) bool { + return fileStat[i].fileStat.FileCount > fileStat[j].fileStat.FileCount + }) + if fileStat[0].fileStat.FileCount != fileStat[len(fileStat)-1].fileStat.FileCount { + okFileCounter := fileStat[0].fileStat.FileCount + for _, v := range fileStat { + if okFileCounter == v.fileStat.FileCount { + okUrls = append(okUrls, v.url) + } else { + errUrls = append(errUrls, v.url) + } + } + return + } + return +} + +// execute the compact transaction +func compactVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeUrl string, vid storage.VolumeId) bool { + glog.V(0).Infoln("Start vacuuming", vid, "on", volumeUrl) + err := operation.WithVolumeServerClient(volumeUrl, grpcDialOption, 
func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{ + VolumeId: uint32(vid), + }) + return err + }) + if err != nil { + glog.Errorf("Error when vacuuming %d on %s: %v", vid, volumeUrl, err) + return false + } + glog.V(0).Infof("Complete vacuuming volume:%v on %s", vid, volumeUrl) + return true +} + +// commit the compact transaction when compactVolume() return true +func commitCompactedVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeUrl string, vid storage.VolumeId) bool { + err := operation.WithVolumeServerClient(volumeUrl, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{ + VolumeId: uint32(vid), + }) + return err + }) + if err != nil { + glog.Errorf("Error when committing vacuum %d on %s: %v", vid, volumeUrl, err) + return false + } + glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, volumeUrl) + return true +} + +// rollback the compact transaction when compactVolume return false +func cleanupCompactedVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeUrl string, vid storage.VolumeId) bool { + glog.V(0).Infoln("Start cleaning up", vid, "on", volumeUrl) + err := operation.WithVolumeServerClient(volumeUrl, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{ + VolumeId: uint32(vid), + }) + return err + }) + if err != nil { + glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, volumeUrl, err) + return false + } + glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, volumeUrl) + return false +} + +func tryCompactVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid storage.VolumeId, volumeUrl string) bool { + if compactVolume(ctx, grpcDialOption, volumeUrl, vid) == false { + return cleanupCompactedVolume(ctx, grpcDialOption, volumeUrl, vid) + } + return commitCompactedVolume(ctx, grpcDialOption, volumeUrl, vid) +} + +func tryBatchCompactVolume(ctx context.Context, grpcDialOption grpc.DialOption, + vid storage.VolumeId, urls []string) bool { + resultChan := make(chan error) + var wg sync.WaitGroup + wg.Add(len(urls)) + for _, url := range urls { + go func(volumeUrl string) { + defer wg.Done() + if tryCompactVolume(ctx, grpcDialOption, vid, volumeUrl) == false { + resultChan <- fmt.Errorf("url:%s", volumeUrl) + } + }(url) + } + + go func() { + wg.Wait() + close(resultChan) + }() + + var errs []string + for result := range resultChan { + if result != nil { + errs = append(errs, result.Error()) + } + } + if len(errs) > 0 { + glog.Errorf("consist volume:%v compact reversion failed, %s", vid, strings.Join(errs, "; ")) + return false + } + return true +} + +func isSameVolumeReplications(fileStat []FileStatus, volumeSizeLimit uint64) bool { + fileSizeSet := make(map[uint64]bool) + fileCountSet := make(map[uint64]bool) + lastModifiedSet := make(map[uint64]bool) + var oneFileSize uint64 = 0 + for _, v := range fileStat { + fileCountSet[v.fileStat.FileCount] = true + lastModifiedSet[v.fileStat.DatFileTimestamp] = true + fileSizeSet[v.fileStat.DatFileSize] = true + oneFileSize = v.fileStat.DatFileSize + } + + if (len(lastModifiedSet) == 1) && (len(fileCountSet) == 1) && + (len(fileSizeSet) == 1) && (oneFileSize >= volumeSizeLimit) { + return true + } 
+ return false +} + +func syncReplication(grpcDialOption grpc.DialOption, srcUrl, destUrl string, vinfo storage.VolumeInfo) error { + ctx := context.Background() + err := operation.WithVolumeServerClient(destUrl, grpcDialOption, + func(client volume_server_pb.VolumeServerClient) error { + if _, err := client.ReplicateVolume(ctx, &volume_server_pb.ReplicateVolumeRequest{ + VolumeId: uint32(vinfo.Id), + Collection: vinfo.Collection, + Replication: vinfo.ReplicaPlacement.String(), + Ttl: vinfo.Ttl.String(), + SourceDataNode: srcUrl, + }); err != nil { + glog.Errorf("sync replication failed, %v", err) + return err + } + return nil + }) + return err +} From 4df10e50b347ae7a50357a3ef30ec31d47842b63 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 10 Apr 2019 14:27:28 -0700 Subject: [PATCH 166/450] update glide.yaml --- weed/glide.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/weed/glide.yaml b/weed/glide.yaml index 8e4c090aa..805020be0 100644 --- a/weed/glide.yaml +++ b/weed/glide.yaml @@ -17,13 +17,13 @@ import: - service/s3 - service/s3/s3iface - service/sqs -- package: github.com/boltdb/bolt - package: github.com/chrislusf/raft subpackages: - protobuf - package: github.com/dgrijalva/jwt-go - package: github.com/disintegration/imaging - package: github.com/dustin/go-humanize +- package: github.com/gabriel-vasile/mimetype - package: github.com/go-redis/redis - package: github.com/go-sql-driver/mysql - package: github.com/gocql/gocql @@ -53,6 +53,7 @@ import: - package: github.com/syndtr/goleveldb subpackages: - leveldb + - leveldb/opt - leveldb/util - package: github.com/willf/bloom - package: gocloud.dev From f2031884f0a5e070ac1b16c968ece1664e20aafc Mon Sep 17 00:00:00 2001 From: stlpmo-jn Date: Thu, 11 Apr 2019 09:53:31 +0800 Subject: [PATCH 167/450] fix bug : CI build failed --- weed/server/volume_grpc_replicate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/server/volume_grpc_replicate.go b/weed/server/volume_grpc_replicate.go index c991a496e..c641755d0 100644 --- a/weed/server/volume_grpc_replicate.go +++ b/weed/server/volume_grpc_replicate.go @@ -121,7 +121,7 @@ func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse } func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error { - glog.V(4).Infof("writing to ", fileName) + glog.V(4).Infof("writing to %s", fileName) dst, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return nil From 2f76681d626d29c8ffd0cc81614b1c583514f8b6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 10 Apr 2019 21:41:17 -0700 Subject: [PATCH 168/450] refactor: adjust grpc API name --- weed/pb/volume_server.proto | 6 +- weed/pb/volume_server_pb/volume_server.pb.go | 208 +++++++++---------- weed/server/volume_grpc_admin.go | 4 +- weed/topology/allocate_volume.go | 2 +- 4 files changed, 110 insertions(+), 110 deletions(-) diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 3b5b36a21..538ddf9e1 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -19,7 +19,7 @@ service VolumeServer { rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) { } - rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { + rpc AllocateVolume (AllocateVolumeRequest) returns (AllocateVolumeResponse) { } rpc VolumeSyncStatus (VolumeSyncStatusRequest) returns (VolumeSyncStatusResponse) { @@ -94,14 +94,14 @@ message DeleteCollectionRequest { message 
DeleteCollectionResponse { } -message AssignVolumeRequest { +message AllocateVolumeRequest { uint32 volume_id = 1; string collection = 2; int64 preallocate = 3; string replication = 4; string ttl = 5; } -message AssignVolumeResponse { +message AllocateVolumeResponse { } message VolumeSyncStatusRequest { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 0f3b47ee0..829f141a0 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -23,8 +23,8 @@ It has these top-level messages: VacuumVolumeCleanupResponse DeleteCollectionRequest DeleteCollectionResponse - AssignVolumeRequest - AssignVolumeResponse + AllocateVolumeRequest + AllocateVolumeResponse VolumeSyncStatusRequest VolumeSyncStatusResponse VolumeFollowRequest @@ -282,7 +282,7 @@ func (m *DeleteCollectionResponse) String() string { return proto.Com func (*DeleteCollectionResponse) ProtoMessage() {} func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } -type AssignVolumeRequest struct { +type AllocateVolumeRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"` @@ -290,53 +290,53 @@ type AssignVolumeRequest struct { Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` } -func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } -func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} } +func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateVolumeRequest) ProtoMessage() {} +func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } -func (m *AssignVolumeRequest) GetVolumeId() uint32 { +func (m *AllocateVolumeRequest) GetVolumeId() uint32 { if m != nil { return m.VolumeId } return 0 } -func (m *AssignVolumeRequest) GetCollection() string { +func (m *AllocateVolumeRequest) GetCollection() string { if m != nil { return m.Collection } return "" } -func (m *AssignVolumeRequest) GetPreallocate() int64 { +func (m *AllocateVolumeRequest) GetPreallocate() int64 { if m != nil { return m.Preallocate } return 0 } -func (m *AssignVolumeRequest) GetReplication() string { +func (m *AllocateVolumeRequest) GetReplication() string { if m != nil { return m.Replication } return "" } -func (m *AssignVolumeRequest) GetTtl() string { +func (m *AllocateVolumeRequest) GetTtl() string { if m != nil { return m.Ttl } return "" } -type AssignVolumeResponse struct { +type AllocateVolumeResponse struct { } -func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } -func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} } +func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateVolumeResponse) ProtoMessage() {} +func 
(*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } type VolumeSyncStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -817,8 +817,8 @@ func init() { proto.RegisterType((*VacuumVolumeCleanupResponse)(nil), "volume_server_pb.VacuumVolumeCleanupResponse") proto.RegisterType((*DeleteCollectionRequest)(nil), "volume_server_pb.DeleteCollectionRequest") proto.RegisterType((*DeleteCollectionResponse)(nil), "volume_server_pb.DeleteCollectionResponse") - proto.RegisterType((*AssignVolumeRequest)(nil), "volume_server_pb.AssignVolumeRequest") - proto.RegisterType((*AssignVolumeResponse)(nil), "volume_server_pb.AssignVolumeResponse") + proto.RegisterType((*AllocateVolumeRequest)(nil), "volume_server_pb.AllocateVolumeRequest") + proto.RegisterType((*AllocateVolumeResponse)(nil), "volume_server_pb.AllocateVolumeResponse") proto.RegisterType((*VolumeSyncStatusRequest)(nil), "volume_server_pb.VolumeSyncStatusRequest") proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse") proto.RegisterType((*VolumeFollowRequest)(nil), "volume_server_pb.VolumeFollowRequest") @@ -857,7 +857,7 @@ type VolumeServerClient interface { VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) - AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) + AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) VolumeFollow(ctx context.Context, in *VolumeFollowRequest, opts ...grpc.CallOption) (VolumeServer_VolumeFollowClient, error) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) @@ -930,9 +930,9 @@ func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCol return out, nil } -func (c *volumeServerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { - out := new(AssignVolumeResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AssignVolume", in, out, c.cc, opts...) +func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { + out := new(AllocateVolumeResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -1067,7 +1067,7 @@ type VolumeServerServer interface { VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) - AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) + AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) VolumeFollow(*VolumeFollowRequest, VolumeServer_VolumeFollowServer) error VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) @@ -1190,20 +1190,20 @@ func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } -func _VolumeServer_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AssignVolumeRequest) +func _VolumeServer_AllocateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AllocateVolumeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).AssignVolume(ctx, in) + return srv.(VolumeServerServer).AllocateVolume(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/AssignVolume", + FullMethod: "/volume_server_pb.VolumeServer/AllocateVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).AssignVolume(ctx, req.(*AssignVolumeRequest)) + return srv.(VolumeServerServer).AllocateVolume(ctx, req.(*AllocateVolumeRequest)) } return interceptor(ctx, in, info, handler) } @@ -1387,8 +1387,8 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_DeleteCollection_Handler, }, { - MethodName: "AssignVolume", - Handler: _VolumeServer_AssignVolume_Handler, + MethodName: "AllocateVolume", + Handler: _VolumeServer_AllocateVolume_Handler, }, { MethodName: "VolumeSyncStatus", @@ -1434,78 +1434,78 @@ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 1169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0xdd, 0x6f, 0xdc, 0x44, - 0x10, 0x8f, 0x7b, 0x77, 0xf9, 0x98, 0xbb, 0xd0, 0x63, 0x93, 0x26, 0x57, 0xb7, 0x84, 0xeb, 0x42, - 0xcb, 0xa5, 0x4d, 0x03, 0xa4, 0x02, 0x5a, 0x9e, 0x80, 0x84, 0x8a, 0x3c, 0x94, 0x4a, 0x0e, 0xad, - 0x90, 0x40, 0xb2, 0x36, 0xf6, 0x26, 0xb1, 0xe2, 0xf3, 0xba, 0xde, 0x75, 0xda, 0xf0, 0xef, 0xf0, - 0xc2, 0x3b, 0x12, 0xff, 0x0e, 0x7f, 0x08, 0x2f, 0x68, 0x3f, 0xec, 0xf8, 0xeb, 0x7a, 0xe6, 0xe3, - 0x6d, 0x3d, 0x3b, 0xf3, 0x9b, 0x99, 0xdd, 0x99, 0xd9, 0x9f, 0x0c, 0x6b, 0x17, 0x2c, 0x4c, 0xa7, - 0xd4, 0xe5, 0x34, 0xb9, 0xa0, 0xc9, 0x6e, 0x9c, 0x30, 0xc1, 0xd0, 0xb0, 0x24, 0x74, 0xe3, 0x63, - 0xfc, 0x31, 0xa0, 0x6f, 0x88, 0xf0, 0xce, 0x0e, 0x68, 0x48, 0x05, 0x75, 0xe8, 0xab, 0x94, 0x72, - 0x81, 0x6e, 0xc2, 0xf2, 0x49, 0x10, 0x52, 0x37, 0xf0, 0xf9, 0xc8, 0x1a, 0x77, 0x26, 0x2b, 0xce, - 0x92, 0xfc, 0x3e, 0xf4, 0x39, 0x7e, 0x0e, 0x6b, 0x25, 0x03, 0x1e, 0xb3, 0x88, 0x53, 0xf4, 0x18, - 0x96, 0x12, 0xca, 0xd3, 0x50, 0x68, 0x83, 
0xfe, 0xde, 0xd6, 0x6e, 0xd5, 0xd7, 0x6e, 0x6e, 0x92, - 0x86, 0xc2, 0xc9, 0xd4, 0x71, 0x00, 0x83, 0xe2, 0x06, 0xda, 0x84, 0x25, 0xe3, 0x7b, 0x64, 0x8d, - 0xad, 0xc9, 0x8a, 0xb3, 0xa8, 0x5d, 0xa3, 0x0d, 0x58, 0xe4, 0x82, 0x88, 0x94, 0x8f, 0xae, 0x8d, - 0xad, 0x49, 0xcf, 0x31, 0x5f, 0x68, 0x1d, 0x7a, 0x34, 0x49, 0x58, 0x32, 0xea, 0x28, 0x75, 0xfd, - 0x81, 0x10, 0x74, 0x79, 0xf0, 0x0b, 0x1d, 0x75, 0xc7, 0xd6, 0x64, 0xd5, 0x51, 0x6b, 0xbc, 0x04, - 0xbd, 0x6f, 0xa7, 0xb1, 0xb8, 0xc4, 0x5f, 0xc0, 0xe8, 0x25, 0xf1, 0xd2, 0x74, 0xfa, 0x52, 0xc5, - 0xb8, 0x7f, 0x46, 0xbd, 0xf3, 0x2c, 0xf7, 0x5b, 0xb0, 0x62, 0x22, 0x37, 0x11, 0xac, 0x3a, 0xcb, - 0x5a, 0x70, 0xe8, 0xe3, 0xaf, 0xe0, 0x66, 0x83, 0xa1, 0x39, 0x83, 0x0f, 0x60, 0xf5, 0x94, 0x24, - 0xc7, 0xe4, 0x94, 0xba, 0x09, 0x11, 0x01, 0x53, 0xd6, 0x96, 0x33, 0x30, 0x42, 0x47, 0xca, 0xf0, - 0x4f, 0x60, 0x97, 0x10, 0xd8, 0x34, 0x26, 0x9e, 0x68, 0xe3, 0x1c, 0x8d, 0xa1, 0x1f, 0x27, 0x94, - 0x84, 0x21, 0xf3, 0x88, 0xa0, 0xea, 0x14, 0x3a, 0x4e, 0x51, 0x84, 0xdf, 0x83, 0x5b, 0x8d, 0xe0, - 0x3a, 0x40, 0xfc, 0xb8, 0x12, 0x3d, 0x9b, 0x4e, 0x83, 0x56, 0xae, 0xf1, 0xed, 0x5a, 0xd4, 0xca, - 0xd2, 0xe0, 0x3e, 0xa9, 0xec, 0x86, 0x94, 0x44, 0x69, 0xdc, 0x0a, 0xb8, 0x1a, 0x71, 0x66, 0x9a, - 0x23, 0x6f, 0xea, 0xe2, 0xd8, 0x67, 0x61, 0x48, 0x3d, 0x11, 0xb0, 0x28, 0x83, 0xdd, 0x02, 0xf0, - 0x72, 0xa1, 0x29, 0x95, 0x82, 0x04, 0xdb, 0x30, 0xaa, 0x9b, 0x1a, 0xd8, 0xdf, 0x2c, 0x58, 0xfb, - 0x9a, 0xf3, 0xe0, 0x34, 0xd2, 0x6e, 0x5b, 0x1d, 0x7f, 0xd9, 0xe1, 0xb5, 0xaa, 0xc3, 0xea, 0xf5, - 0x74, 0x6a, 0xd7, 0x23, 0x35, 0x12, 0x1a, 0x87, 0x81, 0x47, 0x14, 0x44, 0x57, 0x41, 0x14, 0x45, - 0x68, 0x08, 0x1d, 0x21, 0xc2, 0x51, 0x4f, 0xed, 0xc8, 0x25, 0xde, 0x80, 0xf5, 0x72, 0xa4, 0x26, - 0x85, 0xcf, 0x61, 0x53, 0x4b, 0x8e, 0x2e, 0x23, 0xef, 0x48, 0x75, 0x42, 0xab, 0x03, 0xff, 0xcb, - 0x82, 0x51, 0xdd, 0xd0, 0x54, 0xf0, 0x7f, 0xcd, 0xff, 0x9f, 0x66, 0x87, 0xde, 0x87, 0xbe, 0x20, - 0x41, 0xe8, 0xb2, 0x93, 0x13, 0x4e, 0xc5, 0x68, 0x71, 0x6c, 0x4d, 0xba, 0x0e, 0x48, 0xd1, 0x73, - 0x25, 0x41, 0xdb, 0x30, 0xf4, 0x74, 0x15, 0xbb, 0x09, 0xbd, 0x08, 0xb8, 0x44, 0x5e, 0x52, 0x81, - 0x5d, 0xf7, 0xb2, 0xea, 0xd6, 0x62, 0x84, 0x61, 0x35, 0xf0, 0xdf, 0xb8, 0x6a, 0x78, 0xa8, 0xd6, - 0x5f, 0x56, 0x68, 0xfd, 0xc0, 0x7f, 0xf3, 0x34, 0x08, 0xe9, 0x91, 0x9c, 0x00, 0xdf, 0xc1, 0x9a, - 0x4e, 0xfe, 0x29, 0x0b, 0x43, 0xf6, 0xba, 0xd5, 0xbd, 0xaf, 0x43, 0x8f, 0x07, 0x91, 0xa7, 0x1b, - 0xae, 0xeb, 0xe8, 0x0f, 0xfc, 0x04, 0xd6, 0xcb, 0x48, 0xe6, 0x08, 0xef, 0xc0, 0x40, 0x45, 0xe0, - 0xb1, 0x48, 0xd0, 0x48, 0x28, 0xb4, 0x81, 0xd3, 0x97, 0xb2, 0x7d, 0x2d, 0xc2, 0x9f, 0x02, 0xd2, - 0xa6, 0xcf, 0x58, 0x1a, 0xb5, 0xeb, 0xbf, 0x1b, 0x59, 0xdc, 0xc6, 0xc4, 0x14, 0xc1, 0xa3, 0x2c, - 0x88, 0x17, 0xd1, 0xb4, 0x35, 0xd6, 0x26, 0xdc, 0xa8, 0x18, 0x19, 0xb4, 0xbd, 0xcc, 0x49, 0xf9, - 0x31, 0x78, 0x2b, 0xd8, 0x46, 0x16, 0x41, 0xf9, 0x3d, 0xc0, 0xbf, 0x5b, 0xb0, 0xe1, 0x98, 0xab, - 0xa7, 0xff, 0x6f, 0x93, 0x15, 0x8b, 0xac, 0x33, 0xb3, 0xc8, 0xba, 0x57, 0x45, 0x36, 0x81, 0x21, - 0x67, 0x69, 0xe2, 0x51, 0xd7, 0x27, 0x82, 0xb8, 0x11, 0xf3, 0xa9, 0xa9, 0xc1, 0x77, 0xb4, 0xfc, - 0x80, 0x08, 0xf2, 0x3d, 0xf3, 0x29, 0xbe, 0x09, 0x9b, 0xb5, 0xa0, 0x4d, 0x42, 0x11, 0x5c, 0xdf, - 0x67, 0xf1, 0xa5, 0xac, 0xa4, 0x96, 0x89, 0xf4, 0x03, 0xee, 0x66, 0x05, 0xa9, 0x32, 0x59, 0x76, - 0x56, 0x02, 0x7e, 0xa8, 0xab, 0xd1, 0xec, 0xfb, 0x44, 0xe8, 0xfd, 0x4e, 0xb6, 0x7f, 0x40, 0x84, - 0xdc, 0xc7, 0x9f, 0xc1, 0xf0, 0xca, 0x5f, 0xfb, 0xda, 0xfa, 0x12, 0x6e, 0x39, 0x94, 0xf8, 0xa6, - 0x34, 0x65, 0xd9, 0xb7, 0x1f, 0x0d, 0x7f, 0x5a, 0x70, 0xbb, 0xd9, 
0xb8, 0xcd, 0x78, 0xd8, 0x01, - 0x94, 0xb7, 0x9f, 0x08, 0xa6, 0x94, 0x0b, 0x32, 0x8d, 0x4d, 0xcf, 0x0c, 0x4d, 0x0f, 0xfe, 0x90, - 0xc9, 0xeb, 0xcd, 0xda, 0xa9, 0x35, 0xab, 0x44, 0xcc, 0xce, 0xa7, 0x80, 0xd8, 0xd5, 0x88, 0xbe, - 0x3e, 0xa7, 0x12, 0x62, 0xae, 0xad, 0x10, 0x7b, 0x1a, 0xd1, 0x28, 0xaa, 0xf6, 0xff, 0x11, 0xe0, - 0x20, 0xe0, 0xe7, 0x3a, 0x2d, 0x59, 0x29, 0x7e, 0x90, 0x98, 0xa7, 0x43, 0x2e, 0xa5, 0x84, 0x84, - 0xa1, 0x09, 0x5a, 0x2e, 0x25, 0x8d, 0x48, 0x39, 0xf5, 0x4d, 0x78, 0x6a, 0x2d, 0x65, 0x27, 0x09, - 0xa5, 0x26, 0x12, 0xb5, 0xc6, 0xbf, 0x5a, 0xb0, 0xf2, 0x8c, 0x4e, 0x0d, 0xf2, 0x16, 0xc0, 0x29, - 0x4b, 0x58, 0x2a, 0x82, 0x88, 0x72, 0xe5, 0xa0, 0xe7, 0x14, 0x24, 0xff, 0xde, 0x8f, 0xa2, 0x35, - 0x34, 0x3c, 0x31, 0xc9, 0xa9, 0xb5, 0x94, 0x9d, 0x51, 0x12, 0x9b, 0xe9, 0xa9, 0xd6, 0x6a, 0x68, - 0x09, 0xe2, 0x9d, 0xab, 0x61, 0x29, 0x87, 0x96, 0xfc, 0xd8, 0xfb, 0x63, 0x00, 0x03, 0x33, 0xfc, - 0x15, 0x2b, 0x43, 0x3f, 0x43, 0xbf, 0xc0, 0xe6, 0xd0, 0x87, 0x75, 0xd2, 0x56, 0x67, 0x87, 0xf6, - 0xdd, 0x39, 0x5a, 0xa6, 0x63, 0x16, 0x50, 0x04, 0xef, 0xd6, 0xd8, 0x12, 0xba, 0x5f, 0xb7, 0x9e, - 0xc5, 0xc5, 0xec, 0x07, 0xad, 0x74, 0x73, 0x7f, 0x02, 0xd6, 0x1a, 0xe8, 0x0f, 0xda, 0x99, 0x83, - 0x52, 0xa2, 0x60, 0xf6, 0xc3, 0x96, 0xda, 0xb9, 0xd7, 0x57, 0x80, 0xea, 0xdc, 0x08, 0x3d, 0x98, - 0x0b, 0x73, 0xc5, 0xbd, 0xec, 0x9d, 0x76, 0xca, 0x33, 0x13, 0xd5, 0xac, 0x69, 0x6e, 0xa2, 0x25, - 0x5e, 0x36, 0x37, 0xd1, 0x0a, 0x15, 0x5b, 0x40, 0xe7, 0x30, 0xac, 0x32, 0x2a, 0xb4, 0x3d, 0x8b, - 0xe6, 0xd7, 0x08, 0x9b, 0x7d, 0xbf, 0x8d, 0x6a, 0xee, 0xcc, 0x85, 0x41, 0x91, 0xf7, 0xa0, 0x86, - 0xa2, 0x6b, 0x60, 0x70, 0xf6, 0xbd, 0x79, 0x6a, 0xc5, 0x6c, 0xaa, 0x3c, 0xa8, 0x29, 0x9b, 0x19, - 0x24, 0xab, 0x29, 0x9b, 0x59, 0xb4, 0x0a, 0x2f, 0x20, 0x92, 0xf5, 0x9d, 0x66, 0x0b, 0x4d, 0xd9, - 0x34, 0xf0, 0x92, 0xa6, 0x6c, 0x9a, 0x48, 0x07, 0x5e, 0xf8, 0xc4, 0x92, 0xad, 0x5c, 0xa0, 0x08, - 0x4d, 0xad, 0x5c, 0x27, 0x1d, 0xf6, 0xdd, 0x39, 0x5a, 0x79, 0x02, 0xc7, 0xb0, 0x5a, 0x22, 0x0d, - 0x68, 0x66, 0x68, 0x65, 0x2a, 0x62, 0x7f, 0x34, 0x57, 0xaf, 0x78, 0xe5, 0x45, 0x2e, 0x31, 0xfb, - 0x90, 0xca, 0xe3, 0xe8, 0xde, 0x3c, 0xb5, 0xdc, 0xc1, 0x19, 0x5c, 0xaf, 0x3c, 0xef, 0x68, 0x52, - 0x37, 0x6e, 0xa6, 0x2d, 0xf6, 0x76, 0x0b, 0xcd, 0xdc, 0xd3, 0x6b, 0x58, 0x6f, 0x7a, 0x49, 0xd1, - 0xc3, 0x26, 0x90, 0x99, 0xcf, 0xb5, 0xbd, 0xdb, 0x56, 0x3d, 0x77, 0xfc, 0x02, 0x96, 0x33, 0xda, - 0x80, 0xee, 0xd4, 0xad, 0x2b, 0x14, 0xc6, 0xc6, 0x6f, 0x53, 0xb9, 0x2a, 0xae, 0xe3, 0x45, 0xf5, - 0xff, 0xe0, 0xd1, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13, 0x86, 0xf1, 0x0f, 0x56, 0x10, 0x00, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0x4b, 0x73, 0xdc, 0x44, + 0x10, 0xb6, 0xb2, 0xbb, 0x7e, 0xf4, 0xae, 0xe3, 0x65, 0xfc, 0x92, 0xe5, 0x60, 0x36, 0x03, 0x4e, + 0xd6, 0x89, 0x63, 0xc0, 0x29, 0x20, 0xe1, 0x04, 0xd8, 0xa4, 0xf0, 0x21, 0xa4, 0x4a, 0x26, 0x29, + 0xaa, 0xa0, 0x4a, 0x35, 0x96, 0x66, 0x6d, 0x95, 0xb5, 0x1a, 0x45, 0x1a, 0x39, 0x31, 0x7f, 0x87, + 0x1b, 0x57, 0xae, 0xfc, 0x17, 0x7e, 0x08, 0x17, 0x6a, 0x1e, 0x92, 0x57, 0x8f, 0xcd, 0x8a, 0xc7, + 0x6d, 0xd4, 0xd3, 0xfd, 0x75, 0xf7, 0x4c, 0x77, 0xcf, 0x57, 0x82, 0xd5, 0x2b, 0x16, 0xa4, 0x63, + 0xea, 0x24, 0x34, 0xbe, 0xa2, 0xf1, 0x41, 0x14, 0x33, 0xce, 0x50, 0xbf, 0x20, 0x74, 0xa2, 0x33, + 0xfc, 0x31, 0xa0, 0x6f, 0x08, 0x77, 0x2f, 0x8e, 0x69, 0x40, 0x39, 0xb5, 0xe9, 0xeb, 0x94, 0x26, + 0x1c, 0x6d, 0xc1, 0xe2, 0xc8, 0x0f, 0xa8, 0xe3, 0x7b, 0x89, 0x69, 0x0c, 0x5a, 0xc3, 0x25, 0x7b, + 0x41, 0x7c, 0x9f, 0x78, 0x09, 0x7e, 0x01, 0xab, 0x05, 0x83, 0x24, 0x62, 0x61, 0x42, 0xd1, 
0x13, + 0x58, 0x88, 0x69, 0x92, 0x06, 0x5c, 0x19, 0x74, 0x0f, 0x77, 0x0e, 0xca, 0xbe, 0x0e, 0x72, 0x93, + 0x34, 0xe0, 0x76, 0xa6, 0x8e, 0x7d, 0xe8, 0x4d, 0x6e, 0xa0, 0x4d, 0x58, 0xd0, 0xbe, 0x4d, 0x63, + 0x60, 0x0c, 0x97, 0xec, 0x79, 0xe5, 0x1a, 0x6d, 0xc0, 0x7c, 0xc2, 0x09, 0x4f, 0x13, 0xf3, 0xd6, + 0xc0, 0x18, 0x76, 0x6c, 0xfd, 0x85, 0xd6, 0xa0, 0x43, 0xe3, 0x98, 0xc5, 0x66, 0x4b, 0xaa, 0xab, + 0x0f, 0x84, 0xa0, 0x9d, 0xf8, 0xbf, 0x50, 0xb3, 0x3d, 0x30, 0x86, 0xcb, 0xb6, 0x5c, 0xe3, 0x05, + 0xe8, 0x7c, 0x3b, 0x8e, 0xf8, 0x35, 0xfe, 0x02, 0xcc, 0x57, 0xc4, 0x4d, 0xd3, 0xf1, 0x2b, 0x19, + 0xe3, 0xd1, 0x05, 0x75, 0x2f, 0xb3, 0xdc, 0xb7, 0x61, 0x49, 0x47, 0xae, 0x23, 0x58, 0xb6, 0x17, + 0x95, 0xe0, 0xc4, 0xc3, 0x5f, 0xc1, 0x56, 0x8d, 0xa1, 0x3e, 0x83, 0x0f, 0x61, 0xf9, 0x9c, 0xc4, + 0x67, 0xe4, 0x9c, 0x3a, 0x31, 0xe1, 0x3e, 0x93, 0xd6, 0x86, 0xdd, 0xd3, 0x42, 0x5b, 0xc8, 0xf0, + 0x4f, 0x60, 0x15, 0x10, 0xd8, 0x38, 0x22, 0x2e, 0x6f, 0xe2, 0x1c, 0x0d, 0xa0, 0x1b, 0xc5, 0x94, + 0x04, 0x01, 0x73, 0x09, 0xa7, 0xf2, 0x14, 0x5a, 0xf6, 0xa4, 0x08, 0xbf, 0x0f, 0xdb, 0xb5, 0xe0, + 0x2a, 0x40, 0xfc, 0xa4, 0x14, 0x3d, 0x1b, 0x8f, 0xfd, 0x46, 0xae, 0xf1, 0x9d, 0x4a, 0xd4, 0xd2, + 0x52, 0xe3, 0x3e, 0x2d, 0xed, 0x06, 0x94, 0x84, 0x69, 0xd4, 0x08, 0xb8, 0x1c, 0x71, 0x66, 0x9a, + 0x23, 0x6f, 0xaa, 0xe2, 0x38, 0x62, 0x41, 0x40, 0x5d, 0xee, 0xb3, 0x30, 0x83, 0xdd, 0x01, 0x70, + 0x73, 0xa1, 0x2e, 0x95, 0x09, 0x09, 0xb6, 0xc0, 0xac, 0x9a, 0x6a, 0xd8, 0xdf, 0x0c, 0x58, 0xff, + 0x5a, 0x1f, 0x9a, 0x72, 0xdc, 0xe8, 0x02, 0x8a, 0x2e, 0x6f, 0x95, 0x5d, 0x96, 0x2f, 0xa8, 0x55, + 0xb9, 0x20, 0xa1, 0x11, 0xd3, 0x28, 0xf0, 0x5d, 0x22, 0x21, 0xda, 0x12, 0x62, 0x52, 0x84, 0xfa, + 0xd0, 0xe2, 0x3c, 0x30, 0x3b, 0x72, 0x47, 0x2c, 0xb1, 0x09, 0x1b, 0xe5, 0x58, 0x75, 0x1a, 0x9f, + 0xc3, 0xa6, 0x92, 0x9c, 0x5e, 0x87, 0xee, 0xa9, 0xec, 0x86, 0x46, 0x87, 0xfe, 0x97, 0x01, 0x66, + 0xd5, 0x50, 0x57, 0xf1, 0x7f, 0x3d, 0x81, 0x7f, 0x9a, 0x1f, 0xfa, 0x00, 0xba, 0x9c, 0xf8, 0x81, + 0xc3, 0x46, 0xa3, 0x84, 0x72, 0x73, 0x7e, 0x60, 0x0c, 0xdb, 0x36, 0x08, 0xd1, 0x0b, 0x29, 0x41, + 0x7b, 0xd0, 0x77, 0x55, 0x25, 0x3b, 0x31, 0xbd, 0xf2, 0x13, 0x81, 0xbc, 0x20, 0x03, 0x5b, 0x71, + 0xb3, 0x0a, 0x57, 0x62, 0x84, 0x61, 0xd9, 0xf7, 0xde, 0x3a, 0x72, 0x80, 0xc8, 0xf6, 0x5f, 0x94, + 0x68, 0x5d, 0xdf, 0x7b, 0xfb, 0xcc, 0x0f, 0xe8, 0xa9, 0x98, 0x02, 0xdf, 0xc1, 0xaa, 0x4a, 0xfe, + 0x19, 0x0b, 0x02, 0xf6, 0xa6, 0xd1, 0xcd, 0xaf, 0x41, 0x27, 0xf1, 0x43, 0x57, 0x35, 0x5d, 0xdb, + 0x56, 0x1f, 0xf8, 0x29, 0xac, 0x15, 0x91, 0xf4, 0x11, 0xde, 0x85, 0x9e, 0x8c, 0xc0, 0x65, 0x21, + 0xa7, 0x21, 0x97, 0x68, 0x3d, 0xbb, 0x2b, 0x64, 0x47, 0x4a, 0x84, 0x3f, 0x05, 0xa4, 0x4c, 0x9f, + 0xb3, 0x34, 0x6c, 0xd6, 0x83, 0xeb, 0x59, 0xdc, 0xda, 0x44, 0x17, 0xc1, 0xe3, 0x2c, 0x88, 0x97, + 0xe1, 0xb8, 0x31, 0xd6, 0x26, 0xac, 0x97, 0x8c, 0x34, 0xda, 0x61, 0xe6, 0xa4, 0xf8, 0x20, 0xbc, + 0x13, 0x6c, 0x23, 0x8b, 0xa0, 0xf8, 0x26, 0xe0, 0xdf, 0x0d, 0xd8, 0xb0, 0xf5, 0xd5, 0xff, 0xcf, + 0x6d, 0x36, 0x59, 0x64, 0xad, 0xa9, 0x45, 0xd6, 0xbe, 0x29, 0xb2, 0x21, 0xf4, 0x13, 0x96, 0xc6, + 0x2e, 0x75, 0x3c, 0xc2, 0x89, 0x13, 0x32, 0x8f, 0xea, 0x1a, 0xbc, 0xad, 0xe4, 0xc7, 0x84, 0x93, + 0xef, 0x99, 0x47, 0xf1, 0x16, 0x6c, 0x56, 0x82, 0xd6, 0x09, 0x85, 0xb0, 0x72, 0xc4, 0xa2, 0x6b, + 0x51, 0x49, 0x0d, 0x13, 0xe9, 0xfa, 0x89, 0x93, 0x15, 0xa4, 0xcc, 0x64, 0xd1, 0x5e, 0xf2, 0x93, + 0x13, 0x55, 0x8d, 0x7a, 0xdf, 0x23, 0x5c, 0xed, 0xb7, 0xb2, 0xfd, 0x63, 0xc2, 0xc5, 0x3e, 0xfe, + 0x0c, 0xfa, 0x37, 0xfe, 0x9a, 0xd7, 0xd6, 0x97, 0xb0, 0x6d, 0x53, 0xe2, 0xe9, 0xd2, 0x14, 0x65, + 0xdf, 0x7c, 0x34, 
0xfc, 0x69, 0xc0, 0x9d, 0x7a, 0xe3, 0x26, 0xe3, 0x61, 0x1f, 0x50, 0xde, 0x7e, + 0xdc, 0x1f, 0xd3, 0x84, 0x93, 0x71, 0xa4, 0x7b, 0xa6, 0xaf, 0x7b, 0xf0, 0x87, 0x4c, 0x5e, 0x6d, + 0xd6, 0x56, 0xa5, 0x59, 0x05, 0x62, 0x76, 0x3e, 0x13, 0x88, 0x6d, 0x85, 0xe8, 0xa9, 0x73, 0x2a, + 0x20, 0xe6, 0xda, 0x12, 0xb1, 0xa3, 0x10, 0xb5, 0xa2, 0x6c, 0xff, 0x1f, 0x01, 0x8e, 0xfd, 0xe4, + 0x52, 0xa5, 0x25, 0x2a, 0xc5, 0xf3, 0x63, 0xfd, 0x7c, 0x88, 0xa5, 0x90, 0x90, 0x20, 0xd0, 0x41, + 0x8b, 0xa5, 0xa0, 0x12, 0x69, 0x42, 0x3d, 0x1d, 0x9e, 0x5c, 0x0b, 0xd9, 0x28, 0xa6, 0x54, 0x47, + 0x22, 0xd7, 0xf8, 0x57, 0x03, 0x96, 0x9e, 0xd3, 0xb1, 0x46, 0xde, 0x01, 0x38, 0x67, 0x31, 0x4b, + 0xb9, 0x1f, 0xd2, 0x44, 0x3a, 0xe8, 0xd8, 0x13, 0x92, 0x7f, 0xef, 0x47, 0x52, 0x1b, 0x1a, 0x8c, + 0x74, 0x72, 0x72, 0x2d, 0x64, 0x17, 0x94, 0x44, 0x7a, 0x7a, 0xca, 0xb5, 0x1c, 0x5a, 0x9c, 0xb8, + 0x97, 0x72, 0x58, 0x8a, 0xa1, 0x25, 0x3e, 0x0e, 0xff, 0xe8, 0x41, 0x4f, 0x0f, 0x7f, 0xc9, 0xcc, + 0xd0, 0xcf, 0xd0, 0x9d, 0x60, 0x74, 0xe8, 0xa3, 0x2a, 0x71, 0xab, 0x32, 0x44, 0x6b, 0x77, 0x86, + 0x96, 0xee, 0x98, 0x39, 0x14, 0xc2, 0x7b, 0x15, 0xc6, 0x84, 0x1e, 0x54, 0xad, 0xa7, 0xf1, 0x31, + 0xeb, 0x61, 0x23, 0xdd, 0xdc, 0x1f, 0x87, 0xd5, 0x1a, 0x0a, 0x84, 0xf6, 0x67, 0xa0, 0x14, 0x68, + 0x98, 0xf5, 0xa8, 0xa1, 0x76, 0xee, 0xf5, 0x35, 0xa0, 0x2a, 0x3f, 0x42, 0x0f, 0x67, 0xc2, 0xdc, + 0xf0, 0x2f, 0x6b, 0xbf, 0x99, 0xf2, 0xd4, 0x44, 0x15, 0x73, 0x9a, 0x99, 0x68, 0x81, 0x9b, 0xcd, + 0x4c, 0xb4, 0x44, 0xc7, 0xe6, 0xd0, 0x25, 0xf4, 0xcb, 0xac, 0x0a, 0xed, 0x4d, 0xa3, 0xfa, 0x15, + 0xd2, 0x66, 0x3d, 0x68, 0xa2, 0x9a, 0x3b, 0xa3, 0x70, 0xbb, 0xc8, 0x7c, 0xd0, 0xfd, 0xaa, 0x7d, + 0x2d, 0x8f, 0xb3, 0x86, 0xb3, 0x15, 0x27, 0x73, 0x2a, 0xb3, 0xa1, 0xba, 0x9c, 0xa6, 0x50, 0xad, + 0xba, 0x9c, 0xa6, 0x91, 0x2b, 0x3c, 0x87, 0x48, 0xd6, 0x7d, 0x8a, 0x33, 0xa0, 0xdd, 0x69, 0xd6, + 0x05, 0x76, 0x62, 0xdd, 0x9b, 0xa5, 0x96, 0x39, 0xf8, 0xc4, 0x10, 0x0d, 0x3d, 0x41, 0x14, 0xea, + 0x1a, 0xba, 0x4a, 0x3d, 0xac, 0xdd, 0x19, 0x5a, 0x79, 0x02, 0x67, 0xb0, 0x5c, 0xa0, 0x0e, 0x68, + 0x6a, 0x68, 0x45, 0x42, 0x62, 0xdd, 0x9f, 0xa9, 0x97, 0xfb, 0x70, 0xb2, 0x43, 0xd2, 0x33, 0x69, + 0x6a, 0x70, 0xc5, 0xa1, 0x74, 0x6f, 0x96, 0x5a, 0xee, 0xe0, 0x02, 0x56, 0x4a, 0x8f, 0x3c, 0xaa, + 0xa9, 0x98, 0x7a, 0xf2, 0x62, 0xed, 0x35, 0xd0, 0xcc, 0x3d, 0xbd, 0x81, 0xb5, 0xba, 0xf7, 0x14, + 0x3d, 0xaa, 0x03, 0x99, 0xfa, 0x68, 0x5b, 0x07, 0x4d, 0xd5, 0x73, 0xc7, 0x2f, 0x61, 0x31, 0x23, + 0x0f, 0xe8, 0x6e, 0xd5, 0xba, 0x44, 0x64, 0x2c, 0xfc, 0x2e, 0x95, 0x9b, 0xe2, 0x3a, 0x9b, 0x97, + 0x7f, 0x12, 0x1e, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xdf, 0xd7, 0x82, 0x60, 0x10, 0x00, 0x00, } diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index c924b7a62..c32f8a086 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -24,9 +24,9 @@ func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server } -func (vs *VolumeServer) AssignVolume(ctx context.Context, req *volume_server_pb.AssignVolumeRequest) (*volume_server_pb.AssignVolumeResponse, error) { +func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_pb.AllocateVolumeRequest) (*volume_server_pb.AllocateVolumeResponse, error) { - resp := &volume_server_pb.AssignVolumeResponse{} + resp := &volume_server_pb.AllocateVolumeResponse{} err := vs.store.AddVolume( storage.VolumeId(req.VolumeId), diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index 1360988b3..f08736f64 100644 --- a/weed/topology/allocate_volume.go 
+++ b/weed/topology/allocate_volume.go @@ -16,7 +16,7 @@ func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid storage.Vo return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.AssignVolume(context.Background(), &volume_server_pb.AssignVolumeRequest{ + _, deleteErr := client.AllocateVolume(context.Background(), &volume_server_pb.AllocateVolumeRequest{ VolumeId: uint32(vid), Collection: option.Collection, Replication: option.ReplicaPlacement.String(), From c3fa50d3b344f27dcc82c5036e5bbefb6391d7f1 Mon Sep 17 00:00:00 2001 From: stlpmo-jn Date: Thu, 11 Apr 2019 13:40:31 +0800 Subject: [PATCH 169/450] remove the health chekcer, because it's the same as command_volume_fix_replication --- weed/topology/replication_health_checker.go | 297 -------------------- 1 file changed, 297 deletions(-) delete mode 100644 weed/topology/replication_health_checker.go diff --git a/weed/topology/replication_health_checker.go b/weed/topology/replication_health_checker.go deleted file mode 100644 index 947e7d45c..000000000 --- a/weed/topology/replication_health_checker.go +++ /dev/null @@ -1,297 +0,0 @@ -package topology - -import ( - "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" - "google.golang.org/grpc" - "sort" - "strings" - "sync" -) - -/** - check the replication health - */ -func (t *Topology) RepairUnhealthyReplicationInLayout(grpcDialOption grpc.DialOption, layout *VolumeLayout, eVid storage.VolumeId) error { - ctx := context.Background() - locations, exist := layout.vid2location[eVid] - if !exist { - retErr := fmt.Errorf("the volume:%v has no locations", eVid) - glog.V(0).Infof(retErr.Error()) - return retErr - } - - //glog.V(5).Infof("volume:%v, locations:%v", eVid, locations.list) - fileStat, err := getReplicationInfo(grpcDialOption, ctx, eVid, locations) - if err != nil { - glog.Errorf("get replication status failed, %v", err) - return err - } - - if isSameVolumeReplications(fileStat, layout.volumeSizeLimit) { - glog.V(0).Infof("the volume:%v has %d same replication, need not repair", eVid, len(fileStat)) - return nil - } - - // compact all the replications of volume - { - glog.V(4).Infof("begin compact all the replications of volume:%v", eVid) - allUrls := make([]string, 0, len(fileStat)) - for _, fs := range fileStat { - allUrls = append(allUrls, fs.url) - } - - if tryBatchCompactVolume(ctx, grpcDialOption, eVid, allUrls) == false { - err := fmt.Errorf("compact all the replications of volume:%v", eVid) - glog.Error(err.Error()) - return err - } - glog.V(4).Infof("success compact all the replications of volume:%v", eVid) - } - - // get replication status again - fileStat, err = getReplicationInfo(grpcDialOption, ctx, eVid, locations) - if err != nil { - return err - } - - okUrls, errUrls := filterErrorReplication(fileStat) - if len(errUrls) == 0 { - return nil // they are the same - } - - if len(okUrls) == 0 { - return fmt.Errorf("no correct volume replications, that's impossible") - } - - glog.V(4).Infof("need repair replication : %v", errUrls) - if len(locations.list) <= 0 { - return fmt.Errorf("that's impossible, the locatins of volume:%v is empty", eVid) - } - for _, url := range errUrls { - vInfo := locations.list[0].volumes[eVid] - err = syncReplication(grpcDialOption, okUrls[0], url, vInfo) - if nil != err { - 
glog.Error(err) - return err - } - } - return nil -} - -type FileStatus struct { - url string - fileStat *volume_server_pb.ReadVolumeFileStatusResponse -} - -func getReplicationInfo(grpcDialOption grpc.DialOption, ctx context.Context, vid storage.VolumeId, locs *VolumeLocationList) (fs []FileStatus, err error) { - type ResponsePair struct { - url string - status *volume_server_pb.ReadVolumeFileStatusResponse - err error - } - - var wg sync.WaitGroup - resultChan := make(chan ResponsePair, len(locs.list)) - wg.Add(len(locs.list)) - getFileStatFunc := func(url string, volumeId storage.VolumeId) { - defer wg.Done() - glog.V(4).Infof("volumeId:%v, location:%v", volumeId, url) - err := operation.WithVolumeServerClient(url, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - req := &volume_server_pb.ReadVolumeFileStatusRequest{ - VolumeId: uint32(volumeId), - } - respTmp, err := client.ReadVolumeFileStatus(ctx, req) - resultChan <- ResponsePair{ - url: url, - status: respTmp, - err: err, - } - return nil - }) - if nil != err { - glog.Error(err) - } - } - for _, node := range locs.list { - go getFileStatFunc(node.Url(), vid) - } - - go func() { // close channel - wg.Wait() - close(resultChan) - }() - - var errs []string - for result := range resultChan { - if result.err == nil { - fs = append(fs, FileStatus{ - url: result.url, - fileStat: result.status, - }) - continue - } - tmp := fmt.Sprintf("url : %s, error : %v", result.url, result.err) - errs = append(errs, tmp) - } - - if len(fs) == len(locs.list) { - return fs, nil - } - err = fmt.Errorf("get volume[%v] replication status failed, err : %s", vid, strings.Join(errs, "; ")) - return nil, err -} - -/** - : - the file count is the total count of the volume received from user clients -todo: this policy is not perfected or not rigorous, need fix - */ -func filterErrorReplication(fileStat []FileStatus) (okUrls, errUrls []string) { - sort.Slice(fileStat, func(i, j int) bool { - return fileStat[i].fileStat.FileCount > fileStat[j].fileStat.FileCount - }) - if fileStat[0].fileStat.FileCount != fileStat[len(fileStat)-1].fileStat.FileCount { - okFileCounter := fileStat[0].fileStat.FileCount - for _, v := range fileStat { - if okFileCounter == v.fileStat.FileCount { - okUrls = append(okUrls, v.url) - } else { - errUrls = append(errUrls, v.url) - } - } - return - } - return -} - -// execute the compact transaction -func compactVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeUrl string, vid storage.VolumeId) bool { - glog.V(0).Infoln("Start vacuuming", vid, "on", volumeUrl) - err := operation.WithVolumeServerClient(volumeUrl, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{ - VolumeId: uint32(vid), - }) - return err - }) - if err != nil { - glog.Errorf("Error when vacuuming %d on %s: %v", vid, volumeUrl, err) - return false - } - glog.V(0).Infof("Complete vacuuming volume:%v on %s", vid, volumeUrl) - return true -} - -// commit the compact transaction when compactVolume() return true -func commitCompactedVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeUrl string, vid storage.VolumeId) bool { - err := operation.WithVolumeServerClient(volumeUrl, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{ - VolumeId: uint32(vid), - }) - return 
err - }) - if err != nil { - glog.Errorf("Error when committing vacuum %d on %s: %v", vid, volumeUrl, err) - return false - } - glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, volumeUrl) - return true -} - -// rollback the compact transaction when compactVolume return false -func cleanupCompactedVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeUrl string, vid storage.VolumeId) bool { - glog.V(0).Infoln("Start cleaning up", vid, "on", volumeUrl) - err := operation.WithVolumeServerClient(volumeUrl, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{ - VolumeId: uint32(vid), - }) - return err - }) - if err != nil { - glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, volumeUrl, err) - return false - } - glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, volumeUrl) - return false -} - -func tryCompactVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid storage.VolumeId, volumeUrl string) bool { - if compactVolume(ctx, grpcDialOption, volumeUrl, vid) == false { - return cleanupCompactedVolume(ctx, grpcDialOption, volumeUrl, vid) - } - return commitCompactedVolume(ctx, grpcDialOption, volumeUrl, vid) -} - -func tryBatchCompactVolume(ctx context.Context, grpcDialOption grpc.DialOption, - vid storage.VolumeId, urls []string) bool { - resultChan := make(chan error) - var wg sync.WaitGroup - wg.Add(len(urls)) - for _, url := range urls { - go func(volumeUrl string) { - defer wg.Done() - if tryCompactVolume(ctx, grpcDialOption, vid, volumeUrl) == false { - resultChan <- fmt.Errorf("url:%s", volumeUrl) - } - }(url) - } - - go func() { - wg.Wait() - close(resultChan) - }() - - var errs []string - for result := range resultChan { - if result != nil { - errs = append(errs, result.Error()) - } - } - if len(errs) > 0 { - glog.Errorf("consist volume:%v compact reversion failed, %s", vid, strings.Join(errs, "; ")) - return false - } - return true -} - -func isSameVolumeReplications(fileStat []FileStatus, volumeSizeLimit uint64) bool { - fileSizeSet := make(map[uint64]bool) - fileCountSet := make(map[uint64]bool) - lastModifiedSet := make(map[uint64]bool) - var oneFileSize uint64 = 0 - for _, v := range fileStat { - fileCountSet[v.fileStat.FileCount] = true - lastModifiedSet[v.fileStat.DatFileTimestamp] = true - fileSizeSet[v.fileStat.DatFileSize] = true - oneFileSize = v.fileStat.DatFileSize - } - - if (len(lastModifiedSet) == 1) && (len(fileCountSet) == 1) && - (len(fileSizeSet) == 1) && (oneFileSize >= volumeSizeLimit) { - return true - } - return false -} - -func syncReplication(grpcDialOption grpc.DialOption, srcUrl, destUrl string, vinfo storage.VolumeInfo) error { - ctx := context.Background() - err := operation.WithVolumeServerClient(destUrl, grpcDialOption, - func(client volume_server_pb.VolumeServerClient) error { - if _, err := client.ReplicateVolume(ctx, &volume_server_pb.ReplicateVolumeRequest{ - VolumeId: uint32(vinfo.Id), - Collection: vinfo.Collection, - Replication: vinfo.ReplicaPlacement.String(), - Ttl: vinfo.Ttl.String(), - SourceDataNode: srcUrl, - }); err != nil { - glog.Errorf("sync replication failed, %v", err) - return err - } - return nil - }) - return err -} From c0d6cc0067b3c43f34ec38f48e65852b74d61deb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 10 Apr 2019 23:25:41 -0700 Subject: [PATCH 170/450] weed mount: mount to the same folder name --- weed/command/mount_std.go | 5 ++++- 1 
file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index c047b94c3..101f6a852 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -9,6 +9,7 @@ import ( "github.com/spf13/viper" "os" "os/user" + "path" "runtime" "strconv" "strings" @@ -43,6 +44,8 @@ func runMount(cmd *Command, args []string) bool { mountMode = os.ModeDir | fileInfo.Mode() } + mountName := path.Base(*mountOptions.dir) + // detect current user uid, gid := uint32(0), uint32(0) if u, err := user.Current(); err == nil { @@ -57,7 +60,7 @@ func runMount(cmd *Command, args []string) bool { util.SetupProfiling(*mountCpuProfile, *mountMemProfile) options := []fuse.MountOption{ - fuse.VolumeName("SeaweedFS"), + fuse.VolumeName(mountName), fuse.FSName("SeaweedFS"), fuse.Subtype("SeaweedFS"), fuse.NoAppleDouble(), From 0e33272f569470fd974ef913e501a05371107390 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 10 Apr 2019 23:39:53 -0700 Subject: [PATCH 171/450] small refactoring --- weed/server/volume_grpc_replicate.go | 2 +- weed/storage/volume.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/weed/server/volume_grpc_replicate.go b/weed/server/volume_grpc_replicate.go index c641755d0..ae720144e 100644 --- a/weed/server/volume_grpc_replicate.go +++ b/weed/server/volume_grpc_replicate.go @@ -153,7 +153,7 @@ func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_se resp.IdxFileSize = v.IndexFileSize() resp.DatFileTimestamp = v.LastModifiedTime() resp.IdxFileTimestamp = v.LastModifiedTime() - resp.FileCount = uint64(v.FileCount()) + resp.FileCount = v.FileCount() return resp, nil } diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 280963c2c..572040d61 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -94,8 +94,8 @@ func (v *Volume)LastModifiedTime() uint64 { return v.lastModifiedTime } -func (v *Volume)FileCount() uint { - return uint(v.nm.FileCount()) +func (v *Volume)FileCount() uint64 { + return uint64(v.nm.FileCount()) } // Close cleanly shuts down this volume From 361912224dabfe1ed204615bb709a42956e3fcc2 Mon Sep 17 00:00:00 2001 From: Wine93 Date: Thu, 11 Apr 2019 09:18:53 +0000 Subject: [PATCH 172/450] typo: remove blank --- weed/command/backup.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/weed/command/backup.go b/weed/command/backup.go index 48e2eba89..3370a478a 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -33,16 +33,16 @@ var cmdBackup = &Command{ UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333", Short: "incrementally backup a volume to local folder", Long: `Incrementally backup volume data. - + It is expected that you use this inside a script, to loop through all possible volume ids that needs to be backup to local folder. - + The volume id does not need to exist locally or even remotely. This will help to backup future new volumes. - + Usually backing up is just copying the .dat (and .idx) files. But it's tricky to incrementally copy the differences. - + The complexity comes when there are multiple addition, deletion and compaction. This tool will handle them correctly and efficiently, avoiding unnecessary data transportation. 
`, From e11e127d91c3a6e1f4b13243a9c7cd1980159dd9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 11 Apr 2019 09:27:05 -0700 Subject: [PATCH 173/450] close grpc connections during errors --- weed/util/grpc_client_server.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index 5c08538dc..c35a13970 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -19,7 +19,7 @@ var ( grpcClientsLock sync.Mutex ) -func init(){ +func init() { http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 100 } @@ -81,6 +81,7 @@ func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, grpcClientsLock.Lock() delete(grpcClients, address) grpcClientsLock.Unlock() + grpcConnection.Close() } return err From 66c609d434a88c226cd3d9e530881ceaf765ff63 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 11 Apr 2019 15:05:10 -0700 Subject: [PATCH 174/450] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index fc79ec8dd..c64f58e4b 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ Your support will be really appreciated by me and other supporters! - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTc4MmVlYmFlNjBmZTgzZmJlYmI1MDE1YzkyNWYyZjkwZDFiM2RlMDdjNjVlNjdjYzc4NGFhZGIyYzEyMzJkYTA) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) - +- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction) ## Introduction From 59a9e15733051ccfe36d2bb2465e29e80459809b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 11 Apr 2019 19:40:36 -0700 Subject: [PATCH 175/450] 1.30 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index aa8399523..a32811692 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 29) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 30) ) From 2e03f3adef08e5dd1fd7d07d10a33a15ed5cdb3c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 11 Apr 2019 20:42:55 -0700 Subject: [PATCH 176/450] filer: rename -port.public to -port.readonly --- weed/command/filer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/filer.go b/weed/command/filer.go index 5b3f733bd..f247d1c2a 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -46,7 +46,7 @@ func init() { f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address") f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") f.grpcPort = cmdFiler.Flag.Int("port.grpc", 0, "filer grpc server listen port, default to http port + 10000") - f.publicPort = cmdFiler.Flag.Int("port.public", 0, "port opened to public") + f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public") f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified") f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") From 26aaccca088e099ea87d2ba60cfbdbc2e7f3d77f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: 
Fri, 12 Apr 2019 11:13:47 -0700 Subject: [PATCH 177/450] add stress filer test code --- .../bench_filer_upload/bench_filer_upload.go | 122 ++++++++++++++ .../stress_filer_upload.go | 149 ++++++++++++++++++ 2 files changed, 271 insertions(+) create mode 100644 unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go create mode 100644 unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go new file mode 100644 index 000000000..79ecaff12 --- /dev/null +++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go @@ -0,0 +1,122 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "mime/multipart" + "net/http" + "os" + "strings" + "sync" + "time" +) + +var ( + size = flag.Int("size", 1024, "file size") + concurrency = flag.Int("c", 4, "concurrent number of uploads") + times = flag.Int("n", 1024, "repeated number of times") + destination = flag.String("to", "http://localhost:8888/", "destination directory on filer") + + statsChan = make(chan stat, 8) +) + +type stat struct { + size int64 +} + +func main() { + + flag.Parse() + + data := make([]byte, *size) + println("data len", len(data)) + + var wg sync.WaitGroup + for x := 0; x < *concurrency; x++ { + wg.Add(1) + + client := &http.Client{} + + go func() { + defer wg.Done() + for t := 0; t < *times; t++ { + if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%d", t), *destination); err == nil { + statsChan <- stat{ + size: size, + } + }else { + log.Fatalf("upload: %v", err) + } + } + }() + } + + go func() { + ticker := time.NewTicker(500 * time.Millisecond) + + var lastTime time.Time + var counter, size int64 + for { + select { + case stat := <-statsChan: + size += stat.size + counter++ + case x := <-ticker.C: + if !lastTime.IsZero() { + elapsed := x.Sub(lastTime).Seconds() + fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n", + float64(counter)/elapsed, + float64(size/1024/1024)/elapsed) + } + lastTime = x + size = 0 + counter = 0 + } + } + }() + + wg.Wait() + +} + +func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) { + + if !strings.HasSuffix(destination, "/") { + destination = destination + "/" + } + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", filename) + if err != nil { + return 0, fmt.Errorf("fail to create form %v: %v", filename, err) + } + + part.Write(data) + + err = writer.Close() + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", filename, err) + } + + uri := destination + filename + + request, err := http.NewRequest("POST", uri, body) + request.Header.Set("Content-Type", writer.FormDataContentType()) + + resp, err := client.Do(request) + if err != nil { + return 0, fmt.Errorf("http POST %s: %v", uri, err) + } else { + body := &bytes.Buffer{} + _, err := body.ReadFrom(resp.Body) + if err != nil { + return 0, fmt.Errorf("read http POST %s response: %v", uri, err) + } + resp.Body.Close() + } + + return int64(len(data)), nil +} diff --git a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go new file mode 100644 index 000000000..41fb85ae0 --- /dev/null +++ 
b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go @@ -0,0 +1,149 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +var ( + dir = flag.String("dir", ".", "upload files under this directory") + concurrency = flag.Int("c", 1, "concurrent number of uploads") + times = flag.Int("n", 1, "repeated number of times") + destination = flag.String("to", "http://localhost:8888/", "destination directory on filer") + + statsChan = make(chan stat, 8) +) + +type stat struct { + size int64 +} + +func main() { + + flag.Parse() + + var fileNames []string + + files, err := ioutil.ReadDir(*dir) + if err != nil { + log.Fatalf("fail to read dir %v: %v", *dir, err) + } + + for _, file := range files { + if file.IsDir() { + continue + } + fileNames = append(fileNames, filepath.Join(*dir, file.Name())) + } + + var wg sync.WaitGroup + for x := 0; x < *concurrency; x++ { + wg.Add(1) + + client := &http.Client{} + + go func() { + defer wg.Done() + rand.Shuffle(len(fileNames), func(i, j int) { + fileNames[i], fileNames[j] = fileNames[j], fileNames[i] + }) + for t := 0; t < *times; t++ { + for _, filename := range fileNames { + if size, err := uploadFileToFiler(client, filename, *destination); err == nil { + statsChan <- stat{ + size: size, + } + } + } + } + }() + } + + go func() { + ticker := time.NewTicker(500 * time.Millisecond) + + var lastTime time.Time + var counter, size int64 + for { + select { + case stat := <-statsChan: + size += stat.size + counter++ + case x := <-ticker.C: + if !lastTime.IsZero() { + elapsed := x.Sub(lastTime).Seconds() + fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n", + float64(counter)/elapsed, + float64(size/1024/1024)/elapsed) + } + lastTime = x + size = 0 + counter = 0 + } + } + }() + + wg.Wait() + +} + +func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) { + file, err := os.Open(filename) + if err != nil { + panic(err) + } + defer file.Close() + + fi, err := file.Stat() + + if !strings.HasSuffix(destination, "/") { + destination = destination + "/" + } + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", file.Name()) + if err != nil { + return 0, fmt.Errorf("fail to create form %v: %v", file.Name(), err) + } + _, err = io.Copy(part, file) + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err) + } + + err = writer.Close() + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err) + } + + uri := destination + file.Name() + + request, err := http.NewRequest("POST", uri, body) + request.Header.Set("Content-Type", writer.FormDataContentType()) + + resp, err := client.Do(request) + if err != nil { + return 0, fmt.Errorf("http POST %s: %v", uri, err) + } else { + body := &bytes.Buffer{} + _, err := body.ReadFrom(resp.Body) + if err != nil { + return 0, fmt.Errorf("read http POST %s response: %v", uri, err) + } + resp.Body.Close() + } + + return fi.Size(), nil +} From 3e8a3a8fec6df5b39b6b2b4603df8bc183d90aa8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 14 Apr 2019 23:00:37 -0700 Subject: [PATCH 178/450] fix race detector found problems --- weed/server/volume_grpc_client_to_master.go | 5 +- weed/storage/needle_map_memory.go | 6 +- weed/storage/needle_map_metric.go | 75 ++++++++++++--------- weed/storage/store.go | 18 +++-- 4 files changed, 62 
insertions(+), 42 deletions(-) diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 94e99c8f6..cf01b5bd8 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -2,10 +2,11 @@ package weed_server import ( "fmt" + "time" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/spf13/viper" "google.golang.org/grpc" - "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" @@ -75,7 +76,7 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA return } if in.GetVolumeSizeLimit() != 0 { - vs.store.VolumeSizeLimit = in.GetVolumeSizeLimit() + vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit()) } if in.GetLeader() != "" && masterNode != in.GetLeader() { glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode) diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index ad3bd3f7a..a3b574324 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -47,9 +47,7 @@ func LoadBtreeNeedleMap(file *os.File) (*NeedleMap, error) { func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { e := WalkIndexFile(file, func(key NeedleId, offset Offset, size uint32) error { - if key > nm.MaximumFileKey { - nm.MaximumFileKey = key - } + nm.MaybeSetMaxFileKey(key) if !offset.IsZero() && size != TombstoneFileSize { nm.FileCounter++ nm.FileByteCounter = nm.FileByteCounter + uint64(size) @@ -67,7 +65,7 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { } return nil }) - glog.V(1).Infof("max file key: %d for file: %s", nm.MaximumFileKey, file.Name()) + glog.V(1).Infof("max file key: %d for file: %s", nm.MaxFileKey(), file.Name()) return nm, e } diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go index cc3d9e028..0e2e16964 100644 --- a/weed/storage/needle_map_metric.go +++ b/weed/storage/needle_map_metric.go @@ -2,51 +2,64 @@ package storage import ( "fmt" + "os" + "sync/atomic" + . 
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/willf/bloom" - "os" ) type mapMetric struct { - DeletionCounter int `json:"DeletionCounter"` - FileCounter int `json:"FileCounter"` - DeletionByteCounter uint64 `json:"DeletionByteCounter"` - FileByteCounter uint64 `json:"FileByteCounter"` - MaximumFileKey NeedleId `json:"MaxFileKey"` + DeletionCounter uint32 `json:"DeletionCounter"` + FileCounter uint32 `json:"FileCounter"` + DeletionByteCounter uint64 `json:"DeletionByteCounter"` + FileByteCounter uint64 `json:"FileByteCounter"` + MaximumFileKey uint64 `json:"MaxFileKey"` } func (mm *mapMetric) logDelete(deletedByteCount uint32) { - mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(deletedByteCount) - mm.DeletionCounter++ + mm.LogDeletionCounter(deletedByteCount) } func (mm *mapMetric) logPut(key NeedleId, oldSize uint32, newSize uint32) { - if key > mm.MaximumFileKey { - mm.MaximumFileKey = key + mm.MaybeSetMaxFileKey(key) + mm.LogFileCounter(newSize) + if oldSize > 0 && oldSize != TombstoneFileSize { + mm.LogDeletionCounter(oldSize) } - mm.FileCounter++ - mm.FileByteCounter = mm.FileByteCounter + uint64(newSize) +} +func (mm mapMetric) LogFileCounter(newSize uint32) { + atomic.AddUint32(&mm.FileCounter, 1) + atomic.AddUint64(&mm.FileByteCounter, uint64(newSize)) +} +func (mm mapMetric) LogDeletionCounter(oldSize uint32) { if oldSize > 0 { - mm.DeletionCounter++ - mm.DeletionByteCounter = mm.DeletionByteCounter + uint64(oldSize) + atomic.AddUint32(&mm.DeletionCounter, 1) + atomic.AddUint64(&mm.DeletionByteCounter, uint64(oldSize)) + } +} +func (mm mapMetric) ContentSize() uint64 { + return atomic.LoadUint64(&mm.FileByteCounter) +} +func (mm mapMetric) DeletedSize() uint64 { + return atomic.LoadUint64(&mm.DeletionByteCounter) +} +func (mm mapMetric) FileCount() int { + return int(atomic.LoadUint32(&mm.FileCounter)) +} +func (mm mapMetric) DeletedCount() int { + return int(atomic.LoadUint32(&mm.DeletionCounter)) +} +func (mm mapMetric) MaxFileKey() NeedleId { + t := uint64(mm.MaximumFileKey) + return NeedleId(t) +} +func (mm mapMetric) MaybeSetMaxFileKey(key NeedleId) { + if key > mm.MaxFileKey() { + atomic.StoreUint64(&mm.MaximumFileKey, uint64(key)) } } -func (mm mapMetric) ContentSize() uint64 { - return mm.FileByteCounter -} -func (mm mapMetric) DeletedSize() uint64 { - return mm.DeletionByteCounter -} -func (mm mapMetric) FileCount() int { - return mm.FileCounter -} -func (mm mapMetric) DeletedCount() int { - return mm.DeletionCounter -} -func (mm mapMetric) MaxFileKey() NeedleId { - return mm.MaximumFileKey -} func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) { mm = &mapMetric{} @@ -56,9 +69,7 @@ func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) { bf = bloom.NewWithEstimates(uint(entryCount), 0.001) }, func(key NeedleId, offset Offset, size uint32) error { - if key > mm.MaximumFileKey { - mm.MaximumFileKey = key - } + mm.MaybeSetMaxFileKey(key) NeedleIdToBytes(buf, key) if size != TombstoneFileSize { mm.FileByteCounter += uint64(size) diff --git a/weed/storage/store.go b/weed/storage/store.go index 56e973738..d866d2e11 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -2,6 +2,8 @@ package storage import ( "fmt" + "sync/atomic" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" @@ -22,7 +24,7 @@ type Store struct { dataCenter string //optional informaton, overwriting master setting if exists rack string //optional information, overwriting master setting if exists connected bool - VolumeSizeLimit uint64 //read from the master + volumeSizeLimit uint64 //read from the master Client master_pb.Seaweed_SendHeartbeatClient NeedleMapType NeedleMapType NewVolumeIdChan chan VolumeId @@ -30,7 +32,7 @@ type Store struct { } func (s *Store) String() (str string) { - str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.VolumeSizeLimit) + str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.GetVolumeSizeLimit()) return } @@ -150,7 +152,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { if maxFileKey < v.nm.MaxFileKey() { maxFileKey = v.nm.MaxFileKey() } - if !v.expired(s.VolumeSizeLimit) { + if !v.expired(s.GetVolumeSizeLimit()) { volumeMessages = append(volumeMessages, v.ToVolumeInformationMessage()) } else { if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) { @@ -192,7 +194,7 @@ func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) { if MaxPossibleVolumeSize >= v.ContentSize()+uint64(size) { _, size, err = v.writeNeedle(n) } else { - err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.VolumeSizeLimit, v.ContentSize()) + err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) } return } @@ -255,3 +257,11 @@ func (s *Store) DeleteVolume(i VolumeId) error { return fmt.Errorf("Volume %d not found on disk", i) } + +func (s *Store) SetVolumeSizeLimit(x uint64) { + atomic.StoreUint64(&s.volumeSizeLimit, x) +} + +func (s *Store) GetVolumeSizeLimit() uint64 { + return atomic.LoadUint64(&s.volumeSizeLimit) +} From e85048bcdc7a56c0ec9353f0459737481eb1c48d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 14 Apr 2019 23:28:24 -0700 Subject: [PATCH 179/450] http exhaust and close response body --- weed/operation/chunked_file.go | 6 +++++- weed/server/filer_server_handlers_read.go | 6 +++++- weed/server/filer_server_handlers_write.go | 6 +++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index f3f6e7b00..995f06b53 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -6,6 +6,7 @@ import ( "fmt" "google.golang.org/grpc" "io" + "io/ioutil" "net/http" "sort" @@ -103,7 +104,10 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, if err != nil { return written, err } - defer resp.Body.Close() + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() switch resp.StatusCode { case http.StatusRequestedRangeNotSatisfiable: diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 63fd2cc39..9ddf4dae5 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -3,6 +3,7 @@ package weed_server import ( "context" "io" + "io/ioutil" "mime" "mime/multipart" "net/http" @@ -107,7 +108,10 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, writeJsonError(w, r, http.StatusInternalServerError, do_err) return } - defer 
resp.Body.Close() + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() for k, v := range resp.Header { w.Header()[k] = v } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index cb30ff4c0..92f5f19b6 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "errors" + "io" "io/ioutil" "mime" "net/http" @@ -131,7 +132,10 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { writeJsonError(w, r, http.StatusInternalServerError, do_err) return } - defer resp.Body.Close() + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() etag := resp.Header.Get("ETag") resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { From 6e116b3d67a129dda31a9d3f3cd284856bf64deb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 15 Apr 2019 09:09:46 -0700 Subject: [PATCH 180/450] volume: reset leader if error happens --- weed/server/volume_grpc_client_to_master.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index cf01b5bd8..7667ed363 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -41,6 +41,7 @@ func (vs *VolumeServer) heartbeat() { if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) + newLeader = "" } } } From 59966561414997b18be3413d05c02a7eb15b6d12 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 15 Apr 2019 21:11:45 -0700 Subject: [PATCH 181/450] volume: close request body if any parsing error --- weed/storage/needle_parse_multipart.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/weed/storage/needle_parse_multipart.go b/weed/storage/needle_parse_multipart.go index 3dba81fcf..93b4c2dce 100644 --- a/weed/storage/needle_parse_multipart.go +++ b/weed/storage/needle_parse_multipart.go @@ -3,6 +3,7 @@ package storage import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "io" "io/ioutil" "mime" "net/http" @@ -13,6 +14,12 @@ import ( func parseMultipart(r *http.Request) ( fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { + defer func() { + if e != nil && r.Body != nil { + io.Copy(ioutil.Discard, r.Body) + r.Body.Close() + } + }() form, fe := r.MultipartReader() if fe != nil { glog.V(0).Infoln("MultipartReader [ERROR]", fe) From af52fb23163e952d55b0f529fb9a7509b17db704 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 15 Apr 2019 21:13:07 -0700 Subject: [PATCH 182/450] stress test filer --- .../bench_filer_upload/bench_filer_upload.go | 34 +++++++++++++------ .../stress_filer_upload.go | 1 + 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go index 79ecaff12..b2e4b28c6 100644 --- a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go +++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go @@ -4,7 +4,10 @@ import ( "bytes" "flag" "fmt" + "io" + "io/ioutil" "log" + "math/rand" "mime/multipart" "net/http" "os" @@ -17,6 +20,7 @@ var ( size = flag.Int("size", 1024, "file size") concurrency = flag.Int("c", 4, "concurrent number of uploads") times 
= flag.Int("n", 1024, "repeated number of times") + fileCount = flag.Int("fileCount", 1, "number of files to write") destination = flag.String("to", "http://localhost:8888/", "destination directory on filer") statsChan = make(chan stat, 8) @@ -37,24 +41,32 @@ func main() { for x := 0; x < *concurrency; x++ { wg.Add(1) - client := &http.Client{} - - go func() { + go func(x int) { defer wg.Done() + + client := &http.Client{Transport: &http.Transport{ + MaxConnsPerHost: 1024, + MaxIdleConnsPerHost: 1024, + }} + r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x))) + for t := 0; t < *times; t++ { - if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%d", t), *destination); err == nil { - statsChan <- stat{ - size: size, + for f := 0; f < *fileCount; f++ { + fn := r.Intn(*fileCount) + if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil { + statsChan <- stat{ + size: size, + } + } else { + log.Fatalf("client %d upload %d times: %v", x, t, err) } - }else { - log.Fatalf("upload: %v", err) } } - }() + }(x) } go func() { - ticker := time.NewTicker(500 * time.Millisecond) + ticker := time.NewTicker(1000 * time.Millisecond) var lastTime time.Time var counter, size int64 @@ -105,6 +117,7 @@ func uploadFileToFiler(client *http.Client, data []byte, filename, destination s request, err := http.NewRequest("POST", uri, body) request.Header.Set("Content-Type", writer.FormDataContentType()) + // request.Close = true // can not use this, which do not reuse http connection, impacting filer->volume also. resp, err := client.Do(request) if err != nil { @@ -115,6 +128,7 @@ func uploadFileToFiler(client *http.Client, data []byte, filename, destination s if err != nil { return 0, fmt.Errorf("read http POST %s response: %v", uri, err) } + io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() } diff --git a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go index 41fb85ae0..8b986b546 100644 --- a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go +++ b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go @@ -142,6 +142,7 @@ func uploadFileToFiler(client *http.Client, filename, destination string) (size if err != nil { return 0, fmt.Errorf("read http POST %s response: %v", uri, err) } + io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() } From 359b7c16507136c927a0ca42d1ade33d142fdc44 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 15 Apr 2019 21:35:12 -0700 Subject: [PATCH 183/450] Revert "volume: close request body if any parsing error" This reverts commit 59966561414997b18be3413d05c02a7eb15b6d12. 
--- weed/storage/needle_parse_multipart.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/weed/storage/needle_parse_multipart.go b/weed/storage/needle_parse_multipart.go index 93b4c2dce..3dba81fcf 100644 --- a/weed/storage/needle_parse_multipart.go +++ b/weed/storage/needle_parse_multipart.go @@ -3,7 +3,6 @@ package storage import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "io" "io/ioutil" "mime" "net/http" @@ -14,12 +13,6 @@ import ( func parseMultipart(r *http.Request) ( fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { - defer func() { - if e != nil && r.Body != nil { - io.Copy(ioutil.Discard, r.Body) - r.Body.Close() - } - }() form, fe := r.MultipartReader() if fe != nil { glog.V(0).Infoln("MultipartReader [ERROR]", fe) From bd76ad0ff1d1bb81cb77cb3417968d3b496380b4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 15 Apr 2019 21:43:28 -0700 Subject: [PATCH 184/450] Revert "Revert "volume: close request body if any parsing error"" This reverts commit 359b7c16507136c927a0ca42d1ade33d142fdc44. --- weed/storage/needle_parse_multipart.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/weed/storage/needle_parse_multipart.go b/weed/storage/needle_parse_multipart.go index 3dba81fcf..93b4c2dce 100644 --- a/weed/storage/needle_parse_multipart.go +++ b/weed/storage/needle_parse_multipart.go @@ -3,6 +3,7 @@ package storage import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "io" "io/ioutil" "mime" "net/http" @@ -13,6 +14,12 @@ import ( func parseMultipart(r *http.Request) ( fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { + defer func() { + if e != nil && r.Body != nil { + io.Copy(ioutil.Discard, r.Body) + r.Body.Close() + } + }() form, fe := r.MultipartReader() if fe != nil { glog.V(0).Infoln("MultipartReader [ERROR]", fe) From f567777d6464efdb0000eac527755a15a589930c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 15 Apr 2019 21:58:43 -0700 Subject: [PATCH 185/450] volume: fix needle checking for deleted needles --- weed/storage/volume_read_write.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go index 93f4ed1c1..363835eb9 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_read_write.go @@ -21,11 +21,11 @@ func (v *Volume) isFileUnchanged(n *Needle) bool { return false } nv, ok := v.nm.Get(n.Id) - if ok && !nv.Offset.IsZero() { + if ok && !nv.Offset.IsZero() && nv.Size != TombstoneFileSize { oldNeedle := new(Needle) err := oldNeedle.ReadData(v.dataFile, nv.Offset.ToAcutalOffset(), nv.Size, v.Version()) if err != nil { - glog.V(0).Infof("Failed to check updated file %v", err) + glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err) return false } if oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) { From 967e108b9a33d4e3cdd59dacd3356c070ed312a9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 15 Apr 2019 22:00:50 -0700 Subject: [PATCH 186/450] increase max idle conn per hosts --- weed/util/grpc_client_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index c35a13970..a2407eb13 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go 
@@ -20,7 +20,7 @@ var ( ) func init() { - http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 100 + http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024 } func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { From b3b42bc947ec44acdc69efdeebb623a0c092078a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 16 Apr 2019 00:44:31 -0700 Subject: [PATCH 187/450] replicate need to include new entry path --- other/java/client/src/main/proto/filer.proto | 6 + weed/filer2/entry.go | 13 +- weed/filer2/filer_notify.go | 12 +- weed/pb/filer.proto | 6 + weed/pb/filer_pb/filer.pb.go | 259 ++++++++++-------- weed/pb/volume_server_pb/volume_server.pb.go | 150 +++++----- weed/replication/replicator.go | 8 +- weed/replication/sink/azuresink/azure_sink.go | 2 +- weed/replication/sink/b2sink/b2_sink.go | 2 +- weed/replication/sink/filersink/filer_sink.go | 4 +- weed/replication/sink/gcssink/gcs_sink.go | 2 +- weed/replication/sink/replication_sink.go | 2 +- weed/replication/sink/s3sink/s3_sink.go | 2 +- 13 files changed, 270 insertions(+), 198 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 07c73f1d4..350288b53 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -72,10 +72,16 @@ message Entry { map extended = 5; } +message FullEntry { + string dir = 1; + Entry entry = 2; +} + message EventNotification { Entry old_entry = 1; Entry new_entry = 2; bool delete_chunks = 3; + string new_parent_path = 4; } message FileChunk { diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go index f17a11727..3f8a19114 100644 --- a/weed/filer2/entry.go +++ b/weed/filer2/entry.go @@ -52,9 +52,20 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry { return nil } return &filer_pb.Entry{ - Name: string(entry.FullPath), + Name: entry.FullPath.Name(), IsDirectory: entry.IsDirectory(), Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, } } + +func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { + if entry == nil { + return nil + } + dir, _ := entry.FullPath.DirAndName() + return &filer_pb.FullEntry{ + Dir: dir, + Entry: entry.ToProtoEntry(), + } +} diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go index b3c215249..c37381116 100644 --- a/weed/filer2/filer_notify.go +++ b/weed/filer2/filer_notify.go @@ -20,12 +20,18 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) glog.V(3).Infof("notifying entry update %v", key) + newParentPath := "" + if newEntry != nil { + newParentPath, _ = newEntry.FullPath.DirAndName() + } + notification.Queue.SendMessage( key, &filer_pb.EventNotification{ - OldEntry: oldEntry.ToProtoEntry(), - NewEntry: newEntry.ToProtoEntry(), - DeleteChunks: deleteChunks, + OldEntry: oldEntry.ToProtoEntry(), + NewEntry: newEntry.ToProtoEntry(), + DeleteChunks: deleteChunks, + NewParentPath: newParentPath, }, ) diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 07c73f1d4..350288b53 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -72,10 +72,16 @@ message Entry { map extended = 5; } +message FullEntry { + string dir = 1; + Entry entry = 2; +} + message EventNotification { Entry old_entry = 1; Entry new_entry = 2; bool delete_chunks = 3; + string new_parent_path = 4; } message FileChunk { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 4f8c915d7..3e717f0e6 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -14,6 
+14,7 @@ It has these top-level messages: ListEntriesRequest ListEntriesResponse Entry + FullEntry EventNotification FileChunk FuseAttributes @@ -210,16 +211,41 @@ func (m *Entry) GetExtended() map[string][]byte { return nil } +type FullEntry struct { + Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` +} + +func (m *FullEntry) Reset() { *m = FullEntry{} } +func (m *FullEntry) String() string { return proto.CompactTextString(m) } +func (*FullEntry) ProtoMessage() {} +func (*FullEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *FullEntry) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func (m *FullEntry) GetEntry() *Entry { + if m != nil { + return m.Entry + } + return nil +} + type EventNotification struct { - OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"` - NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"` - DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"` + OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"` + NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"` + DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"` + NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath" json:"new_parent_path,omitempty"` } func (m *EventNotification) Reset() { *m = EventNotification{} } func (m *EventNotification) String() string { return proto.CompactTextString(m) } func (*EventNotification) ProtoMessage() {} -func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *EventNotification) GetOldEntry() *Entry { if m != nil { @@ -242,6 +268,13 @@ func (m *EventNotification) GetDeleteChunks() bool { return false } +func (m *EventNotification) GetNewParentPath() string { + if m != nil { + return m.NewParentPath + } + return "" +} + type FileChunk struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` @@ -254,7 +287,7 @@ type FileChunk struct { func (m *FileChunk) Reset() { *m = FileChunk{} } func (m *FileChunk) String() string { return proto.CompactTextString(m) } func (*FileChunk) ProtoMessage() {} -func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *FileChunk) GetFileId() string { if m != nil { @@ -317,7 +350,7 @@ type FuseAttributes struct { func (m *FuseAttributes) Reset() { *m = FuseAttributes{} } func (m *FuseAttributes) String() string { return proto.CompactTextString(m) } func (*FuseAttributes) ProtoMessage() {} -func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *FuseAttributes) GetFileSize() uint64 { if m != nil { @@ -418,7 +451,7 @@ type CreateEntryRequest struct { func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) } 
func (*CreateEntryRequest) ProtoMessage() {} -func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *CreateEntryRequest) GetDirectory() string { if m != nil { @@ -440,7 +473,7 @@ type CreateEntryResponse struct { func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) } func (*CreateEntryResponse) ProtoMessage() {} -func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } type UpdateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` @@ -450,7 +483,7 @@ type UpdateEntryRequest struct { func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} } func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) } func (*UpdateEntryRequest) ProtoMessage() {} -func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *UpdateEntryRequest) GetDirectory() string { if m != nil { @@ -472,7 +505,7 @@ type UpdateEntryResponse struct { func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} } func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) } func (*UpdateEntryResponse) ProtoMessage() {} -func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } type DeleteEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` @@ -485,7 +518,7 @@ type DeleteEntryRequest struct { func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} } func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) } func (*DeleteEntryRequest) ProtoMessage() {} -func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *DeleteEntryRequest) GetDirectory() string { if m != nil { @@ -521,7 +554,7 @@ type DeleteEntryResponse struct { func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} } func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) } func (*DeleteEntryResponse) ProtoMessage() {} -func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } type AtomicRenameEntryRequest struct { OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"` @@ -533,7 +566,7 @@ type AtomicRenameEntryRequest struct { func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} } func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) } func (*AtomicRenameEntryRequest) ProtoMessage() {} -func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func (m *AtomicRenameEntryRequest) 
GetOldDirectory() string { if m != nil { @@ -569,7 +602,7 @@ type AtomicRenameEntryResponse struct { func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} } func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) } func (*AtomicRenameEntryResponse) ProtoMessage() {} -func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } type AssignVolumeRequest struct { Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` @@ -582,7 +615,7 @@ type AssignVolumeRequest struct { func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *AssignVolumeRequest) GetCount() int32 { if m != nil { @@ -630,7 +663,7 @@ type AssignVolumeResponse struct { func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *AssignVolumeResponse) GetFileId() string { if m != nil { @@ -674,7 +707,7 @@ type LookupVolumeRequest struct { func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *LookupVolumeRequest) GetVolumeIds() []string { if m != nil { @@ -690,7 +723,7 @@ type Locations struct { func (m *Locations) Reset() { *m = Locations{} } func (m *Locations) String() string { return proto.CompactTextString(m) } func (*Locations) ProtoMessage() {} -func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *Locations) GetLocations() []*Location { if m != nil { @@ -707,7 +740,7 @@ type Location struct { func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *Location) GetUrl() string { if m != nil { @@ -730,7 +763,7 @@ type LookupVolumeResponse struct { func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations 
{ if m != nil { @@ -746,7 +779,7 @@ type DeleteCollectionRequest struct { func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *DeleteCollectionRequest) GetCollection() string { if m != nil { @@ -761,7 +794,7 @@ type DeleteCollectionResponse struct { func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } type StatisticsRequest struct { Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` @@ -772,7 +805,7 @@ type StatisticsRequest struct { func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *StatisticsRequest) GetReplication() string { if m != nil { @@ -807,7 +840,7 @@ type StatisticsResponse struct { func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *StatisticsResponse) GetReplication() string { if m != nil { @@ -857,6 +890,7 @@ func init() { proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest") proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse") proto.RegisterType((*Entry)(nil), "filer_pb.Entry") + proto.RegisterType((*FullEntry)(nil), "filer_pb.FullEntry") proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification") proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk") proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes") @@ -1252,92 +1286,95 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1391 bytes of a gzipped FileDescriptorProto + // 1435 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x6e, 0xdc, 0x44, - 0x14, 0xae, 0xf7, 0x2f, 0xf1, 0xd9, 0xdd, 0x92, 0x4c, 0x02, 0x75, 0x37, 0x49, 0xd9, 0x3a, 0x14, - 0xa5, 0xa2, 0x8a, 0xaa, 0xc2, 0x45, 0x4b, 0x85, 0x44, 0x9b, 0xa6, 0x52, 0xa5, 0xb4, 0x45, 0x4e, - 0x8b, 0x84, 0x90, 0xb0, 0x1c, 0x7b, 0xb2, 0x1d, 0xc5, 0x6b, 0x2f, 0x9e, 0x71, 0xd2, 0xf2, 0x08, - 0xdc, 0x70, 0xc3, 0x15, 0x12, 0x17, 0x5c, 0xf1, 0x16, 0xdc, 0xf0, 0x14, 0xbc, 0x04, 0xcf, 0x80, - 0xce, 0xcc, 0xd8, 0x3b, 0x5e, 0x3b, 0x29, 0x08, 0xf5, 0x6e, 0xe6, 0xfc, 0x7e, 0xe7, 0xcc, 0xf9, - 0xb1, 0xa1, 
0x7f, 0xcc, 0x62, 0x9a, 0xed, 0xce, 0xb2, 0x54, 0xa4, 0x64, 0x59, 0x5e, 0xfc, 0xd9, - 0x91, 0xfb, 0x1c, 0x36, 0x0e, 0xd2, 0xf4, 0x24, 0x9f, 0x3d, 0x62, 0x19, 0x0d, 0x45, 0x9a, 0xbd, - 0xd9, 0x4f, 0x44, 0xf6, 0xc6, 0xa3, 0xdf, 0xe7, 0x94, 0x0b, 0xb2, 0x09, 0x76, 0x54, 0x30, 0x1c, - 0x6b, 0x6c, 0xed, 0xd8, 0xde, 0x9c, 0x40, 0x08, 0x74, 0x92, 0x60, 0x4a, 0x9d, 0x96, 0x64, 0xc8, - 0xb3, 0xbb, 0x0f, 0x9b, 0xcd, 0x06, 0xf9, 0x2c, 0x4d, 0x38, 0x25, 0x37, 0xa0, 0x4b, 0x91, 0x20, - 0xad, 0xf5, 0xef, 0xbc, 0xb7, 0x5b, 0x40, 0xd9, 0x55, 0x72, 0x8a, 0xeb, 0xfe, 0x61, 0x01, 0x39, - 0x60, 0x5c, 0x20, 0x91, 0x51, 0xfe, 0xef, 0xf0, 0x7c, 0x00, 0xbd, 0x59, 0x46, 0x8f, 0xd9, 0x6b, - 0x8d, 0x48, 0xdf, 0xc8, 0x2d, 0x58, 0xe5, 0x22, 0xc8, 0xc4, 0xe3, 0x2c, 0x9d, 0x3e, 0x66, 0x31, - 0x7d, 0x86, 0xa0, 0xdb, 0x52, 0xa4, 0xce, 0x20, 0xbb, 0x40, 0x58, 0x12, 0xc6, 0x39, 0x67, 0xa7, - 0xf4, 0xb0, 0xe0, 0x3a, 0x9d, 0xb1, 0xb5, 0xb3, 0xec, 0x35, 0x70, 0xc8, 0x3a, 0x74, 0x63, 0x36, - 0x65, 0xc2, 0xe9, 0x8e, 0xad, 0x9d, 0xa1, 0xa7, 0x2e, 0xee, 0x97, 0xb0, 0x56, 0xc1, 0xaf, 0xc3, - 0xbf, 0x09, 0x4b, 0x54, 0x91, 0x1c, 0x6b, 0xdc, 0x6e, 0x4a, 0x40, 0xc1, 0x77, 0x7f, 0x6d, 0x41, - 0x57, 0x92, 0xca, 0x3c, 0x5b, 0xf3, 0x3c, 0x93, 0xeb, 0x30, 0x60, 0xdc, 0x9f, 0x27, 0xa3, 0x25, - 0xf1, 0xf5, 0x19, 0x2f, 0xf3, 0x4e, 0x3e, 0x81, 0x5e, 0xf8, 0x2a, 0x4f, 0x4e, 0xb8, 0xd3, 0x96, - 0xae, 0xd6, 0xe6, 0xae, 0x30, 0xd8, 0x3d, 0xe4, 0x79, 0x5a, 0x84, 0xdc, 0x05, 0x08, 0x84, 0xc8, - 0xd8, 0x51, 0x2e, 0x28, 0x97, 0xd1, 0xf6, 0xef, 0x38, 0x86, 0x42, 0xce, 0xe9, 0x83, 0x92, 0xef, - 0x19, 0xb2, 0xe4, 0x1e, 0x2c, 0xd3, 0xd7, 0x82, 0x26, 0x11, 0x8d, 0x9c, 0xae, 0x74, 0xb4, 0xb5, - 0x10, 0xd3, 0xee, 0xbe, 0xe6, 0xab, 0x08, 0x4b, 0xf1, 0xd1, 0x7d, 0x18, 0x56, 0x58, 0x64, 0x05, - 0xda, 0x27, 0xb4, 0x78, 0x59, 0x3c, 0x62, 0x76, 0x4f, 0x83, 0x38, 0x57, 0x45, 0x36, 0xf0, 0xd4, - 0xe5, 0xf3, 0xd6, 0x5d, 0xcb, 0xfd, 0xd9, 0x82, 0xd5, 0xfd, 0x53, 0x9a, 0x88, 0x67, 0xa9, 0x60, - 0xc7, 0x2c, 0x0c, 0x04, 0x4b, 0x13, 0x72, 0x0b, 0xec, 0x34, 0x8e, 0xfc, 0x0b, 0x6b, 0x6c, 0x39, - 0x8d, 0xb5, 0xbf, 0x5b, 0x60, 0x27, 0xf4, 0x4c, 0x4b, 0xb7, 0xce, 0x91, 0x4e, 0xe8, 0x99, 0x92, - 0xde, 0x86, 0x61, 0x44, 0x63, 0x2a, 0xa8, 0x5f, 0xe6, 0x15, 0x93, 0x3e, 0x50, 0x44, 0x99, 0x4f, - 0xee, 0xfe, 0x66, 0x81, 0x5d, 0xa6, 0x97, 0x5c, 0x81, 0x25, 0x34, 0xe7, 0xb3, 0x48, 0x07, 0xd5, - 0xc3, 0xeb, 0x93, 0x08, 0x6b, 0x35, 0x3d, 0x3e, 0xe6, 0x54, 0x48, 0xb7, 0x6d, 0x4f, 0xdf, 0xf0, - 0xad, 0x39, 0xfb, 0x41, 0x95, 0x67, 0xc7, 0x93, 0x67, 0xcc, 0xc1, 0x54, 0xb0, 0x29, 0x95, 0xcf, - 0xd2, 0xf6, 0xd4, 0x85, 0xac, 0x41, 0x97, 0xfa, 0x22, 0x98, 0xc8, 0xba, 0xb3, 0xbd, 0x0e, 0x7d, - 0x11, 0x4c, 0xc8, 0x47, 0x70, 0x99, 0xa7, 0x79, 0x16, 0x52, 0xbf, 0x70, 0xdb, 0x93, 0xdc, 0x81, - 0xa2, 0x3e, 0x96, 0xce, 0xdd, 0xbf, 0x5b, 0x70, 0xb9, 0xfa, 0xa2, 0x64, 0x03, 0x6c, 0xa9, 0x21, - 0x9d, 0x5b, 0xd2, 0xb9, 0x9c, 0x12, 0x87, 0x15, 0x00, 0x2d, 0x13, 0x40, 0xa1, 0x32, 0x4d, 0x23, - 0x85, 0x77, 0xa8, 0x54, 0x9e, 0xa6, 0x11, 0xc5, 0x97, 0xcc, 0x59, 0x24, 0x11, 0x0f, 0x3d, 0x3c, - 0x22, 0x65, 0xc2, 0x22, 0xdd, 0x25, 0x78, 0xc4, 0x1c, 0x84, 0x99, 0xb4, 0xdb, 0x53, 0x39, 0x50, - 0x37, 0xcc, 0xc1, 0x14, 0xa9, 0x4b, 0x2a, 0x30, 0x3c, 0x93, 0x31, 0xf4, 0x33, 0x3a, 0x8b, 0xf5, - 0x33, 0x3b, 0xcb, 0x92, 0x65, 0x92, 0xc8, 0x35, 0x80, 0x30, 0x8d, 0x63, 0x1a, 0x4a, 0x01, 0x5b, - 0x0a, 0x18, 0x14, 0x7c, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xb1, 0xb5, 0xd3, 0xf5, 0x7a, - 0x42, 0xc4, 0x87, 0x34, 0xc4, 0x38, 0x72, 0x4e, 0x33, 0x5f, 0xf6, 0x58, 0x5f, 0xea, 0x2d, 0x23, - 0x41, 0x4e, 0x83, 0x2d, 0x80, 0x49, 
0x96, 0xe6, 0x33, 0xc5, 0x1d, 0x8c, 0xdb, 0x38, 0x72, 0x24, - 0x45, 0xb2, 0x6f, 0xc0, 0x65, 0xfe, 0x66, 0x1a, 0xb3, 0xe4, 0xc4, 0x17, 0x41, 0x36, 0xa1, 0xc2, - 0x19, 0x4a, 0x03, 0x43, 0x4d, 0x7d, 0x21, 0x89, 0xee, 0x37, 0x40, 0xf6, 0x32, 0x1a, 0x08, 0xfa, - 0x1f, 0xa6, 0x6b, 0x39, 0x29, 0x5b, 0x17, 0x4e, 0xca, 0xf7, 0x61, 0xad, 0x62, 0x5a, 0x0d, 0x1a, - 0xf4, 0xf8, 0x72, 0x16, 0xbd, 0x2b, 0x8f, 0x15, 0xd3, 0xda, 0xe3, 0x4f, 0x16, 0x90, 0x47, 0xb2, - 0x13, 0xfe, 0xdf, 0x0a, 0xc1, 0x1a, 0xc6, 0xd1, 0xa6, 0x3a, 0x2d, 0x0a, 0x44, 0xa0, 0x87, 0xef, - 0x80, 0x71, 0x65, 0xff, 0x51, 0x20, 0x02, 0x3d, 0x00, 0x33, 0x1a, 0xe6, 0x19, 0xce, 0x63, 0x59, - 0x57, 0x72, 0x00, 0x7a, 0x05, 0x09, 0x81, 0x56, 0x00, 0x69, 0xa0, 0xbf, 0x58, 0xe0, 0x3c, 0x10, - 0xe9, 0x94, 0x85, 0x1e, 0x45, 0x87, 0x15, 0xb8, 0xdb, 0x30, 0xc4, 0xf9, 0xb1, 0x08, 0x79, 0x90, - 0xc6, 0xd1, 0x7c, 0xb2, 0x5e, 0x05, 0x1c, 0x21, 0xbe, 0x81, 0x7c, 0x29, 0x8d, 0x23, 0x59, 0x10, - 0xdb, 0x30, 0xc4, 0x89, 0x32, 0xd7, 0x57, 0x7b, 0x66, 0x90, 0xd0, 0xb3, 0x8a, 0x3e, 0x0a, 0x49, - 0xfd, 0x8e, 0xd2, 0x4f, 0xe8, 0x19, 0xea, 0xbb, 0x1b, 0x70, 0xb5, 0x01, 0x9b, 0x46, 0xfe, 0xbb, - 0x05, 0x6b, 0x0f, 0x38, 0x67, 0x93, 0xe4, 0xeb, 0x34, 0xce, 0xa7, 0xb4, 0x00, 0xbd, 0x0e, 0xdd, - 0x30, 0xcd, 0x13, 0x21, 0xc1, 0x76, 0x3d, 0x75, 0x59, 0x68, 0x88, 0x56, 0xad, 0x21, 0x16, 0x5a, - 0xaa, 0x5d, 0x6f, 0x29, 0xa3, 0x65, 0x3a, 0x95, 0x96, 0xf9, 0x10, 0xfa, 0xf8, 0x30, 0x7e, 0x48, - 0x13, 0x41, 0x33, 0x3d, 0x81, 0x00, 0x49, 0x7b, 0x92, 0xe2, 0xfe, 0x68, 0xc1, 0x7a, 0x15, 0xa9, - 0x5e, 0x80, 0xe7, 0x0e, 0x44, 0x1c, 0x18, 0x59, 0xac, 0x61, 0xe2, 0x11, 0x5b, 0x6f, 0x96, 0x1f, - 0xc5, 0x2c, 0xf4, 0x91, 0xa1, 0xe0, 0xd9, 0x8a, 0xf2, 0x32, 0x8b, 0xe7, 0x41, 0x77, 0xcc, 0xa0, - 0x09, 0x74, 0x82, 0x5c, 0xbc, 0x2a, 0x86, 0x22, 0x9e, 0xdd, 0xcf, 0x60, 0x4d, 0x7d, 0x93, 0x54, - 0xb3, 0xb6, 0x05, 0x70, 0x2a, 0x09, 0x3e, 0x8b, 0xd4, 0x3a, 0xb6, 0x3d, 0x5b, 0x51, 0x9e, 0x44, - 0xdc, 0xfd, 0x02, 0xec, 0x83, 0x54, 0x25, 0x82, 0x93, 0xdb, 0x60, 0xc7, 0xc5, 0x45, 0x6f, 0x6e, - 0x32, 0x6f, 0x8f, 0x42, 0xce, 0x9b, 0x0b, 0xb9, 0xf7, 0x61, 0xb9, 0x20, 0x17, 0xb1, 0x59, 0xe7, - 0xc5, 0xd6, 0x5a, 0x88, 0xcd, 0xfd, 0xd3, 0x82, 0xf5, 0x2a, 0x64, 0x9d, 0xbe, 0x97, 0x30, 0x2c, - 0x5d, 0xf8, 0xd3, 0x60, 0xa6, 0xb1, 0xdc, 0x36, 0xb1, 0xd4, 0xd5, 0x4a, 0x80, 0xfc, 0x69, 0x30, - 0x53, 0x25, 0x35, 0x88, 0x0d, 0xd2, 0xe8, 0x05, 0xac, 0xd6, 0x44, 0x1a, 0x96, 0xf1, 0x4d, 0x73, - 0x19, 0x57, 0x3e, 0x28, 0x4a, 0x6d, 0x73, 0x43, 0xdf, 0x83, 0x2b, 0xaa, 0xff, 0xf6, 0xca, 0xa2, - 0x2b, 0x72, 0x5f, 0xad, 0x4d, 0x6b, 0xb1, 0x36, 0xdd, 0x11, 0x38, 0x75, 0x55, 0xdd, 0x05, 0x13, - 0x58, 0x3d, 0x14, 0x81, 0x60, 0x5c, 0xb0, 0xb0, 0xfc, 0x32, 0x5c, 0x28, 0x66, 0xeb, 0x6d, 0xfb, - 0xa1, 0xde, 0x0e, 0x2b, 0xd0, 0x16, 0xa2, 0xa8, 0x33, 0x3c, 0xe2, 0x2b, 0x10, 0xd3, 0x93, 0x7e, - 0x83, 0x77, 0xe0, 0x0a, 0xeb, 0x41, 0xa4, 0x22, 0x88, 0xd5, 0xfe, 0xed, 0xc8, 0xfd, 0x6b, 0x4b, - 0x8a, 0x5c, 0xc0, 0x6a, 0x45, 0x45, 0x8a, 0xdb, 0x55, 0xdb, 0x19, 0x09, 0x92, 0xb9, 0x05, 0x20, - 0x5b, 0x4a, 0x75, 0x43, 0x4f, 0xe9, 0x22, 0x65, 0x0f, 0x09, 0x77, 0xfe, 0xea, 0xc1, 0xe0, 0x90, - 0x06, 0x67, 0x94, 0x46, 0xb8, 0xfe, 0x33, 0x32, 0x29, 0x6a, 0xab, 0xfa, 0x89, 0x4e, 0x6e, 0x2c, - 0x16, 0x51, 0xe3, 0x3f, 0xc1, 0xe8, 0xe3, 0xb7, 0x89, 0xe9, 0x67, 0xba, 0x44, 0x0e, 0xa0, 0x6f, - 0x7c, 0x03, 0x93, 0x4d, 0x43, 0xb1, 0xf6, 0x69, 0x3f, 0xda, 0x3a, 0x87, 0x6b, 0x5a, 0x33, 0x16, - 0x9d, 0x69, 0xad, 0xbe, 0x5a, 0x4d, 0x6b, 0x4d, 0xdb, 0x51, 0x5a, 0x33, 0x96, 0x98, 0x69, 0xad, - 0xbe, 0x36, 0x4d, 0x6b, 0x4d, 0x9b, 0x4f, 0x5a, 0x33, 0x36, 
0x8d, 0x69, 0xad, 0xbe, 0x11, 0x4d, - 0x6b, 0x4d, 0xeb, 0xe9, 0x12, 0xf9, 0x0e, 0x56, 0x6b, 0x3b, 0x80, 0xb8, 0x73, 0xad, 0xf3, 0x96, - 0xd7, 0x68, 0xfb, 0x42, 0x99, 0xd2, 0xfe, 0x73, 0x18, 0x98, 0xb3, 0x99, 0x18, 0x80, 0x1a, 0xb6, - 0xcb, 0xe8, 0xda, 0x79, 0x6c, 0xd3, 0xa0, 0x39, 0x76, 0x4c, 0x83, 0x0d, 0x83, 0xd7, 0x34, 0xd8, - 0x34, 0xad, 0xdc, 0x4b, 0xe4, 0x5b, 0x58, 0x59, 0x6c, 0x7f, 0x72, 0x7d, 0x31, 0x6d, 0xb5, 0xa9, - 0x32, 0x72, 0x2f, 0x12, 0x29, 0x8d, 0x3f, 0x01, 0x98, 0x77, 0x35, 0xd9, 0x98, 0xeb, 0xd4, 0xa6, - 0xca, 0x68, 0xb3, 0x99, 0x59, 0x98, 0x7a, 0x78, 0x0d, 0x56, 0xb8, 0x6a, 0xad, 0x63, 0xbe, 0x1b, - 0xc6, 0x8c, 0x26, 0xe2, 0x21, 0xc8, 0x2e, 0xfb, 0x0a, 0x7f, 0xb4, 0x8f, 0x7a, 0xf2, 0x7f, 0xfb, - 0xd3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x11, 0xc9, 0xda, 0x7e, 0x0f, 0x00, 0x00, + 0x14, 0xae, 0xf7, 0x2f, 0xeb, 0xb3, 0xbb, 0x6d, 0x32, 0x09, 0x74, 0xbb, 0x49, 0xca, 0xd6, 0xa1, + 0x55, 0x2a, 0xaa, 0xa8, 0x2a, 0x5c, 0xb4, 0x54, 0x48, 0xb4, 0xf9, 0x91, 0x2a, 0xa5, 0x3f, 0x72, + 0x5a, 0x24, 0x84, 0x84, 0xe5, 0xd8, 0x93, 0xcd, 0x28, 0xb3, 0xf6, 0xe2, 0x19, 0x27, 0x0d, 0x8f, + 0xc0, 0x0d, 0xf7, 0x48, 0x5c, 0x70, 0xc5, 0x4b, 0x20, 0x6e, 0x78, 0x0a, 0x5e, 0x82, 0x67, 0x40, + 0x67, 0xc6, 0xf6, 0x8e, 0xd7, 0x9b, 0xb4, 0x08, 0xf5, 0x6e, 0xe6, 0x3b, 0x73, 0xce, 0xf9, 0xe6, + 0xcc, 0xf9, 0xb1, 0xa1, 0x73, 0xc4, 0x38, 0x4d, 0xb6, 0x26, 0x49, 0x2c, 0x63, 0xd2, 0x56, 0x1b, + 0x6f, 0x72, 0xe8, 0xbc, 0x84, 0xd5, 0xfd, 0x38, 0x3e, 0x49, 0x27, 0x3b, 0x2c, 0xa1, 0x81, 0x8c, + 0x93, 0xf3, 0xdd, 0x48, 0x26, 0xe7, 0x2e, 0xfd, 0x21, 0xa5, 0x42, 0x92, 0x35, 0xb0, 0xc3, 0x5c, + 0xd0, 0xb7, 0x86, 0xd6, 0xa6, 0xed, 0x4e, 0x01, 0x42, 0xa0, 0x11, 0xf9, 0x63, 0xda, 0xaf, 0x29, + 0x81, 0x5a, 0x3b, 0xbb, 0xb0, 0x36, 0xdf, 0xa0, 0x98, 0xc4, 0x91, 0xa0, 0xe4, 0x36, 0x34, 0x29, + 0x02, 0xca, 0x5a, 0xe7, 0xc1, 0xb5, 0xad, 0x9c, 0xca, 0x96, 0x3e, 0xa7, 0xa5, 0xce, 0x9f, 0x16, + 0x90, 0x7d, 0x26, 0x24, 0x82, 0x8c, 0x8a, 0xf7, 0xe3, 0xf3, 0x31, 0xb4, 0x26, 0x09, 0x3d, 0x62, + 0x6f, 0x33, 0x46, 0xd9, 0x8e, 0xdc, 0x83, 0x25, 0x21, 0xfd, 0x44, 0xee, 0x25, 0xf1, 0x78, 0x8f, + 0x71, 0xfa, 0x02, 0x49, 0xd7, 0xd5, 0x91, 0xaa, 0x80, 0x6c, 0x01, 0x61, 0x51, 0xc0, 0x53, 0xc1, + 0x4e, 0xe9, 0x41, 0x2e, 0xed, 0x37, 0x86, 0xd6, 0x66, 0xdb, 0x9d, 0x23, 0x21, 0x2b, 0xd0, 0xe4, + 0x6c, 0xcc, 0x64, 0xbf, 0x39, 0xb4, 0x36, 0x7b, 0xae, 0xde, 0x38, 0x5f, 0xc3, 0x72, 0x89, 0x7f, + 0x76, 0xfd, 0xbb, 0xb0, 0x40, 0x35, 0xd4, 0xb7, 0x86, 0xf5, 0x79, 0x01, 0xc8, 0xe5, 0xce, 0xaf, + 0x35, 0x68, 0x2a, 0xa8, 0x88, 0xb3, 0x35, 0x8d, 0x33, 0xb9, 0x05, 0x5d, 0x26, 0xbc, 0x69, 0x30, + 0x6a, 0x8a, 0x5f, 0x87, 0x89, 0x22, 0xee, 0xe4, 0x33, 0x68, 0x05, 0xc7, 0x69, 0x74, 0x22, 0xfa, + 0x75, 0xe5, 0x6a, 0x79, 0xea, 0x0a, 0x2f, 0xbb, 0x8d, 0x32, 0x37, 0x3b, 0x42, 0x1e, 0x02, 0xf8, + 0x52, 0x26, 0xec, 0x30, 0x95, 0x54, 0xa8, 0xdb, 0x76, 0x1e, 0xf4, 0x0d, 0x85, 0x54, 0xd0, 0x27, + 0x85, 0xdc, 0x35, 0xce, 0x92, 0x47, 0xd0, 0xa6, 0x6f, 0x25, 0x8d, 0x42, 0x1a, 0xf6, 0x9b, 0xca, + 0xd1, 0xfa, 0xcc, 0x9d, 0xb6, 0x76, 0x33, 0xb9, 0xbe, 0x61, 0x71, 0x7c, 0xf0, 0x18, 0x7a, 0x25, + 0x11, 0x59, 0x84, 0xfa, 0x09, 0xcd, 0x5f, 0x16, 0x97, 0x18, 0xdd, 0x53, 0x9f, 0xa7, 0x3a, 0xc9, + 0xba, 0xae, 0xde, 0x7c, 0x59, 0x7b, 0x68, 0x39, 0x3b, 0x60, 0xef, 0xa5, 0x9c, 0x17, 0x8a, 0x21, + 0x4b, 0x72, 0xc5, 0x90, 0x25, 0xd3, 0x44, 0xab, 0x5d, 0x9a, 0x68, 0x7f, 0x58, 0xb0, 0xb4, 0x7b, + 0x4a, 0x23, 0xf9, 0x22, 0x96, 0xec, 0x88, 0x05, 0xbe, 0x64, 0x71, 0x44, 0xee, 0x81, 0x1d, 0xf3, + 0xd0, 0xbb, 0x34, 0x53, 0xdb, 0x31, 0xcf, 0x58, 0xdf, 0x03, 0x3b, 0xa2, 0x67, 0xde, 0xa5, 
0xee, + 0xda, 0x11, 0x3d, 0xd3, 0xa7, 0x37, 0xa0, 0x17, 0x52, 0x4e, 0x25, 0xf5, 0x8a, 0xd7, 0xc1, 0xa7, + 0xeb, 0x6a, 0x70, 0x5b, 0x3f, 0xc7, 0x1d, 0xb8, 0x86, 0x26, 0x27, 0x7e, 0x42, 0x23, 0xe9, 0x4d, + 0x7c, 0x79, 0xac, 0xde, 0xc4, 0x76, 0x7b, 0x11, 0x3d, 0x7b, 0xa5, 0xd0, 0x57, 0xbe, 0x3c, 0x76, + 0x7e, 0xb3, 0xc0, 0x2e, 0x1e, 0x93, 0x5c, 0x87, 0x05, 0x74, 0xeb, 0xb1, 0x30, 0x8b, 0x44, 0x0b, + 0xb7, 0xcf, 0x42, 0xac, 0x8c, 0xf8, 0xe8, 0x48, 0x50, 0xa9, 0xe8, 0xd5, 0xdd, 0x6c, 0x87, 0x99, + 0x25, 0xd8, 0x8f, 0xba, 0x18, 0x1a, 0xae, 0x5a, 0x63, 0xc4, 0xc7, 0x92, 0x8d, 0xa9, 0x72, 0x58, + 0x77, 0xf5, 0x86, 0x2c, 0x43, 0x93, 0x7a, 0xd2, 0x1f, 0xa9, 0x2c, 0xb7, 0xdd, 0x06, 0x7d, 0xed, + 0x8f, 0xc8, 0xa7, 0x70, 0x55, 0xc4, 0x69, 0x12, 0x50, 0x2f, 0x77, 0xdb, 0x52, 0xd2, 0xae, 0x46, + 0xf7, 0x94, 0x73, 0xe7, 0x9f, 0x1a, 0x5c, 0x2d, 0xe7, 0x0f, 0x59, 0x05, 0x5b, 0x69, 0x28, 0xe7, + 0x96, 0x72, 0xae, 0x7a, 0xd2, 0x41, 0x89, 0x40, 0xcd, 0x24, 0x90, 0xab, 0x8c, 0xe3, 0x50, 0xf3, + 0xed, 0x69, 0x95, 0xe7, 0x71, 0x48, 0xf1, 0xf9, 0x53, 0x16, 0x2a, 0xc6, 0x3d, 0x17, 0x97, 0x88, + 0x8c, 0x58, 0x98, 0xd5, 0x24, 0x2e, 0x31, 0x06, 0x41, 0xa2, 0xec, 0xb6, 0x74, 0x0c, 0xf4, 0x0e, + 0x63, 0x30, 0x46, 0x74, 0x41, 0x5f, 0x0c, 0xd7, 0x64, 0x08, 0x9d, 0x84, 0x4e, 0x78, 0x96, 0x0e, + 0xfd, 0xb6, 0x12, 0x99, 0x10, 0xb9, 0x09, 0x10, 0xc4, 0x9c, 0xd3, 0x40, 0x1d, 0xb0, 0xd5, 0x01, + 0x03, 0xc1, 0xa7, 0x90, 0x92, 0x7b, 0x82, 0x06, 0x7d, 0x18, 0x5a, 0x9b, 0x4d, 0xb7, 0x25, 0x25, + 0x3f, 0xa0, 0x01, 0xde, 0x23, 0x15, 0x34, 0xf1, 0x54, 0x45, 0x77, 0x94, 0x5e, 0x1b, 0x01, 0xd5, + 0x7b, 0xd6, 0x01, 0x46, 0x49, 0x9c, 0x4e, 0xb4, 0xb4, 0x3b, 0xac, 0x63, 0x83, 0x53, 0x88, 0x12, + 0xdf, 0x86, 0xab, 0xe2, 0x7c, 0xcc, 0x59, 0x74, 0xe2, 0x49, 0x3f, 0x19, 0x51, 0xd9, 0xef, 0xe9, + 0xa4, 0xc8, 0xd0, 0xd7, 0x0a, 0x74, 0xbe, 0x05, 0xb2, 0x9d, 0x50, 0x5f, 0xd2, 0xff, 0xd0, 0xcb, + 0xdf, 0xb3, 0x5c, 0x3e, 0x82, 0xe5, 0x92, 0x69, 0xdd, 0xd6, 0xd0, 0xe3, 0x9b, 0x49, 0xf8, 0xa1, + 0x3c, 0x96, 0x4c, 0x67, 0x1e, 0x7f, 0xb6, 0x80, 0xec, 0xa8, 0x8a, 0xf9, 0x7f, 0x03, 0x0b, 0x73, + 0x18, 0x1b, 0xa9, 0xae, 0xc8, 0xd0, 0x97, 0x7e, 0xd6, 0xea, 0xbb, 0x4c, 0x68, 0xfb, 0x3b, 0xbe, + 0xf4, 0xb3, 0x76, 0x9b, 0xd0, 0x20, 0x4d, 0xb0, 0xfb, 0xab, 0xbc, 0x52, 0xed, 0xd6, 0xcd, 0x21, + 0x24, 0x5a, 0x22, 0x94, 0x11, 0xfd, 0xc5, 0x82, 0xfe, 0x13, 0x19, 0x8f, 0x59, 0xe0, 0x52, 0x74, + 0x58, 0xa2, 0xbb, 0x01, 0x3d, 0xec, 0x33, 0xb3, 0x94, 0xbb, 0x31, 0x0f, 0xa7, 0x7d, 0xfc, 0x06, + 0x60, 0xab, 0xf1, 0x0c, 0xe6, 0x0b, 0x31, 0x0f, 0x55, 0x42, 0x6c, 0x00, 0xf6, 0x03, 0x43, 0x5f, + 0x4f, 0xb5, 0x6e, 0x44, 0xcf, 0x4a, 0xfa, 0x78, 0x48, 0xe9, 0xeb, 0x26, 0xb2, 0x10, 0xd1, 0x33, + 0xd4, 0x77, 0x56, 0xe1, 0xc6, 0x1c, 0x6e, 0x19, 0xf3, 0xdf, 0x2d, 0x58, 0x7e, 0x22, 0x04, 0x1b, + 0x45, 0xdf, 0xc4, 0x3c, 0x1d, 0xd3, 0x9c, 0xf4, 0x0a, 0x34, 0x83, 0x38, 0x8d, 0xa4, 0x22, 0xdb, + 0x74, 0xf5, 0x66, 0xa6, 0x20, 0x6a, 0x95, 0x82, 0x98, 0x29, 0xa9, 0x7a, 0xb5, 0xa4, 0x8c, 0x92, + 0x69, 0x94, 0x4a, 0xe6, 0x13, 0xe8, 0xe0, 0xc3, 0x78, 0x01, 0x8d, 0x24, 0x4d, 0xb2, 0x0e, 0x04, + 0x08, 0x6d, 0x2b, 0xc4, 0xf9, 0xc9, 0x82, 0x95, 0x32, 0xd3, 0x6c, 0xdc, 0x5e, 0xd8, 0x10, 0xb1, + 0x61, 0x24, 0x3c, 0xa3, 0x89, 0x4b, 0x2c, 0xbd, 0x49, 0x7a, 0xc8, 0x59, 0xe0, 0xa1, 0x40, 0xd3, + 0xb3, 0x35, 0xf2, 0x26, 0xe1, 0xd3, 0x4b, 0x37, 0xcc, 0x4b, 0x13, 0x68, 0xf8, 0xa9, 0x3c, 0xce, + 0x9b, 0x22, 0xae, 0x9d, 0x2f, 0x60, 0x59, 0x7f, 0x01, 0x95, 0xa3, 0xb6, 0x0e, 0x70, 0xaa, 0x00, + 0x8f, 0x85, 0x7a, 0xf8, 0xdb, 0xae, 0xad, 0x91, 0x67, 0xa1, 0x70, 0xbe, 0x02, 0x7b, 0x3f, 0xd6, + 0x81, 0x10, 0xe4, 
0x3e, 0xd8, 0x3c, 0xdf, 0x64, 0xdf, 0x09, 0x64, 0x5a, 0x1e, 0xf9, 0x39, 0x77, + 0x7a, 0xc8, 0x79, 0x0c, 0xed, 0x1c, 0xce, 0xef, 0x66, 0x5d, 0x74, 0xb7, 0xda, 0xcc, 0xdd, 0x9c, + 0xbf, 0x2c, 0x58, 0x29, 0x53, 0xce, 0xc2, 0xf7, 0x06, 0x7a, 0x85, 0x0b, 0x6f, 0xec, 0x4f, 0x32, + 0x2e, 0xf7, 0x4d, 0x2e, 0x55, 0xb5, 0x82, 0xa0, 0x78, 0xee, 0x4f, 0x74, 0x4a, 0x75, 0xb9, 0x01, + 0x0d, 0x5e, 0xc3, 0x52, 0xe5, 0xc8, 0x9c, 0xd1, 0x7f, 0xd7, 0x1c, 0xfd, 0xa5, 0xcf, 0x97, 0x42, + 0xdb, 0xfc, 0x1e, 0x78, 0x04, 0xd7, 0x75, 0xfd, 0x6d, 0x17, 0x49, 0x97, 0xc7, 0xbe, 0x9c, 0x9b, + 0xd6, 0x6c, 0x6e, 0x3a, 0x03, 0xe8, 0x57, 0x55, 0xb3, 0x2a, 0x18, 0xc1, 0xd2, 0x81, 0xf4, 0x25, + 0x13, 0x92, 0x05, 0xc5, 0x77, 0xe8, 0x4c, 0x32, 0x5b, 0xef, 0x9a, 0x0f, 0xd5, 0x72, 0x58, 0x84, + 0xba, 0x94, 0x79, 0x9e, 0xe1, 0x12, 0x5f, 0x81, 0x98, 0x9e, 0xb2, 0x37, 0xf8, 0x00, 0xae, 0x30, + 0x1f, 0x64, 0x2c, 0x7d, 0xae, 0xe7, 0x6f, 0x43, 0xcd, 0x5f, 0x5b, 0x21, 0x6a, 0x00, 0xeb, 0x11, + 0x15, 0x6a, 0x69, 0x53, 0x4f, 0x67, 0x04, 0x94, 0x70, 0x1d, 0x40, 0x95, 0x94, 0xae, 0x86, 0x96, + 0xd6, 0x45, 0x64, 0x1b, 0x81, 0x07, 0x7f, 0xb7, 0xa0, 0x7b, 0x40, 0xfd, 0x33, 0x4a, 0x43, 0x1c, + 0xff, 0x09, 0x19, 0xe5, 0xb9, 0x55, 0xfe, 0x21, 0x20, 0xb7, 0x67, 0x93, 0x68, 0xee, 0x1f, 0xc8, + 0xe0, 0xce, 0xbb, 0x8e, 0x65, 0xcf, 0x74, 0x85, 0xec, 0x43, 0xc7, 0xf8, 0xe2, 0x26, 0x6b, 0x86, + 0x62, 0xe5, 0x47, 0x62, 0xb0, 0x7e, 0x81, 0xd4, 0xb4, 0x66, 0x0c, 0x3a, 0xd3, 0x5a, 0x75, 0xb4, + 0x9a, 0xd6, 0xe6, 0x4d, 0x47, 0x65, 0xcd, 0x18, 0x62, 0xa6, 0xb5, 0xea, 0xd8, 0x34, 0xad, 0xcd, + 0x9b, 0x7c, 0xca, 0x9a, 0x31, 0x69, 0x4c, 0x6b, 0xd5, 0x89, 0x68, 0x5a, 0x9b, 0x37, 0x9e, 0xae, + 0x90, 0xef, 0x61, 0xa9, 0x32, 0x03, 0x88, 0x33, 0xd5, 0xba, 0x68, 0x78, 0x0d, 0x36, 0x2e, 0x3d, + 0x53, 0xd8, 0x7f, 0x09, 0x5d, 0xb3, 0x37, 0x13, 0x83, 0xd0, 0x9c, 0xe9, 0x32, 0xb8, 0x79, 0x91, + 0xd8, 0x34, 0x68, 0xb6, 0x1d, 0xd3, 0xe0, 0x9c, 0xc6, 0x6b, 0x1a, 0x9c, 0xd7, 0xad, 0x9c, 0x2b, + 0xe4, 0x3b, 0x58, 0x9c, 0x2d, 0x7f, 0x72, 0x6b, 0x36, 0x6c, 0x95, 0xae, 0x32, 0x70, 0x2e, 0x3b, + 0x52, 0x18, 0x7f, 0x06, 0x30, 0xad, 0x6a, 0xb2, 0x3a, 0xd5, 0xa9, 0x74, 0x95, 0xc1, 0xda, 0x7c, + 0x61, 0x6e, 0xea, 0xe9, 0x4d, 0x58, 0x14, 0xba, 0xb4, 0x8e, 0xc4, 0x56, 0xc0, 0x19, 0x8d, 0xe4, + 0x53, 0x50, 0x55, 0xf6, 0x0a, 0x7f, 0xeb, 0x0f, 0x5b, 0xea, 0xef, 0xfe, 0xf3, 0x7f, 0x03, 0x00, + 0x00, 0xff, 0xff, 0x34, 0x05, 0x1f, 0x0d, 0xec, 0x0f, 0x00, 0x00, } diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 10d3409b8..88252a1a7 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1441,79 +1441,79 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0x4b, 0x73, 0xdc, 0x44, - 0x10, 0xb6, 0xb2, 0xbb, 0x7e, 0xf4, 0xae, 0xe3, 0x65, 0xfc, 0x92, 0xe5, 0x60, 0x36, 0x03, 0x4e, - 0xd6, 0x89, 0x63, 0xc0, 0x29, 0x20, 0xe1, 0x04, 0xd8, 0xa4, 0xf0, 0x21, 0xa4, 0x4a, 0x26, 0x29, - 0xaa, 0xa0, 0x4a, 0x35, 0x96, 0x66, 0x6d, 0x95, 0xb5, 0x1a, 0x45, 0x1a, 0x39, 0x31, 0x7f, 0x87, - 0x1b, 0x57, 0xae, 0xfc, 0x17, 0x7e, 0x08, 0x17, 0x6a, 0x1e, 0x92, 0x57, 0x8f, 0xcd, 0x8a, 0xc7, - 0x6d, 0xd4, 0xd3, 0xfd, 0x75, 0xf7, 0x4c, 0x77, 0xcf, 0x57, 0x82, 0xd5, 0x2b, 0x16, 0xa4, 0x63, - 0xea, 0x24, 0x34, 0xbe, 0xa2, 0xf1, 0x41, 0x14, 0x33, 0xce, 0x50, 0xbf, 0x20, 0x74, 0xa2, 
0x33, - 0xfc, 0x31, 0xa0, 0x6f, 0x08, 0x77, 0x2f, 0x8e, 0x69, 0x40, 0x39, 0xb5, 0xe9, 0xeb, 0x94, 0x26, - 0x1c, 0x6d, 0xc1, 0xe2, 0xc8, 0x0f, 0xa8, 0xe3, 0x7b, 0x89, 0x69, 0x0c, 0x5a, 0xc3, 0x25, 0x7b, - 0x41, 0x7c, 0x9f, 0x78, 0x09, 0x7e, 0x01, 0xab, 0x05, 0x83, 0x24, 0x62, 0x61, 0x42, 0xd1, 0x13, - 0x58, 0x88, 0x69, 0x92, 0x06, 0x5c, 0x19, 0x74, 0x0f, 0x77, 0x0e, 0xca, 0xbe, 0x0e, 0x72, 0x93, - 0x34, 0xe0, 0x76, 0xa6, 0x8e, 0x7d, 0xe8, 0x4d, 0x6e, 0xa0, 0x4d, 0x58, 0xd0, 0xbe, 0x4d, 0x63, - 0x60, 0x0c, 0x97, 0xec, 0x79, 0xe5, 0x1a, 0x6d, 0xc0, 0x7c, 0xc2, 0x09, 0x4f, 0x13, 0xf3, 0xd6, - 0xc0, 0x18, 0x76, 0x6c, 0xfd, 0x85, 0xd6, 0xa0, 0x43, 0xe3, 0x98, 0xc5, 0x66, 0x4b, 0xaa, 0xab, - 0x0f, 0x84, 0xa0, 0x9d, 0xf8, 0xbf, 0x50, 0xb3, 0x3d, 0x30, 0x86, 0xcb, 0xb6, 0x5c, 0xe3, 0x05, - 0xe8, 0x7c, 0x3b, 0x8e, 0xf8, 0x35, 0xfe, 0x02, 0xcc, 0x57, 0xc4, 0x4d, 0xd3, 0xf1, 0x2b, 0x19, - 0xe3, 0xd1, 0x05, 0x75, 0x2f, 0xb3, 0xdc, 0xb7, 0x61, 0x49, 0x47, 0xae, 0x23, 0x58, 0xb6, 0x17, - 0x95, 0xe0, 0xc4, 0xc3, 0x5f, 0xc1, 0x56, 0x8d, 0xa1, 0x3e, 0x83, 0x0f, 0x61, 0xf9, 0x9c, 0xc4, - 0x67, 0xe4, 0x9c, 0x3a, 0x31, 0xe1, 0x3e, 0x93, 0xd6, 0x86, 0xdd, 0xd3, 0x42, 0x5b, 0xc8, 0xf0, - 0x4f, 0x60, 0x15, 0x10, 0xd8, 0x38, 0x22, 0x2e, 0x6f, 0xe2, 0x1c, 0x0d, 0xa0, 0x1b, 0xc5, 0x94, - 0x04, 0x01, 0x73, 0x09, 0xa7, 0xf2, 0x14, 0x5a, 0xf6, 0xa4, 0x08, 0xbf, 0x0f, 0xdb, 0xb5, 0xe0, - 0x2a, 0x40, 0xfc, 0xa4, 0x14, 0x3d, 0x1b, 0x8f, 0xfd, 0x46, 0xae, 0xf1, 0x9d, 0x4a, 0xd4, 0xd2, - 0x52, 0xe3, 0x3e, 0x2d, 0xed, 0x06, 0x94, 0x84, 0x69, 0xd4, 0x08, 0xb8, 0x1c, 0x71, 0x66, 0x9a, - 0x23, 0x6f, 0xaa, 0xe2, 0x38, 0x62, 0x41, 0x40, 0x5d, 0xee, 0xb3, 0x30, 0x83, 0xdd, 0x01, 0x70, - 0x73, 0xa1, 0x2e, 0x95, 0x09, 0x09, 0xb6, 0xc0, 0xac, 0x9a, 0x6a, 0xd8, 0xdf, 0x0c, 0x58, 0xff, - 0x5a, 0x1f, 0x9a, 0x72, 0xdc, 0xe8, 0x02, 0x8a, 0x2e, 0x6f, 0x95, 0x5d, 0x96, 0x2f, 0xa8, 0x55, - 0xb9, 0x20, 0xa1, 0x11, 0xd3, 0x28, 0xf0, 0x5d, 0x22, 0x21, 0xda, 0x12, 0x62, 0x52, 0x84, 0xfa, - 0xd0, 0xe2, 0x3c, 0x30, 0x3b, 0x72, 0x47, 0x2c, 0xb1, 0x09, 0x1b, 0xe5, 0x58, 0x75, 0x1a, 0x9f, - 0xc3, 0xa6, 0x92, 0x9c, 0x5e, 0x87, 0xee, 0xa9, 0xec, 0x86, 0x46, 0x87, 0xfe, 0x97, 0x01, 0x66, - 0xd5, 0x50, 0x57, 0xf1, 0x7f, 0x3d, 0x81, 0x7f, 0x9a, 0x1f, 0xfa, 0x00, 0xba, 0x9c, 0xf8, 0x81, - 0xc3, 0x46, 0xa3, 0x84, 0x72, 0x73, 0x7e, 0x60, 0x0c, 0xdb, 0x36, 0x08, 0xd1, 0x0b, 0x29, 0x41, - 0x7b, 0xd0, 0x77, 0x55, 0x25, 0x3b, 0x31, 0xbd, 0xf2, 0x13, 0x81, 0xbc, 0x20, 0x03, 0x5b, 0x71, - 0xb3, 0x0a, 0x57, 0x62, 0x84, 0x61, 0xd9, 0xf7, 0xde, 0x3a, 0x72, 0x80, 0xc8, 0xf6, 0x5f, 0x94, - 0x68, 0x5d, 0xdf, 0x7b, 0xfb, 0xcc, 0x0f, 0xe8, 0xa9, 0x98, 0x02, 0xdf, 0xc1, 0xaa, 0x4a, 0xfe, - 0x19, 0x0b, 0x02, 0xf6, 0xa6, 0xd1, 0xcd, 0xaf, 0x41, 0x27, 0xf1, 0x43, 0x57, 0x35, 0x5d, 0xdb, - 0x56, 0x1f, 0xf8, 0x29, 0xac, 0x15, 0x91, 0xf4, 0x11, 0xde, 0x85, 0x9e, 0x8c, 0xc0, 0x65, 0x21, - 0xa7, 0x21, 0x97, 0x68, 0x3d, 0xbb, 0x2b, 0x64, 0x47, 0x4a, 0x84, 0x3f, 0x05, 0xa4, 0x4c, 0x9f, - 0xb3, 0x34, 0x6c, 0xd6, 0x83, 0xeb, 0x59, 0xdc, 0xda, 0x44, 0x17, 0xc1, 0xe3, 0x2c, 0x88, 0x97, - 0xe1, 0xb8, 0x31, 0xd6, 0x26, 0xac, 0x97, 0x8c, 0x34, 0xda, 0x61, 0xe6, 0xa4, 0xf8, 0x20, 0xbc, - 0x13, 0x6c, 0x23, 0x8b, 0xa0, 0xf8, 0x26, 0xe0, 0xdf, 0x0d, 0xd8, 0xb0, 0xf5, 0xd5, 0xff, 0xcf, - 0x6d, 0x36, 0x59, 0x64, 0xad, 0xa9, 0x45, 0xd6, 0xbe, 0x29, 0xb2, 0x21, 0xf4, 0x13, 0x96, 0xc6, - 0x2e, 0x75, 0x3c, 0xc2, 0x89, 0x13, 0x32, 0x8f, 0xea, 0x1a, 0xbc, 0xad, 0xe4, 0xc7, 0x84, 0x93, - 0xef, 0x99, 0x47, 0xf1, 0x16, 0x6c, 0x56, 0x82, 0xd6, 0x09, 0x85, 0xb0, 0x72, 0xc4, 0xa2, 0x6b, - 0x51, 0x49, 0x0d, 
0x13, 0xe9, 0xfa, 0x89, 0x93, 0x15, 0xa4, 0xcc, 0x64, 0xd1, 0x5e, 0xf2, 0x93, - 0x13, 0x55, 0x8d, 0x7a, 0xdf, 0x23, 0x5c, 0xed, 0xb7, 0xb2, 0xfd, 0x63, 0xc2, 0xc5, 0x3e, 0xfe, - 0x0c, 0xfa, 0x37, 0xfe, 0x9a, 0xd7, 0xd6, 0x97, 0xb0, 0x6d, 0x53, 0xe2, 0xe9, 0xd2, 0x14, 0x65, - 0xdf, 0x7c, 0x34, 0xfc, 0x69, 0xc0, 0x9d, 0x7a, 0xe3, 0x26, 0xe3, 0x61, 0x1f, 0x50, 0xde, 0x7e, - 0xdc, 0x1f, 0xd3, 0x84, 0x93, 0x71, 0xa4, 0x7b, 0xa6, 0xaf, 0x7b, 0xf0, 0x87, 0x4c, 0x5e, 0x6d, - 0xd6, 0x56, 0xa5, 0x59, 0x05, 0x62, 0x76, 0x3e, 0x13, 0x88, 0x6d, 0x85, 0xe8, 0xa9, 0x73, 0x2a, - 0x20, 0xe6, 0xda, 0x12, 0xb1, 0xa3, 0x10, 0xb5, 0xa2, 0x6c, 0xff, 0x1f, 0x01, 0x8e, 0xfd, 0xe4, - 0x52, 0xa5, 0x25, 0x2a, 0xc5, 0xf3, 0x63, 0xfd, 0x7c, 0x88, 0xa5, 0x90, 0x90, 0x20, 0xd0, 0x41, - 0x8b, 0xa5, 0xa0, 0x12, 0x69, 0x42, 0x3d, 0x1d, 0x9e, 0x5c, 0x0b, 0xd9, 0x28, 0xa6, 0x54, 0x47, - 0x22, 0xd7, 0xf8, 0x57, 0x03, 0x96, 0x9e, 0xd3, 0xb1, 0x46, 0xde, 0x01, 0x38, 0x67, 0x31, 0x4b, - 0xb9, 0x1f, 0xd2, 0x44, 0x3a, 0xe8, 0xd8, 0x13, 0x92, 0x7f, 0xef, 0x47, 0x52, 0x1b, 0x1a, 0x8c, - 0x74, 0x72, 0x72, 0x2d, 0x64, 0x17, 0x94, 0x44, 0x7a, 0x7a, 0xca, 0xb5, 0x1c, 0x5a, 0x9c, 0xb8, - 0x97, 0x72, 0x58, 0x8a, 0xa1, 0x25, 0x3e, 0x0e, 0xff, 0xe8, 0x41, 0x4f, 0x0f, 0x7f, 0xc9, 0xcc, - 0xd0, 0xcf, 0xd0, 0x9d, 0x60, 0x74, 0xe8, 0xa3, 0x2a, 0x71, 0xab, 0x32, 0x44, 0x6b, 0x77, 0x86, - 0x96, 0xee, 0x98, 0x39, 0x14, 0xc2, 0x7b, 0x15, 0xc6, 0x84, 0x1e, 0x54, 0xad, 0xa7, 0xf1, 0x31, - 0xeb, 0x61, 0x23, 0xdd, 0xdc, 0x1f, 0x87, 0xd5, 0x1a, 0x0a, 0x84, 0xf6, 0x67, 0xa0, 0x14, 0x68, - 0x98, 0xf5, 0xa8, 0xa1, 0x76, 0xee, 0xf5, 0x35, 0xa0, 0x2a, 0x3f, 0x42, 0x0f, 0x67, 0xc2, 0xdc, - 0xf0, 0x2f, 0x6b, 0xbf, 0x99, 0xf2, 0xd4, 0x44, 0x15, 0x73, 0x9a, 0x99, 0x68, 0x81, 0x9b, 0xcd, - 0x4c, 0xb4, 0x44, 0xc7, 0xe6, 0xd0, 0x25, 0xf4, 0xcb, 0xac, 0x0a, 0xed, 0x4d, 0xa3, 0xfa, 0x15, - 0xd2, 0x66, 0x3d, 0x68, 0xa2, 0x9a, 0x3b, 0xa3, 0x70, 0xbb, 0xc8, 0x7c, 0xd0, 0xfd, 0xaa, 0x7d, - 0x2d, 0x8f, 0xb3, 0x86, 0xb3, 0x15, 0x27, 0x73, 0x2a, 0xb3, 0xa1, 0xba, 0x9c, 0xa6, 0x50, 0xad, - 0xba, 0x9c, 0xa6, 0x91, 0x2b, 0x3c, 0x87, 0x48, 0xd6, 0x7d, 0x8a, 0x33, 0xa0, 0xdd, 0x69, 0xd6, - 0x05, 0x76, 0x62, 0xdd, 0x9b, 0xa5, 0x96, 0x39, 0xf8, 0xc4, 0x10, 0x0d, 0x3d, 0x41, 0x14, 0xea, - 0x1a, 0xba, 0x4a, 0x3d, 0xac, 0xdd, 0x19, 0x5a, 0x79, 0x02, 0x67, 0xb0, 0x5c, 0xa0, 0x0e, 0x68, - 0x6a, 0x68, 0x45, 0x42, 0x62, 0xdd, 0x9f, 0xa9, 0x97, 0xfb, 0x70, 0xb2, 0x43, 0xd2, 0x33, 0x69, - 0x6a, 0x70, 0xc5, 0xa1, 0x74, 0x6f, 0x96, 0x5a, 0xee, 0xe0, 0x02, 0x56, 0x4a, 0x8f, 0x3c, 0xaa, - 0xa9, 0x98, 0x7a, 0xf2, 0x62, 0xed, 0x35, 0xd0, 0xcc, 0x3d, 0xbd, 0x81, 0xb5, 0xba, 0xf7, 0x14, - 0x3d, 0xaa, 0x03, 0x99, 0xfa, 0x68, 0x5b, 0x07, 0x4d, 0xd5, 0x73, 0xc7, 0x2f, 0x61, 0x31, 0x23, - 0x0f, 0xe8, 0x6e, 0xd5, 0xba, 0x44, 0x64, 0x2c, 0xfc, 0x2e, 0x95, 0x9b, 0xe2, 0x3a, 0x9b, 0x97, - 0x7f, 0x12, 0x1e, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xdf, 0xd7, 0x82, 0x60, 0x10, 0x00, - 0x00, + // 1183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x58, 0x5b, 0x73, 0xdb, 0xc4, + 0x17, 0x8f, 0x6a, 0x3b, 0x89, 0x8f, 0x9d, 0xc6, 0xff, 0xcd, 0x4d, 0x51, 0xda, 0xfc, 0xdd, 0x85, + 0xb4, 0x4e, 0x9b, 0x06, 0x48, 0x07, 0x68, 0x79, 0x02, 0x12, 0x3a, 0xe4, 0xa1, 0x74, 0x46, 0xa1, + 0x1d, 0x66, 0x60, 0x46, 0xb3, 0x91, 0x36, 0x89, 0x26, 0xb2, 0x56, 0x95, 0x56, 0x69, 0xc3, 0xd7, + 0xe1, 0x8d, 0x57, 0x5e, 0xf9, 0x66, 0xcc, 0x30, 0xcc, 0x5e, 0xa4, 0x58, 0x17, 0xd7, 0xe2, 0xf2, + 0xb6, 0x3a, 0x7b, 0xce, 0xef, 0x5c, 0xf6, 0x9c, 0xdd, 0x9f, 0x0d, 0x2b, 0x57, 0x2c, 
0x48, 0xc7, + 0xd4, 0x49, 0x68, 0x7c, 0x45, 0xe3, 0xfd, 0x28, 0x66, 0x9c, 0xa1, 0x41, 0x41, 0xe8, 0x44, 0xa7, + 0xf8, 0x23, 0x40, 0x5f, 0x13, 0xee, 0x5e, 0x1c, 0xd1, 0x80, 0x72, 0x6a, 0xd3, 0x37, 0x29, 0x4d, + 0x38, 0xda, 0x84, 0xc5, 0x33, 0x3f, 0xa0, 0x8e, 0xef, 0x25, 0xa6, 0x31, 0x6c, 0x8d, 0xba, 0xf6, + 0x82, 0xf8, 0x3e, 0xf6, 0x12, 0xfc, 0x12, 0x56, 0x0a, 0x06, 0x49, 0xc4, 0xc2, 0x84, 0xa2, 0xa7, + 0xb0, 0x10, 0xd3, 0x24, 0x0d, 0xb8, 0x32, 0xe8, 0x1d, 0x6c, 0xef, 0x97, 0x7d, 0xed, 0xe7, 0x26, + 0x69, 0xc0, 0xed, 0x4c, 0x1d, 0xfb, 0xd0, 0x9f, 0xdc, 0x40, 0x1b, 0xb0, 0xa0, 0x7d, 0x9b, 0xc6, + 0xd0, 0x18, 0x75, 0xed, 0x79, 0xe5, 0x1a, 0xad, 0xc3, 0x7c, 0xc2, 0x09, 0x4f, 0x13, 0xf3, 0xd6, + 0xd0, 0x18, 0x75, 0x6c, 0xfd, 0x85, 0x56, 0xa1, 0x43, 0xe3, 0x98, 0xc5, 0x66, 0x4b, 0xaa, 0xab, + 0x0f, 0x84, 0xa0, 0x9d, 0xf8, 0x3f, 0x53, 0xb3, 0x3d, 0x34, 0x46, 0x4b, 0xb6, 0x5c, 0xe3, 0x05, + 0xe8, 0x7c, 0x33, 0x8e, 0xf8, 0x35, 0xfe, 0x1c, 0xcc, 0xd7, 0xc4, 0x4d, 0xd3, 0xf1, 0x6b, 0x19, + 0xe3, 0xe1, 0x05, 0x75, 0x2f, 0xb3, 0xdc, 0xb7, 0xa0, 0xab, 0x23, 0xd7, 0x11, 0x2c, 0xd9, 0x8b, + 0x4a, 0x70, 0xec, 0xe1, 0x2f, 0x61, 0xb3, 0xc6, 0x50, 0xd7, 0xe0, 0x03, 0x58, 0x3a, 0x27, 0xf1, + 0x29, 0x39, 0xa7, 0x4e, 0x4c, 0xb8, 0xcf, 0xa4, 0xb5, 0x61, 0xf7, 0xb5, 0xd0, 0x16, 0x32, 0xfc, + 0x23, 0x58, 0x05, 0x04, 0x36, 0x8e, 0x88, 0xcb, 0x9b, 0x38, 0x47, 0x43, 0xe8, 0x45, 0x31, 0x25, + 0x41, 0xc0, 0x5c, 0xc2, 0xa9, 0xac, 0x42, 0xcb, 0x9e, 0x14, 0xe1, 0xbb, 0xb0, 0x55, 0x0b, 0xae, + 0x02, 0xc4, 0x4f, 0x4b, 0xd1, 0xb3, 0xf1, 0xd8, 0x6f, 0xe4, 0x1a, 0xdf, 0xa9, 0x44, 0x2d, 0x2d, + 0x35, 0xee, 0xb3, 0xd2, 0x6e, 0x40, 0x49, 0x98, 0x46, 0x8d, 0x80, 0xcb, 0x11, 0x67, 0xa6, 0x39, + 0xf2, 0x86, 0x6a, 0x8e, 0x43, 0x16, 0x04, 0xd4, 0xe5, 0x3e, 0x0b, 0x33, 0xd8, 0x6d, 0x00, 0x37, + 0x17, 0xea, 0x56, 0x99, 0x90, 0x60, 0x0b, 0xcc, 0xaa, 0xa9, 0x86, 0xfd, 0xd5, 0x80, 0xb5, 0xaf, + 0x74, 0xd1, 0x94, 0xe3, 0x46, 0x07, 0x50, 0x74, 0x79, 0xab, 0xec, 0xb2, 0x7c, 0x40, 0xad, 0xca, + 0x01, 0x09, 0x8d, 0x98, 0x46, 0x81, 0xef, 0x12, 0x09, 0xd1, 0x96, 0x10, 0x93, 0x22, 0x34, 0x80, + 0x16, 0xe7, 0x81, 0xd9, 0x91, 0x3b, 0x62, 0x89, 0x4d, 0x58, 0x2f, 0xc7, 0xaa, 0xd3, 0xf8, 0x0c, + 0x36, 0x94, 0xe4, 0xe4, 0x3a, 0x74, 0x4f, 0xe4, 0x34, 0x34, 0x2a, 0xfa, 0x1f, 0x06, 0x98, 0x55, + 0x43, 0xdd, 0xc5, 0xff, 0xb6, 0x02, 0x7f, 0x37, 0x3f, 0xf4, 0x7f, 0xe8, 0x71, 0xe2, 0x07, 0x0e, + 0x3b, 0x3b, 0x4b, 0x28, 0x37, 0xe7, 0x87, 0xc6, 0xa8, 0x6d, 0x83, 0x10, 0xbd, 0x94, 0x12, 0xb4, + 0x0b, 0x03, 0x57, 0x75, 0xb2, 0x13, 0xd3, 0x2b, 0x3f, 0x11, 0xc8, 0x0b, 0x32, 0xb0, 0x65, 0x37, + 0xeb, 0x70, 0x25, 0x46, 0x18, 0x96, 0x7c, 0xef, 0x9d, 0x23, 0x2f, 0x10, 0x39, 0xfe, 0x8b, 0x12, + 0xad, 0xe7, 0x7b, 0xef, 0x9e, 0xfb, 0x01, 0x3d, 0x11, 0xb7, 0xc0, 0xb7, 0xb0, 0xa2, 0x92, 0x7f, + 0xce, 0x82, 0x80, 0xbd, 0x6d, 0x74, 0xf2, 0xab, 0xd0, 0x49, 0xfc, 0xd0, 0x55, 0x43, 0xd7, 0xb6, + 0xd5, 0x07, 0x7e, 0x06, 0xab, 0x45, 0x24, 0x5d, 0xc2, 0x7b, 0xd0, 0x97, 0x11, 0xb8, 0x2c, 0xe4, + 0x34, 0xe4, 0x12, 0xad, 0x6f, 0xf7, 0x84, 0xec, 0x50, 0x89, 0xf0, 0x27, 0x80, 0x94, 0xe9, 0x0b, + 0x96, 0x86, 0xcd, 0x66, 0x70, 0x2d, 0x8b, 0x5b, 0x9b, 0xe8, 0x26, 0x78, 0x92, 0x05, 0xf1, 0x2a, + 0x1c, 0x37, 0xc6, 0xda, 0x80, 0xb5, 0x92, 0x91, 0x46, 0x3b, 0xc8, 0x9c, 0x14, 0x1f, 0x84, 0xf7, + 0x82, 0xad, 0x67, 0x11, 0x14, 0xdf, 0x04, 0xfc, 0x9b, 0x01, 0xeb, 0xb6, 0x3e, 0xfa, 0xff, 0x78, + 0xcc, 0x26, 0x9b, 0xac, 0x35, 0xb5, 0xc9, 0xda, 0x37, 0x4d, 0x36, 0x82, 0x41, 0xc2, 0xd2, 0xd8, + 0xa5, 0x8e, 0x47, 0x38, 0x71, 0x42, 0xe6, 0x51, 0xdd, 0x83, 0xb7, 0x95, 0xfc, 0x88, 0x70, 0xf2, + 0x1d, 0xf3, 
0x28, 0xde, 0x84, 0x8d, 0x4a, 0xd0, 0x3a, 0xa1, 0x10, 0x96, 0x0f, 0x59, 0x74, 0x2d, + 0x3a, 0xa9, 0x61, 0x22, 0x3d, 0x3f, 0x71, 0xb2, 0x86, 0x94, 0x99, 0x2c, 0xda, 0x5d, 0x3f, 0x39, + 0x56, 0xdd, 0xa8, 0xf7, 0x3d, 0xc2, 0xd5, 0x7e, 0x2b, 0xdb, 0x3f, 0x22, 0x5c, 0xec, 0xe3, 0x4f, + 0x61, 0x70, 0xe3, 0xaf, 0x79, 0x6f, 0x7d, 0x01, 0x5b, 0x36, 0x25, 0x9e, 0x6e, 0x4d, 0xd1, 0xf6, + 0xcd, 0xaf, 0x86, 0x3f, 0x0d, 0xb8, 0x53, 0x6f, 0xdc, 0xe4, 0x7a, 0xd8, 0x03, 0x94, 0x8f, 0x1f, + 0xf7, 0xc7, 0x34, 0xe1, 0x64, 0x1c, 0xe9, 0x99, 0x19, 0xe8, 0x19, 0xfc, 0x3e, 0x93, 0x57, 0x87, + 0xb5, 0x55, 0x19, 0x56, 0x81, 0x98, 0xd5, 0x67, 0x02, 0xb1, 0xad, 0x10, 0x3d, 0x55, 0xa7, 0x02, + 0x62, 0xae, 0x2d, 0x11, 0x3b, 0x0a, 0x51, 0x2b, 0x4a, 0xc4, 0xbb, 0x00, 0xba, 0x80, 0x69, 0x98, + 0xdd, 0x36, 0x5d, 0x55, 0xbe, 0x34, 0xe4, 0xf8, 0x07, 0x80, 0x23, 0x3f, 0xb9, 0x54, 0x59, 0x8b, + 0x46, 0xf2, 0xfc, 0x58, 0xbf, 0x2e, 0x62, 0x29, 0x24, 0x24, 0x08, 0x74, 0x4e, 0x62, 0x29, 0x98, + 0x46, 0x9a, 0x50, 0x4f, 0x47, 0x2f, 0xd7, 0x42, 0x76, 0x16, 0x53, 0xaa, 0x03, 0x95, 0x6b, 0xfc, + 0x8b, 0x01, 0xdd, 0x17, 0x74, 0xac, 0x91, 0xb7, 0x01, 0xce, 0x59, 0xcc, 0x52, 0xee, 0x87, 0x34, + 0x91, 0x0e, 0x3a, 0xf6, 0x84, 0xe4, 0x9f, 0xfb, 0x91, 0xcc, 0x87, 0x06, 0x67, 0x3a, 0x77, 0xb9, + 0x16, 0xb2, 0x0b, 0x4a, 0x22, 0x9d, 0xae, 0x5c, 0xcb, 0x3b, 0x8d, 0x13, 0xf7, 0x52, 0xde, 0xa5, + 0xe2, 0x4e, 0x13, 0x1f, 0x07, 0xbf, 0xf7, 0xa1, 0xaf, 0xdf, 0x06, 0x49, 0xdc, 0xd0, 0x4f, 0xd0, + 0x9b, 0x20, 0x7c, 0xe8, 0xc3, 0x2a, 0xaf, 0xab, 0x12, 0x48, 0x6b, 0x67, 0x86, 0x96, 0x1e, 0xa8, + 0x39, 0x14, 0xc2, 0xff, 0x2a, 0x84, 0x0a, 0x3d, 0xac, 0x5a, 0x4f, 0xa3, 0x6b, 0xd6, 0xa3, 0x46, + 0xba, 0xb9, 0x3f, 0x0e, 0x2b, 0x35, 0x0c, 0x09, 0xed, 0xcd, 0x40, 0x29, 0xb0, 0x34, 0xeb, 0x71, + 0x43, 0xed, 0xdc, 0xeb, 0x1b, 0x40, 0x55, 0xfa, 0x84, 0x1e, 0xcd, 0x84, 0xb9, 0xa1, 0x67, 0xd6, + 0x5e, 0x33, 0xe5, 0xa9, 0x89, 0x2a, 0x62, 0x35, 0x33, 0xd1, 0x02, 0x75, 0x9b, 0x99, 0x68, 0x89, + 0xad, 0xcd, 0xa1, 0x4b, 0x18, 0x94, 0x49, 0x17, 0xda, 0x9d, 0xf6, 0x4b, 0xa0, 0xc2, 0xe9, 0xac, + 0x87, 0x4d, 0x54, 0x73, 0x67, 0x14, 0x6e, 0x17, 0x89, 0x11, 0x7a, 0x50, 0xb5, 0xaf, 0xa5, 0x79, + 0xd6, 0x68, 0xb6, 0xe2, 0x64, 0x4e, 0x65, 0xb2, 0x54, 0x97, 0xd3, 0x14, 0x26, 0x56, 0x97, 0xd3, + 0x34, 0xee, 0x85, 0xe7, 0x10, 0xc9, 0xa6, 0x4f, 0x51, 0x0a, 0xb4, 0x33, 0xcd, 0xba, 0x40, 0x5e, + 0xac, 0xfb, 0xb3, 0xd4, 0x32, 0x07, 0x1f, 0x1b, 0x62, 0xa0, 0x27, 0x78, 0x44, 0xdd, 0x40, 0x57, + 0x99, 0x89, 0xb5, 0x33, 0x43, 0x2b, 0x4f, 0xe0, 0x14, 0x96, 0x0a, 0xcc, 0x02, 0x4d, 0x0d, 0xad, + 0xc8, 0x57, 0xac, 0x07, 0x33, 0xf5, 0x72, 0x1f, 0x4e, 0x56, 0x24, 0x7d, 0x27, 0x4d, 0x0d, 0xae, + 0x78, 0x29, 0xdd, 0x9f, 0xa5, 0x96, 0x3b, 0xb8, 0x80, 0xe5, 0x12, 0x07, 0x40, 0x35, 0x1d, 0x53, + 0xcf, 0x6d, 0xac, 0xdd, 0x06, 0x9a, 0xb9, 0xa7, 0xb7, 0xb0, 0x5a, 0xf7, 0xdc, 0xa2, 0xc7, 0x75, + 0x20, 0x53, 0xdf, 0x74, 0x6b, 0xbf, 0xa9, 0x7a, 0xee, 0xf8, 0x15, 0x2c, 0x66, 0xdc, 0x02, 0xdd, + 0xab, 0x5a, 0x97, 0x78, 0x8e, 0x85, 0xdf, 0xa7, 0x72, 0xd3, 0x5c, 0xa7, 0xf3, 0xf2, 0x8f, 0x86, + 0x27, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x8c, 0xa8, 0xce, 0x0c, 0x7f, 0x10, 0x00, 0x00, } diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 20c1d08b5..7353cdc91 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -2,6 +2,7 @@ package replication import ( "context" + "fmt" "path/filepath" "strings" @@ -51,12 +52,17 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p return nil } - foundExisting, err := r.sink.UpdateEntry(ctx, 
key, message.OldEntry, message.NewEntry, message.DeleteChunks) + foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks) if foundExisting { glog.V(4).Infof("updated %v", key) return err } + err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false) + if err != nil { + return fmt.Errorf("delete old entry %v: %v", key, err) + } + glog.V(4).Infof("creating missing %v", key) return r.sink.CreateEntry(ctx, key, message.NewEntry) } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 760fbdbb5..6381908a1 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -132,7 +132,7 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb } -func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index c80bfcc49..35c2230fa 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -124,7 +124,7 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En } -func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 777c28620..ff0fe8b74 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -133,7 +133,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p }) } -func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { dir, name := filer2.FullPath(key).DirAndName() @@ -194,7 +194,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry, newE return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ - Directory: dir, + Directory: newParentPath, Entry: existingEntry, } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index 6b710a12a..abd7c49b9 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -119,7 +119,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E } -func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) 
(foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index 984aebc58..dd54f0005 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -12,7 +12,7 @@ type ReplicationSink interface { Initialize(configuration util.Configuration) error DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error - UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) } diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index a5b52095c..d5cad3541 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -130,7 +130,7 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_ } -func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil From d35023c7139e7c488644a03c646df3415b9a1a4b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 16 Apr 2019 01:06:32 -0700 Subject: [PATCH 188/450] weed shell: add command fs.meta.save --- weed/shell/command_fs_meta_save.go | 142 +++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 weed/shell/command_fs_meta_save.go diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go new file mode 100644 index 000000000..b874db31b --- /dev/null +++ b/weed/shell/command_fs_meta_save.go @@ -0,0 +1,142 @@ +package shell + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" +) + +func init() { + commands = append(commands, &commandFsMetaSave{}) +} + +type commandFsMetaSave struct { +} + +func (c *commandFsMetaSave) Name() string { + return "fs.meta.save" +} + +func (c *commandFsMetaSave) Help() string { + return `recursively save directory and file meta data to a local file + + fs.meta.save # save current meta data from current directory + + The meta data will be saved into a local _.meta file. + These meta data can be later loaded by fs.meta.load command. 
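
    The saved file is a stream of length-prefixed records: each filer_pb.FullEntry is marshalled with proto.Marshal and preceded by its 4-byte size written via util.Uint32toBytes, as the Do function below shows. A minimal sketch of reading such a stream back, assuming that layout; readMetaRecords is an illustrative name, not part of this patch:

    // Illustrative sketch only; assumes the length-prefixed layout written by fs.meta.save.
    // Imports assumed: "io", "github.com/chrislusf/seaweedfs/weed/pb/filer_pb",
    // "github.com/chrislusf/seaweedfs/weed/util", "github.com/golang/protobuf/proto".
    func readMetaRecords(r io.Reader, fn func(*filer_pb.FullEntry) error) error {
        sizeBuf := make([]byte, 4)
        for {
            // each record starts with a 4-byte size prefix
            if _, err := io.ReadFull(r, sizeBuf); err != nil {
                if err == io.EOF {
                    return nil // clean end of stream
                }
                return err
            }
            // then the marshalled filer_pb.FullEntry of exactly that size
            data := make([]byte, util.BytesToUint32(sizeBuf))
            if _, err := io.ReadFull(r, data); err != nil {
                return err
            }
            fullEntry := &filer_pb.FullEntry{}
            if err := proto.Unmarshal(data, fullEntry); err != nil {
                return err
            }
            if err := fn(fullEntry); err != nil {
                return err
            }
        }
    }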
+ +` +} + +func (c *commandFsMetaSave) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + ctx := context.Background() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + fileName := fmt.Sprintf("%s-%d.meta", filerServer, filerPort) + + dst, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil + } + defer dst.Close() + + var dirCount, fileCount uint64 + + sizeBuf := make([]byte, 4) + + err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { + + protoMessage := &filer_pb.FullEntry{ + Dir: string(parentPath), + Entry: entry, + } + + bytes, err := proto.Marshal(protoMessage) + if err != nil { + return fmt.Errorf("marshall error: %v", err) + } + + util.Uint32toBytes(sizeBuf, uint32(len(bytes))) + + dst.Write(sizeBuf) + dst.Write(bytes) + + if entry.IsDirectory { + dirCount++ + } else { + fileCount++ + } + + println(parentPath.Child(entry.Name)) + + return nil + + }) + + if err == nil { + fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount) + fmt.Fprintf(writer, "\nmeta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) + } + + return err + + }) + +} +func doTraverse(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry) error) (err error) { + + paginatedCount := -1 + startFromFileName := "" + paginateSize := 1000 + + for paginatedCount == -1 || paginatedCount == paginateSize { + resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: string(parentPath), + Prefix: "", + StartFromFileName: startFromFileName, + InclusiveStartFrom: false, + Limit: uint32(paginateSize), + }) + if listErr != nil { + err = listErr + return + } + + paginatedCount = len(resp.Entries) + + for _, entry := range resp.Entries { + + if err = fn(parentPath, entry); err != nil { + return err + } + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) + if parentPath == "/" { + subDir = "/" + entry.Name + } + if err = doTraverse(ctx, writer, client, filer2.FullPath(subDir), fn); err != nil { + return err + } + } + startFromFileName = entry.Name + + } + } + + return + +} From 79c2cca9c1b1ebbd567d95566806fb3eb37b129b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 16 Apr 2019 01:15:30 -0700 Subject: [PATCH 189/450] better error message --- weed/operation/grpc_client.go | 2 +- weed/server/volume_grpc_client_to_master.go | 2 +- weed/util/grpc_client_server.go | 10 +++++----- weed/wdclient/masterclient.go | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index ea7a82044..bf4d4bec3 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -44,7 +44,7 @@ func withMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v", masterServer) + return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) } return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { diff --git 
a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 7667ed363..a57ab48e7 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -34,7 +34,7 @@ func (vs *VolumeServer) heartbeat() { } masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) if parseErr != nil { - glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress) + glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr) continue } newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index a2407eb13..31497ad35 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -88,19 +88,19 @@ func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, } func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) { - hostnameAndPort := strings.Split(server, ":") - if len(hostnameAndPort) != 2 { - return "", fmt.Errorf("server should have hostname:port format: %v", hostnameAndPort) + colonIndex := strings.LastIndex(server, ":") + if colonIndex < 0 { + return "", fmt.Errorf("server should have hostname:port format: %v", server) } - port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + port, parseErr := strconv.ParseUint(server[colonIndex+1:], 10, 64) if parseErr != nil { return "", fmt.Errorf("server port parse error: %v", parseErr) } grpcPort := int(port) + 10000 - return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort), nil + return fmt.Sprintf("%s:%d", server[:colonIndex], grpcPort), nil } func ServerToGrpcAddress(server string) (serverGrpcAddress string) { diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 7a0bc9181..fc8d3506b 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -103,7 +103,7 @@ func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.Di masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v", master) + return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) } return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { From 014906ec797c7075afed2b33705e0db63bcd2dbd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 16 Apr 2019 01:37:11 -0700 Subject: [PATCH 190/450] weed shell: add command fs.meta.load to restore meta data --- weed/shell/command_fs_meta_load.go | 103 +++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 weed/shell/command_fs_meta_load.go diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go new file mode 100644 index 000000000..ef3c39f96 --- /dev/null +++ b/weed/shell/command_fs_meta_load.go @@ -0,0 +1,103 @@ +package shell + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" +) + +func init() { + commands = append(commands, &commandFsMetaLoad{}) +} + +type commandFsMetaLoad struct { +} + +func (c *commandFsMetaLoad) Name() string { + return "fs.meta.load" +} + +func (c *commandFsMetaLoad) Help() string { + return `load saved filer meta data to restore the directory and file 
structure + + fs.meta.load _.meta + +` +} + +func (c *commandFsMetaLoad) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(nil)) + if err != nil { + return err + } + + fileName := args[len(args)-1] + + dst, err := os.OpenFile(fileName, os.O_RDONLY, 0644) + if err != nil { + return nil + } + defer dst.Close() + + var dirCount, fileCount uint64 + + ctx := context.Background() + + err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + sizeBuf := make([]byte, 4) + + for { + if n, err := dst.Read(sizeBuf); n != 4 { + if err == io.EOF{ + return nil + } + return err + } + + size := util.BytesToUint32(sizeBuf) + + data := make([]byte, int(size)) + + if n, err := dst.Read(data); n != len(data) { + return err + } + + fullEntry := &filer_pb.FullEntry{} + if err = proto.Unmarshal(data, fullEntry); err != nil { + return err + } + + if _, err = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + Directory: fullEntry.Dir, + Entry: fullEntry.Entry, + }); err != nil { + return err + } + + fmt.Fprintf(writer, "load %s\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name)) + + if fullEntry.Entry.IsDirectory { + dirCount++ + } else { + fileCount++ + } + + } + + }) + + if err == nil { + fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount) + fmt.Fprintf(writer, "\n%s is loaded to http://%s:%d%s\n", fileName, filerServer, filerPort, path) + } + + return err +} \ No newline at end of file From 8ea1ee6dfaa78ebcacf4b03c3cb26b3155cc42eb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 16 Apr 2019 01:58:28 -0700 Subject: [PATCH 191/450] weed shell: add fs.meta.notify, removing filer.export --- weed/command/command.go | 1 - weed/command/filer_export.go | 191 --------------------------- weed/shell/command_fs_meta_notify.go | 78 +++++++++++ 3 files changed, 78 insertions(+), 192 deletions(-) delete mode 100644 weed/command/filer_export.go create mode 100644 weed/shell/command_fs_meta_notify.go diff --git a/weed/command/command.go b/weed/command/command.go index 91b9bf3fc..39a01cc05 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -13,7 +13,6 @@ var Commands = []*Command{ cmdCompact, cmdCopy, cmdFix, - cmdFilerExport, cmdFilerReplicate, cmdServer, cmdMaster, diff --git a/weed/command/filer_export.go b/weed/command/filer_export.go deleted file mode 100644 index ed1ee8966..000000000 --- a/weed/command/filer_export.go +++ /dev/null @@ -1,191 +0,0 @@ -package command - -import ( - "context" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/server" - "github.com/spf13/viper" -) - -func init() { - cmdFilerExport.Run = runFilerExport // break init cycle -} - -var cmdFilerExport = &Command{ - UsageLine: "filer.export -sourceStore=mysql -targetStore=cassandra", - Short: "export meta data in filer store", - Long: `Iterate the file tree and export all metadata out - - Both source and target store: - * should be a store name already specified in filer.toml - * do not need to be enabled state - - If target store is empty, only the directory tree will be listed. - - If target store is "notification", the list of entries will be sent to notification. - This is usually used to bootstrap filer replication to a remote system. 
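
    Concretely, each exported entry is published to the queue keyed by its full path with only NewEntry set, so the downstream replication treats it as a create. A short sketch of that message shape, mirroring the SendMessage calls in the removed filer.export code and in the new fs.meta.notify command added below:

    // Shape of a bootstrap message (illustrative; entry is a *filer2.Entry here):
    notification.Queue.SendMessage(
        string(entry.FullPath), // key: the entry's full path on the source filer
        &filer_pb.EventNotification{
            NewEntry: entry.ToProtoEntry(), // only NewEntry set => handled as a create
        },
    )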
- - `, -} - -var ( - // filerExportOutputFile = cmdFilerExport.Flag.String("output", "", "the output file. If empty, only list out the directory tree") - filerExportSourceStore = cmdFilerExport.Flag.String("sourceStore", "", "the source store name in filer.toml, default to currently enabled store") - filerExportTargetStore = cmdFilerExport.Flag.String("targetStore", "", "the target store name in filer.toml, or \"notification\" to export all files to message queue") - dir = cmdFilerExport.Flag.String("dir", "/", "only process files under this directory") - dirListLimit = cmdFilerExport.Flag.Int("dirListLimit", 100000, "limit directory list size") - dryRun = cmdFilerExport.Flag.Bool("dryRun", false, "not actually moving data") - verboseFilerExport = cmdFilerExport.Flag.Bool("v", false, "verbose entry details") -) - -type statistics struct { - directoryCount int - fileCount int -} - -func runFilerExport(cmd *Command, args []string) bool { - - weed_server.LoadConfiguration("filer", true) - config := viper.GetViper() - - var sourceStore, targetStore filer2.FilerStore - - for _, store := range filer2.Stores { - if store.GetName() == *filerExportSourceStore || *filerExportSourceStore == "" && config.GetBool(store.GetName()+".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize source store for %s: %+v", - store.GetName(), err) - } else { - sourceStore = store - } - break - } - } - - for _, store := range filer2.Stores { - if store.GetName() == *filerExportTargetStore { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize target store for %s: %+v", - store.GetName(), err) - } else { - targetStore = store - } - break - } - } - - if sourceStore == nil { - glog.Errorf("Failed to find source store %s", *filerExportSourceStore) - println("existing data sources are:") - for _, store := range filer2.Stores { - println(" " + store.GetName()) - } - return false - } - - if targetStore == nil && *filerExportTargetStore != "" && *filerExportTargetStore != "notification" { - glog.Errorf("Failed to find target store %s", *filerExportTargetStore) - println("existing data sources are:") - for _, store := range filer2.Stores { - println(" " + store.GetName()) - } - return false - } - - ctx := context.Background() - - stat := statistics{} - - var fn func(level int, entry *filer2.Entry) error - - if *filerExportTargetStore == "notification" { - weed_server.LoadConfiguration("notification", false) - v := viper.GetViper() - notification.LoadConfiguration(v.Sub("notification")) - - fn = func(level int, entry *filer2.Entry) error { - printout(level, entry) - if *dryRun { - return nil - } - return notification.Queue.SendMessage( - string(entry.FullPath), - &filer_pb.EventNotification{ - NewEntry: entry.ToProtoEntry(), - }, - ) - } - } else if targetStore == nil { - fn = printout - } else { - fn = func(level int, entry *filer2.Entry) error { - printout(level, entry) - if *dryRun { - return nil - } - return targetStore.InsertEntry(ctx, entry) - } - } - - doTraverse(ctx, &stat, sourceStore, filer2.FullPath(*dir), 0, fn) - - glog.Infof("processed %d directories, %d files", stat.directoryCount, stat.fileCount) - - return true -} - -func doTraverse(ctx context.Context, stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) { - - limit := *dirListLimit - lastEntryName := "" - for { - 
entries, err := filerStore.ListDirectoryEntries(ctx, parentPath, lastEntryName, false, limit) - if err != nil { - break - } - for _, entry := range entries { - if fnErr := fn(level, entry); fnErr != nil { - glog.Errorf("failed to process entry: %s", entry.FullPath) - } - if entry.IsDirectory() { - stat.directoryCount++ - doTraverse(ctx, stat, filerStore, entry.FullPath, level+1, fn) - } else { - stat.fileCount++ - } - lastEntryName = entry.Name() - } - if len(entries) < limit { - break - } - } -} - -func printout(level int, entry *filer2.Entry) error { - for i := 0; i < level; i++ { - if i == level-1 { - print("+-") - } else { - print("| ") - } - } - print(entry.FullPath.Name()) - if *verboseFilerExport { - for _, chunk := range entry.Chunks { - print("[") - print(chunk.FileId) - print(",") - print(chunk.Offset) - print(",") - print(chunk.Size) - print(")") - } - } - println() - return nil -} diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go new file mode 100644 index 000000000..ca4d8da5b --- /dev/null +++ b/weed/shell/command_fs_meta_notify.go @@ -0,0 +1,78 @@ +package shell + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/spf13/viper" +) + +func init() { + commands = append(commands, &commandFsMetaNotify{}) +} + +type commandFsMetaNotify struct { +} + +func (c *commandFsMetaNotify) Name() string { + return "fs.meta.notify" +} + +func (c *commandFsMetaNotify) Help() string { + return `recursively send directory and file meta data to notifiction message queue + + fs.meta.notify # send meta data from current directory to notification message queue + + The message queue will use it to trigger replication from this filer. 
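
    On the consuming side, a replication worker maps each EventNotification onto the sink interface shown earlier in this series. A rough sketch of that dispatch, assuming the CreateEntry/DeleteEntry/UpdateEntry signatures from replication_sink.go above (not the exact replicator code; error handling trimmed):

    // Imports assumed: "context", "github.com/chrislusf/seaweedfs/weed/pb/filer_pb",
    // "github.com/chrislusf/seaweedfs/weed/replication/sink".
    func applyNotification(ctx context.Context, s sink.ReplicationSink, key string, msg *filer_pb.EventNotification) error {
        switch {
        case msg.OldEntry == nil && msg.NewEntry == nil:
            return nil // nothing to do
        case msg.OldEntry == nil && msg.NewEntry != nil:
            // bootstrap messages from fs.meta.notify carry only NewEntry
            return s.CreateEntry(ctx, key, msg.NewEntry)
        case msg.OldEntry != nil && msg.NewEntry == nil:
            return s.DeleteEntry(ctx, key, msg.OldEntry.IsDirectory, msg.DeleteChunks)
        default:
            foundExisting, err := s.UpdateEntry(ctx, key, msg.OldEntry, msg.NewParentPath, msg.NewEntry, msg.DeleteChunks)
            if err != nil || foundExisting {
                return err
            }
            // fall back to delete-then-create when no existing entry is found
            if err := s.DeleteEntry(ctx, key, msg.OldEntry.IsDirectory, false); err != nil {
                return err
            }
            return s.CreateEntry(ctx, key, msg.NewEntry)
        }
    }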
+ +` +} + +func (c *commandFsMetaNotify) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) { + + filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + weed_server.LoadConfiguration("notification", true) + v := viper.GetViper() + notification.LoadConfiguration(v.Sub("notification")) + + ctx := context.Background() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + var dirCount, fileCount uint64 + + err = doTraverse(ctx, writer, client, filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) error { + + if entry.IsDirectory { + dirCount++ + } else { + fileCount++ + } + + return notification.Queue.SendMessage( + string(parentPath.Child(entry.Name)), + &filer_pb.EventNotification{ + NewEntry: entry, + }, + ) + + }) + + if err == nil { + fmt.Fprintf(writer, "\ntotal notified %d directories, %d files\n", dirCount, fileCount) + } + + return err + + }) + +} From af49aea0c6d8e67536487b1a5cadc6462167c1b2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 16 Apr 2019 09:40:27 -0700 Subject: [PATCH 192/450] weed shell: fs.meta.save adjusts meta data file name --- weed/shell/command_fs_meta_load.go | 2 +- weed/shell/command_fs_meta_save.go | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index ef3c39f96..35fa632b0 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -26,7 +26,7 @@ func (c *commandFsMetaLoad) Name() string { func (c *commandFsMetaLoad) Help() string { return `load saved filer meta data to restore the directory and file structure - fs.meta.load _.meta + fs.meta.load --