diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 985bd3781..b414183f1 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,11 +1,19 @@ --- name: Bug report about: Create a report to help us improve +title: '' +labels: '' +assignees: '' --- Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs +example of a good issue report: +https://github.com/chrislusf/seaweedfs/issues/1005 +example of a bad issue report: +https://github.com/chrislusf/seaweedfs/issues/1008 + **Describe the bug** A clear and concise description of what the bug is. diff --git a/.gitignore b/.gitignore index a56dfb8a3..671b01051 100644 --- a/.gitignore +++ b/.gitignore @@ -80,3 +80,6 @@ build target *.class other/java/hdfs/dependency-reduced-pom.xml + +# binary file +weed/weed diff --git a/.travis.yml b/.travis.yml index f445bff68..b42847e8e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,18 +1,21 @@ sudo: false language: go go: -- 1.10.x -- 1.11.x -- tip + - 1.10.x + - 1.11.x + - 1.12.x + # - tip before_install: -- export PATH=/home/travis/gopath/bin:$PATH + - export PATH=/home/travis/gopath/bin:$PATH install: -- go get ./weed/... + - export CGO_ENABLED="0" + - go env + - go get -u ./weed/... script: -- go test ./weed/... + - go test ./weed/... before_deploy: - make release @@ -22,23 +25,26 @@ deploy: api_key: secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI= file: - - build/linux_arm.tar.gz - - build/linux_arm64.tar.gz - - build/linux_386.tar.gz - - build/linux_amd64.tar.gz - - build/darwin_amd64.tar.gz - - build/windows_386.zip - - build/windows_amd64.zip - - build/freebsd_arm.tar.gz - - build/freebsd_amd64.tar.gz - - build/freebsd_386.tar.gz - - build/netbsd_arm.tar.gz - - build/netbsd_amd64.tar.gz - - build/netbsd_386.tar.gz - - build/openbsd_arm.tar.gz - - build/openbsd_amd64.tar.gz - - build/openbsd_386.tar.gz + - build/linux_arm.tar.gz + - build/linux_arm64.tar.gz + - build/linux_386.tar.gz + - build/linux_amd64.tar.gz + - build/linux_amd64_large_disk.tar.gz + - build/darwin_amd64.tar.gz + - build/darwin_amd64_large_disk.tar.gz + - build/windows_386.zip + - build/windows_amd64.zip + - build/windows_amd64_large_disk.zip + - build/freebsd_arm.tar.gz + - build/freebsd_amd64.tar.gz + - build/freebsd_386.tar.gz + - build/netbsd_arm.tar.gz + - build/netbsd_amd64.tar.gz + - build/netbsd_386.tar.gz + - build/openbsd_arm.tar.gz + - build/openbsd_amd64.tar.gz + - build/openbsd_386.tar.gz on: tags: true repo: chrislusf/seaweedfs - go: tip + go: 1.12.x diff --git a/Makefile b/Makefile index 9357b2a03..cce9d586d 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,9 @@ build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -stat tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3) zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3) +build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR) +tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3) +zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3) all: build @@ -32,9 +35,21 @@ linux: deps mkdir -p linux GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR) -release: deps windows_build 
darwin_build linux_build bsd_build +release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build ##### LINUX BUILDS ##### +5_byte_linux_build: + $(call build_large,linux,amd64,) + $(call tar_large,linux,amd64) + +5_byte_darwin_build: + $(call build_large,darwin,amd64,) + $(call tar_large,darwin,amd64) + +5_byte_windows_build: + $(call build_large,windows,amd64,.exe) + $(call zip_large,windows,amd64,.exe) + linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz build/linux_386.tar.gz: $(sources) diff --git a/README.md b/README.md index fc79ec8dd..d2afb479c 100644 --- a/README.md +++ b/README.md @@ -10,18 +10,16 @@

Supporting SeaweedFS

-SeaweedFS is an independent Apache-licensed open source project with its ongoing development made -possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md). +SeaweedFS is an independent Apache-licensed open source project with its ongoing development made +possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md). If you'd like to grow SeaweedFS even stronger, please consider joining our sponsors on Patreon. -Platinum ($2500/month), Gold ($500/month): put your company logo on the SeaweedFS github page -Generous Backer($50/month), Backer($10/month): put your name on the SeaweedFS backer page. - Your support will be really appreciated by me and other supporters!

Sponsor SeaweedFS via Patreon

+ + --- @@ -52,9 +52,29 @@ Your support will be really appreciated by me and other supporters! - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTc4MmVlYmFlNjBmZTgzZmJlYmI1MDE1YzkyNWYyZjkwZDFiM2RlMDdjNjVlNjdjYzc4NGFhZGIyYzEyMzJkYTA) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) +- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction) +Table of Contents +================= -## Introduction +* [Introduction](#introduction) +* [Features](#features) + * [Additional Features](#additional-features) + * [Filer Features](#filer-features) +* [Example Usage](#example-usage) +* [Architecture](#architecture) +* [Compared to Other File Systems](#compared-to-other-file-systems) + * [Compared to HDFS](#compared-to-hdfs) + * [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph) + * [Compared to GlusterFS](#compared-to-glusterfs) + * [Compared to Ceph](#compared-to-ceph) +* [Dev Plan](#dev-plan) +* [Installation Guide](#installation-guide) +* [Disk Related Topics](#disk-related-topics) +* [Benchmark](#Benchmark) +* [License](#license) + +## Introduction ## SeaweedFS is a simple and highly scalable distributed file system. There are two objectives: @@ -65,41 +85,57 @@ SeaweedFS started as an Object Store to handle small files efficiently. Instead There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. -SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). +SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf) SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Cassandra/LevelDB. -## Additional Features -* Can choose no replication or different replication levels, rack and data center aware -* Automatic master servers failover - no single point of failure (SPOF) -* Automatic Gzip compression depending on file mime type -* Automatic compaction to reclaim disk space after deletion or update -* Servers in the same cluster can have different disk spaces, file systems, OS etc. -* Adding/Removing servers does **not** cause any data re-balancing -* Optionally fix the orientation for jpeg pictures -* Support Etag, Accept-Range, Last-Modified, etc. -* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance. +[Back to TOC](#table-of-contents) -## Filer Features +## Features ## + +[Back to TOC](#table-of-contents) + +## Additional Features ## +* Can choose no replication or different replication levels, rack and data center aware. +* Automatic master servers failover - no single point of failure (SPOF). +* Automatic Gzip compression depending on file mime type. +* Automatic compaction to reclaim disk space after deletion or update. +* Servers in the same cluster can have different disk spaces, file systems, OS etc. 
+* Adding/Removing servers does **not** cause any data re-balancing. +* Optionally fix the orientation for jpeg pictures. +* Support ETag, Accept-Range, Last-Modified, etc. +* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance. +* Support rebalancing the writable and readonly volumes. + +[Back to TOC](#table-of-contents) + +## Filer Features ## * [filer server][Filer] provide "normal" directories and files via http. * [mount filer][Mount] to read and write files directly as a local directory via FUSE. * [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling. +* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs. * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. +* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices. [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files [Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount [AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API [BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System +[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV +[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage + +[Back to TOC](#table-of-contents) + +## Example Usage ## -## Example Usage By default, the master node runs on port 9333, and the volume nodes run on port 8080. Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example. SeaweedFS uses HTTP REST operations to read, write, and delete. The responses are in JSON or JSONP format. -### Start Master Server +### Start Master Server ### ``` > ./weed master @@ -125,7 +161,7 @@ Second, to store the file content, send a HTTP multi-part POST request to `url + ``` > curl -F file=@/home/chris/myphoto.jpg http://127.0.0.1:8080/3,01637037d6 -{"size": 43234} +{"name":"myphoto.jpg","size":43234,"eTag":"1cc0118e"} ``` To update, send another POST request with updated file content. @@ -135,6 +171,7 @@ For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL: ``` > curl -X DELETE http://127.0.0.1:8080/3,01637037d6 ``` + ### Save File Id ### Now, you can save the `fid`, 3,01637037d6 in this case, to a database field. @@ -157,7 +194,7 @@ First look up the volume server's URLs by the file's volumeId: ``` > curl http://localhost:9333/dir/lookup?volumeId=3 -{"locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]} +{"volumeId":"3","locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]} ``` Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read. @@ -213,7 +250,7 @@ More details about replication can be found [on the wiki][Replication]. You can also set the default replication strategy when starting the master server. 
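
The same write path (assign a file id from the master, then POST the content to the assigned volume server) can also be scripted instead of typed into curl. Below is a minimal Go sketch of that flow; it assumes a master running on localhost:9333 with the default HTTP API shown above, requests `replication=001` on the assign call, and trims error handling for brevity. The endpoint and JSON field names are taken from the responses shown earlier in this section.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"mime/multipart"
	"net/http"
)

// assignResult mirrors the JSON returned by /dir/assign as shown above.
type assignResult struct {
	Fid       string `json:"fid"`
	URL       string `json:"url"`
	PublicURL string `json:"publicUrl"`
	Count     int    `json:"count"`
}

func main() {
	// Ask the master for a file id, requesting replication "001"
	// (one extra copy on the same rack).
	resp, err := http.Get("http://localhost:9333/dir/assign?replication=001")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var assign assignResult
	if err := json.NewDecoder(resp.Body).Decode(&assign); err != nil {
		panic(err)
	}

	// Upload the file content to the assigned volume server with a
	// multipart POST to url + "/" + fid.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, _ := w.CreateFormFile("file", "hello.txt")
	part.Write([]byte("hello SeaweedFS"))
	w.Close()

	target := fmt.Sprintf("http://%s/%s", assign.URL, assign.Fid)
	upload, err := http.Post(target, w.FormDataContentType(), &buf)
	if err != nil {
		panic(err)
	}
	defer upload.Body.Close()

	// Save assign.Fid (e.g. "3,01637037d6") in your database; reading it
	// back later only needs /dir/lookup?volumeId=<volume id> plus an HTTP GET.
	fmt.Println("stored as fid:", assign.Fid)
}
```

Storing the returned `fid` string is all that is needed to read the file back later via `/dir/lookup` and a plain HTTP GET, as described in the "Read File" steps above.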
-### Allocate File Key on specific data center ### +### Allocate File Key on Specific Data Center ### Volume servers can be started with a specific data center name: @@ -239,6 +276,8 @@ When requesting a file key, an optional "dataCenter" parameter can limit the ass [feat-3]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#upload-large-files [feat-4]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space +[Back to TOC](#table-of-contents) + ## Architecture ## Usually distributed file systems split each file into chunks, a central master keeps a mapping of filenames, chunk indices to chunk handles, and also which chunks each chunk server has. @@ -279,12 +318,16 @@ Each individual file size is limited to the volume size. All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. +[Back to TOC](#table-of-contents) + ## Compared to Other File Systems ## Most other distributed file systems seem more complicated than necessary. SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications. +[Back to TOC](#table-of-contents) + ### Compared to HDFS ### HDFS uses the chunk approach for each file, and is ideal for storing large files. @@ -293,6 +336,7 @@ SeaweedFS is ideal for serving relatively smaller files quickly and concurrently SeaweedFS can also store extra large files by splitting them into manageable data chunks, and store the file ids of the data chunks into a meta chunk. This is managed by "weed upload/download" tool, and the weed master or volume servers are agnostic about it. +[Back to TOC](#table-of-contents) ### Compared to GlusterFS, Ceph ### @@ -310,17 +354,21 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa | GlusterFS | hashing | | FUSE, NFS | | | | Ceph | hashing + rules | | FUSE | Yes | | +[Back to TOC](#table-of-contents) + ### Compared to GlusterFS ### GlusterFS stores files, both directories and content, in configurable volumes called "bricks". GlusterFS hashes the path and filename into ids, and assigned to virtual volumes, and then mapped to "bricks". +[Back to TOC](#table-of-contents) + ### Compared to Ceph ### Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120) -SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage. +SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage. Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews. 
@@ -336,16 +384,26 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Cassa | Volume | OSD | optimized for small files | | Filer | Ceph FS | linearly scalable, Customizable, O(1) or O(logN) | +[Back to TOC](#table-of-contents) -## Dev plan ## +## Dev Plan ## More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc. Other key features include: Erasure Encoding, JWT security. This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)! +BTW, We suggest run the code style check script `util/gostd` before you push your branch to remote, it will make SeaweedFS easy to review, maintain and develop: -## Installation guide for users who are not familiar with golang +``` +$ ./util/gostd +``` + +[Back to TOC](#table-of-contents) + +## Installation Guide ## + +> Installation guide for users who are not familiar with golang Step 1: install go on your machine and setup the environment by following the instructions at: @@ -366,23 +424,27 @@ go get github.com/chrislusf/seaweedfs/weed Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory -Step 4: after you modify your code locally, you could start a local build by calling `go install` under +Step 4: after you modify your code locally, you could start a local build by calling `go install` under ``` $GOPATH/src/github.com/chrislusf/seaweedfs/weed ``` -## Disk Related topics ## +[Back to TOC](#table-of-contents) + +## Disk Related Topics ## ### Hard Drive Performance ### When testing read performance on SeaweedFS, it basically becomes a performance test of your hard drive's random read speed. Hard drives usually get 100MB/s~200MB/s. -### Solid State Disk +### Solid State Disk ### To modify or delete small files, SSD must delete a whole block at a time, and move content in existing blocks to a new block. SSD is fast when brand new, but will get fragmented over time and you have to garbage collect, compacting blocks. SeaweedFS is friendly to SSD since it is append-only. Deletion and compaction are done on volume level in the background, not slowing reading and not causing fragmentation. -## Benchmark +[Back to TOC](#table-of-contents) + +## Benchmark ## My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CPU: 1 Intel Core i7 2.6GHz. @@ -435,8 +497,9 @@ Percentage of the requests served within a certain time (ms) 100% 20.7 ms ``` +[Back to TOC](#table-of-contents) -## License +## License ## Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -450,7 +513,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+[Back to TOC](#table-of-contents) -## Stargazers over time +## Stargazers over time ## [![Stargazers over time](https://starcharts.herokuapp.com/chrislusf/seaweedfs.svg)](https://starcharts.herokuapp.com/chrislusf/seaweedfs) diff --git a/docker/Dockerfile b/docker/Dockerfile index c7a343111..38117a3dc 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,9 +1,20 @@ FROM frolvlad/alpine-glibc +# Supercronic install settings +ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \ + SUPERCRONIC=supercronic-linux-amd64 \ + SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea + +# Install SeaweedFS and Supercronic ( for cron job mode ) # Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format" RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \ tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \ + curl -fsSLO "$SUPERCRONIC_URL" && \ + echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ + chmod +x "$SUPERCRONIC" && \ + mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ + ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \ apk del build-dependencies && \ rm -rf /tmp/* @@ -22,6 +33,8 @@ EXPOSE 9333 # s3 server http port EXPOSE 8333 +RUN mkdir -p /data/filerldb2 + VOLUME /data COPY filer.toml /etc/seaweedfs/filer.toml diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index d0a214476..85cbb6143 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -16,6 +16,8 @@ EXPOSE 9333 # s3 server http port EXPOSE 8333 +RUN mkdir -p /data/filerldb2 + VOLUME /data RUN mkdir -p /etc/seaweedfs diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 105087dbe..c28bd263c 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -29,11 +29,10 @@ case "$1" in ;; 'filer') - ARGS="-ip `hostname -i` " + ARGS="" if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" fi - mkdir -p /data/filerdb exec /usr/bin/weed $@ $ARGS ;; @@ -45,6 +44,16 @@ case "$1" in exec /usr/bin/weed $@ $ARGS ;; + 'cronjob') + MASTER=${WEED_MASTER-localhost:9333} + FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *} + echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab + BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *} + echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab + echo "Running Crontab:" + cat /crontab + exec supercronic /crontab + ;; *) exec /usr/bin/weed $@ ;; diff --git a/docker/filer.toml b/docker/filer.toml index 5bf809cd8..a11e5de2b 100644 --- a/docker/filer.toml +++ b/docker/filer.toml @@ -1,3 +1,3 @@ -[leveldb] +[leveldb2] enabled = true -dir = "/data/filerdb" +dir = "/data/filerldb2" diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 05ed0e69e..d66b921bb 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -26,6 +26,16 @@ services: depends_on: - master - volume + cronjob: + image: chrislusf/seaweedfs # use a remote image + command: 'cronjob' + environment: + # Run re-replication every 2 minutes + 
CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *' + WEED_MASTER: master:9333 # Default: localhost:9333 + depends_on: + - master + - volume s3: image: chrislusf/seaweedfs # use a remote image ports: diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 1ea39863f..5882c726d 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -4,7 +4,7 @@ com.github.chrislusf seaweedfs-client - 1.0.5 + 1.1.0 org.sonatype.oss diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index 63d0d8320..f4bd0944b 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -51,12 +51,26 @@ public class FilerClient { } return createEntry( - parent, - newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build() + parent, + newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build() ); } + public boolean mv(String oldPath, String newPath) { + + Path oldPathObject = Paths.get(oldPath); + String oldParent = oldPathObject.getParent().toString(); + String oldName = oldPathObject.getFileName().toString(); + + Path newPathObject = Paths.get(newPath); + String newParent = newPathObject.getParent().toString(); + String newName = newPathObject.getFileName().toString(); + + return atomicRenameEntry(oldParent, oldName, newParent, newName); + + } + public boolean rm(String path, boolean isRecursive) { Path pathObject = Paths.get(path); @@ -64,10 +78,10 @@ public class FilerClient { String name = pathObject.getFileName().toString(); return deleteEntry( - parent, - name, - true, - isRecursive); + parent, + name, + true, + isRecursive); } public boolean touch(String path, int mode) { @@ -84,18 +98,18 @@ public class FilerClient { FilerProto.Entry entry = lookupEntry(parent, name); if (entry == null) { return createEntry( - parent, - newFileEntry(name, mode, uid, gid, userName, groupNames).build() + parent, + newFileEntry(name, mode, uid, gid, userName, groupNames).build() ); } long now = System.currentTimeMillis() / 1000L; FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder() - .setMtime(now) - .setUid(uid) - .setGid(gid) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames)); + .setMtime(now) + .setUid(uid) + .setGid(gid) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames)); return updateEntry(parent, entry.toBuilder().setAttributes(attr).build()); } @@ -105,17 +119,17 @@ public class FilerClient { long now = System.currentTimeMillis() / 1000L; return FilerProto.Entry.newBuilder() - .setName(name) - .setIsDirectory(true) - .setAttributes(FilerProto.FuseAttributes.newBuilder() - .setMtime(now) - .setCrtime(now) - .setUid(uid) - .setGid(gid) - .setFileMode(mode | 1 << 31) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames))); + .setName(name) + .setIsDirectory(true) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setMtime(now) + .setCrtime(now) + .setUid(uid) + .setGid(gid) + .setFileMode(mode | 1 << 31) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames))); } public FilerProto.Entry.Builder newFileEntry(String name, int mode, @@ -124,17 +138,17 @@ public class FilerClient { long now = System.currentTimeMillis() / 1000L; return FilerProto.Entry.newBuilder() - .setName(name) - .setIsDirectory(false) - 
.setAttributes(FilerProto.FuseAttributes.newBuilder() - .setMtime(now) - .setCrtime(now) - .setUid(uid) - .setGid(gid) - .setFileMode(mode) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames))); + .setName(name) + .setIsDirectory(false) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setMtime(now) + .setCrtime(now) + .setUid(uid) + .setGid(gid) + .setFileMode(mode) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames))); } public List listEntries(String path) { @@ -159,21 +173,27 @@ public class FilerClient { } public List listEntries(String path, String entryPrefix, String lastEntryName, int limit) { - return filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() - .setDirectory(path) - .setPrefix(entryPrefix) - .setStartFromFileName(lastEntryName) - .setLimit(limit) - .build()).getEntriesList(); + List entries = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() + .setDirectory(path) + .setPrefix(entryPrefix) + .setStartFromFileName(lastEntryName) + .setLimit(limit) + .build()).getEntriesList(); + List fixedEntries = new ArrayList<>(entries.size()); + for (FilerProto.Entry entry : entries) { + fixedEntries.add(fixEntryAfterReading(entry)); + } + return fixedEntries; } public FilerProto.Entry lookupEntry(String directory, String entryName) { try { - return filerGrpcClient.getBlockingStub().lookupDirectoryEntry( - FilerProto.LookupDirectoryEntryRequest.newBuilder() - .setDirectory(directory) - .setName(entryName) - .build()).getEntry(); + FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry( + FilerProto.LookupDirectoryEntryRequest.newBuilder() + .setDirectory(directory) + .setName(entryName) + .build()).getEntry(); + return fixEntryAfterReading(entry); } catch (Exception e) { LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e); return null; @@ -184,9 +204,9 @@ public class FilerClient { public boolean createEntry(String parent, FilerProto.Entry entry) { try { filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parent) - .setEntry(entry) - .build()); + .setDirectory(parent) + .setEntry(entry) + .build()); } catch (Exception e) { LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); return false; @@ -197,9 +217,9 @@ public class FilerClient { public boolean updateEntry(String parent, FilerProto.Entry entry) { try { filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder() - .setDirectory(parent) - .setEntry(entry) - .build()); + .setDirectory(parent) + .setEntry(entry) + .build()); } catch (Exception e) { LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); return false; @@ -210,11 +230,11 @@ public class FilerClient { public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive) { try { filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder() - .setDirectory(parent) - .setName(entryName) - .setIsDeleteData(isDeleteFileChunk) - .setIsRecursive(isRecursive) - .build()); + .setDirectory(parent) + .setName(entryName) + .setIsDeleteData(isDeleteFileChunk) + .setIsRecursive(isRecursive) + .build()); } catch (Exception e) { LOG.warn("deleteEntry {}/{}: {}", parent, entryName, e); return false; @@ -222,4 +242,39 @@ public class FilerClient { return true; } + public boolean atomicRenameEntry(String oldParent, String oldName, String 
newParent, String newName) { + try { + filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder() + .setOldDirectory(oldParent) + .setOldName(oldName) + .setNewDirectory(newParent) + .setNewName(newName) + .build()); + } catch (Exception e) { + LOG.warn("atomicRenameEntry {}/{} => {}/{}: {}", oldParent, oldName, newParent, newName, e); + return false; + } + return true; + } + + private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) { + if (entry.getChunksList().size() <= 0) { + return entry; + } + String fileId = entry.getChunks(0).getFileId(); + if (fileId != null && fileId.length() != 0) { + return entry; + } + FilerProto.Entry.Builder entryBuilder = entry.toBuilder(); + entryBuilder.clearChunks(); + for (FilerProto.FileChunk chunk : entry.getChunksList()) { + FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder(); + FilerProto.FileId fid = chunk.getFid(); + fileId = String.format("%d,%d%x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie()); + chunkBuilder.setFileId(fileId); + entryBuilder.addChunks(chunkBuilder); + } + return entryBuilder.build(); + } + } diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java index 16b7c3249..c28c1dcf2 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java @@ -2,7 +2,14 @@ package seaweedfs.client; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; +import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.shaded.io.grpc.netty.NegotiationType; +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder; +import javax.net.ssl.SSLException; +import java.io.File; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; @@ -20,6 +27,16 @@ public class FilerGrpcClient { this(ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()); } + public FilerGrpcClient(String host, int grpcPort, + String caFilePath, + String clientCertFilePath, + String clientPrivateKeyFilePath) throws SSLException { + + this(NettyChannelBuilder.forAddress(host, grpcPort) + .negotiationType(NegotiationType.TLS) + .sslContext(buildSslContext(caFilePath,clientCertFilePath,clientPrivateKeyFilePath))); + } + public FilerGrpcClient(ManagedChannelBuilder channelBuilder) { channel = channelBuilder.build(); blockingStub = SeaweedFilerGrpc.newBlockingStub(channel); @@ -42,4 +59,18 @@ public class FilerGrpcClient { public SeaweedFilerGrpc.SeaweedFilerFutureStub getFutureStub() { return futureStub; } + + private static SslContext buildSslContext(String trustCertCollectionFilePath, + String clientCertChainFilePath, + String clientPrivateKeyFilePath) throws SSLException { + SslContextBuilder builder = GrpcSslContexts.forClient(); + if (trustCertCollectionFilePath != null) { + builder.trustManager(new File(trustCertCollectionFilePath)); + } + if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) { + builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath)); + } + return builder.build(); + } + } diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java index a7cede09f..15db87195 100644 --- 
a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java @@ -20,25 +20,26 @@ public class SeaweedWrite { final byte[] bytes, final long bytesOffset, final long bytesLength) throws IOException { FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume( - FilerProto.AssignVolumeRequest.newBuilder() - .setCollection("") - .setReplication(replication) - .setDataCenter("") - .setReplication("") - .setTtlSec(0) - .build()); + FilerProto.AssignVolumeRequest.newBuilder() + .setCollection("") + .setReplication(replication) + .setDataCenter("") + .setReplication("") + .setTtlSec(0) + .build()); String fileId = response.getFileId(); String url = response.getUrl(); + String auth = response.getAuth(); String targetUrl = String.format("http://%s/%s", url, fileId); - String etag = multipartUpload(targetUrl, bytes, bytesOffset, bytesLength); + String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength); entry.addChunks(FilerProto.FileChunk.newBuilder() - .setFileId(fileId) - .setOffset(offset) - .setSize(bytesLength) - .setMtime(System.currentTimeMillis() / 10000L) - .setETag(etag) + .setFileId(fileId) + .setOffset(offset) + .setSize(bytesLength) + .setMtime(System.currentTimeMillis() / 10000L) + .setETag(etag) ); } @@ -46,14 +47,15 @@ public class SeaweedWrite { public static void writeMeta(final FilerGrpcClient filerGrpcClient, final String parentDirectory, final FilerProto.Entry.Builder entry) { filerGrpcClient.getBlockingStub().createEntry( - FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parentDirectory) - .setEntry(entry) - .build() + FilerProto.CreateEntryRequest.newBuilder() + .setDirectory(parentDirectory) + .setEntry(entry) + .build() ); } private static String multipartUpload(String targetUrl, + String auth, final byte[] bytes, final long bytesOffset, final long bytesLength) throws IOException { @@ -62,11 +64,14 @@ public class SeaweedWrite { InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); HttpPost post = new HttpPost(targetUrl); + if (auth != null && auth.length() != 0) { + post.addHeader("Authorization", "BEARER " + auth); + } post.setEntity(MultipartEntityBuilder.create() - .setMode(HttpMultipartMode.BROWSER_COMPATIBLE) - .addBinaryBody("upload", inputStream) - .build()); + .setMode(HttpMultipartMode.BROWSER_COMPATIBLE) + .addBinaryBody("upload", inputStream) + .build()); try { HttpResponse response = client.execute(post); diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 6cd4df6b4..d72bced12 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { + } + rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { } @@ -36,6 +39,9 @@ service SeaweedFiler { rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } + rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { + } + } ////////////////////////////////////////////////// @@ -69,19 +75,33 @@ message Entry { map extended = 5; } +message FullEntry { + string dir = 1; + Entry entry = 2; +} + message EventNotification { Entry old_entry = 1; Entry new_entry = 2; bool 
delete_chunks = 3; + string new_parent_path = 4; } message FileChunk { - string file_id = 1; + string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; int64 mtime = 4; string e_tag = 5; - string source_file_id = 6; + string source_file_id = 6; // to be deprecated + FileId fid = 7; + FileId source_fid = 8; +} + +message FileId { + uint32 volume_id = 1; + uint64 file_key = 2; + fixed32 cookie = 3; } message FuseAttributes { @@ -126,6 +146,16 @@ message DeleteEntryRequest { message DeleteEntryResponse { } +message AtomicRenameEntryRequest { + string old_directory = 1; + string old_name = 2; + string new_directory = 3; + string new_name = 4; +} + +message AtomicRenameEntryResponse { +} + message AssignVolumeRequest { int32 count = 1; string collection = 2; @@ -139,6 +169,7 @@ message AssignVolumeResponse { string url = 2; string public_url = 3; int32 count = 4; + string auth = 5; } message LookupVolumeRequest { @@ -177,3 +208,12 @@ message StatisticsResponse { uint64 used_size = 5; uint64 file_count = 6; } + +message GetFilerConfigurationRequest { +} +message GetFilerConfigurationResponse { + repeated string masters = 1; + string replication = 2; + string collection = 3; + uint32 max_mb = 4; +} diff --git a/other/java/hdfs/pom.xml b/other/java/hdfs/pom.xml index a0cab8752..6a1cd897f 100644 --- a/other/java/hdfs/pom.xml +++ b/other/java/hdfs/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.0.5 + 1.1.0 3.1.1 diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java index 2a0ef78af..453924cf7 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java +++ b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -34,6 +34,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { public static final int FS_SEAWEED_DEFAULT_PORT = 8888; public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host"; public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port"; + public static final String FS_SEAWEED_GRPC_CA = "fs.seaweed.ca"; + public static final String FS_SEAWEED_GRPC_CLIENT_KEY = "fs.seaweed.client.key"; + public static final String FS_SEAWEED_GRPC_CLIENT_CERT = "fs.seaweed.client.cert"; private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class); private static int BUFFER_SIZE = 16 * 1024 * 1024; @@ -72,7 +75,17 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { setConf(conf); this.uri = uri; - seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + if (conf.get(FS_SEAWEED_GRPC_CA) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CA).length() != 0 + && conf.get(FS_SEAWEED_GRPC_CLIENT_CERT) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_CERT).length() != 0 + && conf.get(FS_SEAWEED_GRPC_CLIENT_KEY) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_KEY).length() != 0) { + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, + conf.get(FS_SEAWEED_GRPC_CA), + conf.get(FS_SEAWEED_GRPC_CLIENT_CERT), + conf.get(FS_SEAWEED_GRPC_CLIENT_KEY)); + } else { + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + } + } @Override @@ -206,8 +219,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); return seaweedFileSystemStore.createDirectory(path, currentUser, - fsPermission == null ? 
FsPermission.getDirDefault() : fsPermission, - FsPermission.getUMask(getConf())); + fsPermission == null ? FsPermission.getDirDefault() : fsPermission, + FsPermission.getUMask(getConf())); } @@ -238,7 +251,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { */ @Override public void setOwner(Path path, final String owner, final String group) - throws IOException { + throws IOException { LOG.debug("setOwner path: {}", path); path = qualify(path); @@ -271,54 +284,55 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { /** * Concat existing files together. - * @param trg the path to the target destination. + * + * @param trg the path to the target destination. * @param psrcs the paths to the sources to use for the concatenation. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default). + * (default). */ @Override - public void concat(final Path trg, final Path [] psrcs) throws IOException { + public void concat(final Path trg, final Path[] psrcs) throws IOException { throw new UnsupportedOperationException("Not implemented by the " + - getClass().getSimpleName() + " FileSystem implementation"); + getClass().getSimpleName() + " FileSystem implementation"); } /** * Truncate the file in the indicated path to the indicated size. *
-     * <li>Fails if path is a directory.</li>
-     * <li>Fails if path does not exist.</li>
-     * <li>Fails if path is not closed.</li>
-     * <li>Fails if new size is greater than current size.</li>
+     * <li>Fails if path is a directory.</li>
+     * <li>Fails if path does not exist.</li>
+     * <li>Fails if path is not closed.</li>
+     * <li>Fails if new size is greater than current size.</li>
      * </ul>
- * @param f The path to the file to be truncated - * @param newLength The size the file is to be truncated to * + * @param f The path to the file to be truncated + * @param newLength The size the file is to be truncated to * @return true if the file has been truncated to the desired * newLength and is immediately available to be reused for * write operations such as append, or * false if a background process of adjusting the length of * the last block has been started, and clients should wait for it to * complete before proceeding with further file updates. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default). + * (default). */ @Override public boolean truncate(Path f, long newLength) throws IOException { throw new UnsupportedOperationException("Not implemented by the " + - getClass().getSimpleName() + " FileSystem implementation"); + getClass().getSimpleName() + " FileSystem implementation"); } @Override public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException, - FileAlreadyExistsException, FileNotFoundException, - ParentNotDirectoryException, UnsupportedFileSystemException, - IOException { + FileAlreadyExistsException, FileNotFoundException, + ParentNotDirectoryException, UnsupportedFileSystemException, + IOException { // Supporting filesystems should override this method throw new UnsupportedOperationException( - "Filesystem does not support symlinks!"); + "Filesystem does not support symlinks!"); } public boolean supportsSymlinks() { @@ -327,48 +341,51 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { /** * Create a snapshot. - * @param path The directory where snapshots will be taken. + * + * @param path The directory where snapshots will be taken. * @param snapshotName The name of the snapshot * @return the snapshot path. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported */ @Override public Path createSnapshot(Path path, String snapshotName) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support createSnapshot"); + + " doesn't support createSnapshot"); } /** * Rename a snapshot. - * @param path The directory path where the snapshot was taken + * + * @param path The directory path where the snapshot was taken * @param snapshotOldName Old name of the snapshot * @param snapshotNewName New name of the snapshot - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support renameSnapshot"); + + " doesn't support renameSnapshot"); } /** * Delete a snapshot of a directory. - * @param path The directory that the to-be-deleted snapshot belongs to + * + * @param path The directory that the to-be-deleted snapshot belongs to * @param snapshotName The name of the snapshot - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). 
*/ @Override public void deleteSnapshot(Path path, String snapshotName) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support deleteSnapshot"); + + " doesn't support deleteSnapshot"); } /** @@ -377,49 +394,49 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * ACL entries that are not specified in this call are retained without * changes. (Modifications are merged into the current ACL.) * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List<AclEntry> describing modifications - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void modifyAclEntries(Path path, List aclSpec) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support modifyAclEntries"); + + " doesn't support modifyAclEntries"); } /** * Removes ACL entries from files and directories. Other ACL entries are * retained. * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List describing entries to remove - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeAclEntries(Path path, List aclSpec) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeAclEntries"); + + " doesn't support removeAclEntries"); } /** * Removes all default ACL entries from files and directories. * * @param path Path to modify - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeDefaultAcl(Path path) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeDefaultAcl"); + + " doesn't support removeDefaultAcl"); } /** @@ -428,32 +445,32 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * bits. * * @param path Path to modify - * @throws IOException if an ACL could not be removed + * @throws IOException if an ACL could not be removed * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeAcl(Path path) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeAcl"); + + " doesn't support removeAcl"); } /** * Fully replaces ACL of files and directories, discarding all existing * entries. * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List describing modifications, which must include entries - * for user, group, and others for compatibility with permission bits. - * @throws IOException if an ACL could not be modified + * for user, group, and others for compatibility with permission bits. 
+ * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void setAcl(Path path, List aclSpec) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support setAcl"); + + " doesn't support setAcl"); } /** @@ -461,14 +478,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get * @return AclStatus describing the ACL of the file or directory - * @throws IOException if an ACL could not be read + * @throws IOException if an ACL could not be read * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public AclStatus getAclStatus(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getAclStatus"); + + " doesn't support getAclStatus"); } /** @@ -478,19 +495,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { *

* Refer to the HDFS extended attributes user documentation for details. * - * @param path Path to modify - * @param name xattr name. + * @param path Path to modify + * @param name xattr name. * @param value xattr value. - * @param flag xattr set flag - * @throws IOException IO failure + * @param flag xattr set flag + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void setXAttr(Path path, String name, byte[] value, EnumSet flag) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support setXAttr"); + + " doesn't support setXAttr"); } /** @@ -503,14 +520,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * @param path Path to get extended attribute * @param name xattr name. * @return byte[] xattr value. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public byte[] getXAttr(Path path, String name) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttr"); + + " doesn't support getXAttr"); } /** @@ -522,14 +539,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get extended attributes * @return Map describing the XAttrs of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public Map getXAttrs(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttrs"); + + " doesn't support getXAttrs"); } /** @@ -539,18 +556,18 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { *

* Refer to the HDFS extended attributes user documentation for details. * - * @param path Path to get extended attributes + * @param path Path to get extended attributes * @param names XAttr names. * @return Map describing the XAttrs of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public Map getXAttrs(Path path, List names) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttrs"); + + " doesn't support getXAttrs"); } /** @@ -562,14 +579,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get extended attributes * @return List{@literal } of the XAttr names of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public List listXAttrs(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support listXAttrs"); + + " doesn't support listXAttrs"); } /** @@ -581,14 +598,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to remove extended attribute * @param name xattr name - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeXAttr(Path path, String name) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeXAttr"); + + " doesn't support removeXAttr"); } } diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index 27678e615..643467898 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -12,6 +12,7 @@ import seaweedfs.client.FilerGrpcClient; import seaweedfs.client.FilerProto; import seaweedfs.client.SeaweedRead; +import javax.net.ssl.SSLException; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; @@ -33,6 +34,13 @@ public class SeaweedFileSystemStore { filerClient = new FilerClient(filerGrpcClient); } + public SeaweedFileSystemStore(String host, int port, + String caFile, String clientCertFile, String clientKeyFile) throws SSLException { + int grpcPort = 10000 + port; + filerGrpcClient = new FilerGrpcClient(host, grpcPort, caFile, clientCertFile, clientKeyFile); + filerClient = new FilerClient(filerGrpcClient); + } + public static String getParentDirectory(Path path) { return path.isRoot() ? 
"/" : path.getParent().toUri().getPath(); } @@ -143,35 +151,7 @@ public class SeaweedFileSystemStore { LOG.warn("rename non-existing source: {}", source); return; } - LOG.warn("rename moveEntry source: {}", source); - moveEntry(source.getParent(), entry, destination); - } - - private boolean moveEntry(Path oldParent, FilerProto.Entry entry, Path destination) { - - LOG.debug("moveEntry: {}/{} => {}", oldParent, entry.getName(), destination); - - FilerProto.Entry.Builder newEntry = entry.toBuilder().setName(destination.getName()); - boolean isDirectoryCreated = filerClient.createEntry(getParentDirectory(destination), newEntry.build()); - - if (!isDirectoryCreated) { - return false; - } - - if (entry.getIsDirectory()) { - Path entryPath = new Path(oldParent, entry.getName()); - List entries = filerClient.listEntries(entryPath.toUri().getPath()); - for (FilerProto.Entry ent : entries) { - boolean isSucess = moveEntry(entryPath, ent, new Path(destination, ent.getName())); - if (!isSucess) { - return false; - } - } - } - - return filerClient.deleteEntry( - oldParent.toUri().getPath(), entry.getName(), false, false); - + filerClient.mv(source.toUri().getPath(), destination.toUri().getPath()); } public OutputStream createFile(final Path path, diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go index 779580a9b..07d9b94e4 100644 --- a/unmaintained/change_superblock/change_superblock.go +++ b/unmaintained/change_superblock/change_superblock.go @@ -9,6 +9,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) var ( @@ -73,7 +74,7 @@ func main() { } if *targetTTL != "" { - ttl, err := storage.ReadTTL(*targetTTL) + ttl, err := needle.ReadTTL(*targetTTL) if err != nil { glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err) diff --git a/unmaintained/compact_leveldb/compact_leveldb.go b/unmaintained/compact_leveldb/compact_leveldb.go new file mode 100644 index 000000000..317356c3f --- /dev/null +++ b/unmaintained/compact_leveldb/compact_leveldb.go @@ -0,0 +1,35 @@ +package main + +import ( + "flag" + "log" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + dir = flag.String("dir", ".", "data directory to store leveldb files") +) + +func main() { + + flag.Parse() + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, + OpenFilesCacheCapacity: -1, + } + + db, err := leveldb.OpenFile(*dir, opts) + if err != nil { + log.Fatal(err) + } + defer db.Close() + if err := db.CompactRange(util.Range{}); err != nil { + log.Fatal(err) + } +} diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go index 9eb64b3b4..a72a78eed 100644 --- a/unmaintained/fix_dat/fix_dat.go +++ b/unmaintained/fix_dat/fix_dat.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -61,7 +62,7 @@ func main() { } newDatFile.Write(superBlock.Bytes()) - iterateEntries(datFile, indexFile, func(n *storage.Needle, offset int64) { + iterateEntries(datFile, indexFile, func(n *needle.Needle, offset int64) { 
fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize) _, s, _, e := n.Append(newDatFile, superBlock.Version()) fmt.Printf("size %d error %v\n", s, e) @@ -69,7 +70,7 @@ func main() { } -func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needle, offset int64)) { +func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) { // start to read index file var readerOffset int64 bytes := make([]byte, 16) @@ -84,7 +85,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl } offset := int64(superBlock.BlockSize()) version := superBlock.Version() - n, rest, err := storage.ReadNeedleHeader(datFile, version, offset) + n, _, rest, err := needle.ReadNeedleHeader(datFile, version, offset) if err != nil { fmt.Printf("cannot read needle header: %v", err) return @@ -106,7 +107,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex) - rest = storage.NeedleBodyLength(sizeFromIndex, version) + rest = needle.NeedleBodyLength(sizeFromIndex, version) func() { defer func() { @@ -114,7 +115,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl fmt.Println("Recovered in f", r) } }() - if err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleEntrySize), rest); err != nil { + if _, err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleHeaderSize), rest); err != nil { fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err) } }() @@ -124,9 +125,9 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl } visitNeedle(n, offset) - offset += types.NeedleEntrySize + rest + offset += types.NeedleHeaderSize + rest //fmt.Printf("==> new entry offset %d\n", offset) - if n, rest, err = storage.ReadNeedleHeader(datFile, version, offset); err != nil { + if n, _, rest, err = needle.ReadNeedleHeader(datFile, version, offset); err != nil { if err == io.EOF { return } diff --git a/unmaintained/load_test/load_test_leveldb.go b/unmaintained/load_test/load_test_leveldb.go new file mode 100644 index 000000000..3ff01f3eb --- /dev/null +++ b/unmaintained/load_test/load_test_leveldb.go @@ -0,0 +1,156 @@ +package main + +import ( + "crypto/md5" + "flag" + "fmt" + "io" + "log" + "math/rand" + "os" + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +var ( + dir = flag.String("dir", "./t", "directory to store level db files") + useHash = flag.Bool("isHash", false, "hash the path as the key") + dbCount = flag.Int("dbCount", 1, "the number of leveldb") +) + +func main() { + + flag.Parse() + + totalTenants := 300 + totalYears := 3 + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + + var dbs []*leveldb.DB + var chans []chan string + for d := 0 ; d < *dbCount; d++ { + dbFolder := fmt.Sprintf("%s/%02d", *dir, d) + os.MkdirAll(dbFolder, 0755) + db, err := leveldb.OpenFile(dbFolder, opts) + if err != nil { + log.Printf("filer store open dir %s: %v", *dir, err) + return + } + dbs = append(dbs, db) + chans = append(chans, make(chan string, 1024)) + } + + var wg sync.WaitGroup + for d := 0 ; d < *dbCount; d++ { + wg.Add(1) + go func(d int){ + defer wg.Done() + + ch := chans[d] + 
db := dbs[d] + + for p := range ch { + if *useHash { + insertAsHash(db, p) + }else{ + insertAsFullPath(db, p) + } + } + }(d) + } + + + counter := int64(0) + lastResetTime := time.Now() + + r := rand.New(rand.NewSource(35)) + + for y := 0; y < totalYears; y++ { + for m := 0; m < 12; m++ { + for d := 0; d < 31; d++ { + for h := 0; h < 24; h++ { + for min := 0; min < 60; min++ { + for i := 0; i < totalTenants; i++ { + p := fmt.Sprintf("tenent%03d/%4d/%02d/%02d/%02d/%02d", i, 2015+y, 1+m, 1+d, h, min) + + x := r.Intn(*dbCount) + + chans[x] <- p + + counter++ + } + + t := time.Now() + if lastResetTime.Add(time.Second).Before(t) { + p := fmt.Sprintf("%4d/%02d/%02d/%02d/%02d", 2015+y, 1+m, 1+d, h, min) + fmt.Printf("%s = %4d put/sec\n", p, counter) + counter = 0 + lastResetTime = t + } + } + } + } + } + } + + for d := 0 ; d < *dbCount; d++ { + close(chans[d]) + } + + wg.Wait() + +} + +func insertAsFullPath(db *leveldb.DB, p string) { + _, getErr := db.Get([]byte(p), nil) + if getErr == leveldb.ErrNotFound { + putErr := db.Put([]byte(p), []byte(p), nil) + if putErr != nil { + log.Printf("failed to put %s", p) + } + } +} + +func insertAsHash(db *leveldb.DB, p string) { + key := fmt.Sprintf("%d:%s", hashToLong(p), p) + _, getErr := db.Get([]byte(key), nil) + if getErr == leveldb.ErrNotFound { + putErr := db.Put([]byte(key), []byte(p), nil) + if putErr != nil { + log.Printf("failed to put %s", p) + } + } +} + +func hashToLong(dir string) (v int64) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + v += int64(b[0]) + v <<= 8 + v += int64(b[1]) + v <<= 8 + v += int64(b[2]) + v <<= 8 + v += int64(b[3]) + v <<= 8 + v += int64(b[4]) + v <<= 8 + v += int64(b[5]) + v <<= 8 + v += int64(b[6]) + v <<= 8 + v += int64(b[7]) + + return +} diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 7cc583f56..28bcabb9b 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -7,6 +7,9 @@ import ( "log" "math/rand" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -19,8 +22,11 @@ var ( func main() { flag.Parse() + util.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + for i := 0; i < *repeat; i++ { - assignResult, err := operation.Assign(*master, &operation.VolumeAssignRequest{Count: 1}) + assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) if err != nil { log.Fatalf("assign: %v", err) } @@ -31,12 +37,12 @@ func main() { targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) - _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, "") + _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth) if err != nil { log.Fatalf("upload: %v", err) } - util.Delete(targetUrl, "") + util.Delete(targetUrl, string(assignResult.Auth)) util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go index f79c0a6a9..e8e54fd4f 100644 --- a/unmaintained/see_dat/see_dat.go +++ b/unmaintained/see_dat/see_dat.go @@ -2,8 +2,12 @@ package main import ( "flag" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" 
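A side note on the hashToLong helper in the new load_test_leveldb.go above: the shift-and-add loop packs the first 8 bytes of the MD5 digest into an int64, which is exactly the bit pattern encoding/binary produces for a big-endian read. A minimal, self-contained sketch of that equivalence (the function names here are illustrative, not part of the tool):

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// hashToLong8 returns the first 8 bytes of the MD5 digest interpreted as a
// big-endian int64, matching the manual shift-and-add loop in hashToLong.
func hashToLong8(dir string) int64 {
	sum := md5.Sum([]byte(dir))
	return int64(binary.BigEndian.Uint64(sum[:8]))
}

func main() {
	// both key layouts in the load test key on the path; the hashed form
	// simply prefixes the path with this 64-bit value
	p := "2015/01/01/00/00"
	fmt.Printf("%d:%s\n", hashToLong8(p), p)
}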
+ + "time" ) var ( @@ -13,7 +17,7 @@ var ( ) type VolumeFileScanner4SeeDat struct { - version storage.Version + version needle.Version } func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error { @@ -22,18 +26,19 @@ func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.Supe } func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { - return false + return true } -func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *storage.Needle, offset int64) error { - glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie) +func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64) error { + t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second)) + glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x appendedAt %v", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie, t) return nil } func main() { flag.Parse() - vid := storage.VolumeId(*volumeId) + vid := needle.VolumeId(*volumeId) scanner := &VolumeFileScanner4SeeDat{} err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner) diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go index 23ca04c2e..777af1821 100644 --- a/unmaintained/see_idx/see_idx.go +++ b/unmaintained/see_idx/see_idx.go @@ -8,7 +8,7 @@ import ( "strconv" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -35,7 +35,7 @@ func main() { } defer indexFile.Close() - storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { + idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size) return nil }) diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go new file mode 100644 index 000000000..0d2ac8de1 --- /dev/null +++ b/unmaintained/see_meta/see_meta.go @@ -0,0 +1,68 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" +) + +var ( + metaFile = flag.String("meta", "", "meta file generated via fs.meta.save") +) + +func main() { + flag.Parse() + + dst, err := os.OpenFile(*metaFile, os.O_RDONLY, 0644) + if err != nil { + log.Fatalf("failed to open %s: %v", *metaFile, err) + } + defer dst.Close() + + err = walkMetaFile(dst) + if err != nil { + log.Fatalf("failed to visit %s: %v", *metaFile, err) + } + +} + +func walkMetaFile(dst *os.File) error { + + sizeBuf := make([]byte, 4) + + for { + if n, err := dst.Read(sizeBuf); n != 4 { + if err == io.EOF { + return nil + } + return err + } + + size := util.BytesToUint32(sizeBuf) + + data := make([]byte, int(size)) + + if n, err := dst.Read(data); n != len(data) { + return err + } + + fullEntry := &filer_pb.FullEntry{} + if err := proto.Unmarshal(data, fullEntry); err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "file %s %v\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String()) + for i, chunk := range fullEntry.Entry.Chunks { + fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String()) + } + + } + +} diff --git 
a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go new file mode 100644 index 000000000..b2e4b28c6 --- /dev/null +++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go @@ -0,0 +1,136 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "mime/multipart" + "net/http" + "os" + "strings" + "sync" + "time" +) + +var ( + size = flag.Int("size", 1024, "file size") + concurrency = flag.Int("c", 4, "concurrent number of uploads") + times = flag.Int("n", 1024, "repeated number of times") + fileCount = flag.Int("fileCount", 1, "number of files to write") + destination = flag.String("to", "http://localhost:8888/", "destination directory on filer") + + statsChan = make(chan stat, 8) +) + +type stat struct { + size int64 +} + +func main() { + + flag.Parse() + + data := make([]byte, *size) + println("data len", len(data)) + + var wg sync.WaitGroup + for x := 0; x < *concurrency; x++ { + wg.Add(1) + + go func(x int) { + defer wg.Done() + + client := &http.Client{Transport: &http.Transport{ + MaxConnsPerHost: 1024, + MaxIdleConnsPerHost: 1024, + }} + r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x))) + + for t := 0; t < *times; t++ { + for f := 0; f < *fileCount; f++ { + fn := r.Intn(*fileCount) + if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil { + statsChan <- stat{ + size: size, + } + } else { + log.Fatalf("client %d upload %d times: %v", x, t, err) + } + } + } + }(x) + } + + go func() { + ticker := time.NewTicker(1000 * time.Millisecond) + + var lastTime time.Time + var counter, size int64 + for { + select { + case stat := <-statsChan: + size += stat.size + counter++ + case x := <-ticker.C: + if !lastTime.IsZero() { + elapsed := x.Sub(lastTime).Seconds() + fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n", + float64(counter)/elapsed, + float64(size/1024/1024)/elapsed) + } + lastTime = x + size = 0 + counter = 0 + } + } + }() + + wg.Wait() + +} + +func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) { + + if !strings.HasSuffix(destination, "/") { + destination = destination + "/" + } + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", filename) + if err != nil { + return 0, fmt.Errorf("fail to create form %v: %v", filename, err) + } + + part.Write(data) + + err = writer.Close() + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", filename, err) + } + + uri := destination + filename + + request, err := http.NewRequest("POST", uri, body) + request.Header.Set("Content-Type", writer.FormDataContentType()) + // request.Close = true // can not use this, which do not reuse http connection, impacting filer->volume also. 
+ + resp, err := client.Do(request) + if err != nil { + return 0, fmt.Errorf("http POST %s: %v", uri, err) + } else { + body := &bytes.Buffer{} + _, err := body.ReadFrom(resp.Body) + if err != nil { + return 0, fmt.Errorf("read http POST %s response: %v", uri, err) + } + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + + return int64(len(data)), nil +} diff --git a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go new file mode 100644 index 000000000..8b986b546 --- /dev/null +++ b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go @@ -0,0 +1,150 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +var ( + dir = flag.String("dir", ".", "upload files under this directory") + concurrency = flag.Int("c", 1, "concurrent number of uploads") + times = flag.Int("n", 1, "repeated number of times") + destination = flag.String("to", "http://localhost:8888/", "destination directory on filer") + + statsChan = make(chan stat, 8) +) + +type stat struct { + size int64 +} + +func main() { + + flag.Parse() + + var fileNames []string + + files, err := ioutil.ReadDir(*dir) + if err != nil { + log.Fatalf("fail to read dir %v: %v", *dir, err) + } + + for _, file := range files { + if file.IsDir() { + continue + } + fileNames = append(fileNames, filepath.Join(*dir, file.Name())) + } + + var wg sync.WaitGroup + for x := 0; x < *concurrency; x++ { + wg.Add(1) + + client := &http.Client{} + + go func() { + defer wg.Done() + rand.Shuffle(len(fileNames), func(i, j int) { + fileNames[i], fileNames[j] = fileNames[j], fileNames[i] + }) + for t := 0; t < *times; t++ { + for _, filename := range fileNames { + if size, err := uploadFileToFiler(client, filename, *destination); err == nil { + statsChan <- stat{ + size: size, + } + } + } + } + }() + } + + go func() { + ticker := time.NewTicker(500 * time.Millisecond) + + var lastTime time.Time + var counter, size int64 + for { + select { + case stat := <-statsChan: + size += stat.size + counter++ + case x := <-ticker.C: + if !lastTime.IsZero() { + elapsed := x.Sub(lastTime).Seconds() + fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n", + float64(counter)/elapsed, + float64(size/1024/1024)/elapsed) + } + lastTime = x + size = 0 + counter = 0 + } + } + }() + + wg.Wait() + +} + +func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) { + file, err := os.Open(filename) + if err != nil { + panic(err) + } + defer file.Close() + + fi, err := file.Stat() + + if !strings.HasSuffix(destination, "/") { + destination = destination + "/" + } + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", file.Name()) + if err != nil { + return 0, fmt.Errorf("fail to create form %v: %v", file.Name(), err) + } + _, err = io.Copy(part, file) + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err) + } + + err = writer.Close() + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err) + } + + uri := destination + file.Name() + + request, err := http.NewRequest("POST", uri, body) + request.Header.Set("Content-Type", writer.FormDataContentType()) + + resp, err := client.Do(request) + if err != nil { + return 0, fmt.Errorf("http POST %s: %v", uri, err) 
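Both upload helpers in these load generators read the POST response and then drain and close resp.Body, so the http.Transport can return the connection to its idle pool instead of opening a new one per request (the commented-out request.Close = true would defeat that, as the comment above notes). A minimal sketch of the pattern in isolation (the URL and helper name are placeholders):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// drainAndClose consumes any remaining response bytes and closes the body,
// which is what allows net/http to reuse the underlying TCP connection.
func drainAndClose(resp *http.Response) {
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
}

func main() {
	resp, err := http.Get("http://localhost:8888/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer drainAndClose(resp)

	data, _ := ioutil.ReadAll(resp.Body)
	fmt.Println("read", len(data), "bytes")
}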
+ } else { + body := &bytes.Buffer{} + _, err := body.ReadFrom(resp.Body) + if err != nil { + return 0, fmt.Errorf("read http POST %s response: %v", uri, err) + } + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + + return fi.Size(), nil +} diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go new file mode 100644 index 000000000..f0ef51c09 --- /dev/null +++ b/unmaintained/volume_tailer/volume_tailer.go @@ -0,0 +1,70 @@ +package main + +import ( + "flag" + "log" + "time" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + util2 "github.com/chrislusf/seaweedfs/weed/util" + "github.com/spf13/viper" + "golang.org/x/tools/godoc/util" +) + +var ( + master = flag.String("master", "localhost:9333", "master server host and port") + volumeId = flag.Int("volumeId", -1, "a volume id") + rewindDuration = flag.Duration("rewind", -1, "rewind back in time. -1 means from the first entry. 0 means from now.") + timeoutSeconds = flag.Int("timeoutSeconds", 0, "disconnect if no activity after these seconds") + showTextFile = flag.Bool("showTextFile", false, "display textual file content") +) + +func main() { + flag.Parse() + + util2.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + + vid := needle.VolumeId(*volumeId) + + var sinceTimeNs int64 + if *rewindDuration == 0 { + sinceTimeNs = time.Now().UnixNano() + } else if *rewindDuration == -1 { + sinceTimeNs = 0 + } else if *rewindDuration > 0 { + sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano() + } + + err := operation.TailVolume(*master, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) { + if n.Size == 0 { + println("-", n.String()) + return nil + } else { + println("+", n.String()) + } + + if *showTextFile { + + data := n.Data + if n.IsGzipped() { + if data, err = util2.UnGzipData(data); err != nil { + return err + } + } + if util.IsText(data) { + println(string(data)) + } + + println("-", n.String(), "compressed", n.IsGzipped(), "original size", len(data)) + } + return nil + }) + + if err != nil { + log.Printf("Error VolumeTailSender volume %d: %v", vid, err) + } + +} diff --git a/util/gostd b/util/gostd new file mode 100755 index 000000000..e9fc783d1 --- /dev/null +++ b/util/gostd @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +############################ GLOBAL VARIABLES +regex=' ' +branch="master" +max_length=150 + +REGEX_SUFFIX_GO=".+\.go$" + +############################ FUNCTIONS +msg() { + printf '%b' "$1" >&2 +} + +die() { + msg "\33[31m[✘]\33[0m ${1}${2}" + exit 1 +} + +succ() { + msg "\33[34m[√]\33[0m ${1}${2}" +} + +gostd() { + local branch=$1 + local reg4exclude=$2 + local max_length=$3 + + for file in `git diff $branch --name-only` + do + if ! [[ $file =~ $REGEX_SUFFIX_GO ]] || [[ $file =~ $reg4exclude ]]; then + continue + fi + + error=`go fmt $file 2>&1` + if ! [ $? -eq 0 ]; then + die "go fmt $file:" "$error" + fi + + succ "$file\n" + + grep -n -E --color=always ".{$max_length}" $file | awk '{ printf ("%4s %s\n", "", $0) }' + done +} + +get_options() { + while getopts "b:e:hl:" opts + do + case $opts in + b) + branch=$OPTARG + ;; + e) + regex=$OPTARG + ;; + h) + usage + exit 0 + ;; + l) + max_length=$OPTARG + ;; + \?) + usage + exit 1 + ;; + esac + done +} + +usage () { + cat << _EOC_ +Usage: + gostd [options] + +Options: + -b Specify the git diff branch or commit. 
+ (default: master) + -e Regex for excluding file or directory. + -h Print this usage. + -l Show files that exceed the limit line length. + (default: 150) + +Examples: + gostd + gostd -b master -l 100 + gostd -b 59d532a -e weed/pb -l 100 +_EOC_ +} + +main() { + get_options "$@" + + gostd "$branch" "$regex" "$max_length" +} + +############################ MAIN() +main "$@" diff --git a/weed/command/backup.go b/weed/command/backup.go index 072aea75b..31e146965 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -3,6 +3,11 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/storage" ) @@ -30,26 +35,30 @@ var cmdBackup = &Command{ UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333", Short: "incrementally backup a volume to local folder", Long: `Incrementally backup volume data. - + It is expected that you use this inside a script, to loop through all possible volume ids that needs to be backup to local folder. - + The volume id does not need to exist locally or even remotely. This will help to backup future new volumes. - + Usually backing up is just copying the .dat (and .idx) files. But it's tricky to incrementally copy the differences. - + The complexity comes when there are multiple addition, deletion and compaction. - This tool will handle them correctly and efficiently, avoiding unnecessary data transporation. + This tool will handle them correctly and efficiently, avoiding unnecessary data transportation. `, } func runBackup(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + if *s.volumeId == -1 { return false } - vid := storage.VolumeId(*s.volumeId) + vid := needle.VolumeId(*s.volumeId) // find volume location, replication, ttl info lookup, err := operation.Lookup(*s.master, vid.String()) @@ -59,12 +68,12 @@ func runBackup(cmd *Command, args []string) bool { } volumeServer := lookup.Locations[0].Url - stats, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid)) + stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid)) if err != nil { fmt.Printf("Error get volume %d status: %v\n", vid, err) return true } - ttl, err := storage.ReadTTL(stats.Ttl) + ttl, err := needle.ReadTTL(stats.Ttl) if err != nil { fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err) return true @@ -81,7 +90,34 @@ func runBackup(cmd *Command, args []string) bool { return true } - if err := v.Synchronize(volumeServer); err != nil { + if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) { + if err = v.Compact(0, 0); err != nil { + fmt.Printf("Compact Volume before synchronizing %v\n", err) + return true + } + if err = v.CommitCompact(); err != nil { + fmt.Printf("Commit Compact before synchronizing %v\n", err) + return true + } + v.SuperBlock.CompactionRevision = uint16(stats.CompactRevision) + v.DataFile().WriteAt(v.SuperBlock.Bytes(), 0) + } + + datSize, _, _ := v.FileStat() + + if datSize > stats.TailOffset { + // remove the old data + v.Destroy() + // recreate an empty volume + v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0) + if err != nil { + fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) + return 
true + } + } + defer v.Close() + + if err := v.IncrementalBackup(volumeServer, grpcDialOption); err != nil { fmt.Printf("Error synchronizing volume %d: %v\n", vid, err) return true } diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 60fd88ccd..dd0fdb88e 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -15,6 +15,9 @@ import ( "sync" "time" + "github.com/spf13/viper" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" @@ -33,15 +36,17 @@ type BenchmarkOptions struct { read *bool sequentialRead *bool collection *string + replication *string cpuprofile *string maxCpu *int - secretKey *string + grpcDialOption grpc.DialOption + masterClient *wdclient.MasterClient } var ( - b BenchmarkOptions - sharedBytes []byte - masterClient *wdclient.MasterClient + b BenchmarkOptions + sharedBytes []byte + isSecure bool ) func init() { @@ -57,9 +62,9 @@ func init() { b.read = cmdBenchmark.Flag.Bool("read", true, "enable read") b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file") b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection") + b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - b.secretKey = cmdBenchmark.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") sharedBytes = make([]byte, 1024) } @@ -102,6 +107,10 @@ var ( ) func runBenchmark(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) if *b.maxCpu < 1 { *b.maxCpu = runtime.NumCPU() @@ -116,9 +125,9 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - masterClient = wdclient.NewMasterClient(context.Background(), "benchmark", strings.Split(*b.masters, ",")) - go masterClient.KeepConnectedToMaster() - masterClient.WaitUntilConnected() + b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ",")) + go b.masterClient.KeepConnectedToMaster() + b.masterClient.WaitUntilConnected() if *b.write { benchWrite() @@ -188,7 +197,6 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { defer wait.Done() delayedDeleteChan := make(chan *delayedFile, 100) var waitForDeletions sync.WaitGroup - secret := security.Secret(*b.secretKey) for i := 0; i < 7; i++ { waitForDeletions.Add(1) @@ -198,8 +206,11 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { if df.enterTime.After(time.Now()) { time.Sleep(df.enterTime.Sub(time.Now())) } - if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid, - security.GenJwt(secret, df.fp.Fid)); e == nil { + var jwtAuthorization security.EncodedJwt + if isSecure { + jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), df.fp.Fid) + } + if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil { s.completed++ } else { s.failed++ @@ -219,12 +230,16 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { MimeType: 
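To summarize the new branching added to runBackup above: the local copy is compacted first when its compaction revision is behind the volume server's, destroyed and recreated when its .dat file is already longer than the remote tail offset, and only then incrementally appended to. A small illustrative sketch of that decision order (the types and names below are hypothetical, not the actual weed/command/backup.go code):

package main

import "fmt"

// backupStep is a hypothetical label for each phase runBackup may perform.
type backupStep string

// planBackup sketches the order of operations in the new runBackup:
// compact if the remote volume was compacted since the last run,
// recreate if the local .dat is longer than the remote tail,
// then always finish with an incremental append from the volume server.
func planBackup(localCompactRev, remoteCompactRev uint16, localDatSize, remoteTailOffset uint64) (steps []backupStep) {
	if localCompactRev < remoteCompactRev {
		steps = append(steps, "compact local copy and bump compaction revision")
	}
	if localDatSize > remoteTailOffset {
		steps = append(steps, "destroy local copy and recreate an empty volume")
	}
	steps = append(steps, "incremental backup from the volume server tail")
	return steps
}

func main() {
	fmt.Println(planBackup(3, 5, 1<<30, 1<<20))
}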
"image/bench", // prevent gzip benchmark content } ar := &operation.VolumeAssignRequest{ - Count: 1, - Collection: *b.collection, + Count: 1, + Collection: *b.collection, + Replication: *b.replication, } - if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil { + if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection - if _, err := fp.Upload(0, masterClient.GetMaster(), secret); err == nil { + if !isSecure && assignResult.Auth != "" { + isSecure = true + } + if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} @@ -264,7 +279,7 @@ func readFiles(fileIdLineChan chan string, s *stat) { fmt.Printf("reading file %s\n", fid) } start := time.Now() - url, err := masterClient.LookupFileId(fid) + url, err := b.masterClient.LookupFileId(fid) if err != nil { s.failed++ println("!!!! ", fid, " location not found!!!!!") diff --git a/weed/command/command.go b/weed/command/command.go index 91b9bf3fc..79c00d4cd 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -13,7 +13,6 @@ var Commands = []*Command{ cmdCompact, cmdCopy, cmdFix, - cmdFilerExport, cmdFilerReplicate, cmdServer, cmdMaster, @@ -27,6 +26,7 @@ var Commands = []*Command{ cmdVolume, cmdExport, cmdMount, + cmdWebDav, } type Command struct { diff --git a/weed/command/compact.go b/weed/command/compact.go index 0dd4efe0e..79d50c095 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -3,6 +3,7 @@ package command import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func init() { @@ -35,14 +36,14 @@ func runCompact(cmd *Command, args []string) bool { preallocate := *compactVolumePreallocate * (1 << 20) - vid := storage.VolumeId(*compactVolumeId) + vid := needle.VolumeId(*compactVolumeId) v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate) if err != nil { glog.Fatalf("Load Volume [ERROR] %s\n", err) } if *compactMethod == 0 { - if err = v.Compact(preallocate); err != nil { + if err = v.Compact(preallocate, 0); err != nil { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } else { diff --git a/weed/command/export.go b/weed/command/export.go index 5c7e064ce..7e94ec11c 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -12,10 +12,12 @@ import ( "text/template" "time" + "io" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/types" - "io" ) const ( @@ -66,10 +68,10 @@ var ( localLocation, _ = time.LoadLocation("Local") ) -func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Version, deleted bool) { - key := storage.NewFileIdFromNeedle(vid, n).String() +func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool) { + key := needle.NewFileIdFromNeedle(vid, n).String() size := n.DataSize - if version == storage.Version1 { + if version == needle.Version1 { size = n.Size } fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n", @@ -85,10 +87,10 @@ func printNeedle(vid storage.VolumeId, n *storage.Needle, version 
storage.Versio } type VolumeFileScanner4Export struct { - version storage.Version + version needle.Version counter int needleMap *storage.NeedleMap - vid storage.VolumeId + vid needle.VolumeId } func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock storage.SuperBlock) error { @@ -100,14 +102,14 @@ func (scanner *VolumeFileScanner4Export) ReadNeedleBody() bool { return true } -func (scanner *VolumeFileScanner4Export) VisitNeedle(n *storage.Needle, offset int64) error { +func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset int64) error { needleMap := scanner.needleMap vid := scanner.vid nv, ok := needleMap.Get(n.Id) glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv) - if ok && nv.Size > 0 && int64(nv.Offset)*types.NeedlePaddingSize == offset { + if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset { if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) { glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", n.LastModified, newerThanUnix) @@ -189,7 +191,7 @@ func runExport(cmd *Command, args []string) bool { if *export.collection != "" { fileName = *export.collection + "_" + fileName } - vid := storage.VolumeId(*export.volumeId) + vid := needle.VolumeId(*export.volumeId) indexFile, err := os.OpenFile(path.Join(*export.dir, fileName+".idx"), os.O_RDONLY, 0644) if err != nil { glog.Fatalf("Create Volume Index [ERROR] %s\n", err) @@ -225,8 +227,8 @@ type nameParams struct { Ext string } -func writeFile(vid storage.VolumeId, n *storage.Needle) (err error) { - key := storage.NewFileIdFromNeedle(vid, n).String() +func writeFile(vid needle.VolumeId, n *needle.Needle) (err error) { + key := needle.NewFileIdFromNeedle(vid, n).String() fileNameTemplateBuffer.Reset() if err = fileNameTemplate.Execute(fileNameTemplateBuffer, nameParams{ diff --git a/weed/command/filer.go b/weed/command/filer.go index 0c1950f96..b1ceb46f5 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -6,6 +6,9 @@ import ( "strings" "time" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/server" @@ -21,17 +24,16 @@ type FilerOptions struct { masters *string ip *string port *int - grpcPort *int publicPort *int collection *string defaultReplicaPlacement *string redirectOnRead *bool disableDirListing *bool maxMB *int - secretKey *string dirListingLimit *int dataCenter *string enableNotification *bool + disableHttp *bool // default leveldb directory, used in "weed server" mode defaultLevelDbDirectory *string @@ -43,15 +45,14 @@ func init() { f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection") f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address") f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") - f.grpcPort = cmdFiler.Flag.Int("port.grpc", 0, "filer grpc server listen port, default to http port + 10000") - f.publicPort = cmdFiler.Flag.Int("port.public", 0, "port opened to public") + f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public") f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified") f.redirectOnRead = 
cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") - f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") + f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") } var cmdFiler = &Command{ @@ -70,13 +71,15 @@ var cmdFiler = &Command{ The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. - The example filer.toml configuration file can be generated by "weed scaffold filer" + The example filer.toml configuration file can be generated by "weed scaffold -config=filer" `, } func runFiler(cmd *Command, args []string) bool { + util.LoadConfiguration("security", false) + f.startFiler() return true @@ -91,22 +94,23 @@ func (fo *FilerOptions) startFiler() { publicVolumeMux = http.NewServeMux() } - defaultLevelDbDirectory := "./filerdb" + defaultLevelDbDirectory := "./filerldb2" if fo.defaultLevelDbDirectory != nil { - defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerdb" + defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerldb2" } fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{ - Masters: strings.Split(*f.masters, ","), + Masters: strings.Split(*fo.masters, ","), Collection: *fo.collection, DefaultReplication: *fo.defaultReplicaPlacement, RedirectOnRead: *fo.redirectOnRead, DisableDirListing: *fo.disableDirListing, MaxMB: *fo.maxMB, - SecretKey: *fo.secretKey, DirListingLimit: *fo.dirListingLimit, DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, + DisableHttp: *fo.disableHttp, + Port: *fo.port, }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) @@ -128,7 +132,7 @@ func (fo *FilerOptions) startFiler() { glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port) filerListener, e := util.NewListener( - ":"+strconv.Itoa(*fo.port), + *fo.ip+":"+strconv.Itoa(*fo.port), time.Duration(10)*time.Second, ) if e != nil { @@ -136,15 +140,12 @@ func (fo *FilerOptions) startFiler() { } // starting grpc server - grpcPort := *fo.grpcPort - if grpcPort == 0 { - grpcPort = *fo.port + 10000 - } + grpcPort := *fo.port + 10000 grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer() + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 3638bcb27..19aceb211 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -1,52 +1,56 @@ package command import ( + "context" "fmt" + "io" "io/ioutil" + "net/http" "net/url" "os" "path/filepath" + "strconv" "strings" + "sync" + "time" - "context" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" 
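A recurring assumption in the filer changes above (and in the Java SeaweedFileSystemStore constructor earlier in this patch) is that a component's gRPC port is simply its HTTP port plus 10000, now that the separate -port.grpc flag is removed. A trivial sketch of that convention (the helper name is made up for illustration):

package main

import "fmt"

// grpcPortFor captures the port convention used in this change:
// gRPC listens on the HTTP port shifted up by 10000.
func grpcPortFor(httpPort int) int {
	return httpPort + 10000
}

func main() {
	fmt.Println(grpcPortFor(8888)) // default filer: 8888 -> 18888
}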
"github.com/chrislusf/seaweedfs/weed/util" - "io" - "net/http" - "strconv" - "time" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/spf13/viper" + "google.golang.org/grpc" ) var ( - copy CopyOptions + copy CopyOptions + waitGroup sync.WaitGroup ) type CopyOptions struct { - filerGrpcPort *int - master *string - include *string - replication *string - collection *string - ttl *string - maxMB *int - secretKey *string - - secret security.Secret + include *string + replication *string + collection *string + ttl *string + maxMB *int + masterClient *wdclient.MasterClient + concurrency *int + compressionLevel *int + grpcDialOption grpc.DialOption + masters []string } func init() { cmdCopy.Run = runCopy // break init cycle cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information") - copy.master = cmdCopy.Flag.String("master", "localhost:9333", "SeaweedFS master location") copy.include = cmdCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir") copy.replication = cmdCopy.Flag.String("replication", "", "replication type") copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name") copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - copy.maxMB = cmdCopy.Flag.Int("maxMB", 0, "split files larger than the limit") - copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000") - copy.secretKey = cmdCopy.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") + copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit") + copy.concurrency = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines") + copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9") } var cmdCopy = &Command{ @@ -66,7 +70,9 @@ var cmdCopy = &Command{ } func runCopy(cmd *Command, args []string) bool { - copy.secret = security.Secret(*copy.secretKey) + + util.LoadConfiguration("security", false) + if len(args) <= 1 { return false } @@ -96,67 +102,170 @@ func runCopy(cmd *Command, args []string) bool { } filerGrpcPort := filerPort + 10000 - if *copy.filerGrpcPort != 0 { - filerGrpcPort = uint64(*copy.filerGrpcPort) - } - filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) + copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") - for _, fileOrDir := range fileOrDirs { - if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, urlPath) { - return false - } + ctx := context.Background() + + masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress) + if err != nil { + fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err) + return false } + if *copy.collection == "" { + *copy.collection = collection + } + if *copy.replication == "" { + *copy.replication = replication + } + if *copy.maxMB == 0 { + *copy.maxMB = int(maxMB) + } + copy.masters = masters + + copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters) + go copy.masterClient.KeepConnectedToMaster() + copy.masterClient.WaitUntilConnected() + + if *cmdCopy.IsDebug { + util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") + } + + fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrency) + + go func() { + defer close(fileCopyTaskChan) + for _, fileOrDir := range fileOrDirs { + if err := genFileCopyTask(fileOrDir, urlPath, 
fileCopyTaskChan); err != nil { + fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err) + break + } + } + }() + for i := 0; i < *copy.concurrency; i++ { + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + worker := FileCopyWorker{ + options: ©, + filerHost: filerUrl.Host, + filerGrpcAddress: filerGrpcAddress, + } + if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil { + fmt.Fprintf(os.Stderr, "copy file error: %v\n", err) + return + } + }() + } + waitGroup.Wait() + return true } -func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path string) bool { - f, err := os.Open(fileOrDir) - if err != nil { - fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err) - return false - } - defer f.Close() +func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { + err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb + return nil + }) + return +} - fi, err := f.Stat() +func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan FileCopyTask) error { + + fi, err := os.Stat(fileOrDir) if err != nil { - fmt.Printf("Failed to get stat for file %s: %v\n", fileOrDir, err) - return false + fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err) + return nil } mode := fi.Mode() if mode.IsDir() { files, _ := ioutil.ReadDir(fileOrDir) for _, subFileOrDir := range files { - if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, path+fi.Name()+"/") { - return false + if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil { + return err } } - return true + return nil } + uid, gid := util.GetFileUidGid(fi) + + fileCopyTaskChan <- FileCopyTask{ + sourceLocation: fileOrDir, + destinationUrlPath: destPath, + fileSize: fi.Size(), + fileMode: fi.Mode(), + uid: uid, + gid: gid, + } + + return nil +} + +type FileCopyWorker struct { + options *CopyOptions + filerHost string + filerGrpcAddress string +} + +func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error { + for task := range fileCopyTaskChan { + if err := worker.doEachCopy(ctx, task); err != nil { + return err + } + } + return nil +} + +type FileCopyTask struct { + sourceLocation string + destinationUrlPath string + fileSize int64 + fileMode os.FileMode + uid uint32 + gid uint32 +} + +func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error { + + f, err := os.Open(task.sourceLocation) + if err != nil { + fmt.Printf("Failed to open file %s: %v\n", task.sourceLocation, err) + if _, ok := err.(*os.PathError); ok { + fmt.Printf("skipping %s\n", task.sourceLocation) + return nil + } + return err + } + defer f.Close() + // this is a regular file - if *copy.include != "" { - if ok, _ := filepath.Match(*copy.include, filepath.Base(fileOrDir)); !ok { - return true + if *worker.options.include != "" { + if ok, _ := filepath.Match(*worker.options.include, filepath.Base(task.sourceLocation)); !ok { + return nil } } // find the chunk count - chunkSize := 
int64(*copy.maxMB * 1024 * 1024) + chunkSize := int64(*worker.options.maxMB * 1024 * 1024) chunkCount := 1 - if chunkSize > 0 && fi.Size() > chunkSize { - chunkCount = int(fi.Size()/chunkSize) + 1 + if chunkSize > 0 && task.fileSize > chunkSize { + chunkCount = int(task.fileSize/chunkSize) + 1 } if chunkCount == 1 { - return uploadFileAsOne(filerAddress, filerGrpcAddress, path, f, fi) + return worker.uploadFileAsOne(ctx, task, f) } - return uploadFileInChunks(filerAddress, filerGrpcAddress, path, f, fi, chunkCount, chunkSize) + return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize) } -func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo) bool { +func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error { // upload the file content fileName := filepath.Base(f.Name()) @@ -164,29 +273,27 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f var chunks []*filer_pb.FileChunk - if fi.Size() > 0 { + if task.fileSize > 0 { // assign a volume - assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, - Replication: *copy.replication, - Collection: *copy.collection, - Ttl: *copy.ttl, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + Ttl: *worker.options.ttl, }) if err != nil { - fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) + fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) } targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid - uploadResult, err := operation.Upload(targetUrl, fileName, f, false, mimeType, nil, "") + uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel) if err != nil { - fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) - return false + return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } if uploadResult.Error != "" { - fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) - return false + return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) } fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) @@ -198,43 +305,42 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f ETag: uploadResult.ETag, }) - fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) + fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) } - if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ - Directory: urlFolder, + Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ Name: fileName, Attributes: &filer_pb.FuseAttributes{ Crtime: time.Now().Unix(), Mtime: time.Now().Unix(), - Gid: uint32(os.Getgid()), - Uid: uint32(os.Getuid()), - FileSize: uint64(fi.Size()), - FileMode: uint32(fi.Mode()), + Gid: task.gid, + Uid: task.uid, + FileSize: uint64(task.fileSize), + FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *copy.replication, - Collection: 
*copy.collection, - TtlSec: int32(util.ParseInt(*copy.ttl, 0)), + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), }, Chunks: chunks, }, } - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil }); err != nil { - fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) - return false + return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err) } - return true + return nil } -func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { +func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error { fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) @@ -244,14 +350,14 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, for i := int64(0); i < int64(chunkCount); i++ { // assign a volume - assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, - Replication: *copy.replication, - Collection: *copy.collection, - Ttl: *copy.ttl, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + Ttl: *worker.options.ttl, }) if err != nil { - fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) + fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) } targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid @@ -259,14 +365,12 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(f, chunkSize), - false, "application/octet-stream", nil, "") + false, "application/octet-stream", nil, assignResult.Auth) if err != nil { - fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) - return false + return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } if uploadResult.Error != "" { - fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) - return false + return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) } chunks = append(chunks, &filer_pb.FileChunk{ FileId: assignResult.Fid, @@ -278,39 +382,38 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) } - if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ - Directory: urlFolder, + Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ Name: fileName, Attributes: &filer_pb.FuseAttributes{ Crtime: time.Now().Unix(), Mtime: time.Now().Unix(), - Gid: uint32(os.Getgid()), - Uid: uint32(os.Getuid()), - FileSize: uint64(fi.Size()), - FileMode: uint32(fi.Mode()), + Gid: task.gid, + 
Uid: task.uid, + FileSize: uint64(task.fileSize), + FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *copy.replication, - Collection: *copy.collection, - TtlSec: int32(util.ParseInt(*copy.ttl, 0)), + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), }, Chunks: chunks, }, } - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil }); err != nil { - fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) - return false + return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err) } - fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) + fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) - return true + return nil } func detectMimeType(f *os.File) string { @@ -329,15 +432,11 @@ func detectMimeType(f *os.File) string { return mimeType } -func withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error { +func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(filerAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", filerAddress, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(clientConn) + return fn(client) + }, filerAddress, grpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) } diff --git a/weed/command/filer_export.go b/weed/command/filer_export.go deleted file mode 100644 index 7a2e7920a..000000000 --- a/weed/command/filer_export.go +++ /dev/null @@ -1,187 +0,0 @@ -package command - -import ( - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/server" - "github.com/spf13/viper" -) - -func init() { - cmdFilerExport.Run = runFilerExport // break init cycle -} - -var cmdFilerExport = &Command{ - UsageLine: "filer.export -sourceStore=mysql -targetStore=cassandra", - Short: "export meta data in filer store", - Long: `Iterate the file tree and export all metadata out - - Both source and target store: - * should be a store name already specified in filer.toml - * do not need to be enabled state - - If target store is empty, only the directory tree will be listed. - - If target store is "notification", the list of entries will be sent to notification. - This is usually used to bootstrap filer replication to a remote system. - - `, -} - -var ( - // filerExportOutputFile = cmdFilerExport.Flag.String("output", "", "the output file. 
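The filer.copy rewrite above replaces the recursive, one-file-at-a-time walk with a producer/worker structure: genFileCopyTask walks the tree and feeds tasks into a bounded channel, and -c FileCopyWorker goroutines drain it concurrently. A stripped-down sketch of that structure on its own (the names here are illustrative, not the actual weed/command/filer_copy.go types):

package main

import (
	"fmt"
	"sync"
)

type copyTask struct {
	sourcePath string
}

func main() {
	concurrency := 8
	tasks := make(chan copyTask, concurrency)

	// producer: walks the source and closes the channel when done
	go func() {
		defer close(tasks)
		for i := 0; i < 100; i++ {
			tasks <- copyTask{sourcePath: fmt.Sprintf("file%04d", i)}
		}
	}()

	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			for task := range tasks {
				// the real worker uploads the chunk(s) and creates the filer entry here
				fmt.Printf("worker %d copied %s\n", worker, task.sourcePath)
			}
		}(i)
	}
	wg.Wait()
}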
If empty, only list out the directory tree") - filerExportSourceStore = cmdFilerExport.Flag.String("sourceStore", "", "the source store name in filer.toml, default to currently enabled store") - filerExportTargetStore = cmdFilerExport.Flag.String("targetStore", "", "the target store name in filer.toml, or \"notification\" to export all files to message queue") - dir = cmdFilerExport.Flag.String("dir", "/", "only process files under this directory") - dirListLimit = cmdFilerExport.Flag.Int("dirListLimit", 100000, "limit directory list size") - dryRun = cmdFilerExport.Flag.Bool("dryRun", false, "not actually moving data") - verboseFilerExport = cmdFilerExport.Flag.Bool("v", false, "verbose entry details") -) - -type statistics struct { - directoryCount int - fileCount int -} - -func runFilerExport(cmd *Command, args []string) bool { - - weed_server.LoadConfiguration("filer", true) - config := viper.GetViper() - - var sourceStore, targetStore filer2.FilerStore - - for _, store := range filer2.Stores { - if store.GetName() == *filerExportSourceStore || *filerExportSourceStore == "" && config.GetBool(store.GetName()+".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize source store for %s: %+v", - store.GetName(), err) - } else { - sourceStore = store - } - break - } - } - - for _, store := range filer2.Stores { - if store.GetName() == *filerExportTargetStore { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize target store for %s: %+v", - store.GetName(), err) - } else { - targetStore = store - } - break - } - } - - if sourceStore == nil { - glog.Errorf("Failed to find source store %s", *filerExportSourceStore) - println("existing data sources are:") - for _, store := range filer2.Stores { - println(" " + store.GetName()) - } - return false - } - - if targetStore == nil && *filerExportTargetStore != "" && *filerExportTargetStore != "notification" { - glog.Errorf("Failed to find target store %s", *filerExportTargetStore) - println("existing data sources are:") - for _, store := range filer2.Stores { - println(" " + store.GetName()) - } - return false - } - - stat := statistics{} - - var fn func(level int, entry *filer2.Entry) error - - if *filerExportTargetStore == "notification" { - weed_server.LoadConfiguration("notification", false) - v := viper.GetViper() - notification.LoadConfiguration(v.Sub("notification")) - - fn = func(level int, entry *filer2.Entry) error { - printout(level, entry) - if *dryRun { - return nil - } - return notification.Queue.SendMessage( - string(entry.FullPath), - &filer_pb.EventNotification{ - NewEntry: entry.ToProtoEntry(), - }, - ) - } - } else if targetStore == nil { - fn = printout - } else { - fn = func(level int, entry *filer2.Entry) error { - printout(level, entry) - if *dryRun { - return nil - } - return targetStore.InsertEntry(entry) - } - } - - doTraverse(&stat, sourceStore, filer2.FullPath(*dir), 0, fn) - - glog.Infof("processed %d directories, %d files", stat.directoryCount, stat.fileCount) - - return true -} - -func doTraverse(stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) { - - limit := *dirListLimit - lastEntryName := "" - for { - entries, err := filerStore.ListDirectoryEntries(parentPath, lastEntryName, false, limit) - if err != nil { - break - } - for _, entry := range entries { - if fnErr := 
fn(level, entry); fnErr != nil { - glog.Errorf("failed to process entry: %s", entry.FullPath) - } - if entry.IsDirectory() { - stat.directoryCount++ - doTraverse(stat, filerStore, entry.FullPath, level+1, fn) - } else { - stat.fileCount++ - } - } - if len(entries) < limit { - break - } - } -} - -func printout(level int, entry *filer2.Entry) error { - for i := 0; i < level; i++ { - if i == level-1 { - print("+-") - } else { - print("| ") - } - } - print(entry.FullPath.Name()) - if *verboseFilerExport { - for _, chunk := range entry.Chunks { - print("[") - print(chunk.FileId) - print(",") - print(chunk.Offset) - print(",") - print(chunk.Size) - print(")") - } - } - println() - return nil -} diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index 3384e4023..c6e7f5dba 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -1,6 +1,7 @@ package command import ( + "context" "strings" "github.com/chrislusf/seaweedfs/weed/glog" @@ -12,7 +13,7 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink" "github.com/chrislusf/seaweedfs/weed/replication/sub" - "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/spf13/viper" ) @@ -28,15 +29,16 @@ var cmdFilerReplicate = &Command{ filer.replicate listens on filer notifications. If any file is updated, it will fetch the updated content, and write to the other destination. - Run "weed scaffold -config replication" to generate a replication.toml file and customize the parameters. + Run "weed scaffold -config=replication" to generate a replication.toml file and customize the parameters. `, } func runFilerReplicate(cmd *Command, args []string) bool { - weed_server.LoadConfiguration("replication", true) - weed_server.LoadConfiguration("notification", true) + util.LoadConfiguration("security", false) + util.LoadConfiguration("replication", true) + util.LoadConfiguration("notification", true) config := viper.GetViper() var notificationInput sub.NotificationInput @@ -115,7 +117,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { } else { glog.V(1).Infof("modify: %s", key) } - if err = replicator.Replicate(key, m); err != nil { + if err = replicator.Replicate(context.Background(), key, m); err != nil { glog.Errorf("replicate %s: %+v", key, err) } else { glog.V(1).Infof("replicated %s", key) diff --git a/weed/command/fix.go b/weed/command/fix.go index a800978c6..bf33490cc 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -29,7 +30,7 @@ var ( ) type VolumeFileScanner4Fix struct { - version storage.Version + version needle.Version nm *storage.NeedleMap } @@ -42,14 +43,14 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { return false } -func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *storage.Needle, offset int64) error { +func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64) error { glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) - if n.Size > 0 { - pe := scanner.nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size) + if n.Size > 0 && n.Size != types.TombstoneFileSize { + pe := 
scanner.nm.Put(n.Id, types.ToOffset(offset), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { glog.V(2).Infof("skipping deleted file ...") - return scanner.nm.Delete(n.Id, types.Offset(offset/types.NeedlePaddingSize)) + return scanner.nm.Delete(n.Id, types.ToOffset(offset)) } return nil } @@ -74,7 +75,7 @@ func runFix(cmd *Command, args []string) bool { nm := storage.NewBtreeNeedleMap(indexFile) defer nm.Close() - vid := storage.VolumeId(*fixVolumeId) + vid := needle.VolumeId(*fixVolumeId) scanner := &VolumeFileScanner4Fix{ nm: nm, } diff --git a/weed/command/master.go b/weed/command/master.go index bd2267b9e..9e9308468 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -6,76 +6,98 @@ import ( "runtime" "strconv" "strings" - "time" + "github.com/chrislusf/raft/protobuf" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" + "github.com/spf13/viper" "google.golang.org/grpc/reflection" ) +var ( + m MasterOptions +) + +type MasterOptions struct { + port *int + ip *string + ipBind *string + metaFolder *string + peers *string + volumeSizeLimitMB *uint + volumePreallocate *bool + pulseSeconds *int + defaultReplication *string + garbageThreshold *float64 + whiteList *string + disableHttp *bool + metricsAddress *string + metricsIntervalSec *int +} + func init() { cmdMaster.Run = runMaster // break init cycle + m.port = cmdMaster.Flag.Int("port", 9333, "http listen port") + m.ip = cmdMaster.Flag.String("ip", "localhost", "master | address") + m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") + m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094") + m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") + m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") + m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") + m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") + m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") + m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") + m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address") + m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") } var cmdMaster = &Command{ UsageLine: "master -port=9333", Short: "start a master server", - Long: `start a master server to provide volume=>location mapping service - and sequence number of file ids + Long: `start a master server to provide volume=>location mapping service and sequence number of file ids + + The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. 
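The diff wires this lookup up through util.LoadConfiguration("security", false) at the top of runMaster, but that helper's body is not part of this diff. Purely as an illustration of the search order described above, and assuming viper is used for it, such a loader could look like the sketch below; the function name, logging, and error handling are assumptions, not the actual SeaweedFS implementation.

```
package util

import (
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/spf13/viper"
)

// loadConfigurationSketch is a hypothetical stand-in for util.LoadConfiguration;
// it only illustrates the search order described in the help text above.
func loadConfigurationSketch(name string, required bool) {
	viper.SetConfigName(name)               // e.g. "security" -> security.toml
	viper.AddConfigPath(".")                // current working directory first
	viper.AddConfigPath("$HOME/.seaweedfs") // then the per-user folder
	viper.AddConfigPath("/etc/seaweedfs/")  // finally the system-wide folder
	if err := viper.ReadInConfig(); err != nil {
		if required {
			glog.Fatalf("failed to load %s.toml: %v", name, err)
		}
		glog.V(0).Infof("no %s.toml found, continuing with defaults", name)
	}
}
```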
+ + The example security.toml configuration file can be generated by "weed scaffold -config=security" `, } var ( - mport = cmdMaster.Flag.Int("port", 9333, "http listen port") - mGrpcPort = cmdMaster.Flag.Int("port.grpc", 0, "grpc server listen port, default to http port + 10000") - masterIp = cmdMaster.Flag.String("ip", "localhost", "master | address") - masterBindIp = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") - metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") - masterPeers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094") - volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") - volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") - mpulse = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") - defaultReplicaPlacement = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") - // mTimeout = cmdMaster.Flag.Int("idleTimeout", 30, "connection idle seconds") - mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") - masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") - masterSecureKey = cmdMaster.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") - masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") - masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file") + masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") + masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file") masterWhiteList []string ) func runMaster(cmd *Command, args []string) bool { - if *mMaxCpu < 1 { - *mMaxCpu = runtime.NumCPU() - } - runtime.GOMAXPROCS(*mMaxCpu) + + util.LoadConfiguration("security", false) + util.LoadConfiguration("master", false) + + runtime.GOMAXPROCS(runtime.NumCPU()) util.SetupProfiling(*masterCpuProfile, *masterMemProfile) - if err := util.TestFolderWritable(*metaFolder); err != nil { - glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *metaFolder, err) + if err := util.TestFolderWritable(*m.metaFolder); err != nil { + glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err) } - if *masterWhiteListOption != "" { - masterWhiteList = strings.Split(*masterWhiteListOption, ",") + if *m.whiteList != "" { + masterWhiteList = strings.Split(*m.whiteList, ",") } - if *volumeSizeLimitMB > 30*1000 { + if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { glog.Fatalf("volumeSizeLimitMB should be smaller than 30000") } r := mux.NewRouter() - ms := weed_server.NewMasterServer(r, *mport, *metaFolder, - *volumeSizeLimitMB, *volumePreallocate, - *mpulse, *defaultReplicaPlacement, *garbageThreshold, - masterWhiteList, *masterSecureKey, - ) + ms := weed_server.NewMasterServer(r, m.toMasterOption(masterWhiteList)) - listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport) + listeningAddress := *m.ipBind + ":" + strconv.Itoa(*m.port) glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", listeningAddress) @@ -85,28 +107,29 @@ func runMaster(cmd *Command, args 
[]string) bool { } go func() { - time.Sleep(100 * time.Millisecond) - myMasterAddress, peers := checkPeers(*masterIp, *mport, *masterPeers) - raftServer := weed_server.NewRaftServer(r, peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) - ms.SetRaftServer(raftServer) - }() - - go func() { - // starting grpc server - grpcPort := *mGrpcPort - if grpcPort == 0 { - grpcPort = *mport + 10000 + // start raftServer + myMasterAddress, peers := checkPeers(*m.ip, *m.port, *m.peers) + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), + peers, myMasterAddress, *m.metaFolder, ms.Topo, *m.pulseSeconds) + if raftServer == nil { + glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *m.metaFolder) } - grpcL, err := util.NewListener(*masterBindIp+":"+strconv.Itoa(grpcPort), 0) + ms.SetRaftServer(raftServer) + r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") + + // starting grpc server + grpcPort := *m.port + 10000 + grpcL, err := util.NewListener(*m.ipBind+":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } // Create your protocol servers. - grpcS := util.NewGrpcServer() + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) master_pb.RegisterSeaweedServer(grpcS, ms) + protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterBindIp, grpcPort) + glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *m.ipBind, grpcPort) grpcS.Serve(grpcL) }() @@ -142,3 +165,19 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st } return } + +func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption { + return &weed_server.MasterOption{ + Port: *m.port, + MetaFolder: *m.metaFolder, + VolumeSizeLimitMB: *m.volumeSizeLimitMB, + VolumePreallocate: *m.volumePreallocate, + PulseSeconds: *m.pulseSeconds, + DefaultReplicaPlacement: *m.defaultReplication, + GarbageThreshold: *m.garbageThreshold, + WhiteList: whiteList, + DisableHttp: *m.disableHttp, + MetricsAddress: *m.metricsAddress, + MetricsIntervalSec: *m.metricsIntervalSec, + } +} diff --git a/weed/command/mount.go b/weed/command/mount.go index e61f16783..ec790c999 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -8,7 +8,6 @@ import ( type MountOptions struct { filer *string - filerGrpcPort *int filerMountRootPath *string dir *string dirListingLimit *int @@ -17,6 +16,7 @@ type MountOptions struct { ttlSec *int chunkSizeLimitMB *int dataCenter *string + allowOthers *bool } var ( @@ -28,7 +28,6 @@ var ( func init() { cmdMount.Run = runMount // break init cycle mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location") - mountOptions.filerGrpcPort = cmdMount.Flag.Int("filer.grpc.port", 0, "filer grpc server listen port, default to http port + 10000") mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server") mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory") mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size") @@ -37,6 +36,7 @@ func init() { mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, 
"local write buffer size, also chunk large files") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") + mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") } @@ -59,7 +59,7 @@ var cmdMount = &Command{ `, } -func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress string, err error) { +func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { hostnameAndPort := strings.Split(filer, ":") if len(hostnameAndPort) != 2 { return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort) @@ -71,9 +71,6 @@ func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress } filerGrpcPort := int(filerPort) + 10000 - if optionalGrpcPort != 0 { - filerGrpcPort = optionalGrpcPort - } return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil } diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 2937b9ef1..1d1214266 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -6,11 +6,16 @@ import ( "fmt" "os" "os/user" + "path" "runtime" "strconv" "strings" "time" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/jacobsa/daemonize" + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" @@ -19,40 +24,67 @@ import ( ) func runMount(cmd *Command, args []string) bool { + + util.SetupProfiling(*mountCpuProfile, *mountMemProfile) + + return RunMount( + *mountOptions.filer, + *mountOptions.filerMountRootPath, + *mountOptions.dir, + *mountOptions.collection, + *mountOptions.replication, + *mountOptions.dataCenter, + *mountOptions.chunkSizeLimitMB, + *mountOptions.allowOthers, + *mountOptions.ttlSec, + *mountOptions.dirListingLimit, + ) +} + +func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int, + allowOthers bool, ttlSec int, dirListingLimit int) bool { + + util.LoadConfiguration("security", false) + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) - if *mountOptions.dir == "" { + if dir == "" { fmt.Printf("Please specify the mount directory via \"-dir\"") return false } - if *mountOptions.chunkSizeLimitMB <= 0 { + if chunkSizeLimitMB <= 0 { fmt.Printf("Please specify a reasonable buffer size.") return false } - fuse.Unmount(*mountOptions.dir) + fuse.Unmount(dir) + + uid, gid := uint32(0), uint32(0) // detect mount folder mode mountMode := os.ModeDir | 0755 - if fileInfo, err := os.Stat(*mountOptions.dir); err == nil { + fileInfo, err := os.Stat(dir) + if err == nil { mountMode = os.ModeDir | fileInfo.Mode() + uid, gid = util.GetFileUidGid(fileInfo) + fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode()) } - // detect current user - uid, gid := uint32(0), uint32(0) - if u, err := user.Current(); err == nil { - if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil { - uid = uint32(parsedId) - } - if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil { - gid = uint32(parsedId) + if uid == 0 { + if u, err := user.Current(); err == nil { + if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil { + uid = uint32(parsedId) + } + if 
parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil { + gid = uint32(parsedId) + } + fmt.Printf("current uid=%d gid=%d\n", uid, gid) } } - util.SetupProfiling(*mountCpuProfile, *mountMemProfile) + mountName := path.Base(dir) - c, err := fuse.Mount( - *mountOptions.dir, - fuse.VolumeName("SeaweedFS"), + options := []fuse.MountOption{ + fuse.VolumeName(mountName), fuse.FSName("SeaweedFS"), fuse.Subtype("SeaweedFS"), fuse.NoAppleDouble(), @@ -61,56 +93,69 @@ func runMount(cmd *Command, args []string) bool { fuse.AutoXattr(), fuse.ExclCreate(), fuse.DaemonTimeout("3600"), - fuse.AllowOther(), fuse.AllowSUID(), fuse.DefaultPermissions(), - fuse.MaxReadahead(1024*128), + fuse.MaxReadahead(1024 * 128), fuse.AsyncRead(), fuse.WritebackCache(), - ) + fuse.AllowNonEmptyMount(), + } + if allowOthers { + options = append(options, fuse.AllowOther()) + } + + c, err := fuse.Mount(dir, options...) if err != nil { glog.Fatal(err) + daemonize.SignalOutcome(err) return false } util.OnInterrupt(func() { - fuse.Unmount(*mountOptions.dir) + fuse.Unmount(dir) c.Close() }) - filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer, *mountOptions.filerGrpcPort) + filerGrpcAddress, err := parseFilerGrpcAddress(filer) if err != nil { glog.Fatal(err) + daemonize.SignalOutcome(err) return false } - mountRoot := *mountOptions.filerMountRootPath + mountRoot := filerMountRootPath if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") { mountRoot = mountRoot[0 : len(mountRoot)-1] } + daemonize.SignalOutcome(nil) + err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), FilerMountRootPath: mountRoot, - Collection: *mountOptions.collection, - Replication: *mountOptions.replication, - TtlSec: int32(*mountOptions.ttlSec), - ChunkSizeLimit: int64(*mountOptions.chunkSizeLimitMB) * 1024 * 1024, - DataCenter: *mountOptions.dataCenter, - DirListingLimit: *mountOptions.dirListingLimit, + Collection: collection, + Replication: replication, + TtlSec: int32(ttlSec), + ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, + DataCenter: dataCenter, + DirListingLimit: dirListingLimit, EntryCacheTtl: 3 * time.Second, MountUid: uid, MountGid: gid, MountMode: mountMode, + MountCtime: fileInfo.ModTime(), + MountMtime: time.Now(), })) if err != nil { - fuse.Unmount(*mountOptions.dir) + fuse.Unmount(dir) } // check if the mount process has an error to report <-c.Ready if err := c.MountError; err != nil { glog.Fatal(err) + daemonize.SignalOutcome(err) } return true diff --git a/weed/command/s3.go b/weed/command/s3.go index 16a9490ff..e004bb066 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -4,7 +4,11 @@ import ( "net/http" "time" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/s3api" "github.com/chrislusf/seaweedfs/weed/util" @@ -12,12 +16,11 @@ import ( ) var ( - s3options S3Options + s3StandaloneOptions S3Options ) type S3Options struct { filer *string - filerGrpcPort *int filerBucketsPath *string port *int domainName *string @@ -27,13 +30,12 @@ type S3Options struct { func init() { cmdS3.Run = runS3 // break init cycle - s3options.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") - s3options.filerGrpcPort = cmdS3.Flag.Int("filer.grpcPort", 0, "filer server grpc port, default to filer http port plus 10000") - s3options.filerBucketsPath = 
cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") - s3options.port = cmdS3.Flag.Int("port", 8333, "s3options server http listen port") - s3options.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") - s3options.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") - s3options.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") + s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") + s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") + s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") + s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") + s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") } var cmdS3 = &Command{ @@ -46,7 +48,15 @@ var cmdS3 = &Command{ func runS3(cmd *Command, args []string) bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort) + util.LoadConfiguration("security", false) + + return s3StandaloneOptions.startS3Server() + +} + +func (s3opt *S3Options) startS3Server() bool { + + filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer) if err != nil { glog.Fatal(err) return false @@ -55,10 +65,11 @@ func runS3(cmd *Command, args []string) bool { router := mux.NewRouter().SkipClean(true) _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ - Filer: *s3options.filer, + Filer: *s3opt.filer, FilerGrpcAddress: filerGrpcAddress, - DomainName: *s3options.domainName, - BucketsPath: *s3options.filerBucketsPath, + DomainName: *s3opt.domainName, + BucketsPath: *s3opt.filerBucketsPath, + GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) @@ -66,22 +77,22 @@ func runS3(cmd *Command, args []string) bool { httpS := &http.Server{Handler: router} - listenAddress := fmt.Sprintf(":%d", *s3options.port) + listenAddress := fmt.Sprintf(":%d", *s3opt.port) s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) if err != nil { glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err) } - if *s3options.tlsPrivateKey != "" { - if err = httpS.ServeTLS(s3ApiListener, *s3options.tlsCertificate, *s3options.tlsPrivateKey); err != nil { + if *s3opt.tlsPrivateKey != "" { + glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3opt.port) + if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3options.port) } else { + glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3opt.port) if err = httpS.Serve(s3ApiListener); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } - glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3options.port) } return true diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index ec0723859..062fe0ff8 100644 --- 
a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -10,7 +10,7 @@ func init() { } var cmdScaffold = &Command{ - UsageLine: "scaffold [filer]", + UsageLine: "scaffold -config=[filer|notification|replication|security|master]", Short: "generate basic configuration files", Long: `Generate filer.toml with all possible configurations for you to customize. @@ -19,7 +19,7 @@ var cmdScaffold = &Command{ var ( outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory") - config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication] the configuration file to generate") + config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate") ) func runScaffold(cmd *Command, args []string) bool { @@ -32,6 +32,10 @@ func runScaffold(cmd *Command, args []string) bool { content = NOTIFICATION_TOML_EXAMPLE case "replication": content = REPLICATION_TOML_EXAMPLE + case "security": + content = SECURITY_TOML_EXAMPLE + case "master": + content = MASTER_TOML_EXAMPLE } if content == "" { println("need a valid -config option") @@ -61,6 +65,12 @@ enabled = false [leveldb] # local on disk, mostly for simple single-machine setup, fairly scalable +enabled = false +dir = "." # directory to store level db files + +[leveldb2] +# local on disk, mostly for simple single-machine setup, fairly scalable +# faster than previous leveldb, recommended. enabled = true dir = "." # directory to store level db files @@ -70,12 +80,13 @@ dir = "." # directory to store level db files [mysql] # CREATE TABLE IF NOT EXISTS filemeta ( -# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', -# name VARCHAR(1000) COMMENT 'directory or file name', -# directory VARCHAR(4096) COMMENT 'full path to parent directory', -# meta BLOB, +# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', +# name VARCHAR(1000) COMMENT 'directory or file name', +# directory TEXT COMMENT 'full path to parent directory', +# meta LONGBLOB, # PRIMARY KEY (dirhash, name) # ) DEFAULT CHARSET=utf8; + enabled = false hostname = "localhost" port = 3306 @@ -88,8 +99,8 @@ connection_max_open = 100 [postgres] # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT, -# name VARCHAR(1000), -# directory VARCHAR(4096), +# name VARCHAR(65535), +# directory VARCHAR(65535), # meta bytea, # PRIMARY KEY (dirhash, name) # ); @@ -132,6 +143,7 @@ addresses = [ "localhost:30005", "localhost:30006", ] +password = "" ` @@ -178,6 +190,17 @@ google_application_credentials = "/path/to/x.json" # path to json credential fil project_id = "" # an existing project id topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists +[notification.gocdk_pub_sub] +# The Go Cloud Development Kit (https://gocloud.dev). +# PubSub API (https://godoc.org/gocloud.dev/pubsub). +# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ. +enabled = false +# This URL will Dial the RabbitMQ server at the URL in the environment +# variable RABBIT_SERVER_URL and open the exchange "myexchange". +# The exchange must have already been created by some other means, like +# the RabbitMQ management plugin. 
+topic_url = "rabbit://myexchange" +sub_url = "rabbit://myqueue" ` REPLICATION_TOML_EXAMPLE = ` @@ -239,5 +262,79 @@ b2_master_application_key = "" bucket = "mybucket" # an existing bucket directory = "/" # destination directory +` + + SECURITY_TOML_EXAMPLE = ` +# Put this file to one of the location, with descending priority +# ./security.toml +# $HOME/.seaweedfs/security.toml +# /etc/seaweedfs/security.toml +# this file is read by master, volume server, and filer + +# the jwt signing key is read by master and volume server. +# a jwt defaults to expire after 10 seconds. +[jwt.signing] +key = "" +expires_after_seconds = 10 # seconds + +# jwt for read is only supported with master+volume setup. Filer does not support this mode. +[jwt.signing.read] +key = "" +expires_after_seconds = 10 # seconds + +# all grpc tls authentications are mutual +# the values for the following ca, cert, and key are paths to the PERM files. +# the host name is not checked, so the PERM files can be shared. +[grpc] +ca = "" + +[grpc.volume] +cert = "" +key = "" + +[grpc.master] +cert = "" +key = "" + +[grpc.filer] +cert = "" +key = "" + +# use this for any place needs a grpc client +# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" +[grpc.client] +cert = "" +key = "" + + +# volume server https options +# Note: work in progress! +# this does not work with other clients, e.g., "weed filer|mount" etc, yet. +[https.client] +enabled = true +[https.volume] +cert = "" +key = "" + + +` + + MASTER_TOML_EXAMPLE = ` +# Put this file to one of the location, with descending priority +# ./master.toml +# $HOME/.seaweedfs/master.toml +# /etc/seaweedfs/master.toml +# this file is read by master + +[master.maintenance] +# periodically run these scripts are the same as running them from 'weed shell' +scripts = """ + ec.encode -fullPercent=95 -quietFor=1h + ec.rebuild -force + ec.balance -force + volume.balance -force +""" +sleep_minutes = 17 # sleep minutes between each script execution + ` ) diff --git a/weed/command/server.go b/weed/command/server.go index ba5305a97..f8c1d06fc 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -1,6 +1,7 @@ package command import ( + "fmt" "net/http" "os" "runtime" @@ -10,6 +11,10 @@ import ( "sync" "time" + "github.com/chrislusf/raft/protobuf" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/server" @@ -25,7 +30,9 @@ type ServerOptions struct { var ( serverOptions ServerOptions + masterOptions MasterOptions filerOptions FilerOptions + s3Options S3Options ) func init() { @@ -34,51 +41,52 @@ func init() { var cmdServer = &Command{ UsageLine: "server -port=8080 -dir=/tmp -volume.max=5 -ip=server_name", - Short: "start a server, including volume server, and automatically elect a master server", + Short: "start a master server, a volume server, and optionally a filer and a S3 gateway", Long: `start both a volume server to provide storage spaces and a master server to provide volume=>location mapping service and sequence number of file ids This is provided as a convenient way to start both volume server and master server. - The servers are exactly the same as starting them separately. + The servers acts exactly the same as starting them separately. + So other volume servers can connect to this master server also. - So other volume servers can use this embedded master server also. 
- - Optionally, one filer server can be started. Logically, filer servers should not be in a cluster. - They run with meta data on disk, not shared. So each filer server is different. + Optionally, a filer server can be started. + Also optionally, a S3 gateway can be started. `, } var ( - serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name") - serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") - serverMaxCpu = cmdServer.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") - serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") - serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") - serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") - serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") - serverSecureKey = cmdServer.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") - serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") - masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") - masterGrpcPort = cmdServer.Flag.Int("master.port.grpc", 0, "master grpc server listen port, default to http port + 10000") - masterMetaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") - masterVolumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") - masterVolumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.") - masterDefaultReplicaPlacement = cmdServer.Flag.String("master.defaultReplicaPlacement", "000", "Default replication type if not specified.") - volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...") - pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") - isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") + serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name") + serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") + serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") + serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") + serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") + serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") + volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. 
dir[,dir]...") + volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...") + pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") + isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") serverWhiteList []string ) func init() { serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file") + + masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") + masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") + masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") + masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") + masterOptions.volumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.") + masterOptions.defaultReplication = cmdServer.Flag.String("master.defaultReplication", "000", "Default replication type if not specified.") + masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") + masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address") + masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") + filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection") filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") - filerOptions.grpcPort = cmdServer.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to http port + 10000") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.") filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") @@ -88,15 +96,25 @@ func init() { serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") - serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") + serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") + serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") + s3Options.filerBucketsPath = 
cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets") + s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") + s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") + s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file") + } func runServer(cmd *Command, args []string) bool { - filerOptions.secretKey = serverSecureKey + + util.LoadConfiguration("security", false) + util.LoadConfiguration("master", false) + if *serverOptions.cpuprofile != "" { f, err := os.Create(*serverOptions.cpuprofile) if err != nil { @@ -110,41 +128,53 @@ func runServer(cmd *Command, args []string) bool { *isStartingFiler = true } - master := *serverIp + ":" + strconv.Itoa(*masterPort) - filerOptions.ip = serverIp + if *isStartingS3 { + *isStartingFiler = true + } + + master := *serverIp + ":" + strconv.Itoa(*masterOptions.port) + masterOptions.ip = serverIp + masterOptions.ipBind = serverBindIp + filerOptions.masters = &master + filerOptions.ip = serverBindIp serverOptions.v.ip = serverIp serverOptions.v.bindIp = serverBindIp serverOptions.v.masters = &master serverOptions.v.idleConnectionTimeout = serverTimeout - serverOptions.v.maxCpu = serverMaxCpu serverOptions.v.dataCenter = serverDataCenter serverOptions.v.rack = serverRack + serverOptions.v.pulseSeconds = pulseSeconds + masterOptions.pulseSeconds = pulseSeconds + + masterOptions.whiteList = serverWhiteListOption filerOptions.dataCenter = serverDataCenter + filerOptions.disableHttp = serverDisableHttp + masterOptions.disableHttp = serverDisableHttp + + filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port) + s3Options.filer = &filerAddress if *filerOptions.defaultReplicaPlacement == "" { - *filerOptions.defaultReplicaPlacement = *masterDefaultReplicaPlacement + *filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication } - if *serverMaxCpu < 1 { - *serverMaxCpu = runtime.NumCPU() - } - runtime.GOMAXPROCS(*serverMaxCpu) + runtime.GOMAXPROCS(runtime.NumCPU()) folders := strings.Split(*volumeDataFolders, ",") - if *masterVolumeSizeLimitMB > 30*1000 { + if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000") } - if *masterMetaFolder == "" { - *masterMetaFolder = folders[0] + if *masterOptions.metaFolder == "" { + *masterOptions.metaFolder = folders[0] } - if err := util.TestFolderWritable(*masterMetaFolder); err != nil { - glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterMetaFolder, err) + if err := util.TestFolderWritable(*masterOptions.metaFolder); err != nil { + glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err) } - filerOptions.defaultLevelDbDirectory = masterMetaFolder + filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder if *serverWhiteListOption != "" { serverWhiteList = strings.Split(*serverWhiteListOption, ",") @@ -159,55 +189,55 @@ func runServer(cmd *Command, args []string) bool { }() } - var raftWaitForMaster sync.WaitGroup + if *isStartingS3 { + go func() { + time.Sleep(2 * time.Second) + + s3Options.startS3Server() + + }() + } + var volumeWait sync.WaitGroup - raftWaitForMaster.Add(1) volumeWait.Add(1) go func() { r := mux.NewRouter() - ms := weed_server.NewMasterServer(r, *masterPort, 
*masterMetaFolder, - *masterVolumeSizeLimitMB, *masterVolumePreallocate, - *pulseSeconds, *masterDefaultReplicaPlacement, *serverGarbageThreshold, - serverWhiteList, *serverSecureKey, - ) + ms := weed_server.NewMasterServer(r, masterOptions.toMasterOption(serverWhiteList)) - glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterPort) - masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterPort), 0) + glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterOptions.port) + masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterOptions.port), 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } go func() { + // start raftServer + myMasterAddress, peers := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), + peers, myMasterAddress, *masterOptions.metaFolder, ms.Topo, *masterOptions.pulseSeconds) + ms.SetRaftServer(raftServer) + r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") + // starting grpc server - grpcPort := *masterGrpcPort - if grpcPort == 0 { - grpcPort = *masterPort + 10000 - } - grpcL, err := util.NewListener(*serverIp+":"+strconv.Itoa(grpcPort), 0) + grpcPort := *masterOptions.port + 10000 + grpcL, err := util.NewListener(*serverBindIp+":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } // Create your protocol servers. - grpcS := util.NewGrpcServer() + glog.V(1).Infof("grpc config %+v", viper.Sub("grpc")) + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) master_pb.RegisterSeaweedServer(grpcS, ms) + protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *serverIp, grpcPort) grpcS.Serve(grpcL) }() - go func() { - raftWaitForMaster.Wait() - time.Sleep(100 * time.Millisecond) - myAddress, peers := checkPeers(*serverIp, *masterPort, *serverPeers) - raftServer := weed_server.NewRaftServer(r, peers, myAddress, *masterMetaFolder, ms.Topo, *pulseSeconds) - ms.SetRaftServer(raftServer) - volumeWait.Done() - }() - - raftWaitForMaster.Done() + volumeWait.Done() // start http server httpS := &http.Server{Handler: r} diff --git a/weed/command/shell.go b/weed/command/shell.go index 19c5049c5..79f8b8bf9 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -1,21 +1,25 @@ package command import ( - "bufio" - "fmt" - "os" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/shell" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/spf13/viper" +) - "github.com/chrislusf/seaweedfs/weed/glog" +var ( + shellOptions shell.ShellOptions ) func init() { cmdShell.Run = runShell // break init cycle + shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers") } var cmdShell = &Command{ UsageLine: "shell", - Short: "run interactive commands, now just echo", - Long: `run interactive commands. + Short: "run interactive administrative commands", + Long: `run interactive administrative commands. 
`, } @@ -23,39 +27,16 @@ var cmdShell = &Command{ var () func runShell(command *Command, args []string) bool { - r := bufio.NewReader(os.Stdin) - o := bufio.NewWriter(os.Stdout) - e := bufio.NewWriter(os.Stderr) - prompt := func() { - var err error - if _, err = o.WriteString("> "); err != nil { - glog.V(0).Infoln("error writing to stdout:", err) - } - if err = o.Flush(); err != nil { - glog.V(0).Infoln("error flushing stdout:", err) - } - } - readLine := func() string { - ret, err := r.ReadString('\n') - if err != nil { - fmt.Fprint(e, err) - os.Exit(1) - } - return ret - } - execCmd := func(cmd string) int { - if cmd != "" { - if _, err := o.WriteString(cmd); err != nil { - glog.V(0).Infoln("error writing to stdout:", err) - } - } - return 0 - } - cmd := "" - for { - prompt() - cmd = readLine() - execCmd(cmd) - } + util.LoadConfiguration("security", false) + shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + + shellOptions.FilerHost = "localhost" + shellOptions.FilerPort = 8888 + shellOptions.Directory = "/" + + shell.RunShell(shellOptions) + + return true + } diff --git a/weed/command/upload.go b/weed/command/upload.go index f664c0e3a..25e938d9b 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -6,8 +6,11 @@ import ( "os" "path/filepath" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/spf13/viper" + + "github.com/chrislusf/seaweedfs/weed/operation" ) var ( @@ -23,7 +26,6 @@ type UploadOptions struct { dataCenter *string ttl *string maxMB *int - secretKey *string } func init() { @@ -36,8 +38,7 @@ func init() { upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name") upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit") - upload.secretKey = cmdUpload.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") + upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit") } var cmdUpload = &Command{ @@ -53,14 +54,17 @@ var cmdUpload = &Command{ All files under the folder and subfolders will be uploaded, each with its own file key. Optional parameter "-include" allows you to specify the file name patterns. - If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separatedly. + If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separately. The list of file ids of those chunks would be stored in an additional chunk, and this additional chunk's file id would be returned. 
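For example, with the new default of -maxMB=32, a 100 MB file would be uploaded as four data chunks (32 MB + 32 MB + 32 MB + 4 MB) plus one additional chunk listing those chunk file ids, and the file id of that additional chunk is what gets returned.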
`, } func runUpload(cmd *Command, args []string) bool { - secret := security.Secret(*upload.secretKey) + + util.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + if len(args) == 0 { if *upload.dir == "" { return false @@ -77,9 +81,9 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { return e } - results, e := operation.SubmitFiles(*upload.master, parts, + results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB, secret) + *upload.ttl, *upload.maxMB) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { @@ -96,9 +100,9 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { fmt.Println(e.Error()) } - results, _ := operation.SubmitFiles(*upload.master, parts, + results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB, secret) + *upload.ttl, *upload.maxMB) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) } diff --git a/weed/command/volume.go b/weed/command/volume.go index 27a075b5b..3c1aa2b50 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -9,6 +9,9 @@ import ( "strings" "time" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/server" @@ -32,7 +35,6 @@ type VolumeServerOptions struct { masters *string pulseSeconds *int idleConnectionTimeout *int - maxCpu *int dataCenter *string rack *string whiteList []string @@ -41,6 +43,7 @@ type VolumeServerOptions struct { readRedirect *bool cpuProfile *string memProfile *string + compactionMBPerSecond *int } func init() { @@ -53,14 +56,14 @@ func init() { v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers") v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds") - v.maxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 
0 means all available CPUs") v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name") v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name") - v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") + v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") + v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") } var cmdVolume = &Command{ @@ -78,10 +81,10 @@ var ( ) func runVolume(cmd *Command, args []string) bool { - if *v.maxCpu < 1 { - *v.maxCpu = runtime.NumCPU() - } - runtime.GOMAXPROCS(*v.maxCpu) + + util.LoadConfiguration("security", false) + + runtime.GOMAXPROCS(runtime.NumCPU()) util.SetupProfiling(*v.cpuProfile, *v.memProfile) v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption) @@ -137,10 +140,10 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v switch *v.indexType { case "leveldb": volumeNeedleMapKind = storage.NeedleMapLevelDb - case "boltdb": - volumeNeedleMapKind = storage.NeedleMapBoltDb - case "btree": - volumeNeedleMapKind = storage.NeedleMapBtree + case "leveldbMedium": + volumeNeedleMapKind = storage.NeedleMapLevelDbMedium + case "leveldbLarge": + volumeNeedleMapKind = storage.NeedleMapLevelDbLarge } masters := *v.masters @@ -152,6 +155,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack, v.whiteList, *v.fixJpgOrientation, *v.readRedirect, + *v.compactionMBPerSecond, ) listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) @@ -185,13 +189,20 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer() + grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume")) volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer) reflection.Register(grpcS) go grpcS.Serve(grpcL) - if e := http.Serve(listener, volumeMux); e != nil { - glog.Fatalf("Volume server fail to serve: %v", e) + if viper.GetString("https.volume.key") != "" { + if e := http.ServeTLS(listener, volumeMux, + viper.GetString("https.volume.cert"), viper.GetString("https.volume.key")); e != nil { + glog.Fatalf("Volume server fail to serve: %v", e) + } + } else { + if e := http.Serve(listener, volumeMux); e != nil { + glog.Fatalf("Volume server fail to serve: %v", e) + } } } diff --git a/weed/command/webdav.go b/weed/command/webdav.go new file mode 100644 index 000000000..371c4a9ad --- /dev/null +++ b/weed/command/webdav.go @@ -0,0 +1,109 @@ +package command + +import ( + "fmt" + "net/http" + "os/user" + "strconv" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + 
"github.com/chrislusf/seaweedfs/weed/util" + "github.com/spf13/viper" +) + +var ( + webDavStandaloneOptions WebDavOption +) + +type WebDavOption struct { + filer *string + port *int + collection *string + tlsPrivateKey *string + tlsCertificate *string +} + +func init() { + cmdWebDav.Run = runWebDav // break init cycle + webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address") + webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port") + webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files") + webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file") + webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file") +} + +var cmdWebDav = &Command{ + UsageLine: "webdav -port=7333 -filer=", + Short: " start a webdav server that is backed by a filer", + Long: `start a webdav server that is backed by a filer. + +`, +} + +func runWebDav(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.VERSION, *webDavStandaloneOptions.port) + + return webDavStandaloneOptions.startWebDav() + +} + +func (wo *WebDavOption) startWebDav() bool { + + filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer) + if err != nil { + glog.Fatal(err) + return false + } + + // detect current user + uid, gid := uint32(0), uint32(0) + if u, err := user.Current(); err == nil { + if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil { + uid = uint32(parsedId) + } + if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil { + gid = uint32(parsedId) + } + } + + ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ + Filer: *wo.filer, + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + Collection: *wo.collection, + Uid: uid, + Gid: gid, + }) + if webdavServer_err != nil { + glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) + } + + httpS := &http.Server{Handler: ws.Handler} + + listenAddress := fmt.Sprintf(":%d", *wo.port) + webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) + if err != nil { + glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err) + } + + if *wo.tlsPrivateKey != "" { + glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.VERSION, *wo.port) + if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil { + glog.Fatalf("WebDav Server Fail to serve: %v", err) + } + } else { + glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.VERSION, *wo.port) + if err = httpS.Serve(webDavListener); err != nil { + glog.Fatalf("WebDav Server Fail to serve: %v", err) + } + } + + return true + +} diff --git a/weed/command/weedfuse/README.md b/weed/command/weedfuse/README.md new file mode 100644 index 000000000..1a1496bbb --- /dev/null +++ b/weed/command/weedfuse/README.md @@ -0,0 +1,84 @@ +Mount the SeaweedFS via FUSE + +# Mount by fstab + + +``` +$ # on linux +$ sudo apt-get install fuse +$ sudo echo 'user_allow_other' >> /etc/fuse.conf +$ sudo mv weedfuse /sbin/mount.weedfuse + +$ # on Mac +$ sudo mv weedfuse /sbin/mount_weedfuse + +``` + +On both OS X and Linux, you can add one of the entries to your /etc/fstab file like the following: + +``` +# 
mount the whole SeaweedFS +localhost:8888/ /home/some/mount/folder weedfuse + +# mount the SeaweedFS sub folder +localhost:8888/sub/dir /home/some/mount/folder weedfuse + +# mount the SeaweedFS sub folder with some options +localhost:8888/sub/dir /home/some/mount/folder weedfuse user + +``` + +To verify it can work, try this command +``` +$ sudo mount -av + +... + +/home/some/mount/folder : successfully mounted + +``` + +If you see `successfully mounted`, try to access the mounted folder and verify everything works. + + +To debug, run these: +``` + +$ weedfuse -foreground localhost:8888/ /home/some/mount/folder + +``` + + +To unmount the folder: +``` + +$ sudo umount /home/some/mount/folder + +``` + + diff --git a/weed/command/weedfuse/weedfuse.go b/weed/command/weedfuse/weedfuse.go new file mode 100644 index 000000000..4c0d12874 --- /dev/null +++ b/weed/command/weedfuse/weedfuse.go @@ -0,0 +1,109 @@ +package main + +import ( + "flag" + "fmt" + "os" + "strings" + + "github.com/chrislusf/seaweedfs/weed/command" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/jacobsa/daemonize" + "github.com/kardianos/osext" +) + +var ( + fuseCommand = flag.NewFlagSet("weedfuse", flag.ContinueOnError) + options = fuseCommand.String("o", "", "comma separated options rw,uid=xxx,gid=xxx") + isForeground = fuseCommand.Bool("foreground", false, "starts as a daemon") +) + +func main() { + + err := fuseCommand.Parse(os.Args[1:]) + if err != nil { + glog.Fatal(err) + } + fmt.Printf("options: %v\n", *options) + + // seems this value is always empty, need to parse it differently + optionsString := *options + prev := "" + for i, arg := range os.Args { + fmt.Printf("args[%d]: %v\n", i, arg) + if prev == "-o" { + optionsString = arg + } + prev = arg + } + + device := fuseCommand.Arg(0) + mountPoint := fuseCommand.Arg(1) + + fmt.Printf("source: %v\n", device) + fmt.Printf("target: %v\n", mountPoint) + + nouser := true + for _, option := range strings.Split(optionsString, ",") { + fmt.Printf("option: %v\n", option) + switch option { + case "user": + nouser = false + } + } + + maybeSetupPath() + + if !*isForeground { + startAsDaemon() + return + } + + parts := strings.SplitN(device, "/", 2) + filer, filerPath := parts[0], parts[1] + + command.RunMount( + filer, "/"+filerPath, mountPoint, "", "000", "", + 4, !nouser, 0, 1000000) + +} + +func maybeSetupPath() { + // sudo mount -av may not include PATH in some linux, e.g., Ubuntu + hasPathEnv := false + for _, e := range os.Environ() { + if strings.HasPrefix(e, "PATH=") { + hasPathEnv = true + } + fmt.Println(e) + } + if !hasPathEnv { + os.Setenv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") + } +} + +func startAsDaemon() { + + // adapted from gcsfuse + + // Find the executable. + var path string + path, err := osext.Executable() + if err != nil { + glog.Fatalf("osext.Executable: %v", err) + } + + // Set up arguments. Be sure to use foreground mode. + args := append([]string{"-foreground"}, os.Args[1:]...) + + // Pass along PATH so that the daemon can find fusermount on Linux. 
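+ // Note: only PATH is forwarded below; any other environment variables set for the mount helper are not inherited by the daemonized process.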
+ env := []string{ + fmt.Sprintf("PATH=%s", os.Getenv("PATH")), + } + + err = daemonize.Run(path, args, env, os.Stdout) + if err != nil { + glog.Fatalf("daemonize.Run: %v", err) + } + +} diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 5f2990475..3e8554957 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -1,6 +1,7 @@ package abstract_sql import ( + "context" "database/sql" "fmt" @@ -18,7 +19,44 @@ type AbstractSqlStore struct { SqlListInclusive string } -func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { +type TxOrDB interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { + tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{ + Isolation: sql.LevelReadCommitted, + ReadOnly: false, + }) + if err != nil { + return ctx, err + } + + return context.WithValue(ctx, "tx", tx), nil +} +func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Commit() + } + return nil +} +func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Rollback() + } + return nil +} + +func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx + } + return store.DB +} + +func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -26,7 +64,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta) if err != nil { return fmt.Errorf("insert %s: %s", entry.FullPath, err) } @@ -38,7 +76,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -46,7 +84,7 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir) if err != nil { return fmt.Errorf("update %s: %s", entry.FullPath, err) } @@ -58,10 +96,10 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { return nil } -func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) { +func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() - row := store.DB.QueryRow(store.SqlFind, 
hashToLong(dir), name, dir) + row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir) var data []byte if err := row.Scan(&data); err != nil { return nil, filer2.ErrNotFound @@ -77,11 +115,11 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr return entry, nil } -func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error { +func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { dir, name := fullpath.DirAndName() - res, err := store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir) if err != nil { return fmt.Errorf("delete %s: %s", fullpath, err) } @@ -94,14 +132,14 @@ func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error { return nil } -func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { +func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { sqlText := store.SqlListExclusive if inclusive { sqlText = store.SqlListInclusive } - rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) + rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index 2c1f03182..466be5bf3 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -1,6 +1,7 @@ package cassandra import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -39,7 +40,17 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er return } -func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *CassandraStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *CassandraStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -56,12 +67,12 @@ func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { dir, name := fullpath.DirAndName() var data []byte @@ -74,7 +85,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2. 
} if len(data) == 0 { - return nil, fmt.Errorf("not found: %s", fullpath) + return nil, filer2.ErrNotFound } entry = &filer2.Entry{ @@ -88,7 +99,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2. return entry, nil } -func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { +func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { dir, name := fullpath.DirAndName() @@ -101,7 +112,7 @@ func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { return nil } -func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go index f17a11727..3f8a19114 100644 --- a/weed/filer2/entry.go +++ b/weed/filer2/entry.go @@ -52,9 +52,20 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry { return nil } return &filer_pb.Entry{ - Name: string(entry.FullPath), + Name: entry.FullPath.Name(), IsDirectory: entry.IsDirectory(), Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, } } + +func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { + if entry == nil { + return nil + } + dir, _ := entry.FullPath.DirAndName() + return &filer_pb.FullEntry{ + Dir: dir, + Entry: entry.ToProtoEntry(), + } +} diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go index e50b3fa9a..cf4627b74 100644 --- a/weed/filer2/entry_codec.go +++ b/weed/filer2/entry_codec.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gogo/protobuf/proto" + "github.com/golang/protobuf/proto" ) func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go index 6c3157e6c..b5876df82 100644 --- a/weed/filer2/filechunks.go +++ b/weed/filer2/filechunks.go @@ -40,7 +40,7 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file fileIds[interval.fileId] = true } for _, chunk := range chunks { - if found := fileIds[chunk.FileId]; found { + if _, found := fileIds[chunk.GetFileIdString()]; found { compacted = append(compacted, chunk) } else { garbage = append(garbage, chunk) @@ -50,15 +50,15 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file return } -func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) { +func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) { fileIds := make(map[string]bool) - for _, interval := range newChunks { - fileIds[interval.FileId] = true + for _, interval := range bs { + fileIds[interval.GetFileIdString()] = true } - for _, chunk := range oldChunks { - if found := fileIds[chunk.FileId]; !found { - unused = append(unused, chunk) + for _, chunk := range as { + if _, found := fileIds[chunk.GetFileIdString()]; !found { + delta = append(delta, chunk) } } @@ -123,7 +123,7 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb. 
newV := newVisibleInterval( chunk.Offset, chunk.Offset+int64(chunk.Size), - chunk.FileId, + chunk.GetFileIdString(), chunk.Mtime, true, ) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 1ee2f5ede..cf236b74d 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -3,6 +3,7 @@ package filer2 import ( "context" "fmt" + "google.golang.org/grpc" "math" "os" "path/filepath" @@ -20,17 +21,19 @@ var ( ) type Filer struct { - store FilerStore + store *FilerStoreWrapper directoryCache *ccache.Cache MasterClient *wdclient.MasterClient fileIdDeletionChan chan string + GrpcDialOption grpc.DialOption } -func NewFiler(masters []string) *Filer { +func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { f := &Filer{ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), "filer", masters), + MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), fileIdDeletionChan: make(chan string, 4096), + GrpcDialOption: grpcDialOption, } go f.loopProcessingDeletion() @@ -39,7 +42,7 @@ func NewFiler(masters []string) *Filer { } func (f *Filer) SetStore(store FilerStore) { - f.store = store + f.store = NewFilerStoreWrapper(store) } func (f *Filer) DisableDirectoryCache() { @@ -54,7 +57,19 @@ func (fs *Filer) KeepConnectedToMaster() { fs.MasterClient.KeepConnectedToMaster() } -func (f *Filer) CreateEntry(entry *Entry) error { +func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { + return f.store.BeginTransaction(ctx) +} + +func (f *Filer) CommitTransaction(ctx context.Context) error { + return f.store.CommitTransaction(ctx) +} + +func (f *Filer) RollbackTransaction(ctx context.Context) error { + return f.store.RollbackTransaction(ctx) +} + +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { if string(entry.FullPath) == "/" { return nil @@ -67,7 +82,7 @@ func (f *Filer) CreateEntry(entry *Entry) error { var lastDirectoryEntry *Entry for i := 1; i < len(dirParts); i++ { - dirPath := "/" + filepath.Join(dirParts[:i]...) 
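+ // filepath.ToSlash keeps the directory key in slash form even when filepath.Join produces "\" separators on Windows.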
+ dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...)) // fmt.Printf("%d directory: %+v\n", i, dirPath) // first check local cache @@ -76,7 +91,7 @@ func (f *Filer) CreateEntry(entry *Entry) error { // not found, check the store directly if dirEntry == nil { glog.V(4).Infof("find uncached directory: %s", dirPath) - dirEntry, _ = f.FindEntry(FullPath(dirPath)) + dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) } else { glog.V(4).Infof("found cached directory: %s", dirPath) } @@ -99,9 +114,9 @@ func (f *Filer) CreateEntry(entry *Entry) error { } glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) - mkdirErr := f.store.InsertEntry(dirEntry) + mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { - if _, err := f.FindEntry(FullPath(dirPath)); err == ErrNotFound { + if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { @@ -134,14 +149,16 @@ func (f *Filer) CreateEntry(entry *Entry) error { } */ - oldEntry, _ := f.FindEntry(entry.FullPath) + oldEntry, _ := f.FindEntry(ctx, entry.FullPath) if oldEntry == nil { - if err := f.store.InsertEntry(entry); err != nil { + if err := f.store.InsertEntry(ctx, entry); err != nil { + glog.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { - if err := f.UpdateEntry(oldEntry, entry); err != nil { + if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { + glog.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) } } @@ -153,19 +170,21 @@ func (f *Filer) CreateEntry(entry *Entry) error { return nil } -func (f *Filer) UpdateEntry(oldEntry, entry *Entry) (err error) { +func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) { if oldEntry != nil { if oldEntry.IsDirectory() && !entry.IsDirectory() { + glog.Errorf("existing %s is a directory", entry.FullPath) return fmt.Errorf("existing %s is a directory", entry.FullPath) } if !oldEntry.IsDirectory() && entry.IsDirectory() { + glog.Errorf("existing %s is a file", entry.FullPath) return fmt.Errorf("existing %s is a file", entry.FullPath) } } - return f.store.UpdateEntry(entry) + return f.store.UpdateEntry(ctx, entry) } -func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { +func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) { now := time.Now() @@ -181,11 +200,11 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { }, }, nil } - return f.store.FindEntry(p) + return f.store.FindEntry(ctx, p) } -func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) { - entry, err := f.FindEntry(p) +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) { + entry, err := f.FindEntry(ctx, p) if err != nil { return err } @@ -198,37 +217,41 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDelet lastFileName := "" includeLastFile := false for limit > 0 { - entries, err := f.ListDirectoryEntries(p, lastFileName, includeLastFile, 1024) + entries, err := f.ListDirectoryEntries(ctx, p, lastFileName, includeLastFile, 1024) if err != nil { + glog.Errorf("list folder %s: %v", p, err) return fmt.Errorf("list folder %s: %v", p, err) } + if len(entries) == 0 { break - } else { - if isRecursive { - for _, sub := range entries { - lastFileName = sub.Name() - 
f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks) - limit-- - if limit <= 0 { - break - } + } + + if isRecursive { + for _, sub := range entries { + lastFileName = sub.Name() + err = f.DeleteEntryMetaAndData(ctx, sub.FullPath, isRecursive, shouldDeleteChunks) + if err != nil { + return err } - } else { - if len(entries) > 0 { - return fmt.Errorf("folder %s is not empty", p) + limit-- + if limit <= 0 { + break } } - f.cacheDelDirectory(string(p)) - if len(entries) < 1024 { - break - } + } + + if len(entries) < 1024 { + break } } + + f.cacheDelDirectory(string(p)) + } if shouldDeleteChunks { - f.DeleteChunks(entry.Chunks) + f.DeleteChunks(p, entry.Chunks) } if p == "/" { @@ -238,17 +261,22 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDelet f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) - return f.store.DeleteEntry(p) + return f.store.DeleteEntry(ctx, p) } -func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { +func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { if strings.HasSuffix(string(p), "/") && len(p) > 1 { p = p[0 : len(p)-1] } - return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit) + return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) } func (f *Filer) cacheDelDirectory(dirpath string) { + + if dirpath == "/" { + return + } + if f.directoryCache == nil { return } @@ -257,6 +285,7 @@ func (f *Filer) cacheDelDirectory(dirpath string) { } func (f *Filer) cacheGetDirectory(dirpath string) *Entry { + if f.directoryCache == nil { return nil } diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go new file mode 100644 index 000000000..7e093eea2 --- /dev/null +++ b/weed/filer2/filer_client_util.go @@ -0,0 +1,163 @@ +package filer2 + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func VolumeId(fileId string) string { + lastCommaIndex := strings.LastIndex(fileId, ",") + if lastCommaIndex > 0 { + return fileId[:lastCommaIndex] + } + return fileId +} + +type FilerClient interface { + WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error +} + +func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { + var vids []string + for _, chunkView := range chunkViews { + vids = append(vids, VolumeId(chunkView.FileId)) + } + + vid2Locations := make(map[string]*filer_pb.Locations) + + err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + glog.V(4).Infof("read fh lookup volume id locations: %v", vids) + resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + VolumeIds: vids, + }) + if err != nil { + return err + } + + vid2Locations = resp.LocationsMap + + return nil + }) + + if err != nil { + return 0, fmt.Errorf("failed to lookup volume ids %v: %v", vids, err) + } + + var wg sync.WaitGroup + for _, chunkView := range chunkViews { + wg.Add(1) + go func(chunkView *ChunkView) { + defer wg.Done() + + glog.V(4).Infof("read fh reading chunk: %+v", chunkView) + + locations := vid2Locations[VolumeId(chunkView.FileId)] + if locations == nil || len(locations.Locations) == 0 { + 
glog.V(0).Infof("failed to locate %s", chunkView.FileId) + err = fmt.Errorf("failed to locate %s", chunkView.FileId) + return + } + + var n int64 + n, err = util.ReadUrl( + fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), + chunkView.Offset, + int(chunkView.Size), + buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)], + !chunkView.IsFullChunk) + + if err != nil { + + glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, err) + + err = fmt.Errorf("failed to read http://%s/%s: %v", + locations.Locations[0].Url, chunkView.FileId, err) + return + } + + glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView) + totalRead += n + + }(chunkView) + } + wg.Wait() + return +} + +func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) { + + dir, name := FullPath(fullFilePath).DirAndName() + + err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + } + + glog.V(3).Infof("read %s request: %v", fullFilePath, request) + resp, err := client.LookupDirectoryEntry(ctx, request) + if err != nil { + if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil + } + glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err) + return err + } + + if resp.Entry != nil { + entry = resp.Entry + } + + return nil + }) + + return +} + +func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath string, fn func(entry *filer_pb.Entry)) (err error) { + + err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + paginationLimit := 1024 + + lastEntryName := "" + + for { + + request := &filer_pb.ListEntriesRequest{ + Directory: fullDirPath, + StartFromFileName: lastEntryName, + Limit: uint32(paginationLimit), + } + + glog.V(3).Infof("read directory: %v", request) + resp, err := client.ListEntries(ctx, request) + if err != nil { + return fmt.Errorf("list %s: %v", fullDirPath, err) + } + + for _, entry := range resp.Entries { + fn(entry) + lastEntryName = entry.Name + } + + if len(resp.Entries) < paginationLimit { + break + } + + } + + return nil + + }) + + return +} diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go index 8fe8ae04f..fea93d57f 100644 --- a/weed/filer2/filer_deletion.go +++ b/weed/filer2/filer_deletion.go @@ -38,25 +38,28 @@ func (f *Filer) loopProcessingDeletion() { fileIds = append(fileIds, fid) if len(fileIds) >= 4096 { glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) fileIds = fileIds[:0] } case <-ticker.C: if len(fileIds) > 0 { glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) fileIds = fileIds[:0] } } } } -func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { +func (f *Filer) DeleteChunks(fullpath FullPath, chunks []*filer_pb.FileChunk) { for _, chunk := range chunks { - f.fileIdDeletionChan <- chunk.FileId + glog.V(3).Infof("deleting %s chunk %s", fullpath, chunk.String()) + f.fileIdDeletionChan <- chunk.GetFileIdString() } } +// DeleteFileByFileId direct delete by file id. 
+// Only used when the fileId is not being managed by snapshots. func (f *Filer) DeleteFileByFileId(fileId string) { f.fileIdDeletionChan <- fileId } @@ -67,22 +70,19 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { return } if newEntry == nil { - f.DeleteChunks(oldEntry.Chunks) + f.DeleteChunks(oldEntry.FullPath, oldEntry.Chunks) } var toDelete []*filer_pb.FileChunk + newChunkIds := make(map[string]bool) + for _, newChunk := range newEntry.Chunks { + newChunkIds[newChunk.GetFileIdString()] = true + } for _, oldChunk := range oldEntry.Chunks { - found := false - for _, newChunk := range newEntry.Chunks { - if oldChunk.FileId == newChunk.FileId { - found = true - break - } - } - if !found { + if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found { toDelete = append(toDelete, oldChunk) } } - f.DeleteChunks(toDelete) + f.DeleteChunks(oldEntry.FullPath, toDelete) } diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go index b3c215249..c37381116 100644 --- a/weed/filer2/filer_notify.go +++ b/weed/filer2/filer_notify.go @@ -20,12 +20,18 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) glog.V(3).Infof("notifying entry update %v", key) + newParentPath := "" + if newEntry != nil { + newParentPath, _ = newEntry.FullPath.DirAndName() + } + notification.Queue.SendMessage( key, &filer_pb.EventNotification{ - OldEntry: oldEntry.ToProtoEntry(), - NewEntry: newEntry.ToProtoEntry(), - DeleteChunks: deleteChunks, + OldEntry: oldEntry.ToProtoEntry(), + NewEntry: newEntry.ToProtoEntry(), + DeleteChunks: deleteChunks, + NewParentPath: newParentPath, }, ) diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index 9ef1d9d48..231c7fc68 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -1,7 +1,12 @@ package filer2 import ( + "context" "errors" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -10,12 +15,110 @@ type FilerStore interface { GetName() string // Initialize initializes the file store Initialize(configuration util.Configuration) error - InsertEntry(*Entry) error - UpdateEntry(*Entry) (err error) + InsertEntry(context.Context, *Entry) error + UpdateEntry(context.Context, *Entry) (err error) // err == filer2.ErrNotFound if not found - FindEntry(FullPath) (entry *Entry, err error) - DeleteEntry(FullPath) (err error) - ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) + FindEntry(context.Context, FullPath) (entry *Entry, err error) + DeleteEntry(context.Context, FullPath) (err error) + ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) + + BeginTransaction(ctx context.Context) (context.Context, error) + CommitTransaction(ctx context.Context) error + RollbackTransaction(ctx context.Context) error } var ErrNotFound = errors.New("filer: no entry is found in filer store") + +type FilerStoreWrapper struct { + actualStore FilerStore +} + +func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { + return &FilerStoreWrapper{ + actualStore: store, + } +} + +func (fsw *FilerStoreWrapper) GetName() string { + return fsw.actualStore.GetName() +} + +func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error { + return fsw.actualStore.Initialize(configuration) +} + +func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, 
entry *Entry) error { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + return fsw.actualStore.InsertEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + return fsw.actualStore.UpdateEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry *Entry, err error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds()) + }() + + entry, err = fsw.actualStore.FindEntry(ctx, fp) + if err != nil { + return nil, err + } + filer_pb.AfterEntryDeserialization(entry.Chunks) + return +} + +func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) + }() + + return fsw.actualStore.DeleteEntry(ctx, fp) +} + +func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) + }() + + entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit) + if err != nil { + return nil, err + } + for _, entry := range entries { + filer_pb.AfterEntryDeserialization(entry.Chunks) + } + return entries, err +} + +func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) { + return fsw.actualStore.BeginTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { + return fsw.actualStore.CommitTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error { + return fsw.actualStore.RollbackTransaction(ctx) +} diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go index be6e34431..191e51cf3 100644 --- a/weed/filer2/fullpath.go +++ b/weed/filer2/fullpath.go @@ -8,10 +8,7 @@ import ( type FullPath string func NewFullPath(dir, name string) FullPath { - if strings.HasSuffix(dir, "/") { - return FullPath(dir + name) - } - return FullPath(dir + "/" + name) + return FullPath(dir).Child(name) } func (fp FullPath) DirAndName() (string, string) { @@ -29,3 +26,11 @@ func (fp FullPath) Name() string { _, name := filepath.Split(string(fp)) return name } + +func (fp FullPath) Child(name string) FullPath { + dir := string(fp) + if strings.HasSuffix(dir, "/") { + return FullPath(dir + name) + } + return FullPath(dir + "/" + name) +} diff --git 
a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 179107e2c..d00eba859 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -2,12 +2,14 @@ package leveldb import ( "bytes" + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" weed_util "github.com/chrislusf/seaweedfs/weed/util" "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" ) @@ -38,14 +40,30 @@ func (store *LevelDBStore) initialize(dir string) (err error) { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } - if store.db, err = leveldb.OpenFile(dir, nil); err != nil { + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, + } + + if store.db, err = leveldb.OpenFile(dir, opts); err != nil { glog.Infof("filer store open dir %s: %v", dir, err) return } return } -func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *LevelDBStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { key := genKey(entry.DirAndName()) value, err := entry.EncodeAttributesAndChunks() @@ -64,12 +82,12 @@ func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { key := genKey(fullpath.DirAndName()) data, err := store.db.Get(key, nil) @@ -94,7 +112,7 @@ func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.En return entry, nil } -func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { key := genKey(fullpath.DirAndName()) err = store.db.Delete(key, nil) @@ -105,7 +123,7 @@ func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { return nil } -func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 5b214558f..904de8c97 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -1,6 +1,7 @@ package leveldb import ( + "context" "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" @@ -8,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := 
filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -18,6 +19,8 @@ func TestCreateAndFind(t *testing.T) { fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + ctx := context.Background() + entry1 := &filer2.Entry{ FullPath: fullpath, Attr: filer2.Attr{ @@ -27,12 +30,12 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(fullpath) + entry, err := filer.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -45,14 +48,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -61,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -69,8 +72,10 @@ func TestEmptyRoot(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() + ctx := context.Background() + // checking one upper directory - entries, err := filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go new file mode 100644 index 000000000..bce81e357 --- /dev/null +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -0,0 +1,208 @@ +package leveldb + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + weed_util "github.com/chrislusf/seaweedfs/weed/util" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" + leveldb_util "github.com/syndtr/goleveldb/leveldb/util" +) + +func init() { + filer2.Stores = append(filer2.Stores, &LevelDB2Store{}) +} + +type LevelDB2Store struct { + dbs []*leveldb.DB + dbCount int +} + +func (store *LevelDB2Store) GetName() string { + return "leveldb2" +} + +func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) { + dir := configuration.GetString("dir") + return store.initialize(dir, 8) +} + +func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { + glog.Infof("filer store leveldb2 dir: %s", dir) + if err := weed_util.TestFolderWritable(dir); err != nil { + return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) + } + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + + for d := 0 ; d < dbCount; d++ { 
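+ // each of the dbCount partitions gets its own two-digit sub-folder (00, 01, ...) and an independent leveldb instance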
+ dbFolder := fmt.Sprintf("%s/%02d", dir, d) + os.MkdirAll(dbFolder, 0755) + db, dbErr := leveldb.OpenFile(dbFolder, opts) + if dbErr != nil { + glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) + return + } + store.dbs = append(store.dbs, db) + } + store.dbCount = dbCount + + return +} + +func (store *LevelDB2Store) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *LevelDB2Store) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { + dir, name := entry.DirAndName() + key, partitionId := genKey(dir, name, store.dbCount) + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + err = store.dbs[partitionId].Put(key, value, nil) + + if err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) + + return nil +} + +func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { + dir, name := fullpath.DirAndName() + key, partitionId := genKey(dir, name, store.dbCount) + + data, err := store.dbs[partitionId].Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer2.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + } + + entry = &filer2.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(data) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) + + return entry, nil +} + +func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { + dir, name := fullpath.DirAndName() + key, partitionId := genKey(dir, name, store.dbCount) + + err = store.dbs[partitionId].Delete(key, nil) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, + limit int) (entries []*filer2.Entry, err error) { + + directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) + lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount) + + iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + if fileName == startFileName && !inclusive { + continue + } + limit-- + if limit < 0 { + break + } + entry := &filer2.Entry{ + FullPath: filer2.NewFullPath(string(fullpath), fileName), + } + + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) + + if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + entries = append(entries, entry) + } + iter.Release() + + return entries, err +} + +func genKey(dirPath, 
fileName string, dbCount int) (key []byte, partitionId int) { + key, partitionId = hashToBytes(dirPath, dbCount) + key = append(key, []byte(fileName)...) + return key, partitionId +} + +func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { + keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount) + if len(startFileName) > 0 { + keyPrefix = append(keyPrefix, []byte(startFileName)...) + } + return keyPrefix, partitionId +} + +func getNameFromKey(key []byte) string { + + return string(key[md5.Size:]) + +} + +// hash directory, and use last byte for partitioning +func hashToBytes(dir string, dbCount int) ([]byte, int) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + x := b[len(b)-1] + + return b, int(x)%dbCount +} diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go new file mode 100644 index 000000000..a16803ca1 --- /dev/null +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -0,0 +1,88 @@ +package leveldb + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/filer2" + "io/ioutil" + "os" + "testing" +) + +func TestCreateAndFind(t *testing.T) { + filer := filer2.NewFiler(nil, nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + defer os.RemoveAll(dir) + store := &LevelDB2Store{} + store.initialize(dir,2) + filer.SetStore(store) + filer.DisableDirectoryCache() + + fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + + ctx := context.Background() + + entry1 := &filer2.Entry{ + FullPath: fullpath, + Attr: filer2.Attr{ + Mode: 0440, + Uid: 1234, + Gid: 5678, + }, + } + + if err := filer.CreateEntry(ctx, entry1); err != nil { + t.Errorf("create entry %v: %v", entry1.FullPath, err) + return + } + + entry, err := filer.FindEntry(ctx, fullpath) + + if err != nil { + t.Errorf("find entry: %v", err) + return + } + + if entry.FullPath != entry1.FullPath { + t.Errorf("find wrong entry: %v", entry.FullPath) + return + } + + // checking one upper directory + entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + + // checking one upper directory + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func TestEmptyRoot(t *testing.T) { + filer := filer2.NewFiler(nil, nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + defer os.RemoveAll(dir) + store := &LevelDB2Store{} + store.initialize(dir,2) + filer.SetStore(store) + filer.DisableDirectoryCache() + + ctx := context.Background() + + // checking one upper directory + entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + if err != nil { + t.Errorf("list entries: %v", err) + return + } + if len(entries) != 0 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go index 062f1cd1c..9c10a5472 100644 --- a/weed/filer2/memdb/memdb_store.go +++ b/weed/filer2/memdb/memdb_store.go @@ -1,11 +1,13 @@ package memdb import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/util" "github.com/google/btree" "strings" + "sync" ) func init() { @@ -13,7 +15,8 @@ func init() { } type MemDbStore struct { - tree *btree.BTree + tree *btree.BTree + 
treeLock sync.Mutex } type entryItem struct { @@ -33,21 +36,35 @@ func (store *MemDbStore) Initialize(configuration util.Configuration) (err error return nil } -func (store *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) { - // println("inserting", entry.FullPath) - store.tree.ReplaceOrInsert(entryItem{entry}) +func (store *MemDbStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *MemDbStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *MemDbStore) RollbackTransaction(ctx context.Context) error { return nil } -func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) { - if _, err = store.FindEntry(entry.FullPath); err != nil { +func (store *MemDbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { + // println("inserting", entry.FullPath) + store.treeLock.Lock() + store.tree.ReplaceOrInsert(entryItem{entry}) + store.treeLock.Unlock() + return nil +} + +func (store *MemDbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + if _, err = store.FindEntry(ctx, entry.FullPath); err != nil { return fmt.Errorf("no such file %s : %v", entry.FullPath, err) } + store.treeLock.Lock() store.tree.ReplaceOrInsert(entryItem{entry}) + store.treeLock.Unlock() return nil } -func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *MemDbStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}}) if item == nil { return nil, filer2.ErrNotFound @@ -56,12 +73,14 @@ func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entr return entry, nil } -func (store *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *MemDbStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { + store.treeLock.Lock() store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}}) + store.treeLock.Unlock() return nil } -func (store *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { +func (store *MemDbStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { startFrom := string(fullpath) if startFileName != "" { diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go index cf813e04b..d823c5177 100644 --- a/weed/filer2/memdb/memdb_store_test.go +++ b/weed/filer2/memdb/memdb_store_test.go @@ -1,17 +1,20 @@ package memdb import ( + "context" "github.com/chrislusf/seaweedfs/weed/filer2" "testing" ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) store := &MemDbStore{} store.Initialize(nil) filer.SetStore(store) filer.DisableDirectoryCache() + ctx := context.Background() + fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") entry1 := &filer2.Entry{ @@ -23,12 +26,12 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(fullpath) + entry, err := filer.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -43,12 +46,14 @@ func TestCreateAndFind(t 
*testing.T) { } func TestCreateFileAndList(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil) store := &MemDbStore{} store.Initialize(nil) filer.SetStore(store) filer.DisableDirectoryCache() + ctx := context.Background() + entry1 := &filer2.Entry{ FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"), Attr: filer2.Attr{ @@ -67,11 +72,11 @@ func TestCreateFileAndList(t *testing.T) { }, } - filer.CreateEntry(entry1) - filer.CreateEntry(entry2) + filer.CreateEntry(ctx, entry1) + filer.CreateEntry(ctx, entry2) // checking the 2 files - entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) @@ -94,21 +99,21 @@ func TestCreateFileAndList(t *testing.T) { } // checking the offset - entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) + entries, err = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking root directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -124,18 +129,18 @@ func TestCreateFileAndList(t *testing.T) { Gid: 5678, }, } - filer.CreateEntry(entry3) + filer.CreateEntry(ctx, entry3) // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) if len(entries) != 2 { t.Errorf("list entries count: %v", len(entries)) return } // delete file and count - filer.DeleteEntryMetaAndData(file3Path, false, false) - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) + filer.DeleteEntryMetaAndData(ctx, file3Path, false, false) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return diff --git a/weed/filer2/postgres/README.txt b/weed/filer2/postgres/README.txt index ef2ef683b..cb0c99c63 100644 --- a/weed/filer2/postgres/README.txt +++ b/weed/filer2/postgres/README.txt @@ -9,8 +9,8 @@ $PGHOME/bin/psql --username=postgres --password seaweedfs CREATE TABLE IF NOT EXISTS filemeta ( dirhash BIGINT, - name VARCHAR(1000), - directory VARCHAR(4096), + name VARCHAR(65535), + directory VARCHAR(65535), meta bytea, PRIMARY KEY (dirhash, name) ); diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer2/redis/redis_cluster_store.go index 4f74a8a22..11c315391 100644 --- a/weed/filer2/redis/redis_cluster_store.go +++ b/weed/filer2/redis/redis_cluster_store.go @@ -21,12 +21,14 @@ func (store *RedisClusterStore) GetName() string { func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err 
error) { return store.initialize( configuration.GetStringSlice("addresses"), + configuration.GetString("password"), ) } -func (store *RedisClusterStore) initialize(addresses []string) (err error) { +func (store *RedisClusterStore) initialize(addresses []string, password string) (err error) { store.Client = redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: addresses, + Addrs: addresses, + Password: password, }) return } diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index 7fd7e1180..ce41d4d70 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -1,6 +1,7 @@ package redis import ( + "context" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -18,7 +19,17 @@ type UniversalRedisStore struct { Client redis.UniversalClient } -func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { value, err := entry.EncodeAttributesAndChunks() if err != nil { @@ -42,12 +53,12 @@ func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *UniversalRedisStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { data, err := store.Client.Get(string(fullpath)).Result() if err == redis.Nil { @@ -69,7 +80,7 @@ func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *fi return entry, nil } -func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { _, err = store.Client.Del(string(fullpath)).Result() @@ -88,7 +99,7 @@ func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err err return nil } -func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() @@ -126,7 +137,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, // fetch entry meta for _, fileName := range members { path := filer2.NewFullPath(string(fullpath), fileName) - entry, err := store.FindEntry(path) + entry, err := store.FindEntry(ctx, path) if err != nil { glog.V(0).Infof("list %s : %v", path, err) } else { diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go new file mode 100644 index 000000000..01b87cad1 --- /dev/null +++ b/weed/filer2/stream.go @@ -0,0 +1,41 @@ 
+package filer2 + +import ( + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error { + + chunkViews := ViewFromChunks(chunks, offset, size) + + fileId2Url := make(map[string]string) + + for _, chunkView := range chunkViews { + + urlString, err := masterClient.LookupFileId(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } + fileId2Url[chunkView.FileId] = urlString + } + + for _, chunkView := range chunkViews { + urlString := fileId2Url[chunkView.FileId] + _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) { + w.Write(data) + }) + if err != nil { + glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) + return err + } + } + + return nil + +} diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index fae289217..0e9e92e16 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -4,7 +4,6 @@ import ( "context" "os" "path" - "path/filepath" "time" "github.com/chrislusf/seaweedfs/weed/filer2" @@ -29,15 +28,13 @@ var _ = fs.NodeRemover(&Dir{}) var _ = fs.NodeRenamer(&Dir{}) var _ = fs.NodeSetattrer(&Dir{}) -func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error { +func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second if dir.Path == dir.wfs.option.FilerMountRootPath { - attr.Uid = dir.wfs.option.MountUid - attr.Gid = dir.wfs.option.MountGid - attr.Mode = dir.wfs.option.MountMode + dir.setRootDirAttributes(attr) return nil } @@ -54,40 +51,14 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error { return nil } - parent, name := filepath.Split(dir.Path) - - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: parent, - Name: name, - } - - glog.V(1).Infof("read dir %s attr: %v", dir.Path, request) - resp, err := client.LookupDirectoryEntry(context, request) - if err != nil { - if err == filer2.ErrNotFound { - return nil - } - glog.V(0).Infof("read dir %s attr %v: %v", dir.Path, request, err) - return err - } - - if resp.Entry != nil { - dir.attributes = resp.Entry.Attributes - } - - // dir.wfs.listDirectoryEntriesCache.Set(dir.Path, resp.Entry, dir.wfs.option.EntryCacheTtl) - - return nil - }) - + entry, err := filer2.GetEntry(ctx, dir.wfs, dir.Path) if err != nil { + glog.V(2).Infof("read dir %s attr: %v, error: %v", dir.Path, dir.attributes, err) return err } + dir.attributes = entry.Attributes - // glog.V(1).Infof("dir %s: %v", dir.Path, attributes) - // glog.V(1).Infof("dir %s permission: %v", dir.Path, os.FileMode(attributes.FileMode)) + glog.V(2).Infof("dir %s: %v perm: %v", dir.Path, dir.attributes, os.FileMode(dir.attributes.FileMode)) attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir @@ -99,6 +70,16 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error { return nil } +func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { + attr.Uid = dir.wfs.option.MountUid + attr.Gid = dir.wfs.option.MountGid + attr.Mode = dir.wfs.option.MountMode + attr.Crtime = dir.wfs.option.MountCtime + attr.Ctime = dir.wfs.option.MountCtime + attr.Mtime = 
dir.wfs.option.MountMtime + attr.Atime = dir.wfs.option.MountMtime +} + func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { return &File{ Name: name, @@ -132,7 +113,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, glog.V(1).Infof("create: %v", request) if request.Entry.IsDirectory { - if err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { if _, err := client.CreateEntry(ctx, request); err != nil { glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) return fuse.EIO @@ -155,7 +136,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: dir.Path, @@ -192,33 +173,18 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { var entry *filer_pb.Entry + fullFilePath := path.Join(dir.Path, req.Name) - item := dir.wfs.listDirectoryEntriesCache.Get(path.Join(dir.Path, req.Name)) + item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath) if item != nil && !item.Expired() { entry = item.Value().(*filer_pb.Entry) } if entry == nil { - err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir.Path, - Name: req.Name, - } - - glog.V(4).Infof("lookup directory entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - // glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err) - return fuse.ENOENT - } - - entry = resp.Entry - - // dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, dir.wfs.option.EntryCacheTtl) - - return nil - }) + entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) + if err != nil { + return nil, err + } } if entry != nil { @@ -243,7 +209,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
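The new weed/filer2/stream.go above exposes StreamContent, which first resolves every chunk's volume URL through the master client and then streams the chunk bodies in order. A hedged usage sketch; the serveContent helper and its HTTP wiring are illustrative, not part of this patch:

package example

import (
	"net/http"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

// serveContent streams a file's chunks straight into an HTTP response,
// letting StreamContent handle the per-chunk lookups and reads.
func serveContent(w http.ResponseWriter, masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk, totalSize int) {
	if err := filer2.StreamContent(masterClient, w, chunks, 0, totalSize); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
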
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { paginationLimit := 1024 remaining := dir.wfs.option.DirListingLimit @@ -305,33 +271,14 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { - var entry *filer_pb.Entry - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir.Path, - Name: req.Name, - } - - glog.V(4).Infof("lookup to-be-removed entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - // glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err) - return fuse.ENOENT - } - - entry = resp.Entry - - return nil - }) - + entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name)) if err != nil { return err } - dir.wfs.deleteFileChunks(entry.Chunks) + dir.wfs.deleteFileChunks(ctx, entry.Chunks) - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -355,7 +302,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -379,6 +326,10 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { + if dir.attributes == nil { + return nil + } + glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) if req.Valid.Mode() { dir.attributes.FileMode = uint32(req.Mode) @@ -397,7 +348,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus } parentDir, name := filer2.FullPath(dir.Path).DirAndName() - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 3b3735369..92cf04d58 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -35,7 +35,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, }, } - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { if _, err := client.CreateEntry(ctx, request); err != nil { glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) return fuse.EIO diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index d29281f35..e72a15758 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,118 +2,32 @@ package filesys import ( "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" - "math" - "path/filepath" - - 
"github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { newDir := newDirectory.(*Dir) - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - // find existing entry - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir.Path, - Name: req.OldName, + request := &filer_pb.AtomicRenameEntryRequest{ + OldDirectory: dir.Path, + OldName: req.OldName, + NewDirectory: newDir.Path, + NewName: req.NewName, } - glog.V(4).Infof("find existing directory entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + _, err := client.AtomicRenameEntry(ctx, request) if err != nil { - glog.V(3).Infof("renaming find %s/%s: %v", dir.Path, req.OldName, err) - return fuse.ENOENT + return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) } - entry := resp.Entry + return nil - glog.V(4).Infof("found existing directory entry resp: %+v", resp) - - return moveEntry(ctx, client, dir.Path, entry, newDir.Path, req.NewName) }) } - -func moveEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, oldParent string, entry *filer_pb.Entry, newParent, newName string) error { - if entry.IsDirectory { - currentDirPath := filepath.Join(oldParent, entry.Name) - - lastFileName := "" - includeLastFile := false - limit := math.MaxInt32 - for limit > 0 { - request := &filer_pb.ListEntriesRequest{ - Directory: currentDirPath, - StartFromFileName: lastFileName, - InclusiveStartFrom: includeLastFile, - Limit: 1024, - } - glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("list %s: %v", oldParent, err) - return fuse.EIO - } - if len(resp.Entries) == 0 { - break - } - - for _, item := range resp.Entries { - lastFileName = item.Name - err := moveEntry(ctx, client, currentDirPath, item, filepath.Join(newParent, newName), item.Name) - if err != nil { - return err - } - limit-- - } - if len(resp.Entries) < 1024 { - break - } - } - - } - - // add to new directory - { - request := &filer_pb.CreateEntryRequest{ - Directory: newParent, - Entry: &filer_pb.Entry{ - Name: newName, - IsDirectory: entry.IsDirectory, - Attributes: entry.Attributes, - Chunks: entry.Chunks, - }, - } - - glog.V(1).Infof("create new entry: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("renaming create %s/%s: %v", newParent, newName, err) - return fuse.EIO - } - } - - // delete old entry - { - request := &filer_pb.DeleteEntryRequest{ - Directory: oldParent, - Name: entry.Name, - IsDeleteData: false, - } - - glog.V(1).Infof("remove old entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(0).Infof("renaming delete %s/%s: %v", oldParent, entry.Name, err) - return fuse.EIO - } - - } - - return nil - -} diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 696296e62..baee412b2 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -4,13 +4,14 @@ import ( "bytes" "context" "fmt" + "sync" "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "sync" + "github.com/chrislusf/seaweedfs/weed/security" ) type ContinuousDirtyPages struct { 
@@ -109,7 +110,7 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 // flush existing if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { if chunk != nil { - glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) chunks = append(chunks, chunk) } } else { @@ -122,7 +123,7 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 // flush the new page if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { if chunk != nil { - glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) chunks = append(chunks, chunk) } } else { @@ -164,8 +165,9 @@ func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Contex func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { var fileId, host string + var auth security.EncodedJwt - if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -181,7 +183,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte return err } - fileId, host = resp.FileId, resp.Url + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) return nil }); err != nil { @@ -190,7 +192,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "") + uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload data: %v", err) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 4bb169a33..1b359ebbe 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -74,10 +74,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f return err } - if file.isOpen { - return nil - } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { @@ -109,7 +105,11 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f file.entry.Attributes.Mtime = req.Mtime.Unix() } - return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if file.isOpen { + return nil + } + + return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: file.dir.Path, @@ -144,7 +144,7 @@ func (file *File) maybeLoadAttributes(ctx context.Context) error { file.setEntry(entry) // glog.V(1).Infof("file attr read cached %v attributes", file.Name) } else { - err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := file.wfs.WithFilerClient(ctx, func(client 
filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: file.Name, @@ -194,6 +194,8 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) { newVisibles = t } + glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks)) + file.entry.Chunks = append(file.entry.Chunks, chunks...) } diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 0f6ca1164..ceec50e13 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -3,17 +3,16 @@ package filesys import ( "context" "fmt" + "mime" + "path" + "time" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" + "github.com/gabriel-vasile/mimetype" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" - "net/http" - "strings" - "sync" - "time" ) type FileHandle struct { @@ -65,75 +64,14 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) - var vids []string - for _, chunkView := range chunkViews { - vids = append(vids, volumeId(chunkView.FileId)) - } - - vid2Locations := make(map[string]*filer_pb.Locations) - - err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - glog.V(4).Infof("read fh lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return err - } - - vid2Locations = resp.LocationsMap - - return nil - }) - - if err != nil { - glog.V(4).Infof("%v/%v read fh lookup volume ids: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err) - } - - var totalRead int64 - var wg sync.WaitGroup - for _, chunkView := range chunkViews { - wg.Add(1) - go func(chunkView *filer2.ChunkView) { - defer wg.Done() - - glog.V(4).Infof("read fh reading chunk: %+v", chunkView) - - locations := vid2Locations[volumeId(chunkView.FileId)] - if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", chunkView.FileId) - err = fmt.Errorf("failed to locate %s", chunkView.FileId) - return - } - - var n int64 - n, err = util.ReadUrl( - fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), - chunkView.Offset, - int(chunkView.Size), - buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)], - !chunkView.IsFullChunk) - - if err != nil { - - glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.f.dir.Path, fh.f.Name, locations.Locations[0].Url, chunkView.FileId, n, err) - - err = fmt.Errorf("failed to read http://%s/%s: %v", - locations.Locations[0].Url, chunkView.FileId, err) - return - } - - glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView) - totalRead += n - - }(chunkView) - } - wg.Wait() + totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset) resp.Data = buff[:totalRead] + if err != nil { + glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + } + return err } @@ -153,7 +91,13 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f resp.Size = len(req.Data) if req.Offset == 0 { - fh.contentType = http.DetectContentType(req.Data) + // detect mime type + var possibleExt string + 
fh.contentType, possibleExt = mimetype.Detect(req.Data) + if ext := path.Ext(fh.f.Name); ext != possibleExt { + fh.contentType = mime.TypeByExtension(ext) + } + fh.dirtyMetadata = true } @@ -196,7 +140,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { return nil } - return fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType @@ -212,70 +156,25 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { Entry: fh.f.entry, } - //glog.V(1).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) - //for i, chunk := range fh.f.entry.Chunks { - // glog.V(4).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) - //} + glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) + for i, chunk := range fh.f.entry.Chunks { + glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + } chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) fh.f.entry.Chunks = chunks // fh.f.entryViewCache = nil - fh.f.wfs.deleteFileChunks(garbages) if _, err := client.CreateEntry(ctx, request); err != nil { + glog.Errorf("update fh: %v", err) return fmt.Errorf("update fh: %v", err) } + fh.f.wfs.deleteFileChunks(ctx, garbages) + for i, chunk := range garbages { + glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + } + return nil }) } - -func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, fileIds []string) error { - - var vids []string - for _, fileId := range fileIds { - vids = append(vids, volumeId(fileId)) - } - - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { - - m := make(map[string]operation.LookupResult) - - glog.V(4).Infof("remove file lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return m, err - } - - for _, vid := range vids { - lr := operation.LookupResult{ - VolumeId: vid, - Locations: nil, - } - locations := resp.LocationsMap[vid] - for _, loc := range locations.Locations { - lr.Locations = append(lr.Locations, operation.Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - }) - } - m[vid] = lr - } - - return m, err - } - - _, err := operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) - - return err -} - -func volumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] - } - return fileId -} diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 969514a06..9018c36ed 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -19,6 +19,7 @@ import ( type Option struct { FilerGrpcAddress string + GrpcDialOption grpc.DialOption FilerMountRootPath string Collection string Replication string @@ -28,9 +29,11 @@ type Option struct { DirListingLimit int EntryCacheTtl time.Duration - MountUid uint32 - MountGid uint32 - MountMode os.FileMode + MountUid uint32 + MountGid uint32 + MountMode os.FileMode + MountCtime time.Time + MountMtime time.Time } var _ = fs.FS(&WFS{}) @@ -46,8 +49,6 @@ type WFS struct { 
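The Write path in filehandle.go above now detects the content type once, on the first write at offset 0: mimetype.Detect sniffs the data and suggests an extension, and the file name's own extension wins when they disagree. A sketch of that decision in isolation; the detectContentType helper is illustrative, and the two-string return assumes the mimetype v0.3.x API pinned in glide.yaml below:

package example

import (
	"mime"
	"path"

	"github.com/gabriel-vasile/mimetype"
)

// detectContentType mirrors the first-write logic: sniff the bytes, but
// prefer the type implied by the file name's extension when they differ.
func detectContentType(fileName string, data []byte) string {
	contentType, possibleExt := mimetype.Detect(data)
	if ext := path.Ext(fileName); ext != possibleExt {
		contentType = mime.TypeByExtension(ext)
	}
	return contentType
}
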
pathToHandleLock sync.Mutex bufPool sync.Pool - fileIdsDeletionChan chan []string - stats statsCache } type statsCache struct { @@ -65,11 +66,8 @@ func NewSeaweedFileSystem(option *Option) *WFS { return make([]byte, option.ChunkSizeLimit) }, }, - fileIdsDeletionChan: make(chan []string, 32), } - go wfs.loopProcessingDeletion() - return wfs } @@ -77,12 +75,12 @@ func (wfs *WFS) Root() (fs.Node, error) { return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil } -func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, wfs.option.FilerGrpcAddress) + }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) } @@ -137,7 +135,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index f58ef24f4..6e586b7df 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -2,57 +2,68 @@ package filesys import ( "context" - "time" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "google.golang.org/grpc" ) -func (wfs *WFS) loopProcessingDeletion() { - - ticker := time.NewTicker(2 * time.Second) - - wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - var fileIds []string - for { - select { - case fids := <-wfs.fileIdsDeletionChan: - fileIds = append(fileIds, fids...) 
- if len(fileIds) >= 1024 { - glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - deleteFileIds(context.Background(), client, fileIds) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - deleteFileIds(context.Background(), client, fileIds) - fileIds = fileIds[:0] - } - } - } - }) - -} - -func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { +func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { if len(chunks) == 0 { return } var fileIds []string for _, chunk := range chunks { - fileIds = append(fileIds, chunk.FileId) + fileIds = append(fileIds, chunk.GetFileIdString()) } - var async = false - if async { - wfs.fileIdsDeletionChan <- fileIds - return - } - - wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(context.Background(), client, fileIds) + wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) return nil }) } + +func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { + + var vids []string + for _, fileId := range fileIds { + vids = append(vids, filer2.VolumeId(fileId)) + } + + lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { + + m := make(map[string]operation.LookupResult) + + glog.V(4).Infof("remove file lookup volume id locations: %v", vids) + resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + VolumeIds: vids, + }) + if err != nil { + return m, err + } + + for _, vid := range vids { + lr := operation.LookupResult{ + VolumeId: vid, + Locations: nil, + } + locations := resp.LocationsMap[vid] + for _, loc := range locations.Locations { + lr.Locations = append(lr.Locations, operation.Location{ + Url: loc.Url, + PublicUrl: loc.PublicUrl, + }) + } + m[vid] = lr + } + + return m, err + } + + _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) + + return err +} diff --git a/weed/glide.lock b/weed/glide.lock deleted file mode 100644 index fee78be42..000000000 --- a/weed/glide.lock +++ /dev/null @@ -1,190 +0,0 @@ -hash: 2e3a065472829938d25e879451b6d1aa43e55270e1166a9c044803ef8a3b9eb1 -updated: 2018-06-28T22:01:35.910567-07:00 -imports: -- name: github.com/seaweedfs/fuse - version: 65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e - subpackages: - - fs - - fuseutil -- name: github.com/boltdb/bolt - version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8 -- name: github.com/chrislusf/raft - version: 5f7ddd8f479583daf05879d3d3b174aa202c8fb7 - subpackages: - - protobuf -- name: github.com/dgrijalva/jwt-go - version: 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e -- name: github.com/disintegration/imaging - version: bbcee2f5c9d5e94ca42c8b50ec847fec64a6c134 -- name: github.com/fsnotify/fsnotify - version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 -- name: github.com/go-redis/redis - version: 83fb42932f6145ce52df09860384a4653d2d332a - subpackages: - - internal - - internal/consistenthash - - internal/hashtag - - internal/pool - - internal/proto - - internal/singleflight - - internal/util -- name: github.com/go-sql-driver/mysql - version: d523deb1b23d913de5bdada721a6071e71283618 -- name: github.com/gocql/gocql - version: e06f8c1bcd787e6bf0608288b314522f08cc7848 - subpackages: - - internal/lru - - internal/murmur - - internal/streams -- name: github.com/gogo/protobuf - version: 
30cf7ac33676b5786e78c746683f0d4cd64fa75b - subpackages: - - proto -- name: github.com/golang/protobuf - version: b4deda0973fb4c70b50d226b1af49f3da59f5265 - subpackages: - - proto - - protoc-gen-go/descriptor - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/timestamp -- name: github.com/golang/snappy - version: 2e65f85255dbc3072edf28d6b5b8efc472979f5a -- name: github.com/google/btree - version: e89373fe6b4a7413d7acd6da1725b83ef713e6e4 -- name: github.com/gorilla/context - version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 -- name: github.com/gorilla/mux - version: e3702bed27f0d39777b0b37b664b6280e8ef8fbf -- name: github.com/hailocab/go-hostpool - version: e80d13ce29ede4452c43dea11e79b9bc8a15b478 -- name: github.com/hashicorp/hcl - version: ef8a98b0bbce4a65b5aa4c368430a80ddc533168 - subpackages: - - hcl/ast - - hcl/parser - - hcl/printer - - hcl/scanner - - hcl/strconv - - hcl/token - - json/parser - - json/scanner - - json/token -- name: github.com/karlseguin/ccache - version: b425c9ca005a2050ebe723f6a0cddcb907354ab7 -- name: github.com/klauspost/crc32 - version: cb6bfca970f6908083f26f39a79009d608efd5cd -- name: github.com/lib/pq - version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8 - subpackages: - - oid -- name: github.com/magiconair/properties - version: c2353362d570a7bfa228149c62842019201cfb71 -- name: github.com/mitchellh/mapstructure - version: bb74f1db0675b241733089d5a1faa5dd8b0ef57b -- name: github.com/pelletier/go-toml - version: c01d1270ff3e442a8a57cddc1c92dc1138598194 -- name: github.com/rwcarlsen/goexif - version: 8d986c03457a2057c7b0fb0a48113f7dd48f9619 - subpackages: - - exif - - tiff -- name: github.com/soheilhy/cmux - version: e09e9389d85d8492d313d73d1469c029e710623f -- name: github.com/spf13/afero - version: 787d034dfe70e44075ccc060d346146ef53270ad - subpackages: - - mem -- name: github.com/spf13/cast - version: 8965335b8c7107321228e3e3702cab9832751bac -- name: github.com/spf13/jwalterweatherman - version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 -- name: github.com/spf13/pflag - version: 3ebe029320b2676d667ae88da602a5f854788a8a -- name: github.com/spf13/viper - version: 15738813a09db5c8e5b60a19d67d3f9bd38da3a4 -- name: github.com/syndtr/goleveldb - version: 0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697 - subpackages: - - leveldb - - leveldb/cache - - leveldb/comparer - - leveldb/errors - - leveldb/filter - - leveldb/iterator - - leveldb/journal - - leveldb/memdb - - leveldb/opt - - leveldb/storage - - leveldb/table - - leveldb/util -- name: golang.org/x/image - version: cc896f830cedae125428bc9fe1b0362aa91b3fb1 - subpackages: - - bmp - - tiff - - tiff/lzw -- name: golang.org/x/net - version: 4cb1c02c05b0e749b0365f61ae859a8e0cfceed9 - subpackages: - - context - - http/httpguts - - http2 - - http2/hpack - - idna - - internal/timeseries - - trace -- name: golang.org/x/sys - version: 7138fd3d9dc8335c567ca206f4333fb75eb05d56 - subpackages: - - unix -- name: golang.org/x/text - version: 5cec4b58c438bd98288aeb248bab2c1840713d21 - subpackages: - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: google.golang.org/appengine - version: b1f26356af11148e710935ed1ac8a7f5702c7612 - subpackages: - - cloudsql -- name: google.golang.org/genproto - version: ff3583edef7de132f219f0efc00e097cabcc0ec0 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8 - subpackages: - - balancer - - balancer/base - - balancer/roundrobin - - codes - - connectivity - - credentials - - encoding - - encoding/proto - - 
grpclog - - internal - - internal/backoff - - internal/channelz - - internal/grpcrand - - keepalive - - metadata - - naming - - peer - - reflection - - reflection/grpc_reflection_v1alpha - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/inf.v0 - version: d2d2541c53f18d2a059457998ce2876cc8e67cbf -- name: gopkg.in/yaml.v2 - version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 -testImports: [] diff --git a/weed/glide.yaml b/weed/glide.yaml index 740d2ad3d..ef64b3a3c 100644 --- a/weed/glide.yaml +++ b/weed/glide.yaml @@ -1,44 +1,117 @@ package: github.com/chrislusf/seaweedfs/weed import: -- package: github.com/seaweedfs/fuse +- package: cloud.google.com/go + version: ^0.40.0 subpackages: - - fs -- package: github.com/boltdb/bolt - version: ^1.3.1 + - pubsub + - storage +- package: github.com/Azure/azure-storage-blob-go + version: ^0.7.0 + subpackages: + - azblob +- package: github.com/Shopify/sarama + version: ^1.22.1 +- package: github.com/aws/aws-sdk-go + version: ^1.20.12 + subpackages: + - aws + - aws/awserr + - aws/credentials + - aws/session + - service/s3 + - service/s3/s3iface + - service/sqs - package: github.com/chrislusf/raft + subpackages: + - protobuf - package: github.com/dgrijalva/jwt-go version: ^3.2.0 - package: github.com/disintegration/imaging - version: ^1.4.1 + version: ^1.6.0 +- package: github.com/dustin/go-humanize + version: ^1.0.0 +- package: github.com/gabriel-vasile/mimetype + version: ^0.3.14 - package: github.com/go-redis/redis - version: ^6.10.2 + version: ^6.15.3 - package: github.com/go-sql-driver/mysql - version: ^1.3.0 + version: ^1.4.1 - package: github.com/gocql/gocql - package: github.com/golang/protobuf - version: ^1.0.0 + version: ^1.3.1 subpackages: - proto - package: github.com/google/btree + version: ^1.0.0 - package: github.com/gorilla/mux - version: ^1.6.1 + version: ^1.7.3 +- package: github.com/jacobsa/daemonize +- package: github.com/kardianos/osext +- package: github.com/karlseguin/ccache + version: ^2.0.3 - package: github.com/klauspost/crc32 - version: ^1.1.0 + version: ^1.2.0 +- package: github.com/klauspost/reedsolomon + version: ^1.9.2 +- package: github.com/kurin/blazer + version: ^0.5.3 + subpackages: + - b2 - package: github.com/lib/pq + version: ^1.1.1 +- package: github.com/peterh/liner + version: ^1.1.0 +- package: github.com/prometheus/client_golang + version: ^1.0.0 + subpackages: + - prometheus + - prometheus/push +- package: github.com/rakyll/statik + version: ^0.1.6 + subpackages: + - fs - package: github.com/rwcarlsen/goexif subpackages: - exif -- package: github.com/soheilhy/cmux - version: ^0.1.4 +- package: github.com/satori/go.uuid + version: ^1.2.0 +- package: github.com/seaweedfs/fuse + subpackages: + - fs +- package: github.com/spf13/viper + version: ^1.4.0 - package: github.com/syndtr/goleveldb + version: ^1.0.0 subpackages: - leveldb + - leveldb/opt - leveldb/util +- package: github.com/willf/bloom + version: ^2.0.3 +- package: gocloud.dev + version: ^0.15.0 + subpackages: + - pubsub + - pubsub/awssnssqs + - pubsub/azuresb + - pubsub/gcppubsub + - pubsub/natspubsub + - pubsub/rabbitpubsub - package: golang.org/x/net subpackages: - context -- package: google.golang.org/grpc - version: ^1.11.3 + - webdav +- package: golang.org/x/tools subpackages: + - godoc/util +- package: google.golang.org/api + version: ^0.7.0 + subpackages: + - option +- package: google.golang.org/grpc + version: ^1.21.1 + subpackages: + - credentials + - keepalive - peer - reflection diff 
--git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go new file mode 100644 index 000000000..94a413ac0 --- /dev/null +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -0,0 +1,71 @@ +// Package gocdk_pub_sub supports the Go CDK (Cloud Development Kit) PubSub API, +// which in turn supports many providers, including Amazon SNS/SQS, Azure Service Bus, +// Google Cloud PubSub, and RabbitMQ. +// +// In the config, select a provider and topic using a URL. See +// https://godoc.org/gocloud.dev/pubsub and its sub-packages for details. +// +// The Go CDK PubSub API does not support administrative operations like topic +// creation. Create the topic using a UI, CLI or provider-specific API before running +// weed. +// +// The Go CDK obtains credentials via environment variables and other +// provider-specific default mechanisms. See the provider's documentation for +// details. +package gocdk_pub_sub + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + _ "gocloud.dev/pubsub/azuresb" + _ "gocloud.dev/pubsub/gcppubsub" + _ "gocloud.dev/pubsub/natspubsub" + _ "gocloud.dev/pubsub/rabbitpubsub" +) + +func init() { + notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{}) +} + +type GoCDKPubSub struct { + topicURL string + topic *pubsub.Topic +} + +func (k *GoCDKPubSub) GetName() string { + return "gocdk_pub_sub" +} + +func (k *GoCDKPubSub) Initialize(config util.Configuration) error { + k.topicURL = config.GetString("topic_url") + glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) + topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) + if err != nil { + glog.Fatalf("Failed to open topic: %v", err) + } + k.topic = topic + return nil +} + +func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error { + bytes, err := proto.Marshal(message) + if err != nil { + return err + } + ctx := context.Background() + err = k.topic.Send(ctx, &pubsub.Message{ + Body: bytes, + Metadata: map[string]string{"key": key}, + }) + if err != nil { + return fmt.Errorf("send message via Go CDK pubsub %s: %v", k.topicURL, err) + } + return nil +} diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 00e1caad5..4c50eaa26 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,9 +3,11 @@ package operation import ( "context" "fmt" - "time" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + "strings" ) type VolumeAssignRequest struct { @@ -19,14 +21,15 @@ type VolumeAssignRequest struct { } type AssignResult struct { - Fid string `json:"fid,omitempty"` - Url string `json:"url,omitempty"` - PublicUrl string `json:"publicUrl,omitempty"` - Count uint64 `json:"count,omitempty"` - Error string `json:"error,omitempty"` + Fid string `json:"fid,omitempty"` + Url string `json:"url,omitempty"` + PublicUrl string `json:"publicUrl,omitempty"` + Count uint64 `json:"count,omitempty"` + Error string `json:"error,omitempty"` + Auth security.EncodedJwt `json:"auth,omitempty"` } -func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { +func 
Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { var requests []*VolumeAssignRequest requests = append(requests, primaryRequest) @@ -40,9 +43,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque continue } - lastError = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.AssignRequest{ Count: primaryRequest.Count, @@ -53,7 +54,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque Rack: primaryRequest.Rack, DataNode: primaryRequest.DataNode, } - resp, grpcErr := masterClient.Assign(ctx, req) + resp, grpcErr := masterClient.Assign(context.Background(), req) if grpcErr != nil { return grpcErr } @@ -63,6 +64,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque ret.Url = resp.Url ret.PublicUrl = resp.PublicUrl ret.Error = resp.Error + ret.Auth = security.EncodedJwt(resp.Auth) return nil @@ -81,3 +83,17 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque return ret, lastError } + +func LookupJwt(master string, fileId string) security.EncodedJwt { + + tokenStr := "" + + if h, e := util.Head(fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId)); e == nil { + bearer := h.Get("Authorization") + if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" { + tokenStr = bearer[7:] + } + } + + return security.EncodedJwt(tokenStr) +} diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 9d8267dee..295204dd8 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -5,9 +5,12 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net/http" "sort" + "google.golang.org/grpc" + "sync" "github.com/chrislusf/seaweedfs/weed/glog" @@ -53,7 +56,7 @@ func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) { if isGzipped { var err error - if buffer, err = UnGzipData(buffer); err != nil { + if buffer, err = util.UnGzipData(buffer); err != nil { return nil, err } } @@ -69,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) { return json.Marshal(cm) } -func (cm *ChunkManifest) DeleteChunks(master string) error { +func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error { var fileIds []string for _, ci := range cm.Chunks { fileIds = append(fileIds, ci.Fid) } - results, err := DeleteFiles(master, fileIds) + results, err := DeleteFiles(master, grpcDialOption, fileIds) if err != nil { glog.V(0).Infof("delete %+v: %v", fileIds, err) return fmt.Errorf("chunk delete: %v", err) @@ -102,7 +105,10 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, if err != nil { return written, err } - defer resp.Body.Close() + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() switch resp.StatusCode { case http.StatusRequestedRangeNotSatisfiable: diff --git a/weed/operation/data_struts.go b/weed/operation/data_struts.go index bfc53aa50..4980f9913 100644 --- a/weed/operation/data_struts.go +++ b/weed/operation/data_struts.go @@ -2,6 +2,5 @@ package operation type JoinResult struct { VolumeSizeLimit 
uint64 `json:"VolumeSizeLimit,omitempty"` - SecretKey string `json:"secretKey,omitempty"` Error string `json:"error,omitempty"` } diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 3e468e1a3..6d84be76f 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "google.golang.org/grpc" "net/http" "strings" "sync" - "time" - - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -28,17 +28,17 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) { } // DeleteFiles batch deletes a list of fileIds -func DeleteFiles(master string, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { +func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { lookupFunc := func(vids []string) (map[string]LookupResult, error) { - return LookupVolumeIds(master, vids) + return LookupVolumeIds(master, grpcDialOption, vids) } - return DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) } -func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { +func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { var ret []*volume_server_pb.DeleteResult @@ -48,7 +48,7 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin vid, _, err := ParseFileId(fileId) if err != nil { ret = append(ret, &volume_server_pb.DeleteResult{ - FileId: vid, + FileId: fileId, Status: http.StatusBadRequest, Error: err.Error()}, ) @@ -85,38 +85,43 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin } } + resultChan := make(chan []*volume_server_pb.DeleteResult, len(server_to_fileIds)) var wg sync.WaitGroup - for server, fidList := range server_to_fileIds { wg.Add(1) go func(server string, fidList []string) { defer wg.Done() - if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, fidList); deleteErr != nil { + if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil { err = deleteErr } else { - ret = append(ret, deleteResults...) + resultChan <- deleteResults } }(server, fidList) } wg.Wait() + close(resultChan) + + for result := range resultChan { + ret = append(ret, result...) 
+ } + + glog.V(0).Infof("deleted %d items", len(ret)) return ret, err } // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc -func DeleteFilesAtOneVolumeServer(volumeServer string, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { +func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { - err = WithVolumeServerClient(volumeServer, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { req := &volume_server_pb.BatchDeleteRequest{ FileIds: fileIds, } - resp, err := volumeServerClient.BatchDelete(ctx, req) + resp, err := volumeServerClient.BatchDelete(context.Background(), req) // fmt.Printf("deleted %v %v: %v\n", fileIds, err, resp) diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index d0931a8d3..f6b2b69e9 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -1,34 +1,30 @@ package operation import ( + "context" "fmt" - "strconv" - "strings" - "sync" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" + "strconv" + "strings" ) -var ( - grpcClients = make(map[string]*grpc.ClientConn) - grpcClientsLock sync.Mutex -) +func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { -func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.VolumeServerClient) error) error { + ctx := context.Background() grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { return err } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) - }, grpcAddress) + }, grpcAddress, grpcDialOption) } @@ -42,16 +38,18 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil } -func withMasterServerClient(masterServer string, fn func(masterClient master_pb.SeaweedClient) error) error { +func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0) + ctx := context.Background() + + masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v", masterServer) + return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) - }, masterGrpcAddress) + }, masterGrpcAddress, grpcDialOption) } diff --git a/weed/operation/list_masters.go b/weed/operation/list_masters.go deleted file mode 100644 index 
75838de4d..000000000 --- a/weed/operation/list_masters.go +++ /dev/null @@ -1,32 +0,0 @@ -package operation - -import ( - "encoding/json" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" -) - -type ClusterStatusResult struct { - IsLeader bool `json:"IsLeader,omitempty"` - Leader string `json:"Leader,omitempty"` - Peers []string `json:"Peers,omitempty"` -} - -func ListMasters(server string) (leader string, peers []string, err error) { - jsonBlob, err := util.Get("http://" + server + "/cluster/status") - glog.V(2).Info("list masters result :", string(jsonBlob)) - if err != nil { - return "", nil, err - } - var ret ClusterStatusResult - err = json.Unmarshal(jsonBlob, &ret) - if err != nil { - return "", nil, err - } - peers = ret.Peers - if ret.IsLeader { - peers = append(peers, ret.Leader) - } - return ret.Leader, peers, nil -} diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index 562a11580..d0773e7fd 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "google.golang.org/grpc" "math/rand" "net/url" "strings" @@ -78,7 +79,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) { } // LookupVolumeIds find volume locations by cache and actual lookup -func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, error) { +func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) { ret := make(map[string]LookupResult) var unknown_vids []string @@ -98,14 +99,12 @@ func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, err //only query unknown_vids - err := withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, } - resp, grpcErr := masterClient.LookupVolume(ctx, req) + resp, grpcErr := masterClient.LookupVolume(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/stats.go b/weed/operation/stats.go index 364727272..b69a33750 100644 --- a/weed/operation/stats.go +++ b/weed/operation/stats.go @@ -2,18 +2,16 @@ package operation import ( "context" - "time" + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) -func Statistics(server string, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { +func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - err = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - grpcResponse, grpcErr := masterClient.Statistics(ctx, req) + grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 7a1a3085e..bdf59d966 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -2,6 +2,7 @@ package operation import ( "bytes" + "google.golang.org/grpc" "io" "mime" 
"net/url" @@ -36,10 +37,8 @@ type SubmitResult struct { Error string `json:"error,omitempty"` } -func SubmitFiles(master string, files []FilePart, - replication string, collection string, dataCenter string, ttl string, maxMB int, - secret security.Secret, -) ([]SubmitResult, error) { +func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, + replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { results[index].FileName = file.FileName @@ -51,7 +50,7 @@ func SubmitFiles(master string, files []FilePart, DataCenter: dataCenter, Ttl: ttl, } - ret, err := Assign(master, ar) + ret, err := Assign(master, grpcDialOption, ar) if err != nil { for index, _ := range files { results[index].Error = err.Error() @@ -67,7 +66,7 @@ func SubmitFiles(master string, files []FilePart, file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, secret) + results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption) if err != nil { results[index].Error = err.Error() } @@ -110,8 +109,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) { return ret, nil } -func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (retSize uint32, err error) { - jwt := security.GenJwt(secret, fi.Fid) +func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) @@ -139,7 +137,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret Collection: fi.Collection, Ttl: fi.Ttl, } - ret, err = Assign(master, ar) + ret, err = Assign(master, grpcDialOption, ar) if err != nil { return } @@ -152,10 +150,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret Collection: fi.Collection, Ttl: fi.Ttl, } - ret, err = Assign(master, ar) + ret, err = Assign(master, grpcDialOption, ar) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, grpcDialOption) return } id = ret.Fid @@ -170,10 +168,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret baseName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(fi.Reader, chunkSize), master, fileUrl, - jwt) + ret.Auth) if e != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, grpcDialOption) return 0, e } cm.Chunks = append(cm.Chunks, @@ -188,7 +186,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret err = upload_chunked_file_manifest(fileUrl, &cm, jwt) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, grpcDialOption) } } else { ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index e40c7de41..5562f12ab 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -2,63 +2,19 @@ package operation import ( "context" - "fmt" - "io" - "time" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - . 
"github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" ) -func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { +func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { - WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{ - VolumdId: vid, + resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{ + VolumeId: vid, }) return nil }) return } - -func GetVolumeIdxEntries(server string, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error { - - return WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{ - VolumdId: vid, - }) - if err != nil { - return err - } - - var indexFileContent []byte - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("read index entries: %v", err) - } - indexFileContent = append(indexFileContent, resp.IndexFileContent...) - } - - dataSize := len(indexFileContent) - - for idx := 0; idx+NeedleEntrySize <= dataSize; idx += NeedleEntrySize { - line := indexFileContent[idx : idx+NeedleEntrySize] - key := BytesToNeedleId(line[:NeedleIdSize]) - offset := BytesToOffset(line[NeedleIdSize : NeedleIdSize+OffsetSize]) - size := util.BytesToUint32(line[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize]) - eachEntryFn(key, offset, size) - } - - return nil - }) -} diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go new file mode 100644 index 000000000..b53f18ce1 --- /dev/null +++ b/weed/operation/tail_volume.go @@ -0,0 +1,82 @@ +package operation + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" +) + +func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { + // find volume location, replication, ttl info + lookup, err := Lookup(master, vid.String()) + if err != nil { + return fmt.Errorf("look up volume %d: %v", vid, err) + } + if len(lookup.Locations) == 0 { + return fmt.Errorf("unable to locate volume %d", vid) + } + + volumeServer := lookup.Locations[0].Url + + return TailVolumeFromSource(volumeServer, grpcDialOption, vid, sinceNs, timeoutSeconds, fn) +} + +func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error { + return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + + stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{ + VolumeId: uint32(vid), + SinceNs: sinceNs, + IdleTimeoutSeconds: uint32(idleTimeoutSeconds), + }) + if err != nil { + return err + } + + for { + resp, 
recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + needleHeader := resp.NeedleHeader + needleBody := resp.NeedleBody + + if len(needleHeader) == 0 { + continue + } + + for !resp.IsLastChunk { + resp, recvErr = stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + needleBody = append(needleBody, resp.NeedleBody...) + } + + n := new(needle.Needle) + n.ParseNeedleHeader(needleHeader) + n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion) + + err = fn(n) + + if err != nil { + return err + } + + } + return nil + }) +} diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 030bf5889..c387d0230 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,6 +2,8 @@ package operation import ( "bytes" + "compress/flate" + "compress/gzip" "encoding/json" "errors" "fmt" @@ -16,6 +18,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) type UploadResult struct { @@ -37,13 +40,43 @@ func init() { var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") -// Upload sends a POST request to a volume server to upload the content -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return upload_content(uploadUrl, func(w io.Writer) (err error) { - _, err = io.Copy(w, reader) - return - }, filename, isGzipped, mtype, pairMap, jwt) +// Upload sends a POST request to a volume server to upload the content with adjustable compression level +func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { + if compressionLevel < 1 { + compressionLevel = 1 + } + if compressionLevel > 9 { + compressionLevel = 9 + } + return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt) } + +// Upload sends a POST request to a volume server to upload the content with fast compression +func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { + return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt) +} + +func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { + contentIsGzipped := isGzipped + shouldGzipNow := false + if !isGzipped { + if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped { + shouldGzipNow = true + contentIsGzipped = true + } + } + return upload_content(uploadUrl, func(w io.Writer) (err error) { + if shouldGzipNow { + gzWriter, _ := gzip.NewWriterLevel(w, compression) + _, err = io.Copy(gzWriter, reader) + gzWriter.Close() + } else { + _, err = io.Copy(w, reader) + } + return + }, filename, contentIsGzipped, mtype, pairMap, jwt) +} + func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { body_buf := bytes.NewBufferString("") 
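// Illustrative sketch (not part of the patch): the two upload entry points
// added above, assuming a reachable volume server and a JWT returned by an
// earlier Assign call; the upload URL, file name and jwt value are hypothetical.
package main

import (
	"log"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/security"
)

func main() {
	var jwt security.EncodedJwt // normally the Auth field from an Assign response

	// Upload gzips content that util.IsGzippableFileType recognises, using
	// flate.BestSpeed, and marks the part with Content-Encoding: gzip.
	if _, err := operation.Upload("http://localhost:8080/3,01637037d6", "notes.txt",
		strings.NewReader("hello"), false, "text/plain", nil, jwt); err != nil {
		log.Fatal(err)
	}

	// UploadWithLocalCompressionLevel trades CPU for compression ratio; the
	// level is clamped to the 1..9 range accepted by gzip.NewWriterLevel.
	if _, err := operation.UploadWithLocalCompressionLevel("http://localhost:8080/3,01637037d6", "notes.txt",
		strings.NewReader("hello again"), false, "text/plain", nil, jwt, 9); err != nil {
		log.Fatal(err)
	}
}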
body_writer := multipart.NewWriter(body_buf) @@ -58,9 +91,6 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error if isGzipped { h.Set("Content-Encoding", "gzip") } - if jwt != "" { - h.Set("Authorization", "BEARER "+string(jwt)) - } file_writer, cp_err := body_writer.CreatePart(h) if cp_err != nil { @@ -86,18 +116,15 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error for k, v := range pairMap { req.Header.Set(k, v) } + if jwt != "" { + req.Header.Set("Authorization", "BEARER "+string(jwt)) + } resp, post_err := client.Do(req) if post_err != nil { glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error()) return nil, post_err } defer resp.Body.Close() - - if resp.StatusCode < http.StatusOK || - resp.StatusCode > http.StatusIMUsed { - return nil, errors.New(http.StatusText(resp.StatusCode)) - } - etag := getEtag(resp) resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 6cd4df6b4..d72bced12 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { + } + rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { } @@ -36,6 +39,9 @@ service SeaweedFiler { rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } + rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { + } + } ////////////////////////////////////////////////// @@ -69,19 +75,33 @@ message Entry { map extended = 5; } +message FullEntry { + string dir = 1; + Entry entry = 2; +} + message EventNotification { Entry old_entry = 1; Entry new_entry = 2; bool delete_chunks = 3; + string new_parent_path = 4; } message FileChunk { - string file_id = 1; + string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; int64 mtime = 4; string e_tag = 5; - string source_file_id = 6; + string source_file_id = 6; // to be deprecated + FileId fid = 7; + FileId source_fid = 8; +} + +message FileId { + uint32 volume_id = 1; + uint64 file_key = 2; + fixed32 cookie = 3; } message FuseAttributes { @@ -126,6 +146,16 @@ message DeleteEntryRequest { message DeleteEntryResponse { } +message AtomicRenameEntryRequest { + string old_directory = 1; + string old_name = 2; + string new_directory = 3; + string new_name = 4; +} + +message AtomicRenameEntryResponse { +} + message AssignVolumeRequest { int32 count = 1; string collection = 2; @@ -139,6 +169,7 @@ message AssignVolumeResponse { string url = 2; string public_url = 3; int32 count = 4; + string auth = 5; } message LookupVolumeRequest { @@ -177,3 +208,12 @@ message StatisticsResponse { uint64 used_size = 5; uint64 file_count = 6; } + +message GetFilerConfigurationRequest { +} +message GetFilerConfigurationResponse { + repeated string masters = 1; + string replication = 2; + string collection = 3; + uint32 max_mb = 4; +} diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 6b4a27c0a..1a35ad7c0 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -14,8 +14,10 @@ It has these top-level messages: ListEntriesRequest ListEntriesResponse Entry + FullEntry EventNotification FileChunk + FileId FuseAttributes CreateEntryRequest CreateEntryResponse @@ -23,6 +25,8 @@ It has these top-level messages: UpdateEntryResponse DeleteEntryRequest 
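// Illustrative sketch (not part of the patch): filer.proto above adds a
// structured FileId next to the legacy string id on FileChunk. A chunk during
// the transition might carry both forms; the id values are made up, and the
// split of the string id into file key and trailing 8-hex-digit cookie is an
// assumption (the helpers added later in this patch do the real conversion).
package main

import "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"

func sketchChunk() *filer_pb.FileChunk {
	return &filer_pb.FileChunk{
		FileId: "11745,0293434534cbb9892b", // legacy string form, marked "to be deprecated"
		Fid: &filer_pb.FileId{ // compact replacement: volume id + file key + cookie
			VolumeId: 11745,
			FileKey:  0x0293434534,
			Cookie:   0xcbb9892b,
		},
		Offset: 0,
		Size:   1024,
	}
}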
DeleteEntryResponse + AtomicRenameEntryRequest + AtomicRenameEntryResponse AssignVolumeRequest AssignVolumeResponse LookupVolumeRequest @@ -33,6 +37,8 @@ It has these top-level messages: DeleteCollectionResponse StatisticsRequest StatisticsResponse + GetFilerConfigurationRequest + GetFilerConfigurationResponse */ package filer_pb @@ -208,16 +214,41 @@ func (m *Entry) GetExtended() map[string][]byte { return nil } +type FullEntry struct { + Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` +} + +func (m *FullEntry) Reset() { *m = FullEntry{} } +func (m *FullEntry) String() string { return proto.CompactTextString(m) } +func (*FullEntry) ProtoMessage() {} +func (*FullEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *FullEntry) GetDir() string { + if m != nil { + return m.Dir + } + return "" +} + +func (m *FullEntry) GetEntry() *Entry { + if m != nil { + return m.Entry + } + return nil +} + type EventNotification struct { - OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"` - NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"` - DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"` + OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"` + NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"` + DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"` + NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath" json:"new_parent_path,omitempty"` } func (m *EventNotification) Reset() { *m = EventNotification{} } func (m *EventNotification) String() string { return proto.CompactTextString(m) } func (*EventNotification) ProtoMessage() {} -func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *EventNotification) GetOldEntry() *Entry { if m != nil { @@ -240,19 +271,28 @@ func (m *EventNotification) GetDeleteChunks() bool { return false } +func (m *EventNotification) GetNewParentPath() string { + if m != nil { + return m.NewParentPath + } + return "" +} + type FileChunk struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` - Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` - Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"` - ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"` - SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"` + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` + Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` + Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` + Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"` + ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"` + SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"` + Fid *FileId `protobuf:"bytes,7,opt,name=fid" 
json:"fid,omitempty"` + SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"` } func (m *FileChunk) Reset() { *m = FileChunk{} } func (m *FileChunk) String() string { return proto.CompactTextString(m) } func (*FileChunk) ProtoMessage() {} -func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *FileChunk) GetFileId() string { if m != nil { @@ -296,6 +336,52 @@ func (m *FileChunk) GetSourceFileId() string { return "" } +func (m *FileChunk) GetFid() *FileId { + if m != nil { + return m.Fid + } + return nil +} + +func (m *FileChunk) GetSourceFid() *FileId { + if m != nil { + return m.SourceFid + } + return nil +} + +type FileId struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` + Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie" json:"cookie,omitempty"` +} + +func (m *FileId) Reset() { *m = FileId{} } +func (m *FileId) String() string { return proto.CompactTextString(m) } +func (*FileId) ProtoMessage() {} +func (*FileId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *FileId) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *FileId) GetFileKey() uint64 { + if m != nil { + return m.FileKey + } + return 0 +} + +func (m *FileId) GetCookie() uint32 { + if m != nil { + return m.Cookie + } + return 0 +} + type FuseAttributes struct { FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"` Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"` @@ -315,7 +401,7 @@ type FuseAttributes struct { func (m *FuseAttributes) Reset() { *m = FuseAttributes{} } func (m *FuseAttributes) String() string { return proto.CompactTextString(m) } func (*FuseAttributes) ProtoMessage() {} -func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *FuseAttributes) GetFileSize() uint64 { if m != nil { @@ -416,7 +502,7 @@ type CreateEntryRequest struct { func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) } func (*CreateEntryRequest) ProtoMessage() {} -func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *CreateEntryRequest) GetDirectory() string { if m != nil { @@ -438,7 +524,7 @@ type CreateEntryResponse struct { func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) } func (*CreateEntryResponse) ProtoMessage() {} -func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } type UpdateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` @@ -448,7 +534,7 @@ type UpdateEntryRequest struct { func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} } func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) } func 
(*UpdateEntryRequest) ProtoMessage() {} -func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *UpdateEntryRequest) GetDirectory() string { if m != nil { @@ -470,7 +556,7 @@ type UpdateEntryResponse struct { func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} } func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) } func (*UpdateEntryResponse) ProtoMessage() {} -func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } type DeleteEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` @@ -483,7 +569,7 @@ type DeleteEntryRequest struct { func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} } func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) } func (*DeleteEntryRequest) ProtoMessage() {} -func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *DeleteEntryRequest) GetDirectory() string { if m != nil { @@ -519,7 +605,55 @@ type DeleteEntryResponse struct { func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} } func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) } func (*DeleteEntryResponse) ProtoMessage() {} -func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type AtomicRenameEntryRequest struct { + OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"` + OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"` + NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory" json:"new_directory,omitempty"` + NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName" json:"new_name,omitempty"` +} + +func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} } +func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) } +func (*AtomicRenameEntryRequest) ProtoMessage() {} +func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *AtomicRenameEntryRequest) GetOldDirectory() string { + if m != nil { + return m.OldDirectory + } + return "" +} + +func (m *AtomicRenameEntryRequest) GetOldName() string { + if m != nil { + return m.OldName + } + return "" +} + +func (m *AtomicRenameEntryRequest) GetNewDirectory() string { + if m != nil { + return m.NewDirectory + } + return "" +} + +func (m *AtomicRenameEntryRequest) GetNewName() string { + if m != nil { + return m.NewName + } + return "" +} + +type AtomicRenameEntryResponse struct { +} + +func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} } +func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) } +func (*AtomicRenameEntryResponse) ProtoMessage() {} +func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } type AssignVolumeRequest struct { Count int32 `protobuf:"varint,1,opt,name=count" 
json:"count,omitempty"` @@ -532,7 +666,7 @@ type AssignVolumeRequest struct { func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *AssignVolumeRequest) GetCount() int32 { if m != nil { @@ -574,12 +708,13 @@ type AssignVolumeResponse struct { Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` } func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *AssignVolumeResponse) GetFileId() string { if m != nil { @@ -609,6 +744,13 @@ func (m *AssignVolumeResponse) GetCount() int32 { return 0 } +func (m *AssignVolumeResponse) GetAuth() string { + if m != nil { + return m.Auth + } + return "" +} + type LookupVolumeRequest struct { VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` } @@ -616,7 +758,7 @@ type LookupVolumeRequest struct { func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *LookupVolumeRequest) GetVolumeIds() []string { if m != nil { @@ -632,7 +774,7 @@ type Locations struct { func (m *Locations) Reset() { *m = Locations{} } func (m *Locations) String() string { return proto.CompactTextString(m) } func (*Locations) ProtoMessage() {} -func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *Locations) GetLocations() []*Location { if m != nil { @@ -649,7 +791,7 @@ type Location struct { func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *Location) GetUrl() string { if m != nil { @@ -672,7 +814,7 @@ type LookupVolumeResponse struct { func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } func (m *LookupVolumeResponse) 
GetLocationsMap() map[string]*Locations { if m != nil { @@ -688,7 +830,7 @@ type DeleteCollectionRequest struct { func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *DeleteCollectionRequest) GetCollection() string { if m != nil { @@ -703,7 +845,7 @@ type DeleteCollectionResponse struct { func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } type StatisticsRequest struct { Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` @@ -714,7 +856,7 @@ type StatisticsRequest struct { func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *StatisticsRequest) GetReplication() string { if m != nil { @@ -749,7 +891,7 @@ type StatisticsResponse struct { func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } func (m *StatisticsResponse) GetReplication() string { if m != nil { @@ -793,14 +935,64 @@ func (m *StatisticsResponse) GetFileCount() uint64 { return 0 } +type GetFilerConfigurationRequest struct { +} + +func (m *GetFilerConfigurationRequest) Reset() { *m = GetFilerConfigurationRequest{} } +func (m *GetFilerConfigurationRequest) String() string { return proto.CompactTextString(m) } +func (*GetFilerConfigurationRequest) ProtoMessage() {} +func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +type GetFilerConfigurationResponse struct { + Masters []string `protobuf:"bytes,1,rep,name=masters" json:"masters,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` + MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` +} + +func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} } +func (m *GetFilerConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetFilerConfigurationResponse) ProtoMessage() {} +func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *GetFilerConfigurationResponse) GetMasters() []string { + if m != nil { + return m.Masters + } + return 
nil +} + +func (m *GetFilerConfigurationResponse) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *GetFilerConfigurationResponse) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 { + if m != nil { + return m.MaxMb + } + return 0 +} + func init() { proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest") proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse") proto.RegisterType((*Entry)(nil), "filer_pb.Entry") + proto.RegisterType((*FullEntry)(nil), "filer_pb.FullEntry") proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification") proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk") + proto.RegisterType((*FileId)(nil), "filer_pb.FileId") proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes") proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest") proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse") @@ -808,6 +1000,8 @@ func init() { proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse") proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest") proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse") + proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest") + proto.RegisterType((*AtomicRenameEntryResponse)(nil), "filer_pb.AtomicRenameEntryResponse") proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest") proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse") proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest") @@ -818,6 +1012,8 @@ func init() { proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse") proto.RegisterType((*StatisticsRequest)(nil), "filer_pb.StatisticsRequest") proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse") + proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest") + proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse") } // Reference imports to suppress errors if they are not otherwise used. 
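// Illustrative sketch (not part of the patch): calling the two new filer RPCs
// from the client side, assuming an established insecure connection to a
// filer's gRPC port; the address, directory and file names are hypothetical.
package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := filer_pb.NewSeaweedFilerClient(conn)

	// Filers now report their effective defaults, so clients can pick up
	// masters, replication, collection and max MB instead of hard-coding them.
	conf, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("masters=%v replication=%s maxMB=%d", conf.Masters, conf.Replication, conf.MaxMb)

	// Rename is now a single RPC handled atomically on the filer.
	if _, err = client.AtomicRenameEntry(context.Background(), &filer_pb.AtomicRenameEntryRequest{
		OldDirectory: "/buckets/demo", OldName: "a.txt",
		NewDirectory: "/buckets/demo", NewName: "b.txt",
	}); err != nil {
		log.Fatal(err)
	}
}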
@@ -836,10 +1032,12 @@ type SeaweedFilerClient interface { CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) + AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) + GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) } type seaweedFilerClient struct { @@ -895,6 +1093,15 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq return out, nil } +func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { + out := new(AtomicRenameEntryResponse) + err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { out := new(AssignVolumeResponse) err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...) @@ -931,6 +1138,15 @@ func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsReque return out, nil } +func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) { + out := new(GetFilerConfigurationResponse) + err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for SeaweedFiler service type SeaweedFilerServer interface { @@ -939,10 +1155,12 @@ type SeaweedFilerServer interface { CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) + AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) + GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) } func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) { @@ -1039,6 +1257,24 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AtomicRenameEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/AtomicRenameEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, req.(*AtomicRenameEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AssignVolumeRequest) if err := dec(in); err != nil { @@ -1111,6 +1347,24 @@ func _SeaweedFiler_Statistics_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFilerConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/GetFilerConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, req.(*GetFilerConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ ServiceName: "filer_pb.SeaweedFiler", HandlerType: (*SeaweedFilerServer)(nil), @@ -1135,6 +1389,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "DeleteEntry", Handler: _SeaweedFiler_DeleteEntry_Handler, }, + { + MethodName: "AtomicRenameEntry", + Handler: _SeaweedFiler_AtomicRenameEntry_Handler, + }, { MethodName: "AssignVolume", Handler: _SeaweedFiler_AssignVolume_Handler, @@ -1151,6 +1409,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "Statistics", 
Handler: _SeaweedFiler_Statistics_Handler, }, + { + MethodName: "GetFilerConfiguration", + Handler: _SeaweedFiler_GetFilerConfiguration_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "filer.proto", @@ -1159,86 +1421,104 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1291 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x8f, 0xdc, 0x44, - 0x13, 0x8e, 0xe7, 0x2b, 0xe3, 0x9a, 0x99, 0xbc, 0xbb, 0x3d, 0xfb, 0x12, 0x6b, 0xb2, 0x1b, 0x26, - 0x86, 0xa0, 0x8d, 0x88, 0x46, 0x51, 0xe0, 0x90, 0x10, 0x21, 0x91, 0x6c, 0x36, 0x52, 0xa4, 0x4d, - 0x82, 0xbc, 0x09, 0x12, 0xe2, 0x60, 0x79, 0xed, 0x9e, 0xa1, 0xb5, 0x1e, 0x7b, 0x70, 0xb7, 0x37, - 0x09, 0x7f, 0x82, 0x0b, 0x57, 0x0e, 0x9c, 0xf8, 0x17, 0x5c, 0xf8, 0x3f, 0xdc, 0xb9, 0xa1, 0xae, - 0x6e, 0x7b, 0xda, 0x63, 0xef, 0x06, 0x84, 0x72, 0xeb, 0x7e, 0xaa, 0xba, 0xbe, 0xfa, 0xe9, 0x2a, - 0x1b, 0x06, 0x73, 0x16, 0xd3, 0x6c, 0xb6, 0xca, 0x52, 0x91, 0x92, 0x3e, 0x6e, 0xfc, 0xd5, 0x89, - 0xfb, 0x02, 0xae, 0x1d, 0xa5, 0xe9, 0x69, 0xbe, 0x7a, 0xcc, 0x32, 0x1a, 0x8a, 0x34, 0x7b, 0x7b, - 0x98, 0x88, 0xec, 0xad, 0x47, 0x7f, 0xc8, 0x29, 0x17, 0x64, 0x17, 0xec, 0xa8, 0x10, 0x38, 0xd6, - 0xd4, 0xda, 0xb7, 0xbd, 0x35, 0x40, 0x08, 0x74, 0x92, 0x60, 0x49, 0x9d, 0x16, 0x0a, 0x70, 0xed, - 0x1e, 0xc2, 0x6e, 0xb3, 0x41, 0xbe, 0x4a, 0x13, 0x4e, 0xc9, 0x4d, 0xe8, 0x52, 0x09, 0xa0, 0xb5, - 0xc1, 0xdd, 0xff, 0xcd, 0x8a, 0x50, 0x66, 0x4a, 0x4f, 0x49, 0xdd, 0xdf, 0x2d, 0x20, 0x47, 0x8c, - 0x0b, 0x09, 0x32, 0xca, 0xff, 0x59, 0x3c, 0x1f, 0x40, 0x6f, 0x95, 0xd1, 0x39, 0x7b, 0xa3, 0x23, - 0xd2, 0x3b, 0x72, 0x1b, 0xb6, 0xb9, 0x08, 0x32, 0xf1, 0x24, 0x4b, 0x97, 0x4f, 0x58, 0x4c, 0x9f, - 0xcb, 0xa0, 0xdb, 0xa8, 0x52, 0x17, 0x90, 0x19, 0x10, 0x96, 0x84, 0x71, 0xce, 0xd9, 0x19, 0x3d, - 0x2e, 0xa4, 0x4e, 0x67, 0x6a, 0xed, 0xf7, 0xbd, 0x06, 0x09, 0xd9, 0x81, 0x6e, 0xcc, 0x96, 0x4c, - 0x38, 0xdd, 0xa9, 0xb5, 0x3f, 0xf2, 0xd4, 0xc6, 0xfd, 0x0a, 0xc6, 0x95, 0xf8, 0x75, 0xfa, 0xb7, - 0xe0, 0x32, 0x55, 0x90, 0x63, 0x4d, 0xdb, 0x4d, 0x05, 0x28, 0xe4, 0xee, 0x2f, 0x2d, 0xe8, 0x22, - 0x54, 0xd6, 0xd9, 0x5a, 0xd7, 0x99, 0xdc, 0x80, 0x21, 0xe3, 0xfe, 0xba, 0x18, 0x2d, 0x8c, 0x6f, - 0xc0, 0x78, 0x59, 0x77, 0xf2, 0x29, 0xf4, 0xc2, 0xef, 0xf3, 0xe4, 0x94, 0x3b, 0x6d, 0x74, 0x35, - 0x5e, 0xbb, 0x92, 0xc9, 0x1e, 0x48, 0x99, 0xa7, 0x55, 0xc8, 0x3d, 0x80, 0x40, 0x88, 0x8c, 0x9d, - 0xe4, 0x82, 0x72, 0xcc, 0x76, 0x70, 0xd7, 0x31, 0x0e, 0xe4, 0x9c, 0x3e, 0x2c, 0xe5, 0x9e, 0xa1, - 0x4b, 0xee, 0x43, 0x9f, 0xbe, 0x11, 0x34, 0x89, 0x68, 0xe4, 0x74, 0xd1, 0xd1, 0xde, 0x46, 0x4e, - 0xb3, 0x43, 0x2d, 0x57, 0x19, 0x96, 0xea, 0x93, 0x07, 0x30, 0xaa, 0x88, 0xc8, 0x16, 0xb4, 0x4f, - 0x69, 0x71, 0xb3, 0x72, 0x29, 0xab, 0x7b, 0x16, 0xc4, 0xb9, 0x22, 0xd9, 0xd0, 0x53, 0x9b, 0x2f, - 0x5a, 0xf7, 0x2c, 0xf7, 0x67, 0x0b, 0xb6, 0x0f, 0xcf, 0x68, 0x22, 0x9e, 0xa7, 0x82, 0xcd, 0x59, - 0x18, 0x08, 0x96, 0x26, 0xe4, 0x36, 0xd8, 0x69, 0x1c, 0xf9, 0x17, 0x72, 0xac, 0x9f, 0xc6, 0xda, - 0xdf, 0x6d, 0xb0, 0x13, 0xfa, 0x5a, 0x6b, 0xb7, 0xce, 0xd1, 0x4e, 0xe8, 0x6b, 0xa5, 0xfd, 0x11, - 0x8c, 0x22, 0x1a, 0x53, 0x41, 0xfd, 0xb2, 0xae, 0xb2, 0xe8, 0x43, 0x05, 0x62, 0x3d, 0xb9, 0xfb, - 0xab, 0x05, 0x76, 0x59, 0x5e, 0x72, 0x15, 0x2e, 0x4b, 0x73, 0x3e, 0x8b, 0x74, 0x52, 0x3d, 0xb9, - 0x7d, 0x1a, 0x49, 0xae, 0xa6, 0xf3, 0x39, 0xa7, 0x02, 0xdd, 0xb6, 0x3d, 0xbd, 0x93, 0x77, 0xcd, - 0xd9, 0x8f, 0x8a, 0x9e, 0x1d, 0x0f, 0xd7, 0xb2, 0x06, 0x4b, 0xc1, 0x96, 0x14, 0xaf, 
0xa5, 0xed, - 0xa9, 0x0d, 0x19, 0x43, 0x97, 0xfa, 0x22, 0x58, 0x20, 0xef, 0x6c, 0xaf, 0x43, 0x5f, 0x06, 0x0b, - 0xf2, 0x31, 0x5c, 0xe1, 0x69, 0x9e, 0x85, 0xd4, 0x2f, 0xdc, 0xf6, 0x50, 0x3a, 0x54, 0xe8, 0x13, - 0x74, 0xee, 0xfe, 0xd9, 0x82, 0x2b, 0xd5, 0x1b, 0x25, 0xd7, 0xc0, 0xc6, 0x13, 0xe8, 0xdc, 0x42, - 0xe7, 0xd8, 0x25, 0x8e, 0x2b, 0x01, 0xb4, 0xcc, 0x00, 0x8a, 0x23, 0xcb, 0x34, 0x52, 0xf1, 0x8e, - 0xd4, 0x91, 0x67, 0x69, 0x44, 0xe5, 0x4d, 0xe6, 0x2c, 0xc2, 0x88, 0x47, 0x9e, 0x5c, 0x4a, 0x64, - 0xc1, 0x22, 0xfd, 0x4a, 0xe4, 0x52, 0xd6, 0x20, 0xcc, 0xd0, 0x6e, 0x4f, 0xd5, 0x40, 0xed, 0x64, - 0x0d, 0x96, 0x12, 0xbd, 0xac, 0x12, 0x93, 0x6b, 0x32, 0x85, 0x41, 0x46, 0x57, 0xb1, 0xbe, 0x66, - 0xa7, 0x8f, 0x22, 0x13, 0x22, 0xd7, 0x01, 0xc2, 0x34, 0x8e, 0x69, 0x88, 0x0a, 0x36, 0x2a, 0x18, - 0x88, 0xbc, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xa9, 0xb5, 0xdf, 0xf5, 0x7a, 0x42, 0xc4, - 0xc7, 0x34, 0x94, 0x79, 0xe4, 0x9c, 0x66, 0x3e, 0xbe, 0xb1, 0x01, 0x9e, 0xeb, 0x4b, 0x00, 0xbb, - 0xc1, 0x1e, 0xc0, 0x22, 0x4b, 0xf3, 0x95, 0x92, 0x0e, 0xa7, 0x6d, 0xd9, 0x72, 0x10, 0x41, 0xf1, - 0x4d, 0xb8, 0xc2, 0xdf, 0x2e, 0x63, 0x96, 0x9c, 0xfa, 0x22, 0xc8, 0x16, 0x54, 0x38, 0x23, 0x34, - 0x30, 0xd2, 0xe8, 0x4b, 0x04, 0xdd, 0x6f, 0x81, 0x1c, 0x64, 0x34, 0x10, 0xf4, 0x5f, 0x74, 0xd7, - 0xb2, 0x53, 0xb6, 0x2e, 0xec, 0x94, 0xff, 0x87, 0x71, 0xc5, 0xb4, 0x6a, 0x34, 0xd2, 0xe3, 0xab, - 0x55, 0xf4, 0xbe, 0x3c, 0x56, 0x4c, 0x6b, 0x8f, 0x3f, 0x59, 0x40, 0x1e, 0xe3, 0x4b, 0xf8, 0x6f, - 0x23, 0x44, 0x72, 0x58, 0xb6, 0x36, 0xf5, 0xd2, 0xa2, 0x40, 0x04, 0xba, 0xf9, 0x0e, 0x19, 0x57, - 0xf6, 0x1f, 0x07, 0x22, 0xd0, 0x0d, 0x30, 0xa3, 0x61, 0x9e, 0xc9, 0x7e, 0x8c, 0xbc, 0xc2, 0x06, - 0xe8, 0x15, 0x90, 0x0c, 0xb4, 0x12, 0x90, 0x0e, 0xf4, 0x37, 0x0b, 0xc6, 0x0f, 0x39, 0x67, 0x8b, - 0xe4, 0x9b, 0x34, 0xce, 0x97, 0xb4, 0x88, 0x74, 0x07, 0xba, 0x61, 0x9a, 0x27, 0x02, 0xa3, 0xec, - 0x7a, 0x6a, 0xb3, 0x41, 0xab, 0x56, 0x8d, 0x56, 0x1b, 0xc4, 0x6c, 0xd7, 0x89, 0x69, 0x10, 0xaf, - 0x53, 0x21, 0xde, 0x87, 0x30, 0x90, 0xe9, 0xf9, 0x21, 0x4d, 0x04, 0xcd, 0xf4, 0x3b, 0x06, 0x09, - 0x1d, 0x20, 0xe2, 0x9e, 0xc1, 0x4e, 0x35, 0x50, 0x3d, 0x45, 0xce, 0xed, 0x2a, 0xf2, 0xd5, 0x65, - 0xb1, 0x8e, 0x52, 0x2e, 0x25, 0x7f, 0x57, 0xf9, 0x49, 0xcc, 0x42, 0x5f, 0x0a, 0x54, 0x74, 0xb6, - 0x42, 0x5e, 0x65, 0xf1, 0x3a, 0xe7, 0x8e, 0x91, 0xb3, 0xfb, 0x39, 0x8c, 0xd5, 0x10, 0xaf, 0x16, - 0x68, 0x0f, 0xe0, 0x0c, 0x01, 0x9f, 0x45, 0x6a, 0x7e, 0xd9, 0x9e, 0xad, 0x90, 0xa7, 0x11, 0x77, - 0xbf, 0x04, 0xfb, 0x28, 0x55, 0x39, 0x73, 0x72, 0x07, 0xec, 0xb8, 0xd8, 0xe8, 0x51, 0x47, 0xd6, - 0x7c, 0x2a, 0xf4, 0xbc, 0xb5, 0x92, 0xfb, 0x00, 0xfa, 0x05, 0x5c, 0xe4, 0x61, 0x9d, 0x97, 0x47, - 0x6b, 0x23, 0x0f, 0xf7, 0x0f, 0x0b, 0x76, 0xaa, 0x21, 0xeb, 0x52, 0xbd, 0x82, 0x51, 0xe9, 0xc2, - 0x5f, 0x06, 0x2b, 0x1d, 0xcb, 0x1d, 0x33, 0x96, 0xfa, 0xb1, 0x32, 0x40, 0xfe, 0x2c, 0x58, 0x29, - 0xf6, 0x0c, 0x63, 0x03, 0x9a, 0xbc, 0x84, 0xed, 0x9a, 0x4a, 0xc3, 0xf4, 0xba, 0x65, 0x4e, 0xaf, - 0xca, 0x04, 0x2e, 0x4f, 0x9b, 0x23, 0xed, 0x3e, 0x5c, 0x55, 0x84, 0x3d, 0x28, 0xf9, 0x55, 0xd4, - 0xbe, 0x4a, 0x43, 0x6b, 0x93, 0x86, 0xee, 0x04, 0x9c, 0xfa, 0x51, 0x4d, 0xf8, 0x05, 0x6c, 0x1f, - 0x8b, 0x40, 0x30, 0x2e, 0x58, 0x58, 0x7e, 0x4a, 0x6d, 0xf0, 0xd6, 0x7a, 0x57, 0x43, 0xad, 0x33, - 0x7f, 0x0b, 0xda, 0x42, 0x14, 0x9c, 0x92, 0x4b, 0x79, 0x0b, 0xc4, 0xf4, 0xa4, 0xef, 0xe0, 0x3d, - 0xb8, 0x92, 0x7c, 0x10, 0xa9, 0x08, 0x62, 0x35, 0xb0, 0x3a, 0x38, 0xb0, 0x6c, 0x44, 0x70, 0x62, - 0xa9, 0x9e, 0x1e, 0x29, 0x69, 0x57, 0x8d, 0x33, 0x09, 0xa0, 0x70, 0x0f, 0x00, 0x9f, 0x8f, 0x62, - 0x7e, 0x4f, 
0x9d, 0x95, 0xc8, 0x81, 0x04, 0xee, 0xfe, 0xd5, 0x85, 0xe1, 0x31, 0x0d, 0x5e, 0x53, - 0x1a, 0xc9, 0x79, 0x99, 0x91, 0x45, 0xc1, 0xad, 0xea, 0x37, 0x2d, 0xb9, 0xb9, 0x49, 0xa2, 0xc6, - 0x8f, 0xe8, 0xc9, 0x27, 0xef, 0x52, 0xd3, 0xd7, 0x74, 0x89, 0x1c, 0xc1, 0xc0, 0xf8, 0x68, 0x24, - 0xbb, 0xc6, 0xc1, 0xda, 0xb7, 0xf0, 0x64, 0xef, 0x1c, 0xa9, 0x69, 0xcd, 0x98, 0x0c, 0xa6, 0xb5, - 0xfa, 0x2c, 0x32, 0xad, 0x35, 0x8d, 0x13, 0xb4, 0x66, 0x74, 0x7d, 0xd3, 0x5a, 0x7d, 0xce, 0x98, - 0xd6, 0x9a, 0x46, 0x05, 0x5a, 0x33, 0x5a, 0xb3, 0x69, 0xad, 0x3e, 0x42, 0x4c, 0x6b, 0x4d, 0xfd, - 0xfc, 0x12, 0x79, 0x01, 0x43, 0xb3, 0x4f, 0x12, 0xe3, 0x40, 0x43, 0xa3, 0x9f, 0x5c, 0x3f, 0x4f, - 0x6c, 0x1a, 0x34, 0xdb, 0x82, 0x69, 0xb0, 0xa1, 0x31, 0x9a, 0x06, 0x9b, 0xba, 0x89, 0x7b, 0x89, - 0x7c, 0x07, 0x5b, 0x9b, 0xcf, 0x93, 0xdc, 0xd8, 0x4c, 0xab, 0xf6, 0xea, 0x27, 0xee, 0x45, 0x2a, - 0xa5, 0xf1, 0xa7, 0x00, 0xeb, 0x57, 0x47, 0xae, 0xad, 0xcf, 0xd4, 0x5e, 0xfd, 0x64, 0xb7, 0x59, - 0x58, 0x98, 0x7a, 0x74, 0x1d, 0xb6, 0xb8, 0xa2, 0xfe, 0x9c, 0xcf, 0xc2, 0x98, 0xd1, 0x44, 0x3c, - 0x02, 0x7c, 0x05, 0x5f, 0xcb, 0x3f, 0xc7, 0x93, 0x1e, 0xfe, 0x40, 0x7e, 0xf6, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x8d, 0x38, 0xa9, 0x9f, 0x4f, 0x0e, 0x00, 0x00, + // 1583 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xdb, 0x6f, 0xdc, 0x44, + 0x17, 0xaf, 0xf7, 0xee, 0xb3, 0xbb, 0x6d, 0x32, 0x49, 0xbf, 0xba, 0x9b, 0xcb, 0x97, 0x3a, 0x5f, + 0xfb, 0xa5, 0xa2, 0x0a, 0x55, 0xe1, 0xa1, 0xa5, 0x42, 0xa2, 0xcd, 0x05, 0x45, 0xa4, 0x17, 0x39, + 0x2d, 0x02, 0x21, 0x61, 0x39, 0xf6, 0xec, 0x66, 0x88, 0xed, 0x59, 0xec, 0x71, 0x92, 0xf2, 0x27, + 0xf0, 0x82, 0xc4, 0x23, 0x12, 0xcf, 0xfc, 0x13, 0x88, 0x17, 0xc4, 0xbf, 0xc3, 0x23, 0xcf, 0x68, + 0x2e, 0xf6, 0x8e, 0xd7, 0x9b, 0xa4, 0x08, 0xf5, 0xcd, 0x73, 0xae, 0xbf, 0x73, 0xe6, 0x5c, 0x66, + 0x17, 0xba, 0x43, 0x12, 0xe2, 0x64, 0x73, 0x9c, 0x50, 0x46, 0x51, 0x47, 0x1c, 0xdc, 0xf1, 0xa1, + 0xfd, 0x02, 0x96, 0xf6, 0x29, 0x3d, 0xce, 0xc6, 0xdb, 0x24, 0xc1, 0x3e, 0xa3, 0xc9, 0x9b, 0x9d, + 0x98, 0x25, 0x6f, 0x1c, 0xfc, 0x6d, 0x86, 0x53, 0x86, 0x96, 0xc1, 0x0c, 0x72, 0x86, 0x65, 0xac, + 0x19, 0x1b, 0xa6, 0x33, 0x21, 0x20, 0x04, 0x8d, 0xd8, 0x8b, 0xb0, 0x55, 0x13, 0x0c, 0xf1, 0x6d, + 0xef, 0xc0, 0xf2, 0x6c, 0x83, 0xe9, 0x98, 0xc6, 0x29, 0x46, 0xb7, 0xa1, 0x89, 0x39, 0x41, 0x58, + 0xeb, 0x3e, 0xb8, 0xb6, 0x99, 0x43, 0xd9, 0x94, 0x72, 0x92, 0x6b, 0xff, 0x66, 0x00, 0xda, 0x27, + 0x29, 0xe3, 0x44, 0x82, 0xd3, 0xb7, 0xc3, 0xf3, 0x1f, 0x68, 0x8d, 0x13, 0x3c, 0x24, 0x67, 0x0a, + 0x91, 0x3a, 0xa1, 0x7b, 0x30, 0x9f, 0x32, 0x2f, 0x61, 0xbb, 0x09, 0x8d, 0x76, 0x49, 0x88, 0x9f, + 0x73, 0xd0, 0x75, 0x21, 0x52, 0x65, 0xa0, 0x4d, 0x40, 0x24, 0xf6, 0xc3, 0x2c, 0x25, 0x27, 0xf8, + 0x20, 0xe7, 0x5a, 0x8d, 0x35, 0x63, 0xa3, 0xe3, 0xcc, 0xe0, 0xa0, 0x45, 0x68, 0x86, 0x24, 0x22, + 0xcc, 0x6a, 0xae, 0x19, 0x1b, 0x7d, 0x47, 0x1e, 0xec, 0x4f, 0x60, 0xa1, 0x84, 0x5f, 0x85, 0x7f, + 0x17, 0xda, 0x58, 0x92, 0x2c, 0x63, 0xad, 0x3e, 0x2b, 0x01, 0x39, 0xdf, 0xfe, 0xb9, 0x06, 0x4d, + 0x41, 0x2a, 0xf2, 0x6c, 0x4c, 0xf2, 0x8c, 0x6e, 0x41, 0x8f, 0xa4, 0xee, 0x24, 0x19, 0x35, 0x81, + 0xaf, 0x4b, 0xd2, 0x22, 0xef, 0xe8, 0x3d, 0x68, 0xf9, 0x47, 0x59, 0x7c, 0x9c, 0x5a, 0x75, 0xe1, + 0x6a, 0x61, 0xe2, 0x8a, 0x07, 0xbb, 0xc5, 0x79, 0x8e, 0x12, 0x41, 0x0f, 0x01, 0x3c, 0xc6, 0x12, + 0x72, 0x98, 0x31, 0x9c, 0x8a, 0x68, 0xbb, 0x0f, 0x2c, 0x4d, 0x21, 0x4b, 0xf1, 0x93, 0x82, 0xef, + 0x68, 0xb2, 0xe8, 0x11, 0x74, 0xf0, 0x19, 0xc3, 0x71, 0x80, 0x03, 0xab, 0x29, 0x1c, 0xad, 0x4c, + 0xc5, 0xb4, 0xb9, 
0xa3, 0xf8, 0x32, 0xc2, 0x42, 0x7c, 0xf0, 0x18, 0xfa, 0x25, 0x16, 0x9a, 0x83, + 0xfa, 0x31, 0xce, 0x6f, 0x96, 0x7f, 0xf2, 0xec, 0x9e, 0x78, 0x61, 0x26, 0x8b, 0xac, 0xe7, 0xc8, + 0xc3, 0x47, 0xb5, 0x87, 0x86, 0xbd, 0x0d, 0xe6, 0x6e, 0x16, 0x86, 0x85, 0x62, 0x40, 0x92, 0x5c, + 0x31, 0x20, 0xc9, 0xa4, 0xd0, 0x6a, 0x17, 0x16, 0xda, 0xaf, 0x06, 0xcc, 0xef, 0x9c, 0xe0, 0x98, + 0x3d, 0xa7, 0x8c, 0x0c, 0x89, 0xef, 0x31, 0x42, 0x63, 0x74, 0x0f, 0x4c, 0x1a, 0x06, 0xee, 0x85, + 0x95, 0xda, 0xa1, 0xa1, 0x42, 0x7d, 0x0f, 0xcc, 0x18, 0x9f, 0xba, 0x17, 0xba, 0xeb, 0xc4, 0xf8, + 0x54, 0x4a, 0xaf, 0x43, 0x3f, 0xc0, 0x21, 0x66, 0xd8, 0x2d, 0x6e, 0x87, 0x5f, 0x5d, 0x4f, 0x12, + 0xb7, 0xe4, 0x75, 0xdc, 0x81, 0x6b, 0xdc, 0xe4, 0xd8, 0x4b, 0x70, 0xcc, 0xdc, 0xb1, 0xc7, 0x8e, + 0xc4, 0x9d, 0x98, 0x4e, 0x3f, 0xc6, 0xa7, 0x2f, 0x05, 0xf5, 0xa5, 0xc7, 0x8e, 0xec, 0xbf, 0x0c, + 0x30, 0x8b, 0xcb, 0x44, 0x37, 0xa0, 0xcd, 0xdd, 0xba, 0x24, 0x50, 0x99, 0x68, 0xf1, 0xe3, 0x5e, + 0xc0, 0x3b, 0x83, 0x0e, 0x87, 0x29, 0x66, 0x02, 0x5e, 0xdd, 0x51, 0x27, 0x5e, 0x59, 0x29, 0xf9, + 0x4e, 0x36, 0x43, 0xc3, 0x11, 0xdf, 0x3c, 0xe3, 0x11, 0x23, 0x11, 0x16, 0x0e, 0xeb, 0x8e, 0x3c, + 0xa0, 0x05, 0x68, 0x62, 0x97, 0x79, 0x23, 0x51, 0xe5, 0xa6, 0xd3, 0xc0, 0xaf, 0xbc, 0x11, 0xfa, + 0x1f, 0x5c, 0x4d, 0x69, 0x96, 0xf8, 0xd8, 0xcd, 0xdd, 0xb6, 0x04, 0xb7, 0x27, 0xa9, 0xbb, 0xd2, + 0xb9, 0x0d, 0xf5, 0x21, 0x09, 0xac, 0xb6, 0x48, 0xcc, 0x5c, 0xb9, 0x08, 0xf7, 0x02, 0x87, 0x33, + 0xd1, 0xfb, 0x00, 0x85, 0xa5, 0xc0, 0xea, 0x9c, 0x23, 0x6a, 0xe6, 0x76, 0x03, 0xfb, 0x0b, 0x68, + 0x29, 0xf3, 0x4b, 0x60, 0x9e, 0xd0, 0x30, 0x8b, 0x8a, 0xb0, 0xfb, 0x4e, 0x47, 0x12, 0xf6, 0x02, + 0x74, 0x13, 0xc4, 0xac, 0x73, 0x79, 0x55, 0xd5, 0x44, 0x90, 0x22, 0x43, 0x9f, 0x61, 0x31, 0x2d, + 0x7c, 0x4a, 0x8f, 0x89, 0x8c, 0xbe, 0xed, 0xa8, 0x93, 0xfd, 0x67, 0x0d, 0xae, 0x96, 0xcb, 0x9d, + 0xbb, 0x10, 0x56, 0x44, 0xae, 0x0c, 0x61, 0x46, 0x98, 0x3d, 0x28, 0xe5, 0xab, 0xa6, 0xe7, 0x2b, + 0x57, 0x89, 0x68, 0x20, 0x1d, 0xf4, 0xa5, 0xca, 0x33, 0x1a, 0x60, 0x5e, 0xad, 0x19, 0x09, 0x44, + 0x82, 0xfb, 0x0e, 0xff, 0xe4, 0x94, 0x11, 0x09, 0xd4, 0x08, 0xe1, 0x9f, 0x02, 0x5e, 0x22, 0xec, + 0xb6, 0xe4, 0x95, 0xc9, 0x13, 0xbf, 0xb2, 0x88, 0x53, 0xdb, 0xf2, 0x1e, 0xf8, 0x37, 0x5a, 0x83, + 0x6e, 0x82, 0xc7, 0xa1, 0xaa, 0x5e, 0x91, 0x3e, 0xd3, 0xd1, 0x49, 0x68, 0x15, 0xc0, 0xa7, 0x61, + 0x88, 0x7d, 0x21, 0x60, 0x0a, 0x01, 0x8d, 0xc2, 0x2b, 0x87, 0xb1, 0xd0, 0x4d, 0xb1, 0x6f, 0xc1, + 0x9a, 0xb1, 0xd1, 0x74, 0x5a, 0x8c, 0x85, 0x07, 0xd8, 0xe7, 0x71, 0x64, 0x29, 0x4e, 0x5c, 0x31, + 0x80, 0xba, 0x42, 0xaf, 0xc3, 0x09, 0x62, 0x54, 0xae, 0x00, 0x8c, 0x12, 0x9a, 0x8d, 0x25, 0xb7, + 0xb7, 0x56, 0xe7, 0xf3, 0x58, 0x50, 0x04, 0xfb, 0x36, 0x5c, 0x4d, 0xdf, 0x44, 0x21, 0x89, 0x8f, + 0x5d, 0xe6, 0x25, 0x23, 0xcc, 0xac, 0xbe, 0xac, 0x61, 0x45, 0x7d, 0x25, 0x88, 0xf6, 0x97, 0x80, + 0xb6, 0x12, 0xec, 0x31, 0xfc, 0x0f, 0x56, 0xcf, 0x5b, 0x76, 0xf7, 0x75, 0x58, 0x28, 0x99, 0x96, + 0x53, 0x98, 0x7b, 0x7c, 0x3d, 0x0e, 0xde, 0x95, 0xc7, 0x92, 0x69, 0xe5, 0xf1, 0x07, 0x03, 0xd0, + 0xb6, 0x68, 0xf0, 0x7f, 0xb7, 0x5f, 0x79, 0xcb, 0xf1, 0xb9, 0x2f, 0x07, 0x48, 0xe0, 0x31, 0x4f, + 0x6d, 0xa6, 0x1e, 0x49, 0xa5, 0xfd, 0x6d, 0x8f, 0x79, 0x6a, 0x3b, 0x24, 0xd8, 0xcf, 0x12, 0xbe, + 0xac, 0x44, 0x5d, 0x89, 0xed, 0xe0, 0xe4, 0x24, 0x0e, 0xb4, 0x04, 0x48, 0x01, 0xfd, 0xc9, 0x00, + 0xeb, 0x09, 0xa3, 0x11, 0xf1, 0x1d, 0xcc, 0x1d, 0x96, 0xe0, 0xae, 0x43, 0x9f, 0x8f, 0xc5, 0x69, + 0xc8, 0x3d, 0x1a, 0x06, 0x93, 0xb5, 0x73, 0x13, 0xf8, 0x64, 0x74, 0x35, 0xe4, 0x6d, 0x1a, 0x06, + 0xa2, 0x20, 0xd6, 0x81, 0x8f, 0x2f, 0x4d, 
0x5f, 0x2e, 0xe1, 0x5e, 0x8c, 0x4f, 0x4b, 0xfa, 0x5c, + 0x48, 0xe8, 0xcb, 0x99, 0xd7, 0x8e, 0xf1, 0x29, 0xd7, 0xb7, 0x97, 0xe0, 0xe6, 0x0c, 0x6c, 0x0a, + 0xf9, 0x2f, 0x06, 0x2c, 0x3c, 0x49, 0x53, 0x32, 0x8a, 0x3f, 0x17, 0xdd, 0x9f, 0x83, 0x5e, 0x84, + 0xa6, 0x4f, 0xb3, 0x98, 0x09, 0xb0, 0x4d, 0x47, 0x1e, 0xa6, 0x1a, 0xa2, 0x56, 0x69, 0x88, 0xa9, + 0x96, 0xaa, 0x57, 0x5b, 0x4a, 0x6b, 0x99, 0x46, 0xa9, 0x65, 0xfe, 0x0b, 0x5d, 0x7e, 0x31, 0xae, + 0x8f, 0x63, 0x86, 0x13, 0x35, 0x30, 0x81, 0x93, 0xb6, 0x04, 0xc5, 0xfe, 0xde, 0x80, 0xc5, 0x32, + 0x52, 0xf5, 0x3a, 0x38, 0x77, 0x7e, 0xf3, 0x81, 0x91, 0x84, 0x0a, 0x26, 0xff, 0xe4, 0xad, 0x37, + 0xce, 0x0e, 0x43, 0xe2, 0xbb, 0x9c, 0x21, 0xe1, 0x99, 0x92, 0xf2, 0x3a, 0x09, 0x27, 0x41, 0x37, + 0xf4, 0xa0, 0x11, 0x34, 0xbc, 0x8c, 0x1d, 0xe5, 0x33, 0x9c, 0x7f, 0xdb, 0x1f, 0xc2, 0x82, 0x7c, + 0xb0, 0x95, 0xb3, 0xb6, 0x02, 0x50, 0x4c, 0x55, 0xf9, 0x56, 0x31, 0x1d, 0x33, 0x1f, 0xab, 0xa9, + 0xfd, 0x31, 0x98, 0xfb, 0x54, 0x26, 0x22, 0x45, 0xf7, 0xc1, 0x0c, 0xf3, 0x83, 0x7a, 0xd6, 0xa0, + 0x49, 0x7b, 0xe4, 0x72, 0xce, 0x44, 0xc8, 0x7e, 0x0c, 0x9d, 0x9c, 0x9c, 0xc7, 0x66, 0x9c, 0x17, + 0x5b, 0x6d, 0x2a, 0x36, 0xfb, 0x77, 0x03, 0x16, 0xcb, 0x90, 0x55, 0xfa, 0x5e, 0x43, 0xbf, 0x70, + 0xe1, 0x46, 0xde, 0x58, 0x61, 0xb9, 0xaf, 0x63, 0xa9, 0xaa, 0x15, 0x00, 0xd3, 0x67, 0xde, 0x58, + 0x96, 0x54, 0x2f, 0xd4, 0x48, 0x83, 0x57, 0x30, 0x5f, 0x11, 0x99, 0xf1, 0x52, 0xb9, 0xab, 0xbf, + 0x54, 0x4a, 0xaf, 0xad, 0x42, 0x5b, 0x7f, 0xbe, 0x3c, 0x82, 0x1b, 0xb2, 0xff, 0xb6, 0x8a, 0xa2, + 0xcb, 0x73, 0x5f, 0xae, 0x4d, 0x63, 0xba, 0x36, 0xed, 0x01, 0x58, 0x55, 0x55, 0xd5, 0x05, 0x23, + 0x98, 0x3f, 0x60, 0x1e, 0x23, 0x29, 0x23, 0x7e, 0xf1, 0x6c, 0x9e, 0x2a, 0x66, 0xe3, 0xb2, 0xfd, + 0x50, 0x6d, 0x87, 0x39, 0xa8, 0x33, 0x96, 0xd7, 0x19, 0xff, 0xe4, 0xb7, 0x80, 0x74, 0x4f, 0xea, + 0x0e, 0xde, 0x81, 0x2b, 0x5e, 0x0f, 0x8c, 0x32, 0x2f, 0x94, 0xfb, 0xb7, 0x21, 0xf6, 0xaf, 0x29, + 0x28, 0x62, 0x01, 0xcb, 0x15, 0x15, 0x48, 0x6e, 0x53, 0x6e, 0x67, 0x4e, 0x10, 0xcc, 0x15, 0x00, + 0xd1, 0x52, 0xb2, 0x1b, 0x5a, 0x52, 0x97, 0x53, 0xb6, 0x38, 0xc1, 0x5e, 0x85, 0xe5, 0x4f, 0x31, + 0xe3, 0x2f, 0x89, 0x64, 0x8b, 0xc6, 0x43, 0x32, 0xca, 0x12, 0x4f, 0xbb, 0x0a, 0xfb, 0x47, 0x03, + 0x56, 0xce, 0x11, 0x50, 0x01, 0x5b, 0xd0, 0x8e, 0xbc, 0x94, 0xe1, 0x24, 0xef, 0x92, 0xfc, 0x38, + 0x9d, 0x8a, 0xda, 0x65, 0xa9, 0xa8, 0x57, 0x52, 0x71, 0x1d, 0x5a, 0x91, 0x77, 0xe6, 0x46, 0x87, + 0xea, 0xa9, 0xd0, 0x8c, 0xbc, 0xb3, 0x67, 0x87, 0x0f, 0xfe, 0x68, 0x43, 0xef, 0x00, 0x7b, 0xa7, + 0x18, 0x07, 0x02, 0x18, 0x1a, 0xe5, 0x0d, 0x51, 0xfe, 0xd1, 0x85, 0x6e, 0x4f, 0x57, 0xfe, 0xcc, + 0x5f, 0x79, 0x83, 0x3b, 0x97, 0x89, 0xa9, 0xda, 0xba, 0x82, 0xf6, 0xa1, 0xab, 0xfd, 0xaa, 0x41, + 0xcb, 0x9a, 0x62, 0xe5, 0xc7, 0xda, 0x60, 0xe5, 0x1c, 0xae, 0x6e, 0x4d, 0xdb, 0xce, 0xba, 0xb5, + 0xea, 0x7b, 0x40, 0xb7, 0x36, 0x6b, 0xa5, 0x0b, 0x6b, 0xda, 0xe6, 0xd5, 0xad, 0x55, 0x77, 0xbd, + 0x6e, 0x6d, 0xd6, 0xba, 0x16, 0xd6, 0xb4, 0xf5, 0xa8, 0x5b, 0xab, 0xae, 0x71, 0xdd, 0xda, 0xac, + 0x9d, 0x7a, 0x05, 0x7d, 0x0d, 0xf3, 0x95, 0xc5, 0x85, 0xec, 0x89, 0xd6, 0x79, 0x1b, 0x77, 0xb0, + 0x7e, 0xa1, 0x4c, 0x61, 0xff, 0x05, 0xf4, 0xf4, 0x85, 0x82, 0x34, 0x40, 0x33, 0x56, 0xe2, 0x60, + 0xf5, 0x3c, 0xb6, 0x6e, 0x50, 0x9f, 0x95, 0xba, 0xc1, 0x19, 0xdb, 0x42, 0x37, 0x38, 0x6b, 0xc4, + 0xda, 0x57, 0xd0, 0x57, 0x30, 0x37, 0x3d, 0xb3, 0xd0, 0xad, 0xe9, 0xb4, 0x55, 0x46, 0xe1, 0xc0, + 0xbe, 0x48, 0xa4, 0x30, 0xbe, 0x07, 0x30, 0x19, 0x45, 0x68, 0x69, 0xa2, 0x53, 0x19, 0x85, 0x83, + 0xe5, 0xd9, 0xcc, 0xc2, 0xd4, 0x37, 0x70, 0x7d, 0x66, 0xbf, 0x23, 
0xad, 0x49, 0x2e, 0x9a, 0x18, + 0x83, 0xff, 0x5f, 0x2a, 0x97, 0xfb, 0x7a, 0xba, 0x0a, 0x73, 0xa9, 0x6c, 0xe3, 0x61, 0xba, 0xe9, + 0x87, 0x04, 0xc7, 0xec, 0x29, 0x08, 0x8d, 0x97, 0x09, 0x65, 0xf4, 0xb0, 0x25, 0xfe, 0xad, 0xf9, + 0xe0, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2d, 0xec, 0xb0, 0x56, 0xbc, 0x11, 0x00, 0x00, } diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go new file mode 100644 index 000000000..5c40332e6 --- /dev/null +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -0,0 +1,69 @@ +package filer_pb + +import ( + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func toFileIdObject(fileIdStr string) (*FileId, error) { + t, err := needle.ParseFileIdFromString(fileIdStr) + if err != nil { + return nil, err + } + return &FileId{ + VolumeId: uint32(t.VolumeId), + Cookie: uint32(t.Cookie), + FileKey: uint64(t.Key), + }, nil + +} + +func (fid *FileId) toFileIdString() string { + return needle.NewFileId(needle.VolumeId(fid.VolumeId), fid.FileKey, fid.Cookie).String() +} + +func (c *FileChunk) GetFileIdString() string { + if c.FileId != "" { + return c.FileId + } + if c.Fid != nil { + c.FileId = c.Fid.toFileIdString() + return c.FileId + } + return "" +} + +func BeforeEntrySerialization(chunks []*FileChunk) { + + for _, chunk := range chunks { + + if chunk.FileId != "" { + if fid, err := toFileIdObject(chunk.FileId); err == nil { + chunk.Fid = fid + chunk.FileId = "" + } + } + + if chunk.SourceFileId != "" { + if fid, err := toFileIdObject(chunk.SourceFileId); err == nil { + chunk.SourceFid = fid + chunk.SourceFileId = "" + } + } + + } +} + +func AfterEntryDeserialization(chunks []*FileChunk) { + + for _, chunk := range chunks { + + if chunk.Fid != nil && chunk.FileId == "" { + chunk.FileId = chunk.Fid.toFileIdString() + } + + if chunk.SourceFid != nil && chunk.SourceFileId == "" { + chunk.SourceFileId = chunk.SourceFid.toFileIdString() + } + + } +} diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go new file mode 100644 index 000000000..d4468c011 --- /dev/null +++ b/weed/pb/filer_pb/filer_pb_helper_test.go @@ -0,0 +1,17 @@ +package filer_pb + +import ( + "testing" + + "github.com/golang/protobuf/proto" +) + +func TestFileIdSize(t *testing.T) { + fileIdStr := "11745,0293434534cbb9892b" + + fid, _ := toFileIdObject(fileIdStr) + bytes, _ := proto.Marshal(fid) + + println(len(fileIdStr)) + println(len(bytes)) +} diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 544160c06..7dcab40db 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -15,6 +15,16 @@ service Seaweed { } rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } + rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { + } + rpc CollectionDelete (CollectionDeleteRequest) returns (CollectionDeleteResponse) { + } + rpc VolumeList (VolumeListRequest) returns (VolumeListResponse) { + } + rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) { + } + rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { + } } ////////////////////////////////////////////////// @@ -29,15 +39,25 @@ message Heartbeat { string rack = 7; uint32 admin_port = 8; repeated VolumeInformationMessage volumes = 9; - // delta volume ids - repeated uint32 new_vids = 10; - repeated uint32 deleted_vids = 11; + // delta volumes + repeated VolumeShortInformationMessage new_volumes = 10; + repeated VolumeShortInformationMessage deleted_volumes = 11; + bool 
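// Illustrative sketch (not part of the patch): round-trip through the new
// filer_pb helpers added above, assuming chunks that still carry the legacy
// string file id; the id string is the one used in the helper test.
package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	chunks := []*filer_pb.FileChunk{{FileId: "11745,0293434534cbb9892b", Size: 1024}}

	// Before persisting an entry, string ids are folded into the compact
	// FileId message to shrink stored metadata.
	filer_pb.BeforeEntrySerialization(chunks)

	// After loading, the string form is restored for callers that expect it;
	// GetFileIdString also converts lazily on demand.
	filer_pb.AfterEntryDeserialization(chunks)
	log.Println(chunks[0].GetFileIdString())
}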
has_no_volumes = 12; + + // erasure coding + repeated VolumeEcShardInformationMessage ec_shards = 16; + // delta erasure coding shards + repeated VolumeEcShardInformationMessage new_ec_shards = 17; + repeated VolumeEcShardInformationMessage deleted_ec_shards = 18; + bool has_no_ec_shards = 19; + } message HeartbeatResponse { - uint64 volumeSizeLimit = 1; - string secretKey = 2; - string leader = 3; + uint64 volume_size_limit = 1; + string leader = 2; + string metrics_address = 3; + uint32 metrics_interval_seconds = 4; } message VolumeInformationMessage { @@ -51,6 +71,22 @@ message VolumeInformationMessage { uint32 replica_placement = 8; uint32 version = 9; uint32 ttl = 10; + uint32 compact_revision = 11; + int64 modified_at_second = 12; +} + +message VolumeShortInformationMessage { + uint32 id = 1; + string collection = 3; + uint32 replica_placement = 8; + uint32 version = 9; + uint32 ttl = 10; +} + +message VolumeEcShardInformationMessage { + uint32 id = 1; + string collection = 2; + uint32 ec_index_bits = 3; } message Empty { @@ -109,6 +145,7 @@ message AssignResponse { string public_url = 3; uint64 count = 4; string error = 5; + string auth = 6; } message StatisticsRequest { @@ -124,3 +161,90 @@ message StatisticsResponse { uint64 used_size = 5; uint64 file_count = 6; } + +// +// collection related +// + +message StorageType { + string replication = 1; + string ttl = 2; +} +message Collection { + string name = 1; +} +message CollectionListRequest { + bool include_normal_volumes = 1; + bool include_ec_volumes = 2; +} +message CollectionListResponse { + repeated Collection collections = 1; +} + +message CollectionDeleteRequest { + string name = 1; +} +message CollectionDeleteResponse { +} + +// +// volume related +// +message DataNodeInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated VolumeInformationMessage volume_infos = 6; + repeated VolumeEcShardInformationMessage ec_shard_infos = 7; +} +message RackInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated DataNodeInfo data_node_infos = 6; +} +message DataCenterInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated RackInfo rack_infos = 6; +} +message TopologyInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated DataCenterInfo data_center_infos = 6; +} +message VolumeListRequest { +} +message VolumeListResponse { + TopologyInfo topology_info = 1; + uint64 volume_size_limit_mb = 2; +} + +message LookupEcVolumeRequest { + uint32 volume_id = 1; +} +message LookupEcVolumeResponse { + uint32 volume_id = 1; + message EcShardIdLocation { + uint32 shard_id = 1; + repeated Location locations = 2; + } + repeated EcShardIdLocation shard_id_locations = 2; +} + +message GetMasterConfigurationRequest { +} +message GetMasterConfigurationResponse { + string metrics_address = 1; + uint32 metrics_interval_seconds = 2; +} diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 894f08471..cc1766fe8 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -12,6 +12,8 @@ It has these top-level messages: Heartbeat HeartbeatResponse VolumeInformationMessage + VolumeShortInformationMessage + 
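// Illustrative sketch (not part of the patch): two of the new master RPCs
// defined in master.proto above, assuming an insecure connection to the
// master's gRPC port; the address is hypothetical.
package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := master_pb.NewSeaweedClient(conn)

	// VolumeList returns the topology tree plus the volume size limit.
	topo, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("volume size limit: %d MB", topo.VolumeSizeLimitMb)

	// GetMasterConfiguration exposes the metrics settings so other components
	// can report to the same address at the same interval.
	conf, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(conf.MetricsAddress, conf.MetricsIntervalSeconds)
}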
VolumeEcShardInformationMessage Empty SuperBlockExtra ClientListenRequest @@ -23,6 +25,22 @@ It has these top-level messages: AssignResponse StatisticsRequest StatisticsResponse + StorageType + Collection + CollectionListRequest + CollectionListResponse + CollectionDeleteRequest + CollectionDeleteResponse + DataNodeInfo + RackInfo + DataCenterInfo + TopologyInfo + VolumeListRequest + VolumeListResponse + LookupEcVolumeRequest + LookupEcVolumeResponse + GetMasterConfigurationRequest + GetMasterConfigurationResponse */ package master_pb @@ -56,9 +74,16 @@ type Heartbeat struct { Rack string `protobuf:"bytes,7,opt,name=rack" json:"rack,omitempty"` AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"` Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` - // delta volume ids - NewVids []uint32 `protobuf:"varint,10,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"` - DeletedVids []uint32 `protobuf:"varint,11,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"` + // delta volumes + NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes" json:"new_volumes,omitempty"` + DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes" json:"deleted_volumes,omitempty"` + HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes" json:"has_no_volumes,omitempty"` + // erasure coding + EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards" json:"ec_shards,omitempty"` + // delta erasure coding shards + NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards" json:"new_ec_shards,omitempty"` + DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards" json:"deleted_ec_shards,omitempty"` + HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards" json:"has_no_ec_shards,omitempty"` } func (m *Heartbeat) Reset() { *m = Heartbeat{} } @@ -129,24 +154,60 @@ func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage { return nil } -func (m *Heartbeat) GetNewVids() []uint32 { +func (m *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage { if m != nil { - return m.NewVids + return m.NewVolumes } return nil } -func (m *Heartbeat) GetDeletedVids() []uint32 { +func (m *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage { if m != nil { - return m.DeletedVids + return m.DeletedVolumes } return nil } +func (m *Heartbeat) GetHasNoVolumes() bool { + if m != nil { + return m.HasNoVolumes + } + return false +} + +func (m *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage { + if m != nil { + return m.EcShards + } + return nil +} + +func (m *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage { + if m != nil { + return m.NewEcShards + } + return nil +} + +func (m *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage { + if m != nil { + return m.DeletedEcShards + } + return nil +} + +func (m *Heartbeat) GetHasNoEcShards() bool { + if m != nil { + return m.HasNoEcShards + } + return false +} + type HeartbeatResponse struct { - VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volumeSizeLimit" json:"volumeSizeLimit,omitempty"` - SecretKey string `protobuf:"bytes,2,opt,name=secretKey" json:"secretKey,omitempty"` - Leader string 
`protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` + MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` } func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } @@ -161,13 +222,6 @@ func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 { return 0 } -func (m *HeartbeatResponse) GetSecretKey() string { - if m != nil { - return m.SecretKey - } - return "" -} - func (m *HeartbeatResponse) GetLeader() string { if m != nil { return m.Leader @@ -175,6 +229,20 @@ func (m *HeartbeatResponse) GetLeader() string { return "" } +func (m *HeartbeatResponse) GetMetricsAddress() string { + if m != nil { + return m.MetricsAddress + } + return "" +} + +func (m *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 { + if m != nil { + return m.MetricsIntervalSeconds + } + return 0 +} + type VolumeInformationMessage struct { Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` @@ -186,6 +254,8 @@ type VolumeInformationMessage struct { ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` + CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` + ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"` } func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } @@ -263,13 +333,107 @@ func (m *VolumeInformationMessage) GetTtl() uint32 { return 0 } +func (m *VolumeInformationMessage) GetCompactRevision() uint32 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *VolumeInformationMessage) GetModifiedAtSecond() int64 { + if m != nil { + return m.ModifiedAtSecond + } + return 0 +} + +type VolumeShortInformationMessage struct { + Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` +} + +func (m *VolumeShortInformationMessage) Reset() { *m = VolumeShortInformationMessage{} } +func (m *VolumeShortInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeShortInformationMessage) ProtoMessage() {} +func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *VolumeShortInformationMessage) GetId() uint32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *VolumeShortInformationMessage) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeShortInformationMessage) 
GetReplicaPlacement() uint32 { + if m != nil { + return m.ReplicaPlacement + } + return 0 +} + +func (m *VolumeShortInformationMessage) GetVersion() uint32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *VolumeShortInformationMessage) GetTtl() uint32 { + if m != nil { + return m.Ttl + } + return 0 +} + +type VolumeEcShardInformationMessage struct { + Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` +} + +func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } +func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *VolumeEcShardInformationMessage) GetId() uint32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *VolumeEcShardInformationMessage) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 { + if m != nil { + return m.EcIndexBits + } + return 0 +} + type Empty struct { } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } type SuperBlockExtra struct { ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"` @@ -278,7 +442,7 @@ type SuperBlockExtra struct { func (m *SuperBlockExtra) Reset() { *m = SuperBlockExtra{} } func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) } func (*SuperBlockExtra) ProtoMessage() {} -func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { if m != nil { @@ -297,7 +461,7 @@ func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_E func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) } func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 0} + return fileDescriptor0, []int{6, 0} } func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 { @@ -328,7 +492,7 @@ type ClientListenRequest struct { func (m *ClientListenRequest) Reset() { *m = ClientListenRequest{} } func (m *ClientListenRequest) String() string { return proto.CompactTextString(m) } func (*ClientListenRequest) ProtoMessage() {} -func (*ClientListenRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*ClientListenRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *ClientListenRequest) GetName() string { if m != nil { @@ -347,7 +511,7 @@ type VolumeLocation struct { func (m *VolumeLocation) Reset() { *m = VolumeLocation{} } func (m *VolumeLocation) String() string { return proto.CompactTextString(m) } func (*VolumeLocation) ProtoMessage() 
{} -func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *VolumeLocation) GetUrl() string { if m != nil { @@ -385,7 +549,7 @@ type LookupVolumeRequest struct { func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *LookupVolumeRequest) GetVolumeIds() []string { if m != nil { @@ -408,7 +572,7 @@ type LookupVolumeResponse struct { func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { if m != nil { @@ -427,7 +591,7 @@ func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVol func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) } func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{8, 0} + return fileDescriptor0, []int{10, 0} } func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { @@ -459,7 +623,7 @@ type Location struct { func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *Location) GetUrl() string { if m != nil { @@ -488,7 +652,7 @@ type AssignRequest struct { func (m *AssignRequest) Reset() { *m = AssignRequest{} } func (m *AssignRequest) String() string { return proto.CompactTextString(m) } func (*AssignRequest) ProtoMessage() {} -func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *AssignRequest) GetCount() uint64 { if m != nil { @@ -545,12 +709,13 @@ type AssignResponse struct { PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` Count uint64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` Error string `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"` + Auth string `protobuf:"bytes,6,opt,name=auth" json:"auth,omitempty"` } func (m *AssignResponse) Reset() { *m = AssignResponse{} } func (m *AssignResponse) String() string { return proto.CompactTextString(m) } func (*AssignResponse) ProtoMessage() {} -func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *AssignResponse) GetFid() string { if m != nil { @@ -587,6 +752,13 @@ func (m *AssignResponse) GetError() string { return "" } +func (m 
*AssignResponse) GetAuth() string { + if m != nil { + return m.Auth + } + return "" +} + type StatisticsRequest struct { Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` @@ -596,7 +768,7 @@ type StatisticsRequest struct { func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *StatisticsRequest) GetReplication() string { if m != nil { @@ -631,7 +803,7 @@ type StatisticsResponse struct { func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func (m *StatisticsResponse) GetReplication() string { if m != nil { @@ -675,10 +847,483 @@ func (m *StatisticsResponse) GetFileCount() uint64 { return 0 } +type StorageType struct { + Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,2,opt,name=ttl" json:"ttl,omitempty"` +} + +func (m *StorageType) Reset() { *m = StorageType{} } +func (m *StorageType) String() string { return proto.CompactTextString(m) } +func (*StorageType) ProtoMessage() {} +func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *StorageType) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *StorageType) GetTtl() string { + if m != nil { + return m.Ttl + } + return "" +} + +type Collection struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *Collection) Reset() { *m = Collection{} } +func (m *Collection) String() string { return proto.CompactTextString(m) } +func (*Collection) ProtoMessage() {} +func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *Collection) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type CollectionListRequest struct { + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes" json:"include_ec_volumes,omitempty"` +} + +func (m *CollectionListRequest) Reset() { *m = CollectionListRequest{} } +func (m *CollectionListRequest) String() string { return proto.CompactTextString(m) } +func (*CollectionListRequest) ProtoMessage() {} +func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *CollectionListRequest) GetIncludeNormalVolumes() bool { + if m != nil { + return m.IncludeNormalVolumes + } + return false +} + +func (m *CollectionListRequest) GetIncludeEcVolumes() bool { + if m != nil { + return m.IncludeEcVolumes + } + return false +} + +type CollectionListResponse struct { + Collections []*Collection `protobuf:"bytes,1,rep,name=collections" json:"collections,omitempty"` +} + +func (m 
*CollectionListResponse) Reset() { *m = CollectionListResponse{} } +func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) } +func (*CollectionListResponse) ProtoMessage() {} +func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *CollectionListResponse) GetCollections() []*Collection { + if m != nil { + return m.Collections + } + return nil +} + +type CollectionDeleteRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *CollectionDeleteRequest) Reset() { *m = CollectionDeleteRequest{} } +func (m *CollectionDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*CollectionDeleteRequest) ProtoMessage() {} +func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *CollectionDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type CollectionDeleteResponse struct { +} + +func (m *CollectionDeleteResponse) Reset() { *m = CollectionDeleteResponse{} } +func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*CollectionDeleteResponse) ProtoMessage() {} +func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +// +// volume related +// +type DataNodeInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos" json:"volume_infos,omitempty"` + EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos" json:"ec_shard_infos,omitempty"` +} + +func (m *DataNodeInfo) Reset() { *m = DataNodeInfo{} } +func (m *DataNodeInfo) String() string { return proto.CompactTextString(m) } +func (*DataNodeInfo) ProtoMessage() {} +func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *DataNodeInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *DataNodeInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *DataNodeInfo) GetVolumeInfos() []*VolumeInformationMessage { + if m != nil { + return m.VolumeInfos + } + return nil +} + +func (m *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage { + if m != nil { + return m.EcShardInfos + } + return nil +} + +type RackInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + 
MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos" json:"data_node_infos,omitempty"` +} + +func (m *RackInfo) Reset() { *m = RackInfo{} } +func (m *RackInfo) String() string { return proto.CompactTextString(m) } +func (*RackInfo) ProtoMessage() {} +func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *RackInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *RackInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *RackInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *RackInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *RackInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *RackInfo) GetDataNodeInfos() []*DataNodeInfo { + if m != nil { + return m.DataNodeInfos + } + return nil +} + +type DataCenterInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos" json:"rack_infos,omitempty"` +} + +func (m *DataCenterInfo) Reset() { *m = DataCenterInfo{} } +func (m *DataCenterInfo) String() string { return proto.CompactTextString(m) } +func (*DataCenterInfo) ProtoMessage() {} +func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *DataCenterInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *DataCenterInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *DataCenterInfo) GetRackInfos() []*RackInfo { + if m != nil { + return m.RackInfos + } + return nil +} + +type TopologyInfo struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` 
+ ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` + DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos" json:"data_center_infos,omitempty"` +} + +func (m *TopologyInfo) Reset() { *m = TopologyInfo{} } +func (m *TopologyInfo) String() string { return proto.CompactTextString(m) } +func (*TopologyInfo) ProtoMessage() {} +func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *TopologyInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TopologyInfo) GetVolumeCount() uint64 { + if m != nil { + return m.VolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetMaxVolumeCount() uint64 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetFreeVolumeCount() uint64 { + if m != nil { + return m.FreeVolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetActiveVolumeCount() uint64 { + if m != nil { + return m.ActiveVolumeCount + } + return 0 +} + +func (m *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { + if m != nil { + return m.DataCenterInfos + } + return nil +} + +type VolumeListRequest struct { +} + +func (m *VolumeListRequest) Reset() { *m = VolumeListRequest{} } +func (m *VolumeListRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeListRequest) ProtoMessage() {} +func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +type VolumeListResponse struct { + TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo" json:"topology_info,omitempty"` + VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb" json:"volume_size_limit_mb,omitempty"` +} + +func (m *VolumeListResponse) Reset() { *m = VolumeListResponse{} } +func (m *VolumeListResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeListResponse) ProtoMessage() {} +func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *VolumeListResponse) GetTopologyInfo() *TopologyInfo { + if m != nil { + return m.TopologyInfo + } + return nil +} + +func (m *VolumeListResponse) GetVolumeSizeLimitMb() uint64 { + if m != nil { + return m.VolumeSizeLimitMb + } + return 0 +} + +type LookupEcVolumeRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +} + +func (m *LookupEcVolumeRequest) Reset() { *m = LookupEcVolumeRequest{} } +func (m *LookupEcVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*LookupEcVolumeRequest) ProtoMessage() {} +func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *LookupEcVolumeRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +type LookupEcVolumeResponse struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations" json:"shard_id_locations,omitempty"` +} + +func (m *LookupEcVolumeResponse) Reset() { *m = LookupEcVolumeResponse{} } +func (m *LookupEcVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*LookupEcVolumeResponse) ProtoMessage() {} +func (*LookupEcVolumeResponse) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{29} } + +func (m *LookupEcVolumeResponse) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation { + if m != nil { + return m.ShardIdLocations + } + return nil +} + +type LookupEcVolumeResponse_EcShardIdLocation struct { + ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"` +} + +func (m *LookupEcVolumeResponse_EcShardIdLocation) Reset() { + *m = LookupEcVolumeResponse_EcShardIdLocation{} +} +func (m *LookupEcVolumeResponse_EcShardIdLocation) String() string { return proto.CompactTextString(m) } +func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} +func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{29, 0} +} + +func (m *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { + if m != nil { + return m.ShardId + } + return 0 +} + +func (m *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location { + if m != nil { + return m.Locations + } + return nil +} + +type GetMasterConfigurationRequest struct { +} + +func (m *GetMasterConfigurationRequest) Reset() { *m = GetMasterConfigurationRequest{} } +func (m *GetMasterConfigurationRequest) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationRequest) ProtoMessage() {} +func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type GetMasterConfigurationResponse struct { + MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` +} + +func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } +func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationResponse) ProtoMessage() {} +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { + if m != nil { + return m.MetricsAddress + } + return "" +} + +func (m *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { + if m != nil { + return m.MetricsIntervalSeconds + } + return 0 +} + func init() { proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse") proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage") + proto.RegisterType((*VolumeShortInformationMessage)(nil), "master_pb.VolumeShortInformationMessage") + proto.RegisterType((*VolumeEcShardInformationMessage)(nil), "master_pb.VolumeEcShardInformationMessage") proto.RegisterType((*Empty)(nil), "master_pb.Empty") proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra") proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding") @@ -692,6 +1337,23 @@ func init() { proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse") proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest") proto.RegisterType((*StatisticsResponse)(nil), 
"master_pb.StatisticsResponse") + proto.RegisterType((*StorageType)(nil), "master_pb.StorageType") + proto.RegisterType((*Collection)(nil), "master_pb.Collection") + proto.RegisterType((*CollectionListRequest)(nil), "master_pb.CollectionListRequest") + proto.RegisterType((*CollectionListResponse)(nil), "master_pb.CollectionListResponse") + proto.RegisterType((*CollectionDeleteRequest)(nil), "master_pb.CollectionDeleteRequest") + proto.RegisterType((*CollectionDeleteResponse)(nil), "master_pb.CollectionDeleteResponse") + proto.RegisterType((*DataNodeInfo)(nil), "master_pb.DataNodeInfo") + proto.RegisterType((*RackInfo)(nil), "master_pb.RackInfo") + proto.RegisterType((*DataCenterInfo)(nil), "master_pb.DataCenterInfo") + proto.RegisterType((*TopologyInfo)(nil), "master_pb.TopologyInfo") + proto.RegisterType((*VolumeListRequest)(nil), "master_pb.VolumeListRequest") + proto.RegisterType((*VolumeListResponse)(nil), "master_pb.VolumeListResponse") + proto.RegisterType((*LookupEcVolumeRequest)(nil), "master_pb.LookupEcVolumeRequest") + proto.RegisterType((*LookupEcVolumeResponse)(nil), "master_pb.LookupEcVolumeResponse") + proto.RegisterType((*LookupEcVolumeResponse_EcShardIdLocation)(nil), "master_pb.LookupEcVolumeResponse.EcShardIdLocation") + proto.RegisterType((*GetMasterConfigurationRequest)(nil), "master_pb.GetMasterConfigurationRequest") + proto.RegisterType((*GetMasterConfigurationResponse)(nil), "master_pb.GetMasterConfigurationResponse") } // Reference imports to suppress errors if they are not otherwise used. @@ -710,6 +1372,11 @@ type SeaweedClient interface { LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) + CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) + CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) + VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) + LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) + GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) } type seaweedClient struct { @@ -809,6 +1476,51 @@ func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, o return out, nil } +func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { + out := new(CollectionListResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) { + out := new(CollectionDeleteResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) { + out := new(VolumeListResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) { + out := new(LookupEcVolumeResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) { + out := new(GetMasterConfigurationResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for Seaweed service type SeaweedServer interface { @@ -817,6 +1529,11 @@ type SeaweedServer interface { LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) Assign(context.Context, *AssignRequest) (*AssignResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) + CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) + CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) + VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) + LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) + GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -929,6 +1646,96 @@ func _Seaweed_Statistics_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Seaweed_CollectionList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).CollectionList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/CollectionList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).CollectionList(ctx, req.(*CollectionListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_CollectionDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).CollectionDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/CollectionDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).CollectionDelete(ctx, req.(*CollectionDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_VolumeList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).VolumeList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/VolumeList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).VolumeList(ctx, req.(*VolumeListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_LookupEcVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LookupEcVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).LookupEcVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/LookupEcVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).LookupEcVolume(ctx, req.(*LookupEcVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMasterConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).GetMasterConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/GetMasterConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).GetMasterConfiguration(ctx, req.(*GetMasterConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -945,6 +1752,26 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ MethodName: "Statistics", Handler: _Seaweed_Statistics_Handler, }, + { + MethodName: "CollectionList", + Handler: _Seaweed_CollectionList_Handler, + }, + { + MethodName: "CollectionDelete", + Handler: _Seaweed_CollectionDelete_Handler, + }, + { + MethodName: "VolumeList", + Handler: _Seaweed_VolumeList_Handler, + }, + { + MethodName: "LookupEcVolume", + Handler: _Seaweed_LookupEcVolume_Handler, + }, + { + MethodName: "GetMasterConfiguration", + Handler: _Seaweed_GetMasterConfiguration_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -966,71 +1793,123 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1055 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x4b, 0x6f, 0xe4, 0x44, - 0x10, 0x5e, 0x7b, 0x9e, 0xae, 0xd9, 0xc9, 0x4e, 0x3a, 0x11, 0xf2, 0xce, 0xbe, 0x06, 0x73, 0x19, - 0x04, 0x8a, 0x96, 0x70, 0x44, 0x08, 0xb1, 0xd1, 0x22, 0xa2, 0x04, 0x36, 0x38, 0xb0, 0x07, 0x2e, - 0xa6, 0x63, 0x57, 0xa2, 0x56, 0xfc, 0xa2, 0xbb, 0x27, 0x99, 0xd9, 0x0b, 0x47, 0xfe, 0x15, 0x17, - 0xb8, 0xf1, 0x53, 0xb8, 0xf1, 0x0b, 0x50, 0x3f, 0xec, 0xf1, 0x38, 0x09, 0x91, 0x90, 0xb8, 0xb5, - 0xbf, 0xae, 0xee, 0xaa, 0xfe, 0xbe, 0x7a, 0x18, 0x1e, 0x66, 0x54, 0x48, 0xe4, 0x7b, 0x25, 0x2f, - 0x64, 0x41, 0x3c, 0xf3, 0x15, 0x95, 0x67, 0xc1, 0x5f, 0x2e, 0x78, 0x5f, 0x23, 0xe5, 0xf2, 0x0c, - 0xa9, 0x24, 0x5b, 0xe0, 0xb2, 
0xd2, 0x77, 0x66, 0xce, 0xdc, 0x0b, 0x5d, 0x56, 0x12, 0x02, 0xdd, - 0xb2, 0xe0, 0xd2, 0x77, 0x67, 0xce, 0x7c, 0x1c, 0xea, 0x35, 0x79, 0x06, 0x50, 0x2e, 0xce, 0x52, - 0x16, 0x47, 0x0b, 0x9e, 0xfa, 0x1d, 0x6d, 0xeb, 0x19, 0xe4, 0x07, 0x9e, 0x92, 0x39, 0x4c, 0x32, - 0xba, 0x8c, 0xae, 0x8a, 0x74, 0x91, 0x61, 0x14, 0x17, 0x8b, 0x5c, 0xfa, 0x5d, 0x7d, 0x7c, 0x2b, - 0xa3, 0xcb, 0xb7, 0x1a, 0x3e, 0x50, 0x28, 0x99, 0xa9, 0xa8, 0x96, 0xd1, 0x39, 0x4b, 0x31, 0xba, - 0xc4, 0x95, 0xdf, 0x9b, 0x39, 0xf3, 0x6e, 0x08, 0x19, 0x5d, 0x7e, 0xc5, 0x52, 0x3c, 0xc2, 0x15, - 0x79, 0x01, 0xa3, 0x84, 0x4a, 0x1a, 0xc5, 0x98, 0x4b, 0xe4, 0x7e, 0x5f, 0xfb, 0x02, 0x05, 0x1d, - 0x68, 0x44, 0xc5, 0xc7, 0x69, 0x7c, 0xe9, 0x0f, 0xf4, 0x8e, 0x5e, 0xab, 0xf8, 0x68, 0x92, 0xb1, - 0x3c, 0xd2, 0x91, 0x0f, 0xb5, 0x6b, 0x4f, 0x23, 0x27, 0x2a, 0xfc, 0xcf, 0x61, 0x60, 0x62, 0x13, - 0xbe, 0x37, 0xeb, 0xcc, 0x47, 0xfb, 0x1f, 0xec, 0xd5, 0x6c, 0xec, 0x99, 0xf0, 0x0e, 0xf3, 0xf3, - 0x82, 0x67, 0x54, 0xb2, 0x22, 0xff, 0x06, 0x85, 0xa0, 0x17, 0x18, 0x56, 0x67, 0xc8, 0x63, 0x18, - 0xe6, 0x78, 0x1d, 0x5d, 0xb1, 0x44, 0xf8, 0x30, 0xeb, 0xcc, 0xc7, 0xe1, 0x20, 0xc7, 0xeb, 0xb7, - 0x2c, 0x11, 0xe4, 0x7d, 0x78, 0x98, 0x60, 0x8a, 0x12, 0x13, 0xb3, 0x3d, 0xd2, 0xdb, 0x23, 0x8b, - 0x29, 0x93, 0x40, 0xc0, 0x76, 0x4d, 0x76, 0x88, 0xa2, 0x2c, 0x72, 0x81, 0x64, 0x0e, 0x8f, 0xcc, - 0xed, 0xa7, 0xec, 0x1d, 0x1e, 0xb3, 0x8c, 0x49, 0xad, 0x40, 0x37, 0x6c, 0xc3, 0xe4, 0x29, 0x78, - 0x02, 0x63, 0x8e, 0xf2, 0x08, 0x57, 0x5a, 0x13, 0x2f, 0x5c, 0x03, 0xe4, 0x3d, 0xe8, 0xa7, 0x48, - 0x13, 0xe4, 0x56, 0x14, 0xfb, 0x15, 0xfc, 0xe1, 0x82, 0x7f, 0xd7, 0xc3, 0xb4, 0xe2, 0x89, 0xf6, - 0x37, 0x0e, 0x5d, 0x96, 0x28, 0x46, 0x05, 0x7b, 0x87, 0xfa, 0xf6, 0x6e, 0xa8, 0xd7, 0xe4, 0x39, - 0x40, 0x5c, 0xa4, 0x29, 0xc6, 0xea, 0xa0, 0xbd, 0xbc, 0x81, 0x28, 0xc6, 0xb5, 0x88, 0x6b, 0xb1, - 0xbb, 0xa1, 0xa7, 0x10, 0xa3, 0x73, 0xcd, 0x8b, 0x35, 0x30, 0x3a, 0x5b, 0x5e, 0x8c, 0xc9, 0xc7, - 0x40, 0x2a, 0xea, 0xce, 0x56, 0xb5, 0x61, 0x5f, 0x1b, 0x4e, 0xec, 0xce, 0xab, 0x55, 0x65, 0xfd, - 0x04, 0x3c, 0x8e, 0x34, 0x89, 0x8a, 0x3c, 0x5d, 0x69, 0xe9, 0x87, 0xe1, 0x50, 0x01, 0x6f, 0xf2, - 0x74, 0x45, 0x3e, 0x82, 0x6d, 0x8e, 0x65, 0xca, 0x62, 0x1a, 0x95, 0x29, 0x8d, 0x31, 0xc3, 0xbc, - 0xca, 0x82, 0x89, 0xdd, 0x38, 0xa9, 0x70, 0xe2, 0xc3, 0xe0, 0x0a, 0xb9, 0x50, 0xcf, 0xf2, 0xb4, - 0x49, 0xf5, 0x49, 0x26, 0xd0, 0x91, 0x32, 0xf5, 0x41, 0xa3, 0x6a, 0x19, 0x0c, 0xa0, 0xf7, 0x3a, - 0x2b, 0xe5, 0x2a, 0xf8, 0xcd, 0x81, 0x47, 0xa7, 0x8b, 0x12, 0xf9, 0xab, 0xb4, 0x88, 0x2f, 0x5f, - 0x2f, 0x25, 0xa7, 0xe4, 0x0d, 0x6c, 0x21, 0xa7, 0x62, 0xc1, 0x55, 0xec, 0x09, 0xcb, 0x2f, 0x34, - 0xa5, 0xa3, 0xfd, 0x79, 0x23, 0xb9, 0x5a, 0x67, 0xf6, 0x5e, 0x9b, 0x03, 0x07, 0xda, 0x3e, 0x1c, - 0x63, 0xf3, 0x73, 0xfa, 0x23, 0x8c, 0x37, 0xf6, 0x95, 0x30, 0x2a, 0xf1, 0xad, 0x54, 0x7a, 0xad, - 0x14, 0x2f, 0x29, 0x67, 0x72, 0x65, 0x0b, 0xd4, 0x7e, 0x29, 0x41, 0x6c, 0xfd, 0xa9, 0x3c, 0xec, - 0xe8, 0x3c, 0xf4, 0x0c, 0x72, 0x98, 0x88, 0xe0, 0x43, 0xd8, 0x39, 0x48, 0x19, 0xe6, 0xf2, 0x98, - 0x09, 0x89, 0x79, 0x88, 0x3f, 0x2f, 0x50, 0x48, 0xe5, 0x21, 0xa7, 0x19, 0xda, 0xf2, 0xd7, 0xeb, - 0xe0, 0x17, 0xd8, 0x32, 0xa9, 0x73, 0x5c, 0xc4, 0x3a, 0x6f, 0x14, 0x31, 0xaa, 0xee, 0x8d, 0x91, - 0x5a, 0xb6, 0x1a, 0x82, 0xdb, 0x6e, 0x08, 0xcd, 0x8a, 0xe9, 0xfc, 0x7b, 0xc5, 0x74, 0x6f, 0x56, - 0xcc, 0xf7, 0xb0, 0x73, 0x5c, 0x14, 0x97, 0x8b, 0xd2, 0x84, 0x51, 0xc5, 0xba, 0xf9, 0x42, 0x67, - 0xd6, 0x51, 0x3e, 0xeb, 0x17, 0xb6, 0x32, 0xd6, 0x6d, 0x67, 0x6c, 0xf0, 0xb7, 0x03, 0xbb, 0x9b, - 0xd7, 0xda, 0x5a, 0xfc, 0x09, 0x76, 0xea, 0x7b, 0xa3, 
0xd4, 0xbe, 0xd9, 0x38, 0x18, 0xed, 0xbf, - 0x6c, 0x88, 0x79, 0xdb, 0xe9, 0xaa, 0x7d, 0x24, 0x15, 0x59, 0xe1, 0xf6, 0x55, 0x0b, 0x11, 0xd3, - 0x25, 0x4c, 0xda, 0x66, 0x2a, 0xa1, 0x6b, 0xaf, 0x96, 0xd9, 0x61, 0x75, 0x92, 0x7c, 0x02, 0xde, - 0x3a, 0x10, 0x57, 0x07, 0xb2, 0xb3, 0x11, 0x88, 0xf5, 0xb5, 0xb6, 0x22, 0xbb, 0xd0, 0x43, 0xce, - 0x8b, 0xaa, 0x11, 0x98, 0x8f, 0xe0, 0x33, 0x18, 0xfe, 0x67, 0x15, 0x83, 0x3f, 0x1d, 0x18, 0x7f, - 0x29, 0x04, 0xbb, 0xa8, 0xd3, 0x65, 0x17, 0x7a, 0xa6, 0x4c, 0x4d, 0xb3, 0x32, 0x1f, 0x64, 0x06, - 0x23, 0x5b, 0x65, 0x0d, 0xea, 0x9b, 0xd0, 0xbd, 0xdd, 0xc4, 0x56, 0x5e, 0xd7, 0x84, 0x26, 0x65, - 0xda, 0x1e, 0x03, 0xbd, 0x3b, 0xc7, 0x40, 0xbf, 0x31, 0x06, 0x9e, 0x80, 0xa7, 0x0f, 0xe5, 0x45, - 0x82, 0x76, 0x3e, 0x0c, 0x15, 0xf0, 0x6d, 0x91, 0xe8, 0xb4, 0xae, 0x1e, 0x63, 0x85, 0x9f, 0x40, - 0xe7, 0xbc, 0x26, 0x5f, 0x2d, 0x2b, 0x8a, 0xdc, 0xbb, 0x28, 0xba, 0x31, 0xf9, 0x6a, 0x42, 0xba, - 0x4d, 0x42, 0x6a, 0x2d, 0x7a, 0x4d, 0x2d, 0x2e, 0x60, 0xfb, 0x54, 0x52, 0xc9, 0x84, 0x64, 0xb1, - 0xa8, 0x18, 0x6d, 0x71, 0xe7, 0xdc, 0xc7, 0x9d, 0x7b, 0x17, 0x77, 0x9d, 0x9a, 0xbb, 0xe0, 0x77, - 0x07, 0x48, 0xd3, 0x93, 0x7d, 0xee, 0xff, 0xe0, 0x4a, 0xd1, 0x23, 0x0b, 0x49, 0xd3, 0x48, 0x0f, - 0x10, 0x3b, 0x06, 0x34, 0xa2, 0x26, 0x98, 0x12, 0x64, 0x21, 0x30, 0x31, 0xbb, 0x66, 0x06, 0x0c, - 0x15, 0xa0, 0x37, 0x37, 0x47, 0x48, 0xbf, 0x35, 0x42, 0xf6, 0x7f, 0xed, 0xc0, 0xe0, 0x14, 0xe9, - 0x35, 0x62, 0x42, 0x0e, 0x61, 0x7c, 0x8a, 0x79, 0xb2, 0xfe, 0x69, 0xd9, 0x6d, 0x54, 0x43, 0x8d, - 0x4e, 0x9f, 0xde, 0x86, 0x56, 0xef, 0x0f, 0x1e, 0xcc, 0x9d, 0x97, 0x0e, 0x39, 0x81, 0xf1, 0x11, - 0x62, 0x79, 0x50, 0xe4, 0x39, 0xc6, 0x12, 0x13, 0xf2, 0xbc, 0x71, 0xe8, 0x96, 0x16, 0x39, 0x7d, - 0x7c, 0xe3, 0x5f, 0xa1, 0xaa, 0x28, 0x7b, 0xe3, 0x77, 0xf0, 0xb0, 0xd9, 0x19, 0x36, 0x2e, 0xbc, - 0xa5, 0x8f, 0x4d, 0x5f, 0xdc, 0xd3, 0x52, 0x82, 0x07, 0xe4, 0x0b, 0xe8, 0x9b, 0x5c, 0x25, 0x7e, - 0xc3, 0x78, 0xa3, 0x16, 0x37, 0xe2, 0xda, 0x4c, 0xec, 0xe0, 0x01, 0x39, 0x02, 0x58, 0x67, 0x00, - 0x69, 0xf2, 0x72, 0x23, 0x05, 0xa7, 0xcf, 0xee, 0xd8, 0xad, 0x2e, 0x3b, 0xeb, 0xeb, 0x3f, 0xc8, - 0x4f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x9f, 0x0a, 0x25, 0x51, 0x0a, 0x00, 0x00, + // 1888 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0x1c, 0x49, + 0x15, 0x4f, 0xcf, 0x8c, 0xc7, 0x33, 0x6f, 0x3e, 0x3c, 0x53, 0x76, 0xbc, 0x93, 0x59, 0x92, 0x4c, + 0x7a, 0x91, 0xd6, 0x09, 0x8b, 0x59, 0xb2, 0x2b, 0x81, 0x04, 0x68, 0x95, 0x38, 0xde, 0xc5, 0xca, + 0xc7, 0x3a, 0x3d, 0x21, 0x48, 0x48, 0xa8, 0xa9, 0xe9, 0x2e, 0xdb, 0x25, 0xf7, 0x74, 0x37, 0x5d, + 0x35, 0x8e, 0x67, 0x39, 0x70, 0x80, 0x1b, 0x12, 0x17, 0xce, 0xdc, 0xf9, 0x1b, 0x38, 0x70, 0xe1, + 0xc8, 0x9d, 0x3b, 0xff, 0x02, 0x57, 0x84, 0xb4, 0xaa, 0xaf, 0xee, 0xea, 0x99, 0xb1, 0x1d, 0xaf, + 0xb4, 0x87, 0xdc, 0xaa, 0xdf, 0x7b, 0xf5, 0xde, 0xab, 0xdf, 0xab, 0xf7, 0x51, 0x0d, 0xed, 0x29, + 0x66, 0x9c, 0x64, 0xbb, 0x69, 0x96, 0xf0, 0x04, 0x35, 0xd5, 0x97, 0x9f, 0x4e, 0xdc, 0x3f, 0xd5, + 0xa1, 0xf9, 0x73, 0x82, 0x33, 0x3e, 0x21, 0x98, 0xa3, 0x2e, 0x54, 0x68, 0x3a, 0x70, 0x46, 0xce, + 0x4e, 0xd3, 0xab, 0xd0, 0x14, 0x21, 0xa8, 0xa5, 0x49, 0xc6, 0x07, 0x95, 0x91, 0xb3, 0xd3, 0xf1, + 0xe4, 0x1a, 0xdd, 0x06, 0x48, 0x67, 0x93, 0x88, 0x06, 0xfe, 0x2c, 0x8b, 0x06, 0x55, 0x29, 0xdb, + 0x54, 0x94, 0x5f, 0x64, 0x11, 0xda, 0x81, 0xde, 0x14, 0x9f, 0xfb, 0x67, 0x49, 0x34, 0x9b, 0x12, + 0x3f, 0x48, 0x66, 0x31, 0x1f, 0xd4, 0xe4, 0xf6, 0xee, 0x14, 0x9f, 0xbf, 0x96, 0xe4, 0x3d, 0x41, + 0x45, 0x23, 0xe1, 0xd5, 0xb9, 0x7f, 
0x44, 0x23, 0xe2, 0x9f, 0x92, 0xf9, 0x60, 0x6d, 0xe4, 0xec, + 0xd4, 0x3c, 0x98, 0xe2, 0xf3, 0xcf, 0x69, 0x44, 0x9e, 0x92, 0x39, 0xba, 0x0b, 0xad, 0x10, 0x73, + 0xec, 0x07, 0x24, 0xe6, 0x24, 0x1b, 0xd4, 0xa5, 0x2d, 0x10, 0xa4, 0x3d, 0x49, 0x11, 0xfe, 0x65, + 0x38, 0x38, 0x1d, 0xac, 0x4b, 0x8e, 0x5c, 0x0b, 0xff, 0x70, 0x38, 0xa5, 0xb1, 0x2f, 0x3d, 0x6f, + 0x48, 0xd3, 0x4d, 0x49, 0x39, 0x14, 0xee, 0xff, 0x0c, 0xd6, 0x95, 0x6f, 0x6c, 0xd0, 0x1c, 0x55, + 0x77, 0x5a, 0x0f, 0x3f, 0xd8, 0xcd, 0xd1, 0xd8, 0x55, 0xee, 0x1d, 0xc4, 0x47, 0x49, 0x36, 0xc5, + 0x9c, 0x26, 0xf1, 0x73, 0xc2, 0x18, 0x3e, 0x26, 0x9e, 0xd9, 0x83, 0x0e, 0xa0, 0x15, 0x93, 0x37, + 0xbe, 0x51, 0x01, 0x52, 0xc5, 0xce, 0x92, 0x8a, 0xf1, 0x49, 0x92, 0xf1, 0x15, 0x7a, 0x20, 0x26, + 0x6f, 0x5e, 0x6b, 0x55, 0x2f, 0x61, 0x23, 0x24, 0x11, 0xe1, 0x24, 0xcc, 0xd5, 0xb5, 0xae, 0xa9, + 0xae, 0xab, 0x15, 0x18, 0x95, 0xdf, 0x85, 0xee, 0x09, 0x66, 0x7e, 0x9c, 0xe4, 0x1a, 0xdb, 0x23, + 0x67, 0xa7, 0xe1, 0xb5, 0x4f, 0x30, 0x7b, 0x91, 0x18, 0xa9, 0x2f, 0xa0, 0x49, 0x02, 0x9f, 0x9d, + 0xe0, 0x2c, 0x64, 0x83, 0x9e, 0x34, 0xf9, 0x60, 0xc9, 0xe4, 0x7e, 0x30, 0x16, 0x02, 0x2b, 0x8c, + 0x36, 0x88, 0x62, 0x31, 0xf4, 0x02, 0x3a, 0x02, 0x8c, 0x42, 0x59, 0xff, 0xda, 0xca, 0x04, 0x9a, + 0xfb, 0x46, 0xdf, 0x6b, 0xe8, 0x1b, 0x44, 0x0a, 0x9d, 0xe8, 0xda, 0x3a, 0x0d, 0xac, 0xb9, 0xde, + 0x0f, 0xa1, 0xa7, 0x61, 0x29, 0xd4, 0x6e, 0x4a, 0x60, 0x3a, 0x12, 0x18, 0x23, 0xe8, 0xfe, 0xdd, + 0x81, 0x7e, 0x9e, 0x0d, 0x1e, 0x61, 0x69, 0x12, 0x33, 0x82, 0x1e, 0x40, 0x5f, 0x5f, 0x67, 0x46, + 0xbf, 0x22, 0x7e, 0x44, 0xa7, 0x94, 0xcb, 0x24, 0xa9, 0x79, 0x1b, 0x8a, 0x31, 0xa6, 0x5f, 0x91, + 0x67, 0x82, 0x8c, 0xb6, 0xa1, 0x1e, 0x11, 0x1c, 0x92, 0x4c, 0xe6, 0x4c, 0xd3, 0xd3, 0x5f, 0xe8, + 0x43, 0xd8, 0x98, 0x12, 0x9e, 0xd1, 0x80, 0xf9, 0x38, 0x0c, 0x33, 0xc2, 0x98, 0x4e, 0x9d, 0xae, + 0x26, 0x3f, 0x52, 0x54, 0xf4, 0x63, 0x18, 0x18, 0x41, 0x2a, 0xee, 0xf8, 0x19, 0x8e, 0x7c, 0x46, + 0x82, 0x24, 0x0e, 0x99, 0xce, 0xa3, 0x6d, 0xcd, 0x3f, 0xd0, 0xec, 0xb1, 0xe2, 0xba, 0x7f, 0xad, + 0xc2, 0xe0, 0xa2, 0x0b, 0x2c, 0x33, 0x3b, 0x94, 0x4e, 0x77, 0xbc, 0x0a, 0x0d, 0x45, 0xe6, 0x88, + 0xc3, 0x48, 0x2f, 0x6b, 0x9e, 0x5c, 0xa3, 0x3b, 0x00, 0x41, 0x12, 0x45, 0x24, 0x10, 0x1b, 0xb5, + 0x7b, 0x16, 0x45, 0x64, 0x96, 0x4c, 0xd6, 0x22, 0xa9, 0x6b, 0x5e, 0x53, 0x50, 0x54, 0x3e, 0xdf, + 0x83, 0xb6, 0x02, 0x5e, 0x0b, 0xa8, 0x7c, 0x6e, 0x29, 0x9a, 0x12, 0xf9, 0x08, 0x90, 0x09, 0xf0, + 0x64, 0x9e, 0x0b, 0xd6, 0xa5, 0x60, 0x4f, 0x73, 0x1e, 0xcf, 0x8d, 0xf4, 0xfb, 0xd0, 0xcc, 0x08, + 0x0e, 0xfd, 0x24, 0x8e, 0xe6, 0x32, 0xc5, 0x1b, 0x5e, 0x43, 0x10, 0xbe, 0x8c, 0xa3, 0x39, 0xfa, + 0x1e, 0xf4, 0x33, 0x92, 0x46, 0x34, 0xc0, 0x7e, 0x1a, 0xe1, 0x80, 0x4c, 0x49, 0x6c, 0xb2, 0xbd, + 0xa7, 0x19, 0x87, 0x86, 0x8e, 0x06, 0xb0, 0x7e, 0x46, 0x32, 0x26, 0x8e, 0xd5, 0x94, 0x22, 0xe6, + 0x13, 0xf5, 0xa0, 0xca, 0x79, 0x34, 0x00, 0x49, 0x15, 0x4b, 0x74, 0x1f, 0x7a, 0x41, 0x32, 0x4d, + 0x71, 0xc0, 0xfd, 0x8c, 0x9c, 0x51, 0xb9, 0xa9, 0x25, 0xd9, 0x1b, 0x9a, 0xee, 0x69, 0xb2, 0x38, + 0xce, 0x34, 0x09, 0xe9, 0x11, 0x25, 0xa1, 0x8f, 0xb9, 0x0e, 0x93, 0x4c, 0xb9, 0xaa, 0xd7, 0x33, + 0x9c, 0x47, 0x5c, 0x05, 0xc8, 0xfd, 0x9b, 0x03, 0xb7, 0x2f, 0x4d, 0xe7, 0xa5, 0x20, 0x5d, 0x15, + 0x90, 0x6f, 0x0b, 0x03, 0x77, 0x06, 0x77, 0xaf, 0x48, 0xb2, 0x2b, 0x7c, 0xad, 0x2c, 0xf9, 0xea, + 0x42, 0x87, 0x04, 0x3e, 0x8d, 0x43, 0x72, 0xee, 0x4f, 0x28, 0x57, 0xd7, 0xbf, 0xe3, 0xb5, 0x48, + 0x70, 0x20, 0x68, 0x8f, 0x29, 0x67, 0xee, 0x3a, 0xac, 0xed, 0x4f, 0x53, 0x3e, 0x77, 0xff, 0xe1, + 0xc0, 0xc6, 0x78, 0x96, 0x92, 0xec, 0x71, 0x94, 0x04, 0xa7, 
0xfb, 0xe7, 0x3c, 0xc3, 0xe8, 0x4b, + 0xe8, 0x92, 0x0c, 0xb3, 0x59, 0x26, 0xae, 0x4d, 0x48, 0xe3, 0x63, 0x69, 0xbc, 0x5c, 0x2d, 0x17, + 0xf6, 0xec, 0xee, 0xab, 0x0d, 0x7b, 0x52, 0xde, 0xeb, 0x10, 0xfb, 0x73, 0xf8, 0x2b, 0xe8, 0x94, + 0xf8, 0x22, 0x27, 0x44, 0x6f, 0xd1, 0x87, 0x92, 0x6b, 0x91, 0xcf, 0x29, 0xce, 0x28, 0x9f, 0xeb, + 0x1e, 0xa8, 0xbf, 0x44, 0x2e, 0xe8, 0x9a, 0x40, 0x43, 0x71, 0x96, 0xaa, 0xe8, 0x32, 0x8a, 0x72, + 0x10, 0x32, 0xf7, 0x3e, 0x6c, 0xee, 0x45, 0x94, 0xc4, 0xfc, 0x19, 0x65, 0x9c, 0xc4, 0x1e, 0xf9, + 0xed, 0x8c, 0x30, 0x2e, 0x2c, 0xc4, 0x78, 0x4a, 0x74, 0x87, 0x95, 0x6b, 0xf7, 0xf7, 0xd0, 0x55, + 0x58, 0x3f, 0x4b, 0x02, 0x89, 0xb0, 0x88, 0x87, 0x68, 0xad, 0x4a, 0x48, 0x2c, 0x17, 0x7a, 0x6e, + 0x65, 0xb1, 0xe7, 0xde, 0x82, 0x86, 0x6c, 0x4a, 0x85, 0x2b, 0xeb, 0xa2, 0xcf, 0xd0, 0x90, 0x15, + 0x49, 0x19, 0x2a, 0x76, 0x4d, 0xb2, 0x5b, 0xa6, 0x6f, 0xd0, 0x90, 0xb9, 0xaf, 0x60, 0xf3, 0x59, + 0x92, 0x9c, 0xce, 0x52, 0xe5, 0x86, 0xf1, 0xb5, 0x7c, 0x42, 0x67, 0x54, 0x15, 0x36, 0xf3, 0x13, + 0x5e, 0x15, 0x6f, 0xf7, 0xbf, 0x0e, 0x6c, 0x95, 0xd5, 0xea, 0x6a, 0xfa, 0x1b, 0xd8, 0xcc, 0xf5, + 0xfa, 0x91, 0x3e, 0xb3, 0x32, 0xd0, 0x7a, 0xf8, 0xb1, 0x15, 0xcc, 0x55, 0xbb, 0x4d, 0x87, 0x0e, + 0x0d, 0x58, 0x5e, 0xff, 0x6c, 0x81, 0xc2, 0x86, 0xe7, 0xd0, 0x5b, 0x14, 0x13, 0xb5, 0x24, 0xb7, + 0xaa, 0x91, 0x6d, 0x98, 0x9d, 0xe8, 0x87, 0xd0, 0x2c, 0x1c, 0xa9, 0x48, 0x47, 0x36, 0x4b, 0x8e, + 0x68, 0x5b, 0x85, 0x14, 0xda, 0x82, 0x35, 0x92, 0x65, 0x49, 0xa6, 0xb3, 0x52, 0x7d, 0xb8, 0x3f, + 0x81, 0xc6, 0x37, 0x8e, 0xa2, 0xfb, 0x2f, 0x07, 0x3a, 0x8f, 0x18, 0xa3, 0xc7, 0xf9, 0x75, 0xd9, + 0x82, 0x35, 0x55, 0x21, 0x55, 0xb3, 0x51, 0x1f, 0x68, 0x04, 0x2d, 0x9d, 0xdc, 0x16, 0xf4, 0x36, + 0xe9, 0xca, 0xba, 0xa1, 0x13, 0xbe, 0xa6, 0x5c, 0x13, 0x45, 0x6f, 0x61, 0xd2, 0x5a, 0xbb, 0x70, + 0xd2, 0xaa, 0x5b, 0x93, 0xd6, 0xfb, 0xd0, 0x94, 0x9b, 0xe2, 0x24, 0x24, 0x7a, 0x04, 0x6b, 0x08, + 0xc2, 0x8b, 0x24, 0x24, 0xee, 0x5f, 0x1c, 0xe8, 0x9a, 0xd3, 0xe8, 0xc8, 0xf7, 0xa0, 0x7a, 0x94, + 0xa3, 0x2f, 0x96, 0x06, 0xa3, 0xca, 0x45, 0x18, 0x2d, 0x4d, 0x97, 0x39, 0x22, 0x35, 0x1b, 0x91, + 0x3c, 0x18, 0x6b, 0x56, 0x30, 0x84, 0xcb, 0x78, 0xc6, 0x4f, 0x8c, 0xcb, 0x62, 0xed, 0x1e, 0x43, + 0x7f, 0xcc, 0x31, 0xa7, 0x8c, 0xd3, 0x80, 0x19, 0x98, 0x17, 0x00, 0x75, 0xae, 0x02, 0xb4, 0x72, + 0x11, 0xa0, 0xd5, 0x1c, 0x50, 0xf7, 0x9f, 0x0e, 0x20, 0xdb, 0x92, 0x86, 0xe0, 0x5b, 0x30, 0x25, + 0x20, 0xe3, 0x09, 0x17, 0x63, 0x82, 0x68, 0xe8, 0xba, 0x2d, 0x4b, 0x8a, 0x18, 0x4b, 0x44, 0x94, + 0x66, 0x8c, 0x84, 0x8a, 0xab, 0x7a, 0x72, 0x43, 0x10, 0x24, 0xb3, 0xdc, 0xd2, 0xeb, 0x0b, 0x2d, + 0xdd, 0x7d, 0x04, 0xad, 0x31, 0x4f, 0x32, 0x7c, 0x4c, 0x5e, 0xcd, 0xd3, 0xb7, 0xf1, 0x5e, 0x7b, + 0x57, 0x29, 0x80, 0x18, 0x01, 0xec, 0x15, 0xde, 0xaf, 0x2a, 0x80, 0xbf, 0x83, 0x9b, 0x85, 0x84, + 0xa8, 0x97, 0x26, 0x2e, 0x9f, 0xc2, 0x36, 0x8d, 0x83, 0x68, 0x16, 0x12, 0x3f, 0x16, 0xed, 0x27, + 0xca, 0xa7, 0x5a, 0x47, 0x0e, 0x03, 0x5b, 0x9a, 0xfb, 0x42, 0x32, 0xcd, 0x74, 0xfb, 0x11, 0x20, + 0xb3, 0x8b, 0x04, 0xf9, 0x8e, 0x8a, 0xdc, 0xd1, 0xd3, 0x9c, 0xfd, 0x40, 0x4b, 0xbb, 0x2f, 0x61, + 0x7b, 0xd1, 0xb8, 0x0e, 0xd5, 0x8f, 0xa0, 0x55, 0xc0, 0x6e, 0xea, 0xd3, 0x4d, 0xab, 0x2c, 0x14, + 0xfb, 0x3c, 0x5b, 0xd2, 0xfd, 0x3e, 0xbc, 0x57, 0xb0, 0x9e, 0xc8, 0x42, 0x7b, 0x59, 0xfd, 0x1f, + 0xc2, 0x60, 0x59, 0x5c, 0xf9, 0xe0, 0xfe, 0xa7, 0x02, 0xed, 0x27, 0x3a, 0xa3, 0x44, 0x0f, 0xb6, + 0xba, 0x6e, 0x53, 0x76, 0xdd, 0x7b, 0xd0, 0x2e, 0xbd, 0xb4, 0xd4, 0x38, 0xd7, 0x3a, 0xb3, 0x9e, + 0x59, 0xab, 0x1e, 0x64, 0x55, 0x29, 0xb6, 0xf8, 0x20, 0x7b, 0x00, 0xfd, 0xa3, 0x8c, 
0x90, 0xe5, + 0xb7, 0x5b, 0xcd, 0xdb, 0x10, 0x0c, 0x5b, 0x76, 0x17, 0x36, 0x71, 0xc0, 0xe9, 0xd9, 0x82, 0xb4, + 0xba, 0x5f, 0x7d, 0xc5, 0xb2, 0xe5, 0x3f, 0xcf, 0x1d, 0xa5, 0xf1, 0x51, 0xc2, 0x06, 0xf5, 0xb7, + 0x7f, 0x7b, 0xe9, 0xd3, 0x08, 0x0e, 0x43, 0x87, 0xd0, 0x35, 0x33, 0xbc, 0xd6, 0xb4, 0x7e, 0xed, + 0xf7, 0x41, 0x9b, 0x14, 0x2c, 0xe6, 0xfe, 0xb1, 0x02, 0x0d, 0x0f, 0x07, 0xa7, 0xef, 0x36, 0xbe, + 0x9f, 0xc1, 0x46, 0x5e, 0x8b, 0x4b, 0x10, 0xbf, 0x67, 0x01, 0x63, 0x5f, 0x25, 0xaf, 0x13, 0x5a, + 0x5f, 0xcc, 0xfd, 0xbf, 0x03, 0xdd, 0x27, 0x79, 0xbd, 0x7f, 0xb7, 0xc1, 0x78, 0x08, 0x20, 0x1a, + 0x54, 0x09, 0x07, 0xbb, 0xa1, 0x9b, 0x70, 0x7b, 0xcd, 0x4c, 0xaf, 0x98, 0xfb, 0xe7, 0x0a, 0xb4, + 0x5f, 0x25, 0x69, 0x12, 0x25, 0xc7, 0xf3, 0x77, 0xfb, 0xf4, 0xfb, 0xd0, 0xb7, 0x7a, 0x79, 0x09, + 0x84, 0x5b, 0x0b, 0x97, 0xa1, 0x08, 0xb6, 0xb7, 0x11, 0x96, 0xbe, 0x99, 0xbb, 0x09, 0x7d, 0x3d, + 0x97, 0x16, 0x25, 0xd9, 0xfd, 0x83, 0x03, 0xc8, 0xa6, 0xea, 0x5a, 0xf9, 0x53, 0xe8, 0x70, 0x8d, + 0x9d, 0xb4, 0xa7, 0x47, 0x73, 0xfb, 0xee, 0xd9, 0xd8, 0x7a, 0x6d, 0x6e, 0x23, 0xfd, 0x03, 0xd8, + 0x5a, 0x7a, 0x5f, 0xfb, 0xd3, 0x89, 0x46, 0xb8, 0xbf, 0xf0, 0xc4, 0x7e, 0x3e, 0x71, 0x3f, 0x85, + 0x9b, 0x6a, 0x38, 0x34, 0x75, 0xdc, 0xd4, 0xd7, 0xa5, 0x29, 0xaf, 0x53, 0x4c, 0x79, 0xee, 0xff, + 0x1c, 0xd8, 0x5e, 0xdc, 0xa6, 0xfd, 0xbf, 0x6c, 0x1f, 0xc2, 0x80, 0x74, 0xbd, 0xb1, 0xe7, 0x55, + 0x35, 0x26, 0x7e, 0xb2, 0x34, 0xaf, 0x2e, 0xea, 0xde, 0x35, 0x75, 0xa8, 0x18, 0x59, 0x7b, 0xac, + 0x4c, 0x60, 0x43, 0x0c, 0xfd, 0x25, 0x31, 0x31, 0xd5, 0x1b, 0xbb, 0xda, 0xa7, 0x75, 0xbd, 0xf1, + 0x1b, 0x0c, 0xac, 0xee, 0x5d, 0xb8, 0xfd, 0x05, 0xe1, 0xcf, 0xa5, 0xcc, 0x5e, 0x12, 0x1f, 0xd1, + 0xe3, 0x59, 0xa6, 0x84, 0x8a, 0xd0, 0xde, 0xb9, 0x48, 0x42, 0xc3, 0xb4, 0xe2, 0x27, 0x86, 0x73, + 0xed, 0x9f, 0x18, 0x95, 0xcb, 0x7e, 0x62, 0x3c, 0xfc, 0x77, 0x1d, 0xd6, 0xc7, 0x04, 0xbf, 0x21, + 0x24, 0x44, 0x07, 0xd0, 0x19, 0x93, 0x38, 0x2c, 0x7e, 0x4f, 0x6e, 0x59, 0x67, 0xcc, 0xa9, 0xc3, + 0xef, 0xac, 0xa2, 0xe6, 0x2d, 0xf4, 0xc6, 0x8e, 0xf3, 0xb1, 0x83, 0x0e, 0xa1, 0xf3, 0x94, 0x90, + 0x74, 0x2f, 0x89, 0x63, 0x12, 0x70, 0x12, 0xa2, 0x3b, 0x76, 0x23, 0x5f, 0x7e, 0xa9, 0x0d, 0x6f, + 0x2d, 0xf5, 0x13, 0x03, 0xaa, 0xd6, 0xf8, 0x12, 0xda, 0xf6, 0x03, 0xa5, 0xa4, 0x70, 0xc5, 0x73, + 0x6a, 0x78, 0xf7, 0x8a, 0x97, 0x8d, 0x7b, 0x03, 0x7d, 0x06, 0x75, 0x35, 0x31, 0xa3, 0x81, 0x25, + 0x5c, 0x7a, 0x12, 0x94, 0xfc, 0x2a, 0x8f, 0xd7, 0xee, 0x0d, 0xf4, 0x14, 0xa0, 0x98, 0x39, 0x91, + 0x8d, 0xcb, 0xd2, 0xd0, 0x3b, 0xbc, 0x7d, 0x01, 0x37, 0x57, 0xf6, 0x4b, 0xe8, 0x96, 0x27, 0x23, + 0x34, 0x5a, 0x39, 0xfc, 0x58, 0xe5, 0x61, 0x78, 0xef, 0x12, 0x89, 0x5c, 0xf1, 0xaf, 0xa1, 0xb7, + 0x38, 0xf0, 0x20, 0x77, 0xe5, 0xc6, 0xd2, 0xf0, 0x34, 0xfc, 0xe0, 0x52, 0x19, 0x1b, 0x84, 0xa2, + 0x42, 0x95, 0x40, 0x58, 0x2a, 0x67, 0x25, 0x10, 0x96, 0xcb, 0x9a, 0x02, 0xa1, 0x9c, 0xd6, 0x25, + 0x10, 0x56, 0x16, 0xa1, 0x12, 0x08, 0xab, 0x6b, 0x82, 0x7b, 0x03, 0x25, 0xb0, 0xbd, 0x3a, 0xd9, + 0x90, 0xfd, 0x3f, 0xe3, 0xd2, 0x8c, 0x1d, 0xde, 0x7f, 0x0b, 0x49, 0x63, 0x70, 0x52, 0x97, 0xbf, + 0xfe, 0x3f, 0xf9, 0x3a, 0x00, 0x00, 0xff, 0xff, 0x28, 0x1f, 0x4c, 0x0e, 0x0a, 0x18, 0x00, 0x00, } diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 8ab67a1bf..4004875ed 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -19,14 +19,12 @@ service VolumeServer { rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) { } - rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { + rpc AllocateVolume 
(AllocateVolumeRequest) returns (AllocateVolumeResponse) { } rpc VolumeSyncStatus (VolumeSyncStatusRequest) returns (VolumeSyncStatusResponse) { } - rpc VolumeSyncIndex (VolumeSyncIndexRequest) returns (stream VolumeSyncIndexResponse) { - } - rpc VolumeSyncData (VolumeSyncDataRequest) returns (stream VolumeSyncDataResponse) { + rpc VolumeIncrementalCopy (VolumeIncrementalCopyRequest) returns (stream VolumeIncrementalCopyResponse) { } rpc VolumeMount (VolumeMountRequest) returns (VolumeMountResponse) { @@ -35,8 +33,39 @@ service VolumeServer { } rpc VolumeDelete (VolumeDeleteRequest) returns (VolumeDeleteResponse) { } + rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) { + } - // rpc VolumeUiPage (VolumeUiPageRequest) returns (VolumeUiPageResponse) {} + // copy the .idx .dat files, and mount this volume + rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) { + } + rpc ReadVolumeFileStatus (ReadVolumeFileStatusRequest) returns (ReadVolumeFileStatusResponse) { + } + rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) { + } + + rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) { + } + rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) { + } + + // erasure coding + rpc VolumeEcShardsGenerate (VolumeEcShardsGenerateRequest) returns (VolumeEcShardsGenerateResponse) { + } + rpc VolumeEcShardsRebuild (VolumeEcShardsRebuildRequest) returns (VolumeEcShardsRebuildResponse) { + } + rpc VolumeEcShardsCopy (VolumeEcShardsCopyRequest) returns (VolumeEcShardsCopyResponse) { + } + rpc VolumeEcShardsDelete (VolumeEcShardsDeleteRequest) returns (VolumeEcShardsDeleteResponse) { + } + rpc VolumeEcShardsMount (VolumeEcShardsMountRequest) returns (VolumeEcShardsMountResponse) { + } + rpc VolumeEcShardsUnmount (VolumeEcShardsUnmountRequest) returns (VolumeEcShardsUnmountResponse) { + } + rpc VolumeEcShardRead (VolumeEcShardReadRequest) returns (stream VolumeEcShardReadResponse) { + } + rpc VolumeEcBlobDelete (VolumeEcBlobDeleteRequest) returns (VolumeEcBlobDeleteResponse) { + } } @@ -54,33 +83,34 @@ message DeleteResult { int32 status = 2; string error = 3; uint32 size = 4; + uint32 version = 5; } message Empty { } message VacuumVolumeCheckRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCheckResponse { double garbage_ratio = 1; } message VacuumVolumeCompactRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; int64 preallocate = 2; } message VacuumVolumeCompactResponse { } message VacuumVolumeCommitRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCommitResponse { } message VacuumVolumeCleanupRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCleanupResponse { } @@ -91,21 +121,21 @@ message DeleteCollectionRequest { message DeleteCollectionResponse { } -message AssignVolumeRequest { - uint32 volumd_id = 1; +message AllocateVolumeRequest { + uint32 volume_id = 1; string collection = 2; int64 preallocate = 3; string replication = 4; string ttl = 5; } -message AssignVolumeResponse { +message AllocateVolumeResponse { } message VolumeSyncStatusRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeSyncStatusResponse { - uint32 volumd_id = 1; + uint32 volume_id = 1; string collection = 2; string replication = 4; string ttl = 5; @@ -114,45 +144,162 @@ message VolumeSyncStatusResponse { uint64 idx_file_size = 8; } -message VolumeSyncIndexRequest { - uint32 volumd_id = 1; 
+message VolumeIncrementalCopyRequest { + uint32 volume_id = 1; + uint64 since_ns = 2; } -message VolumeSyncIndexResponse { - bytes index_file_content = 1; -} - -message VolumeSyncDataRequest { - uint32 volumd_id = 1; - uint32 revision = 2; - uint32 offset = 3; - uint32 size = 4; - string needle_id = 5; -} -message VolumeSyncDataResponse { +message VolumeIncrementalCopyResponse { bytes file_content = 1; } message VolumeMountRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeMountResponse { } message VolumeUnmountRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeUnmountResponse { } message VolumeDeleteRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeDeleteResponse { } -message VolumeUiPageRequest { +message VolumeMarkReadonlyRequest { + uint32 volume_id = 1; } -message VolumeUiPageResponse { +message VolumeMarkReadonlyResponse { +} + +message VolumeCopyRequest { + uint32 volume_id = 1; + string collection = 2; + string replication = 3; + string ttl = 4; + string source_data_node = 5; +} +message VolumeCopyResponse { + uint64 last_append_at_ns = 1; +} + +message CopyFileRequest { + uint32 volume_id = 1; + string ext = 2; + uint32 compaction_revision = 3; + uint64 stop_offset = 4; + string collection = 5; + bool is_ec_volume = 6; +} +message CopyFileResponse { + bytes file_content = 1; +} + +message VolumeTailSenderRequest { + uint32 volume_id = 1; + uint64 since_ns = 2; + uint32 idle_timeout_seconds = 3; +} +message VolumeTailSenderResponse { + bytes needle_header = 1; + bytes needle_body = 2; + bool is_last_chunk = 3; +} + +message VolumeTailReceiverRequest { + uint32 volume_id = 1; + uint64 since_ns = 2; + uint32 idle_timeout_seconds = 3; + string source_volume_server = 4; +} +message VolumeTailReceiverResponse { +} + +message VolumeEcShardsGenerateRequest { + uint32 volume_id = 1; + string collection = 2; +} +message VolumeEcShardsGenerateResponse { +} + +message VolumeEcShardsRebuildRequest { + uint32 volume_id = 1; + string collection = 2; +} +message VolumeEcShardsRebuildResponse { + repeated uint32 rebuilt_shard_ids = 1; +} + +message VolumeEcShardsCopyRequest { + uint32 volume_id = 1; + string collection = 2; + repeated uint32 shard_ids = 3; + bool copy_ecx_file = 4; + string source_data_node = 5; +} +message VolumeEcShardsCopyResponse { +} + +message VolumeEcShardsDeleteRequest { + uint32 volume_id = 1; + string collection = 2; + repeated uint32 shard_ids = 3; +} +message VolumeEcShardsDeleteResponse { +} + +message VolumeEcShardsMountRequest { + uint32 volume_id = 1; + string collection = 2; + repeated uint32 shard_ids = 3; +} +message VolumeEcShardsMountResponse { +} + +message VolumeEcShardsUnmountRequest { + uint32 volume_id = 1; + repeated uint32 shard_ids = 3; +} +message VolumeEcShardsUnmountResponse { +} + +message VolumeEcShardReadRequest { + uint32 volume_id = 1; + uint32 shard_id = 2; + int64 offset = 3; + int64 size = 4; + uint64 file_key = 5; +} +message VolumeEcShardReadResponse { + bytes data = 1; + bool is_deleted = 2; +} + +message VolumeEcBlobDeleteRequest { + uint32 volume_id = 1; + string collection = 2; + uint64 file_key = 3; + uint32 version = 4; +} +message VolumeEcBlobDeleteResponse { +} + +message ReadVolumeFileStatusRequest { + uint32 volume_id = 1; +} +message ReadVolumeFileStatusResponse { + uint32 volume_id = 1; + uint64 idx_file_timestamp_seconds = 2; + uint64 idx_file_size = 3; + uint64 dat_file_timestamp_seconds = 4; + uint64 dat_file_size = 5; + uint64 file_count = 6; + uint32 
compaction_revision = 7; + string collection = 8; } message DiskStatus { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index fa700e2e5..13d14b1e5 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -23,22 +23,46 @@ It has these top-level messages: VacuumVolumeCleanupResponse DeleteCollectionRequest DeleteCollectionResponse - AssignVolumeRequest - AssignVolumeResponse + AllocateVolumeRequest + AllocateVolumeResponse VolumeSyncStatusRequest VolumeSyncStatusResponse - VolumeSyncIndexRequest - VolumeSyncIndexResponse - VolumeSyncDataRequest - VolumeSyncDataResponse + VolumeIncrementalCopyRequest + VolumeIncrementalCopyResponse VolumeMountRequest VolumeMountResponse VolumeUnmountRequest VolumeUnmountResponse VolumeDeleteRequest VolumeDeleteResponse - VolumeUiPageRequest - VolumeUiPageResponse + VolumeMarkReadonlyRequest + VolumeMarkReadonlyResponse + VolumeCopyRequest + VolumeCopyResponse + CopyFileRequest + CopyFileResponse + VolumeTailSenderRequest + VolumeTailSenderResponse + VolumeTailReceiverRequest + VolumeTailReceiverResponse + VolumeEcShardsGenerateRequest + VolumeEcShardsGenerateResponse + VolumeEcShardsRebuildRequest + VolumeEcShardsRebuildResponse + VolumeEcShardsCopyRequest + VolumeEcShardsCopyResponse + VolumeEcShardsDeleteRequest + VolumeEcShardsDeleteResponse + VolumeEcShardsMountRequest + VolumeEcShardsMountResponse + VolumeEcShardsUnmountRequest + VolumeEcShardsUnmountResponse + VolumeEcShardReadRequest + VolumeEcShardReadResponse + VolumeEcBlobDeleteRequest + VolumeEcBlobDeleteResponse + ReadVolumeFileStatusRequest + ReadVolumeFileStatusResponse DiskStatus MemStatus */ @@ -97,10 +121,11 @@ func (m *BatchDeleteResponse) GetResults() []*DeleteResult { } type DeleteResult struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Status int32 `protobuf:"varint,2,opt,name=status" json:"status,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` + Status int32 `protobuf:"varint,2,opt,name=status" json:"status,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` + Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` + Version uint32 `protobuf:"varint,5,opt,name=version" json:"version,omitempty"` } func (m *DeleteResult) Reset() { *m = DeleteResult{} } @@ -136,6 +161,13 @@ func (m *DeleteResult) GetSize() uint32 { return 0 } +func (m *DeleteResult) GetVersion() uint32 { + if m != nil { + return m.Version + } + return 0 +} + type Empty struct { } @@ -145,7 +177,7 @@ func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } type VacuumVolumeCheckRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} } @@ -153,9 +185,9 @@ func (m *VacuumVolumeCheckRequest) String() string { return proto.Com func (*VacuumVolumeCheckRequest) ProtoMessage() {} func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } -func (m *VacuumVolumeCheckRequest) GetVolumdId() uint32 { 
+func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -177,7 +209,7 @@ func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { } type VacuumVolumeCompactRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Preallocate int64 `protobuf:"varint,2,opt,name=preallocate" json:"preallocate,omitempty"` } @@ -186,9 +218,9 @@ func (m *VacuumVolumeCompactRequest) String() string { return proto.C func (*VacuumVolumeCompactRequest) ProtoMessage() {} func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } -func (m *VacuumVolumeCompactRequest) GetVolumdId() uint32 { +func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -209,7 +241,7 @@ func (*VacuumVolumeCompactResponse) ProtoMessage() {} func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } type VacuumVolumeCommitRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} } @@ -217,9 +249,9 @@ func (m *VacuumVolumeCommitRequest) String() string { return proto.Co func (*VacuumVolumeCommitRequest) ProtoMessage() {} func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } -func (m *VacuumVolumeCommitRequest) GetVolumdId() uint32 { +func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -233,7 +265,7 @@ func (*VacuumVolumeCommitResponse) ProtoMessage() {} func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } type VacuumVolumeCleanupRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} } @@ -241,9 +273,9 @@ func (m *VacuumVolumeCleanupRequest) String() string { return proto.C func (*VacuumVolumeCleanupRequest) ProtoMessage() {} func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } -func (m *VacuumVolumeCleanupRequest) GetVolumdId() uint32 { +func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -280,64 +312,64 @@ func (m *DeleteCollectionResponse) String() string { return proto.Com func (*DeleteCollectionResponse) ProtoMessage() {} func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } -type AssignVolumeRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +type AllocateVolumeRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"` Replication string 
`protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` } -func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } -func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} } +func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateVolumeRequest) ProtoMessage() {} +func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } -func (m *AssignVolumeRequest) GetVolumdId() uint32 { +func (m *AllocateVolumeRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } -func (m *AssignVolumeRequest) GetCollection() string { +func (m *AllocateVolumeRequest) GetCollection() string { if m != nil { return m.Collection } return "" } -func (m *AssignVolumeRequest) GetPreallocate() int64 { +func (m *AllocateVolumeRequest) GetPreallocate() int64 { if m != nil { return m.Preallocate } return 0 } -func (m *AssignVolumeRequest) GetReplication() string { +func (m *AllocateVolumeRequest) GetReplication() string { if m != nil { return m.Replication } return "" } -func (m *AssignVolumeRequest) GetTtl() string { +func (m *AllocateVolumeRequest) GetTtl() string { if m != nil { return m.Ttl } return "" } -type AssignVolumeResponse struct { +type AllocateVolumeResponse struct { } -func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } -func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} } +func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateVolumeResponse) ProtoMessage() {} +func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } type VolumeSyncStatusRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} } @@ -345,15 +377,15 @@ func (m *VolumeSyncStatusRequest) String() string { return proto.Comp func (*VolumeSyncStatusRequest) ProtoMessage() {} func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } -func (m *VolumeSyncStatusRequest) GetVolumdId() uint32 { +func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } type VolumeSyncStatusResponse struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` @@ -367,9 +399,9 @@ func (m *VolumeSyncStatusResponse) String() string { return 
proto.Com func (*VolumeSyncStatusResponse) ProtoMessage() {} func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } -func (m *VolumeSyncStatusResponse) GetVolumdId() uint32 { +func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -416,96 +448,40 @@ func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { return 0 } -type VolumeSyncIndexRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +type VolumeIncrementalCopyRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` } -func (m *VolumeSyncIndexRequest) Reset() { *m = VolumeSyncIndexRequest{} } -func (m *VolumeSyncIndexRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncIndexRequest) ProtoMessage() {} -func (*VolumeSyncIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (m *VolumeIncrementalCopyRequest) Reset() { *m = VolumeIncrementalCopyRequest{} } +func (m *VolumeIncrementalCopyRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeIncrementalCopyRequest) ProtoMessage() {} +func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } -func (m *VolumeSyncIndexRequest) GetVolumdId() uint32 { +func (m *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } -type VolumeSyncIndexResponse struct { - IndexFileContent []byte `protobuf:"bytes,1,opt,name=index_file_content,json=indexFileContent,proto3" json:"index_file_content,omitempty"` -} - -func (m *VolumeSyncIndexResponse) Reset() { *m = VolumeSyncIndexResponse{} } -func (m *VolumeSyncIndexResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncIndexResponse) ProtoMessage() {} -func (*VolumeSyncIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *VolumeSyncIndexResponse) GetIndexFileContent() []byte { +func (m *VolumeIncrementalCopyRequest) GetSinceNs() uint64 { if m != nil { - return m.IndexFileContent - } - return nil -} - -type VolumeSyncDataRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` - Revision uint32 `protobuf:"varint,2,opt,name=revision" json:"revision,omitempty"` - Offset uint32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` - NeedleId string `protobuf:"bytes,5,opt,name=needle_id,json=needleId" json:"needle_id,omitempty"` -} - -func (m *VolumeSyncDataRequest) Reset() { *m = VolumeSyncDataRequest{} } -func (m *VolumeSyncDataRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncDataRequest) ProtoMessage() {} -func (*VolumeSyncDataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *VolumeSyncDataRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId + return m.SinceNs } return 0 } -func (m *VolumeSyncDataRequest) GetRevision() uint32 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *VolumeSyncDataRequest) GetOffset() uint32 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *VolumeSyncDataRequest) GetSize() uint32 { - if m != nil 
{ - return m.Size - } - return 0 -} - -func (m *VolumeSyncDataRequest) GetNeedleId() string { - if m != nil { - return m.NeedleId - } - return "" -} - -type VolumeSyncDataResponse struct { +type VolumeIncrementalCopyResponse struct { FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` } -func (m *VolumeSyncDataResponse) Reset() { *m = VolumeSyncDataResponse{} } -func (m *VolumeSyncDataResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncDataResponse) ProtoMessage() {} -func (*VolumeSyncDataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (m *VolumeIncrementalCopyResponse) Reset() { *m = VolumeIncrementalCopyResponse{} } +func (m *VolumeIncrementalCopyResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeIncrementalCopyResponse) ProtoMessage() {} +func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } -func (m *VolumeSyncDataResponse) GetFileContent() []byte { +func (m *VolumeIncrementalCopyResponse) GetFileContent() []byte { if m != nil { return m.FileContent } @@ -513,17 +489,17 @@ func (m *VolumeSyncDataResponse) GetFileContent() []byte { } type VolumeMountRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeMountRequest) ProtoMessage() {} -func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } -func (m *VolumeMountRequest) GetVolumdId() uint32 { +func (m *VolumeMountRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -534,20 +510,20 @@ type VolumeMountResponse struct { func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} } func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeMountResponse) ProtoMessage() {} -func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } type VolumeUnmountRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountRequest) ProtoMessage() {} -func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } -func (m *VolumeUnmountRequest) GetVolumdId() uint32 { +func (m *VolumeUnmountRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -558,20 +534,20 @@ type VolumeUnmountResponse struct { func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} } func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) } func 
(*VolumeUnmountResponse) ProtoMessage() {} -func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } type VolumeDeleteRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteRequest) ProtoMessage() {} -func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } -func (m *VolumeDeleteRequest) GetVolumdId() uint32 { +func (m *VolumeDeleteRequest) GetVolumeId() uint32 { if m != nil { - return m.VolumdId + return m.VolumeId } return 0 } @@ -582,23 +558,727 @@ type VolumeDeleteResponse struct { func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} } func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteResponse) ProtoMessage() {} -func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } -type VolumeUiPageRequest struct { +type VolumeMarkReadonlyRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } -func (m *VolumeUiPageRequest) Reset() { *m = VolumeUiPageRequest{} } -func (m *VolumeUiPageRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeUiPageRequest) ProtoMessage() {} -func (*VolumeUiPageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (m *VolumeMarkReadonlyRequest) Reset() { *m = VolumeMarkReadonlyRequest{} } +func (m *VolumeMarkReadonlyRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeMarkReadonlyRequest) ProtoMessage() {} +func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } -type VolumeUiPageResponse struct { +func (m *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 } -func (m *VolumeUiPageResponse) Reset() { *m = VolumeUiPageResponse{} } -func (m *VolumeUiPageResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeUiPageResponse) ProtoMessage() {} -func (*VolumeUiPageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +type VolumeMarkReadonlyResponse struct { +} + +func (m *VolumeMarkReadonlyResponse) Reset() { *m = VolumeMarkReadonlyResponse{} } +func (m *VolumeMarkReadonlyResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeMarkReadonlyResponse) ProtoMessage() {} +func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +type VolumeCopyRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"` + SourceDataNode string 
`protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` +} + +func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} } +func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeCopyRequest) ProtoMessage() {} +func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *VolumeCopyRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeCopyRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeCopyRequest) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +func (m *VolumeCopyRequest) GetTtl() string { + if m != nil { + return m.Ttl + } + return "" +} + +func (m *VolumeCopyRequest) GetSourceDataNode() string { + if m != nil { + return m.SourceDataNode + } + return "" +} + +type VolumeCopyResponse struct { + LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs" json:"last_append_at_ns,omitempty"` +} + +func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} } +func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeCopyResponse) ProtoMessage() {} +func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { + if m != nil { + return m.LastAppendAtNs + } + return 0 +} + +type CopyFileRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"` + CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` + StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"` + Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"` + IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"` +} + +func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } +func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } +func (*CopyFileRequest) ProtoMessage() {} +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *CopyFileRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *CopyFileRequest) GetExt() string { + if m != nil { + return m.Ext + } + return "" +} + +func (m *CopyFileRequest) GetCompactionRevision() uint32 { + if m != nil { + return m.CompactionRevision + } + return 0 +} + +func (m *CopyFileRequest) GetStopOffset() uint64 { + if m != nil { + return m.StopOffset + } + return 0 +} + +func (m *CopyFileRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *CopyFileRequest) GetIsEcVolume() bool { + if m != nil { + return m.IsEcVolume + } + return false +} + +type CopyFileResponse struct { + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +} + +func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } +func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } +func (*CopyFileResponse) ProtoMessage() {} +func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, 
[]int{31} } + +func (m *CopyFileResponse) GetFileContent() []byte { + if m != nil { + return m.FileContent + } + return nil +} + +type VolumeTailSenderRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"` +} + +func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} } +func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeTailSenderRequest) ProtoMessage() {} +func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *VolumeTailSenderRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeTailSenderRequest) GetSinceNs() uint64 { + if m != nil { + return m.SinceNs + } + return 0 +} + +func (m *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { + if m != nil { + return m.IdleTimeoutSeconds + } + return 0 +} + +type VolumeTailSenderResponse struct { + NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` + NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` + IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk" json:"is_last_chunk,omitempty"` +} + +func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} } +func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeTailSenderResponse) ProtoMessage() {} +func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte { + if m != nil { + return m.NeedleHeader + } + return nil +} + +func (m *VolumeTailSenderResponse) GetNeedleBody() []byte { + if m != nil { + return m.NeedleBody + } + return nil +} + +func (m *VolumeTailSenderResponse) GetIsLastChunk() bool { + if m != nil { + return m.IsLastChunk + } + return false +} + +type VolumeTailReceiverRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"` + SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer" json:"source_volume_server,omitempty"` +} + +func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} } +func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeTailReceiverRequest) ProtoMessage() {} +func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeTailReceiverRequest) GetSinceNs() uint64 { + if m != nil { + return m.SinceNs + } + return 0 +} + +func (m *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 { + if m != nil { + return m.IdleTimeoutSeconds + } + return 0 +} + +func (m *VolumeTailReceiverRequest) 
GetSourceVolumeServer() string { + if m != nil { + return m.SourceVolumeServer + } + return "" +} + +type VolumeTailReceiverResponse struct { +} + +func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} } +func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeTailReceiverResponse) ProtoMessage() {} +func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +type VolumeEcShardsGenerateRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` +} + +func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} } +func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} +func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsGenerateRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +type VolumeEcShardsGenerateResponse struct { +} + +func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } +func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type VolumeEcShardsRebuildRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` +} + +func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} } +func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} +func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsRebuildRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +type VolumeEcShardsRebuildResponse struct { + RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds" json:"rebuilt_shard_ids,omitempty"` +} + +func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} } +func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} +func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { + if m != nil { + return m.RebuiltShardIds + } + return nil +} + +type VolumeEcShardsCopyRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" 
json:"shard_ids,omitempty"` + CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile" json:"copy_ecx_file,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` +} + +func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} } +func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsCopyRequest) ProtoMessage() {} +func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsCopyRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeEcShardsCopyRequest) GetShardIds() []uint32 { + if m != nil { + return m.ShardIds + } + return nil +} + +func (m *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool { + if m != nil { + return m.CopyEcxFile + } + return false +} + +func (m *VolumeEcShardsCopyRequest) GetSourceDataNode() string { + if m != nil { + return m.SourceDataNode + } + return "" +} + +type VolumeEcShardsCopyResponse struct { +} + +func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} } +func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsCopyResponse) ProtoMessage() {} +func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +type VolumeEcShardsDeleteRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` +} + +func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} } +func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} +func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsDeleteRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { + if m != nil { + return m.ShardIds + } + return nil +} + +type VolumeEcShardsDeleteResponse struct { +} + +func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} } +func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} +func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +type VolumeEcShardsMountRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` +} + +func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} } +func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) 
} +func (*VolumeEcShardsMountRequest) ProtoMessage() {} +func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsMountRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeEcShardsMountRequest) GetShardIds() []uint32 { + if m != nil { + return m.ShardIds + } + return nil +} + +type VolumeEcShardsMountResponse struct { +} + +func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} } +func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsMountResponse) ProtoMessage() {} +func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +type VolumeEcShardsUnmountRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` +} + +func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} } +func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} +func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { + if m != nil { + return m.ShardIds + } + return nil +} + +type VolumeEcShardsUnmountResponse struct { +} + +func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} } +func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} +func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +type VolumeEcShardReadRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` + FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` +} + +func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} } +func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardReadRequest) ProtoMessage() {} +func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcShardReadRequest) GetShardId() uint32 { + if m != nil { + return m.ShardId + } + return 0 +} + +func (m *VolumeEcShardReadRequest) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *VolumeEcShardReadRequest) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *VolumeEcShardReadRequest) GetFileKey() uint64 { + if m != nil { + return m.FileKey + } + return 0 +} + +type VolumeEcShardReadResponse 
struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted" json:"is_deleted,omitempty"` +} + +func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} } +func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardReadResponse) ProtoMessage() {} +func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *VolumeEcShardReadResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *VolumeEcShardReadResponse) GetIsDeleted() bool { + if m != nil { + return m.IsDeleted + } + return false +} + +type VolumeEcBlobDeleteRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` + Version uint32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` +} + +func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} } +func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} +func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeEcBlobDeleteRequest) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeEcBlobDeleteRequest) GetFileKey() uint64 { + if m != nil { + return m.FileKey + } + return 0 +} + +func (m *VolumeEcBlobDeleteRequest) GetVersion() uint32 { + if m != nil { + return m.Version + } + return 0 +} + +type VolumeEcBlobDeleteResponse struct { +} + +func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} } +func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} +func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type ReadVolumeFileStatusRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +} + +func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } +func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } +func (*ReadVolumeFileStatusRequest) ProtoMessage() {} +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +type ReadVolumeFileStatusResponse struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds" json:"idx_file_timestamp_seconds,omitempty"` + IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"` + DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds" json:"dat_file_timestamp_seconds,omitempty"` + DatFileSize 
uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize" json:"dat_file_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` + CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` + Collection string `protobuf:"bytes,8,opt,name=collection" json:"collection,omitempty"` +} + +func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } +func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } +func (*ReadVolumeFileStatusResponse) ProtoMessage() {} +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 { + if m != nil { + return m.IdxFileTimestampSeconds + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { + if m != nil { + return m.IdxFileSize + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 { + if m != nil { + return m.DatFileTimestampSeconds + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { + if m != nil { + return m.DatFileSize + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetFileCount() uint64 { + if m != nil { + return m.FileCount + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 { + if m != nil { + return m.CompactionRevision + } + return 0 +} + +func (m *ReadVolumeFileStatusResponse) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} type DiskStatus struct { Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` @@ -610,7 +1290,7 @@ type DiskStatus struct { func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -653,7 +1333,7 @@ type MemStatus struct { func (m *MemStatus) Reset() { *m = MemStatus{} } func (m *MemStatus) String() string { return proto.CompactTextString(m) } func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } func (m *MemStatus) GetGoroutines() int32 { if m != nil { @@ -719,22 +1399,46 @@ func init() { proto.RegisterType((*VacuumVolumeCleanupResponse)(nil), "volume_server_pb.VacuumVolumeCleanupResponse") proto.RegisterType((*DeleteCollectionRequest)(nil), "volume_server_pb.DeleteCollectionRequest") proto.RegisterType((*DeleteCollectionResponse)(nil), "volume_server_pb.DeleteCollectionResponse") - proto.RegisterType((*AssignVolumeRequest)(nil), "volume_server_pb.AssignVolumeRequest") - proto.RegisterType((*AssignVolumeResponse)(nil), "volume_server_pb.AssignVolumeResponse") + proto.RegisterType((*AllocateVolumeRequest)(nil), "volume_server_pb.AllocateVolumeRequest") + proto.RegisterType((*AllocateVolumeResponse)(nil), "volume_server_pb.AllocateVolumeResponse") proto.RegisterType((*VolumeSyncStatusRequest)(nil), 
"volume_server_pb.VolumeSyncStatusRequest") proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse") - proto.RegisterType((*VolumeSyncIndexRequest)(nil), "volume_server_pb.VolumeSyncIndexRequest") - proto.RegisterType((*VolumeSyncIndexResponse)(nil), "volume_server_pb.VolumeSyncIndexResponse") - proto.RegisterType((*VolumeSyncDataRequest)(nil), "volume_server_pb.VolumeSyncDataRequest") - proto.RegisterType((*VolumeSyncDataResponse)(nil), "volume_server_pb.VolumeSyncDataResponse") + proto.RegisterType((*VolumeIncrementalCopyRequest)(nil), "volume_server_pb.VolumeIncrementalCopyRequest") + proto.RegisterType((*VolumeIncrementalCopyResponse)(nil), "volume_server_pb.VolumeIncrementalCopyResponse") proto.RegisterType((*VolumeMountRequest)(nil), "volume_server_pb.VolumeMountRequest") proto.RegisterType((*VolumeMountResponse)(nil), "volume_server_pb.VolumeMountResponse") proto.RegisterType((*VolumeUnmountRequest)(nil), "volume_server_pb.VolumeUnmountRequest") proto.RegisterType((*VolumeUnmountResponse)(nil), "volume_server_pb.VolumeUnmountResponse") proto.RegisterType((*VolumeDeleteRequest)(nil), "volume_server_pb.VolumeDeleteRequest") proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse") - proto.RegisterType((*VolumeUiPageRequest)(nil), "volume_server_pb.VolumeUiPageRequest") - proto.RegisterType((*VolumeUiPageResponse)(nil), "volume_server_pb.VolumeUiPageResponse") + proto.RegisterType((*VolumeMarkReadonlyRequest)(nil), "volume_server_pb.VolumeMarkReadonlyRequest") + proto.RegisterType((*VolumeMarkReadonlyResponse)(nil), "volume_server_pb.VolumeMarkReadonlyResponse") + proto.RegisterType((*VolumeCopyRequest)(nil), "volume_server_pb.VolumeCopyRequest") + proto.RegisterType((*VolumeCopyResponse)(nil), "volume_server_pb.VolumeCopyResponse") + proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest") + proto.RegisterType((*CopyFileResponse)(nil), "volume_server_pb.CopyFileResponse") + proto.RegisterType((*VolumeTailSenderRequest)(nil), "volume_server_pb.VolumeTailSenderRequest") + proto.RegisterType((*VolumeTailSenderResponse)(nil), "volume_server_pb.VolumeTailSenderResponse") + proto.RegisterType((*VolumeTailReceiverRequest)(nil), "volume_server_pb.VolumeTailReceiverRequest") + proto.RegisterType((*VolumeTailReceiverResponse)(nil), "volume_server_pb.VolumeTailReceiverResponse") + proto.RegisterType((*VolumeEcShardsGenerateRequest)(nil), "volume_server_pb.VolumeEcShardsGenerateRequest") + proto.RegisterType((*VolumeEcShardsGenerateResponse)(nil), "volume_server_pb.VolumeEcShardsGenerateResponse") + proto.RegisterType((*VolumeEcShardsRebuildRequest)(nil), "volume_server_pb.VolumeEcShardsRebuildRequest") + proto.RegisterType((*VolumeEcShardsRebuildResponse)(nil), "volume_server_pb.VolumeEcShardsRebuildResponse") + proto.RegisterType((*VolumeEcShardsCopyRequest)(nil), "volume_server_pb.VolumeEcShardsCopyRequest") + proto.RegisterType((*VolumeEcShardsCopyResponse)(nil), "volume_server_pb.VolumeEcShardsCopyResponse") + proto.RegisterType((*VolumeEcShardsDeleteRequest)(nil), "volume_server_pb.VolumeEcShardsDeleteRequest") + proto.RegisterType((*VolumeEcShardsDeleteResponse)(nil), "volume_server_pb.VolumeEcShardsDeleteResponse") + proto.RegisterType((*VolumeEcShardsMountRequest)(nil), "volume_server_pb.VolumeEcShardsMountRequest") + proto.RegisterType((*VolumeEcShardsMountResponse)(nil), "volume_server_pb.VolumeEcShardsMountResponse") + proto.RegisterType((*VolumeEcShardsUnmountRequest)(nil), 
"volume_server_pb.VolumeEcShardsUnmountRequest") + proto.RegisterType((*VolumeEcShardsUnmountResponse)(nil), "volume_server_pb.VolumeEcShardsUnmountResponse") + proto.RegisterType((*VolumeEcShardReadRequest)(nil), "volume_server_pb.VolumeEcShardReadRequest") + proto.RegisterType((*VolumeEcShardReadResponse)(nil), "volume_server_pb.VolumeEcShardReadResponse") + proto.RegisterType((*VolumeEcBlobDeleteRequest)(nil), "volume_server_pb.VolumeEcBlobDeleteRequest") + proto.RegisterType((*VolumeEcBlobDeleteResponse)(nil), "volume_server_pb.VolumeEcBlobDeleteResponse") + proto.RegisterType((*ReadVolumeFileStatusRequest)(nil), "volume_server_pb.ReadVolumeFileStatusRequest") + proto.RegisterType((*ReadVolumeFileStatusResponse)(nil), "volume_server_pb.ReadVolumeFileStatusResponse") proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus") proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus") } @@ -757,13 +1461,28 @@ type VolumeServerClient interface { VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) - AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) + AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) - VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) - VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) + VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) + VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) + // copy the .idx .dat files, and mount this volume + VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) + ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) + CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) + VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) + VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) + // erasure coding + VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) + VolumeEcShardsRebuild(ctx context.Context, in 
*VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) + VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) + VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) + VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) + VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) + VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) + VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) } type volumeServerClient struct { @@ -828,9 +1547,9 @@ func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCol return out, nil } -func (c *volumeServerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { - out := new(AssignVolumeResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AssignVolume", in, out, c.cc, opts...) +func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { + out := new(AllocateVolumeResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -846,12 +1565,12 @@ func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyn return out, nil } -func (c *volumeServerClient) VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncIndex", opts...) +func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) 
if err != nil { return nil, err } - x := &volumeServerVolumeSyncIndexClient{stream} + x := &volumeServerVolumeIncrementalCopyClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -861,49 +1580,17 @@ func (c *volumeServerClient) VolumeSyncIndex(ctx context.Context, in *VolumeSync return x, nil } -type VolumeServer_VolumeSyncIndexClient interface { - Recv() (*VolumeSyncIndexResponse, error) +type VolumeServer_VolumeIncrementalCopyClient interface { + Recv() (*VolumeIncrementalCopyResponse, error) grpc.ClientStream } -type volumeServerVolumeSyncIndexClient struct { +type volumeServerVolumeIncrementalCopyClient struct { grpc.ClientStream } -func (x *volumeServerVolumeSyncIndexClient) Recv() (*VolumeSyncIndexResponse, error) { - m := new(VolumeSyncIndexResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *volumeServerClient) VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncData", opts...) - if err != nil { - return nil, err - } - x := &volumeServerVolumeSyncDataClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type VolumeServer_VolumeSyncDataClient interface { - Recv() (*VolumeSyncDataResponse, error) - grpc.ClientStream -} - -type volumeServerVolumeSyncDataClient struct { - grpc.ClientStream -} - -func (x *volumeServerVolumeSyncDataClient) Recv() (*VolumeSyncDataResponse, error) { - m := new(VolumeSyncDataResponse) +func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopyResponse, error) { + m := new(VolumeIncrementalCopyResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -937,6 +1624,201 @@ func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteR return out, nil } +func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { + out := new(VolumeMarkReadonlyResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) { + out := new(VolumeCopyResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { + out := new(ReadVolumeFileStatusResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) 
+ if err != nil { + return nil, err + } + x := &volumeServerCopyFileClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_CopyFileClient interface { + Recv() (*CopyFileResponse, error) + grpc.ClientStream +} + +type volumeServerCopyFileClient struct { + grpc.ClientStream +} + +func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { + m := new(CopyFileResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeTailSenderClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTailSenderClient interface { + Recv() (*VolumeTailSenderResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTailSenderClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, error) { + m := new(VolumeTailSenderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) { + out := new(VolumeTailReceiverResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) { + out := new(VolumeEcShardsGenerateResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) { + out := new(VolumeEcShardsRebuildResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) { + out := new(VolumeEcShardsCopyResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) { + out := new(VolumeEcShardsDeleteResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) { + out := new(VolumeEcShardsMountResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) { + out := new(VolumeEcShardsUnmountResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeEcShardReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeEcShardReadClient interface { + Recv() (*VolumeEcShardReadResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeEcShardReadClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse, error) { + m := new(VolumeEcShardReadResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) { + out := new(VolumeEcBlobDeleteResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for VolumeServer service type VolumeServerServer interface { @@ -947,13 +1829,28 @@ type VolumeServerServer interface { VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) - AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) + AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) - VolumeSyncIndex(*VolumeSyncIndexRequest, VolumeServer_VolumeSyncIndexServer) error - VolumeSyncData(*VolumeSyncDataRequest, VolumeServer_VolumeSyncDataServer) error + VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) + VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) + // copy the .idx .dat files, and mount this volume + VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) + ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) + CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error + VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error + VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) + // erasure coding + VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) + VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) + VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) + VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) + VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) + VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) + VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error + VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) } func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { @@ -1068,20 +1965,20 @@ func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } -func _VolumeServer_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AssignVolumeRequest) +func _VolumeServer_AllocateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AllocateVolumeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).AssignVolume(ctx, in) + return 
srv.(VolumeServerServer).AllocateVolume(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/AssignVolume", + FullMethod: "/volume_server_pb.VolumeServer/AllocateVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).AssignVolume(ctx, req.(*AssignVolumeRequest)) + return srv.(VolumeServerServer).AllocateVolume(ctx, req.(*AllocateVolumeRequest)) } return interceptor(ctx, in, info, handler) } @@ -1104,45 +2001,24 @@ func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } -func _VolumeServer_VolumeSyncIndex_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(VolumeSyncIndexRequest) +func _VolumeServer_VolumeIncrementalCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeIncrementalCopyRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeSyncIndex(m, &volumeServerVolumeSyncIndexServer{stream}) + return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &volumeServerVolumeIncrementalCopyServer{stream}) } -type VolumeServer_VolumeSyncIndexServer interface { - Send(*VolumeSyncIndexResponse) error +type VolumeServer_VolumeIncrementalCopyServer interface { + Send(*VolumeIncrementalCopyResponse) error grpc.ServerStream } -type volumeServerVolumeSyncIndexServer struct { +type volumeServerVolumeIncrementalCopyServer struct { grpc.ServerStream } -func (x *volumeServerVolumeSyncIndexServer) Send(m *VolumeSyncIndexResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _VolumeServer_VolumeSyncData_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(VolumeSyncDataRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(VolumeServerServer).VolumeSyncData(m, &volumeServerVolumeSyncDataServer{stream}) -} - -type VolumeServer_VolumeSyncDataServer interface { - Send(*VolumeSyncDataResponse) error - grpc.ServerStream -} - -type volumeServerVolumeSyncDataServer struct { - grpc.ServerStream -} - -func (x *volumeServerVolumeSyncDataServer) Send(m *VolumeSyncDataResponse) error { +func (x *volumeServerVolumeIncrementalCopyServer) Send(m *VolumeIncrementalCopyResponse) error { return x.ServerStream.SendMsg(m) } @@ -1200,6 +2076,267 @@ func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeMarkReadonlyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkReadonly", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeCopyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeCopy(ctx, in) + } 
+ info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeCopy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeCopy(ctx, req.(*VolumeCopyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadVolumeFileStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, req.(*ReadVolumeFileStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CopyFileRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).CopyFile(m, &volumeServerCopyFileServer{stream}) +} + +type VolumeServer_CopyFileServer interface { + Send(*CopyFileResponse) error + grpc.ServerStream +} + +type volumeServerCopyFileServer struct { + grpc.ServerStream +} + +func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeTailSenderRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTailSender(m, &volumeServerVolumeTailSenderServer{stream}) +} + +type VolumeServer_VolumeTailSenderServer interface { + Send(*VolumeTailSenderResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTailSenderServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTailSenderServer) Send(m *VolumeTailSenderResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeTailReceiverRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeTailReceiver(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeTailReceiver", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeTailReceiver(ctx, req.(*VolumeTailReceiverRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeEcShardsGenerate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsGenerateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, req.(*VolumeEcShardsGenerateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeEcShardsRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsRebuildRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, req.(*VolumeEcShardsRebuildRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeEcShardsCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsCopyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, req.(*VolumeEcShardsCopyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeEcShardsDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, req.(*VolumeEcShardsDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeEcShardsMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsMountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsMount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, req.(*VolumeEcShardsMountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeEcShardsUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsUnmountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", + } + handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, req.(*VolumeEcShardsUnmountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeEcShardRead_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeEcShardReadRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeEcShardRead(m, &volumeServerVolumeEcShardReadServer{stream}) +} + +type VolumeServer_VolumeEcShardReadServer interface { + Send(*VolumeEcShardReadResponse) error + grpc.ServerStream +} + +type volumeServerVolumeEcShardReadServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeEcShardReadServer) Send(m *VolumeEcShardReadResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcBlobDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, req.(*VolumeEcBlobDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _VolumeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "volume_server_pb.VolumeServer", HandlerType: (*VolumeServerServer)(nil), @@ -1229,8 +2366,8 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_DeleteCollection_Handler, }, { - MethodName: "AssignVolume", - Handler: _VolumeServer_AssignVolume_Handler, + MethodName: "AllocateVolume", + Handler: _VolumeServer_AllocateVolume_Handler, }, { MethodName: "VolumeSyncStatus", @@ -1248,16 +2385,70 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeDelete", Handler: _VolumeServer_VolumeDelete_Handler, }, + { + MethodName: "VolumeMarkReadonly", + Handler: _VolumeServer_VolumeMarkReadonly_Handler, + }, + { + MethodName: "VolumeCopy", + Handler: _VolumeServer_VolumeCopy_Handler, + }, + { + MethodName: "ReadVolumeFileStatus", + Handler: _VolumeServer_ReadVolumeFileStatus_Handler, + }, + { + MethodName: "VolumeTailReceiver", + Handler: _VolumeServer_VolumeTailReceiver_Handler, + }, + { + MethodName: "VolumeEcShardsGenerate", + Handler: _VolumeServer_VolumeEcShardsGenerate_Handler, + }, + { + MethodName: "VolumeEcShardsRebuild", + Handler: _VolumeServer_VolumeEcShardsRebuild_Handler, + }, + { + MethodName: "VolumeEcShardsCopy", + Handler: _VolumeServer_VolumeEcShardsCopy_Handler, + }, + { + MethodName: "VolumeEcShardsDelete", + Handler: _VolumeServer_VolumeEcShardsDelete_Handler, + }, + { + MethodName: "VolumeEcShardsMount", + Handler: _VolumeServer_VolumeEcShardsMount_Handler, + }, + { + MethodName: "VolumeEcShardsUnmount", + Handler: _VolumeServer_VolumeEcShardsUnmount_Handler, + }, + { + MethodName: "VolumeEcBlobDelete", + Handler: _VolumeServer_VolumeEcBlobDelete_Handler, + }, }, Streams: []grpc.StreamDesc{ { - StreamName: "VolumeSyncIndex", - Handler: _VolumeServer_VolumeSyncIndex_Handler, + StreamName: "VolumeIncrementalCopy", + Handler: _VolumeServer_VolumeIncrementalCopy_Handler, ServerStreams: true, }, { - StreamName: "VolumeSyncData", - Handler: _VolumeServer_VolumeSyncData_Handler, + 
StreamName: "CopyFile", + Handler: _VolumeServer_CopyFile_Handler, + ServerStreams: true, + }, + { + StreamName: "VolumeTailSender", + Handler: _VolumeServer_VolumeTailSender_Handler, + ServerStreams: true, + }, + { + StreamName: "VolumeEcShardRead", + Handler: _VolumeServer_VolumeEcShardRead_Handler, ServerStreams: true, }, }, @@ -1267,71 +2458,126 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1044 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x57, 0xdd, 0x72, 0xdb, 0x44, - 0x14, 0x8e, 0x6a, 0x3b, 0x76, 0x8e, 0x6d, 0x6a, 0xd6, 0x69, 0xa2, 0xaa, 0x10, 0x8c, 0x80, 0xd4, - 0x69, 0x43, 0x80, 0x74, 0x80, 0x32, 0xdc, 0x00, 0x09, 0x30, 0xb9, 0xe8, 0x94, 0xd9, 0x4c, 0x3b, - 0xcc, 0xd0, 0x19, 0x8f, 0x22, 0xad, 0x9d, 0x25, 0xb2, 0xe4, 0x6a, 0x57, 0x99, 0x94, 0x37, 0xe1, - 0x9a, 0x1b, 0x9e, 0x8e, 0x17, 0xe0, 0x86, 0xd9, 0x1f, 0xd9, 0xfa, 0x73, 0x24, 0xe0, 0x6e, 0xf7, - 0xec, 0x39, 0xdf, 0xf9, 0xd9, 0xa3, 0xf3, 0xad, 0x60, 0x78, 0x1d, 0xfa, 0xf1, 0x9c, 0x4c, 0x18, - 0x89, 0xae, 0x49, 0x74, 0xb4, 0x88, 0x42, 0x1e, 0xa2, 0x41, 0x46, 0x38, 0x59, 0x5c, 0xd8, 0x9f, - 0x00, 0xfa, 0xce, 0xe1, 0xee, 0xe5, 0x29, 0xf1, 0x09, 0x27, 0x98, 0xbc, 0x8e, 0x09, 0xe3, 0xe8, - 0x3e, 0x74, 0xa6, 0xd4, 0x27, 0x13, 0xea, 0x31, 0xd3, 0x18, 0x35, 0xc6, 0x5b, 0xb8, 0x2d, 0xf6, - 0x67, 0x1e, 0xb3, 0x9f, 0xc3, 0x30, 0x63, 0xc0, 0x16, 0x61, 0xc0, 0x08, 0x7a, 0x0a, 0xed, 0x88, - 0xb0, 0xd8, 0xe7, 0xca, 0xa0, 0x7b, 0xbc, 0x77, 0x94, 0xf7, 0x75, 0xb4, 0x34, 0x89, 0x7d, 0x8e, - 0x13, 0x75, 0x9b, 0x42, 0x2f, 0x7d, 0x80, 0x76, 0xa1, 0xad, 0x7d, 0x9b, 0xc6, 0xc8, 0x18, 0x6f, - 0xe1, 0x4d, 0xe5, 0x1a, 0xed, 0xc0, 0x26, 0xe3, 0x0e, 0x8f, 0x99, 0x79, 0x67, 0x64, 0x8c, 0x5b, - 0x58, 0xef, 0xd0, 0x36, 0xb4, 0x48, 0x14, 0x85, 0x91, 0xd9, 0x90, 0xea, 0x6a, 0x83, 0x10, 0x34, - 0x19, 0xfd, 0x8d, 0x98, 0xcd, 0x91, 0x31, 0xee, 0x63, 0xb9, 0xb6, 0xdb, 0xd0, 0xfa, 0x7e, 0xbe, - 0xe0, 0x6f, 0xec, 0x2f, 0xc1, 0x7c, 0xe9, 0xb8, 0x71, 0x3c, 0x7f, 0x29, 0x63, 0x3c, 0xb9, 0x24, - 0xee, 0x55, 0x92, 0xfb, 0x03, 0xd8, 0x92, 0x91, 0x7b, 0x49, 0x04, 0x7d, 0xdc, 0x51, 0x82, 0x33, - 0xcf, 0xfe, 0x06, 0xee, 0x97, 0x18, 0xea, 0x1a, 0x7c, 0x00, 0xfd, 0x99, 0x13, 0x5d, 0x38, 0x33, - 0x32, 0x89, 0x1c, 0x4e, 0x43, 0x69, 0x6d, 0xe0, 0x9e, 0x16, 0x62, 0x21, 0xb3, 0x7f, 0x01, 0x2b, - 0x83, 0x10, 0xce, 0x17, 0x8e, 0xcb, 0xeb, 0x38, 0x47, 0x23, 0xe8, 0x2e, 0x22, 0xe2, 0xf8, 0x7e, - 0xe8, 0x3a, 0x9c, 0xc8, 0x2a, 0x34, 0x70, 0x5a, 0x64, 0xbf, 0x0b, 0x0f, 0x4a, 0xc1, 0x55, 0x80, - 0xf6, 0xd3, 0x5c, 0xf4, 0xe1, 0x7c, 0x4e, 0x6b, 0xb9, 0xb6, 0xdf, 0x29, 0x44, 0x2d, 0x2d, 0x35, - 0xee, 0x57, 0xb9, 0x53, 0x9f, 0x38, 0x41, 0xbc, 0xa8, 0x05, 0x9c, 0x8f, 0x38, 0x31, 0x5d, 0x22, - 0xef, 0xaa, 0xe6, 0x38, 0x09, 0x7d, 0x9f, 0xb8, 0x9c, 0x86, 0x41, 0x02, 0xbb, 0x07, 0xe0, 0x2e, - 0x85, 0xba, 0x55, 0x52, 0x12, 0xdb, 0x02, 0xb3, 0x68, 0xaa, 0x61, 0xff, 0x34, 0x60, 0xf8, 0x2d, - 0x63, 0x74, 0x16, 0x28, 0xb7, 0xb5, 0xca, 0x9f, 0x75, 0x78, 0x27, 0xef, 0x30, 0x7f, 0x3d, 0x8d, - 0xc2, 0xf5, 0x08, 0x8d, 0x88, 0x2c, 0x7c, 0xea, 0x3a, 0x12, 0xa2, 0x29, 0x21, 0xd2, 0x22, 0x34, - 0x80, 0x06, 0xe7, 0xbe, 0xd9, 0x92, 0x27, 0x62, 0x69, 0xef, 0xc0, 0x76, 0x36, 0x52, 0x9d, 0xc2, - 0x17, 0xb0, 0xab, 0x24, 0xe7, 0x6f, 0x02, 0xf7, 0x5c, 0x7e, 0x09, 0xb5, 0x0a, 0xfe, 0xb7, 0x01, - 0x66, 0xd1, 0x50, 0x77, 0xf0, 0xff, 0xcd, 0xff, 0xdf, 0x66, 0x87, 0xde, 0x83, 0x2e, 0x77, 0xa8, - 0x3f, 0x09, 0xa7, 0x53, 0x46, 0xb8, 0xb9, 0x39, 0x32, 
0xc6, 0x4d, 0x0c, 0x42, 0xf4, 0x5c, 0x4a, - 0xd0, 0x01, 0x0c, 0x5c, 0xd5, 0xc5, 0x93, 0x88, 0x5c, 0x53, 0x26, 0x90, 0xdb, 0x32, 0xb0, 0xbb, - 0x6e, 0xd2, 0xdd, 0x4a, 0x8c, 0x6c, 0xe8, 0x53, 0xef, 0x66, 0x22, 0x87, 0x87, 0xfc, 0xf4, 0x3b, - 0x12, 0xad, 0x4b, 0xbd, 0x9b, 0x1f, 0xa8, 0x4f, 0xce, 0xc5, 0x04, 0xf8, 0x1c, 0x76, 0x56, 0xc9, - 0x9f, 0x05, 0x1e, 0xb9, 0xa9, 0x55, 0xb4, 0x1f, 0xd3, 0xc5, 0xd6, 0x66, 0xba, 0x64, 0x87, 0x80, - 0xa8, 0x10, 0x28, 0xbf, 0x6e, 0x18, 0x70, 0x12, 0x70, 0x09, 0xd0, 0xc3, 0x03, 0x79, 0x22, 0x9c, - 0x9f, 0x28, 0xb9, 0xfd, 0xbb, 0x01, 0xf7, 0x56, 0x48, 0xa7, 0x0e, 0x77, 0x6a, 0xb5, 0x9e, 0x05, - 0x9d, 0x65, 0xf6, 0x77, 0xd4, 0x59, 0xb2, 0x17, 0x63, 0x51, 0x57, 0xaf, 0x21, 0x4f, 0xf4, 0xae, - 0x6c, 0x00, 0x0a, 0x27, 0x01, 0x21, 0x9e, 0x9a, 0xae, 0xea, 0x1a, 0x3a, 0x4a, 0x70, 0xe6, 0xd9, - 0x5f, 0xa7, 0x6b, 0xa3, 0x42, 0xd3, 0x39, 0xbe, 0x0f, 0xbd, 0x92, 0xec, 0xba, 0xd3, 0x54, 0x62, - 0x9f, 0x01, 0x52, 0xc6, 0xcf, 0xc2, 0x38, 0xa8, 0x37, 0x53, 0xee, 0xc1, 0x30, 0x63, 0xa2, 0x1b, - 0xfb, 0x09, 0x6c, 0x2b, 0xf1, 0x8b, 0x60, 0x5e, 0x1b, 0x6b, 0x37, 0x29, 0xeb, 0xd2, 0x48, 0xa3, - 0x1d, 0x27, 0x4e, 0xb2, 0x04, 0x77, 0x2b, 0xd8, 0x4e, 0x12, 0x41, 0x96, 0xe3, 0x56, 0x01, 0xbf, - 0xa0, 0x3f, 0x89, 0x79, 0xae, 0xb0, 0x56, 0xea, 0x89, 0x58, 0xab, 0xff, 0x0c, 0x70, 0x4a, 0xd9, - 0x95, 0xfa, 0xc4, 0x44, 0xef, 0x7b, 0x34, 0xd2, 0x73, 0x4a, 0x2c, 0x85, 0xc4, 0xf1, 0x7d, 0x79, - 0x9f, 0x4d, 0x2c, 0x96, 0xe2, 0xca, 0x62, 0x46, 0x3c, 0x79, 0x91, 0x4d, 0x2c, 0xd7, 0x42, 0x36, - 0x8d, 0x88, 0xba, 0xc6, 0x26, 0x96, 0x6b, 0xfb, 0x0f, 0x03, 0xb6, 0x9e, 0x91, 0xb9, 0x46, 0xde, - 0x03, 0x98, 0x85, 0x51, 0x18, 0x73, 0x1a, 0x10, 0x26, 0x1d, 0xb4, 0x70, 0x4a, 0xf2, 0xdf, 0xfd, - 0xc8, 0x16, 0x22, 0xfe, 0x54, 0x76, 0x4a, 0x13, 0xcb, 0xb5, 0x90, 0x5d, 0x12, 0x67, 0xa1, 0x3f, - 0x55, 0xb9, 0x16, 0x0c, 0xcc, 0xb8, 0xe3, 0x5e, 0xc9, 0x2f, 0xb3, 0x89, 0xd5, 0xe6, 0xf8, 0x2f, - 0x80, 0x9e, 0x6e, 0x28, 0xf9, 0x04, 0x40, 0xaf, 0xa0, 0x9b, 0x7a, 0x3a, 0xa0, 0x0f, 0x8b, 0x2f, - 0x84, 0xe2, 0x53, 0xc4, 0xfa, 0xa8, 0x42, 0x4b, 0x17, 0x7b, 0x03, 0x05, 0xf0, 0x76, 0x81, 0x9a, - 0xd1, 0xa3, 0xa2, 0xf5, 0x3a, 0xe2, 0xb7, 0x1e, 0xd7, 0xd2, 0x5d, 0xfa, 0xe3, 0x30, 0x2c, 0xe1, - 0x5a, 0x74, 0x58, 0x81, 0x92, 0xe1, 0x7b, 0xeb, 0xe3, 0x9a, 0xda, 0x4b, 0xaf, 0xaf, 0x01, 0x15, - 0x89, 0x18, 0x3d, 0xae, 0x84, 0x59, 0x11, 0xbd, 0x75, 0x58, 0x4f, 0x79, 0x6d, 0xa2, 0x8a, 0xa2, - 0x2b, 0x13, 0xcd, 0x3c, 0x02, 0x2a, 0x13, 0xcd, 0xf1, 0xfe, 0x06, 0xba, 0x82, 0x41, 0x9e, 0xbe, - 0xd1, 0xc1, 0xba, 0x37, 0x65, 0xe1, 0x75, 0x60, 0x3d, 0xaa, 0xa3, 0xba, 0x74, 0x36, 0x81, 0x5e, - 0x9a, 0x64, 0x51, 0x49, 0xd3, 0x95, 0x3c, 0x17, 0xac, 0xfd, 0x2a, 0xb5, 0x74, 0x36, 0x79, 0xd2, - 0x2d, 0xcb, 0x66, 0x0d, 0xa3, 0x97, 0x65, 0xb3, 0x8e, 0xc3, 0xed, 0x0d, 0xf4, 0x2b, 0xdc, 0xcd, - 0xb1, 0x15, 0x1a, 0xdf, 0x06, 0x90, 0xe6, 0x41, 0xeb, 0xa0, 0x86, 0x66, 0xe2, 0xe9, 0x53, 0x03, - 0xcd, 0xe0, 0xad, 0x2c, 0x69, 0xa0, 0x87, 0xb7, 0x01, 0xa4, 0x18, 0xcf, 0x1a, 0x57, 0x2b, 0xa6, - 0x1c, 0xbd, 0x82, 0x6e, 0x8a, 0x2d, 0xca, 0x86, 0x47, 0x91, 0x7f, 0xca, 0x86, 0x47, 0x19, 0xe5, - 0x6c, 0xa0, 0x0b, 0xe8, 0x67, 0xf8, 0x03, 0xed, 0xaf, 0xb3, 0xcc, 0xb2, 0x92, 0xf5, 0xb0, 0x52, - 0x2f, 0xdd, 0x64, 0x69, 0x5a, 0x41, 0x6b, 0x83, 0xcb, 0x0e, 0xc0, 0xfd, 0x2a, 0xb5, 0xc4, 0xc1, - 0xc5, 0xa6, 0xfc, 0xc9, 0x7b, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x3c, 0x6d, 0xd7, - 0xfb, 0x0d, 0x00, 0x00, + // 1924 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x19, 0xcb, 0x72, 0xdc, 0xc6, + 0x91, 
0xd0, 0x2e, 0xb9, 0xbb, 0xbd, 0x4b, 0x89, 0x1c, 0x52, 0xe4, 0x0a, 0x14, 0x29, 0x1a, 0x76, + 0x6c, 0x8a, 0xb2, 0x49, 0x45, 0xae, 0x24, 0x4e, 0x72, 0x48, 0x44, 0x8a, 0x49, 0x54, 0x8e, 0xe9, + 0x2a, 0x50, 0x56, 0x39, 0x65, 0x57, 0xa1, 0x86, 0xc0, 0x50, 0x44, 0x11, 0x0b, 0x40, 0x98, 0x01, + 0xa5, 0x55, 0x25, 0x27, 0xe5, 0x9a, 0x0f, 0xc8, 0x39, 0xb7, 0x1c, 0x72, 0xcd, 0x07, 0xe4, 0x17, + 0x72, 0xcb, 0x37, 0xe4, 0x0b, 0x72, 0x49, 0xcd, 0x03, 0x58, 0x3c, 0x77, 0xc1, 0x90, 0x55, 0xb9, + 0x61, 0x7b, 0xfa, 0x3d, 0xdd, 0x3d, 0xdd, 0xbd, 0xb0, 0x72, 0x19, 0x78, 0xf1, 0x88, 0x58, 0x94, + 0x44, 0x97, 0x24, 0xda, 0x0b, 0xa3, 0x80, 0x05, 0x68, 0x29, 0x07, 0xb4, 0xc2, 0x53, 0x63, 0x1f, + 0xd0, 0x01, 0x66, 0xf6, 0xf9, 0x33, 0xe2, 0x11, 0x46, 0x4c, 0xf2, 0x3a, 0x26, 0x94, 0xa1, 0x7b, + 0xd0, 0x3d, 0x73, 0x3d, 0x62, 0xb9, 0x0e, 0x1d, 0x6a, 0xdb, 0xad, 0x9d, 0x9e, 0xd9, 0xe1, 0xbf, + 0x9f, 0x3b, 0xd4, 0xf8, 0x1a, 0x56, 0x72, 0x04, 0x34, 0x0c, 0x7c, 0x4a, 0xd0, 0x17, 0xd0, 0x89, + 0x08, 0x8d, 0x3d, 0x26, 0x09, 0xfa, 0x4f, 0xb6, 0xf6, 0x8a, 0xb2, 0xf6, 0x52, 0x92, 0xd8, 0x63, + 0x66, 0x82, 0x6e, 0xbc, 0xd7, 0x60, 0x90, 0x3d, 0x41, 0xeb, 0xd0, 0x51, 0xc2, 0x87, 0xda, 0xb6, + 0xb6, 0xd3, 0x33, 0x17, 0xa4, 0x6c, 0xb4, 0x06, 0x0b, 0x94, 0x61, 0x16, 0xd3, 0xe1, 0xad, 0x6d, + 0x6d, 0x67, 0xde, 0x54, 0xbf, 0xd0, 0x2a, 0xcc, 0x93, 0x28, 0x0a, 0xa2, 0x61, 0x4b, 0xa0, 0xcb, + 0x1f, 0x08, 0x41, 0x9b, 0xba, 0xef, 0xc8, 0xb0, 0xbd, 0xad, 0xed, 0x2c, 0x9a, 0xe2, 0x1b, 0x0d, + 0xa1, 0x73, 0x49, 0x22, 0xea, 0x06, 0xfe, 0x70, 0x5e, 0x80, 0x93, 0x9f, 0x46, 0x07, 0xe6, 0x8f, + 0x46, 0x21, 0x1b, 0x1b, 0x3f, 0x81, 0xe1, 0x4b, 0x6c, 0xc7, 0xf1, 0xe8, 0xa5, 0x50, 0xff, 0xf0, + 0x9c, 0xd8, 0x17, 0x89, 0x5b, 0x36, 0xa0, 0xa7, 0x8c, 0x52, 0xba, 0x2d, 0x9a, 0x5d, 0x09, 0x78, + 0xee, 0x18, 0xbf, 0x84, 0x7b, 0x15, 0x84, 0xca, 0x3d, 0x1f, 0xc2, 0xe2, 0x2b, 0x1c, 0x9d, 0xe2, + 0x57, 0xc4, 0x8a, 0x30, 0x73, 0x03, 0x41, 0xad, 0x99, 0x03, 0x05, 0x34, 0x39, 0xcc, 0xf8, 0x0e, + 0xf4, 0x1c, 0x87, 0x60, 0x14, 0x62, 0x9b, 0x35, 0x11, 0x8e, 0xb6, 0xa1, 0x1f, 0x46, 0x04, 0x7b, + 0x5e, 0x60, 0x63, 0x46, 0x84, 0x7f, 0x5a, 0x66, 0x16, 0x64, 0x6c, 0xc2, 0x46, 0x25, 0x73, 0xa9, + 0xa0, 0xf1, 0x45, 0x41, 0xfb, 0x60, 0x34, 0x72, 0x1b, 0x89, 0x36, 0xee, 0x97, 0xb4, 0x16, 0x94, + 0x8a, 0xef, 0x4f, 0x0b, 0xa7, 0x1e, 0xc1, 0x7e, 0x1c, 0x36, 0x62, 0x5c, 0xd4, 0x38, 0x21, 0x4d, + 0x39, 0xaf, 0xcb, 0xb0, 0x39, 0x0c, 0x3c, 0x8f, 0xd8, 0xcc, 0x0d, 0xfc, 0x84, 0xed, 0x16, 0x80, + 0x9d, 0x02, 0x55, 0x10, 0x65, 0x20, 0x86, 0x0e, 0xc3, 0x32, 0xa9, 0x62, 0xfb, 0x57, 0x0d, 0xee, + 0x3e, 0x55, 0x4e, 0x93, 0x82, 0x1b, 0x5d, 0x40, 0x5e, 0xe4, 0xad, 0xa2, 0xc8, 0xe2, 0x05, 0xb5, + 0x4a, 0x17, 0xc4, 0x31, 0x22, 0x12, 0x7a, 0xae, 0x8d, 0x05, 0x8b, 0xb6, 0x60, 0x91, 0x05, 0xa1, + 0x25, 0x68, 0x31, 0xe6, 0x89, 0xc8, 0xed, 0x99, 0xfc, 0xd3, 0x18, 0xc2, 0x5a, 0x51, 0x57, 0x65, + 0xc6, 0x8f, 0x61, 0x5d, 0x42, 0x4e, 0xc6, 0xbe, 0x7d, 0x22, 0xf2, 0xa4, 0x91, 0xd3, 0xff, 0xa3, + 0xc1, 0xb0, 0x4c, 0xa8, 0xa2, 0xf8, 0xba, 0x1e, 0xb8, 0xaa, 0x7d, 0xe8, 0x01, 0xf4, 0x19, 0x76, + 0x3d, 0x2b, 0x38, 0x3b, 0xa3, 0x84, 0x0d, 0x17, 0xb6, 0xb5, 0x9d, 0xb6, 0x09, 0x1c, 0xf4, 0xb5, + 0x80, 0xa0, 0x87, 0xb0, 0x64, 0xcb, 0x48, 0xb6, 0x22, 0x72, 0xe9, 0x8a, 0xcc, 0xee, 0x08, 0xc5, + 0xee, 0xd8, 0x49, 0x84, 0x4b, 0x30, 0x32, 0x60, 0xd1, 0x75, 0xde, 0x5a, 0xa2, 0xb4, 0x88, 0xc2, + 0xd0, 0x15, 0xdc, 0xfa, 0xae, 0xf3, 0xf6, 0x57, 0xae, 0x47, 0x4e, 0xdc, 0x77, 0xc4, 0x78, 0x09, + 0xf7, 0xa5, 0xf1, 0xcf, 0x7d, 0x3b, 0x22, 0x23, 0xe2, 0x33, 0xec, 0x1d, 0x06, 0xe1, 0xb8, 0x51, + 0x08, 0xdc, 0x83, 0x2e, 0x75, 
0x7d, 0x9b, 0x58, 0xbe, 0x2c, 0x50, 0x6d, 0xb3, 0x23, 0x7e, 0x1f, + 0x53, 0xe3, 0x00, 0x36, 0x6b, 0xf8, 0x2a, 0xcf, 0x7e, 0x00, 0x03, 0xa1, 0x98, 0x1d, 0xf8, 0x8c, + 0xf8, 0x4c, 0xf0, 0x1e, 0x98, 0x7d, 0x0e, 0x3b, 0x94, 0x20, 0xe3, 0x87, 0x80, 0x24, 0x8f, 0xaf, + 0x82, 0xd8, 0x6f, 0x96, 0x9a, 0x77, 0x61, 0x25, 0x47, 0xa2, 0x62, 0xe3, 0x73, 0x58, 0x95, 0xe0, + 0x6f, 0xfc, 0x51, 0x63, 0x5e, 0xeb, 0x70, 0xb7, 0x40, 0xa4, 0xb8, 0x3d, 0x49, 0x84, 0xe4, 0x9f, + 0x90, 0xa9, 0xcc, 0xd6, 0x12, 0x0d, 0xf2, 0xaf, 0x88, 0xa8, 0x42, 0x52, 0x61, 0x1c, 0x5d, 0x98, + 0x04, 0x3b, 0x81, 0xef, 0x8d, 0x1b, 0x57, 0xa1, 0x0a, 0x4a, 0xc5, 0xf7, 0x6f, 0x1a, 0x2c, 0x27, + 0xe5, 0xa9, 0xe1, 0x6d, 0x5e, 0x31, 0x9c, 0x5b, 0xb5, 0xe1, 0xdc, 0x9e, 0x84, 0xf3, 0x0e, 0x2c, + 0xd1, 0x20, 0x8e, 0x6c, 0x62, 0x39, 0x98, 0x61, 0xcb, 0x0f, 0x1c, 0xa2, 0xa2, 0xfd, 0xb6, 0x84, + 0x3f, 0xc3, 0x0c, 0x1f, 0x07, 0x0e, 0x31, 0x7e, 0x91, 0x5c, 0x76, 0x2e, 0x4a, 0x1e, 0xc2, 0xb2, + 0x87, 0x29, 0xb3, 0x70, 0x18, 0x12, 0xdf, 0xb1, 0x30, 0xe3, 0xa1, 0xa6, 0x89, 0x50, 0xbb, 0xcd, + 0x0f, 0x9e, 0x0a, 0xf8, 0x53, 0x76, 0x4c, 0x8d, 0x7f, 0x6a, 0x70, 0x87, 0xd3, 0xf2, 0xd0, 0x6e, + 0x64, 0xef, 0x12, 0xb4, 0xc8, 0x5b, 0xa6, 0x0c, 0xe5, 0x9f, 0x68, 0x1f, 0x56, 0x54, 0x0e, 0xb9, + 0x81, 0x3f, 0x49, 0xaf, 0x96, 0x20, 0x44, 0x93, 0xa3, 0x34, 0xc3, 0x1e, 0x40, 0x9f, 0xb2, 0x20, + 0x4c, 0xb2, 0xb5, 0x2d, 0xb3, 0x95, 0x83, 0x54, 0xb6, 0xe6, 0x7d, 0x3a, 0x5f, 0xe1, 0xd3, 0x81, + 0x4b, 0x2d, 0x62, 0x5b, 0x52, 0x2b, 0x91, 0xef, 0x5d, 0x13, 0x5c, 0x7a, 0x64, 0x4b, 0x6f, 0x18, + 0x3f, 0x82, 0xa5, 0x89, 0x55, 0xcd, 0x73, 0xe7, 0xbd, 0x96, 0x94, 0xc3, 0x17, 0xd8, 0xf5, 0x4e, + 0x88, 0xef, 0x90, 0xe8, 0x9a, 0x39, 0x8d, 0x1e, 0xc3, 0xaa, 0xeb, 0x78, 0xc4, 0x62, 0xee, 0x88, + 0x04, 0x31, 0xb3, 0x28, 0xb1, 0x03, 0xdf, 0xa1, 0x89, 0x7f, 0xf8, 0xd9, 0x0b, 0x79, 0x74, 0x22, + 0x4f, 0x8c, 0x3f, 0xa6, 0xb5, 0x35, 0xab, 0xc5, 0xa4, 0x43, 0xf0, 0x09, 0xe1, 0x0c, 0xcf, 0x09, + 0x76, 0x48, 0xa4, 0xcc, 0x18, 0x48, 0xe0, 0x6f, 0x04, 0x8c, 0x7b, 0x58, 0x21, 0x9d, 0x06, 0xce, + 0x58, 0x68, 0x34, 0x30, 0x41, 0x82, 0x0e, 0x02, 0x67, 0x2c, 0x8a, 0x1c, 0xb5, 0x44, 0x90, 0xd8, + 0xe7, 0xb1, 0x7f, 0x21, 0xb4, 0xe9, 0x9a, 0x7d, 0x97, 0xfe, 0x16, 0x53, 0x76, 0xc8, 0x41, 0xc6, + 0xdf, 0xb5, 0x24, 0xcb, 0xb8, 0x1a, 0x26, 0xb1, 0x89, 0x7b, 0xf9, 0x7f, 0x70, 0x07, 0xa7, 0x50, + 0xd9, 0x90, 0xeb, 0x14, 0x55, 0xc2, 0x20, 0x79, 0xa6, 0xde, 0x22, 0x71, 0x32, 0x49, 0xf2, 0xbc, + 0xe2, 0x2a, 0xc9, 0xbf, 0x4f, 0x8a, 0xec, 0x91, 0x7d, 0x72, 0x8e, 0x23, 0x87, 0xfe, 0x9a, 0xf8, + 0x24, 0xc2, 0xec, 0x46, 0x1e, 0x70, 0x63, 0x1b, 0xb6, 0xea, 0xb8, 0x2b, 0xf9, 0xdf, 0x25, 0x8f, + 0x47, 0x82, 0x61, 0x92, 0xd3, 0xd8, 0xf5, 0x9c, 0x1b, 0x11, 0xff, 0x65, 0xd1, 0xb8, 0x94, 0xb9, + 0x8a, 0x9f, 0x5d, 0x58, 0x8e, 0x04, 0x88, 0x59, 0x94, 0x23, 0xa4, 0xbd, 0xfb, 0xa2, 0x79, 0x47, + 0x1d, 0x08, 0x42, 0xde, 0xc3, 0xff, 0x23, 0x8d, 0x80, 0x84, 0xdb, 0x8d, 0x95, 0xc5, 0x0d, 0xe8, + 0x4d, 0xc4, 0xb7, 0x84, 0xf8, 0x2e, 0x55, 0x72, 0x79, 0x74, 0xda, 0x41, 0x38, 0xb6, 0x88, 0x2d, + 0xdf, 0x61, 0x71, 0xd5, 0x5d, 0xb3, 0xcf, 0x81, 0x47, 0xb6, 0x78, 0x86, 0xaf, 0x50, 0x23, 0xd3, + 0x68, 0xc8, 0x1b, 0xa1, 0x6e, 0xe3, 0x0d, 0x6c, 0xe4, 0x4f, 0x9b, 0x3f, 0x4f, 0xd7, 0x32, 0xd2, + 0xd8, 0x2a, 0x86, 0x41, 0xe1, 0x8d, 0xbb, 0x2c, 0xaa, 0xdd, 0xf8, 0x3d, 0xbf, 0x9e, 0x5e, 0x9b, + 0x45, 0x87, 0xe4, 0x9b, 0x82, 0x6f, 0x8b, 0x6a, 0x5f, 0xa1, 0x39, 0x98, 0x2e, 0xf8, 0x41, 0x31, + 0x74, 0x8b, 0x1d, 0xc4, 0x9f, 0xd3, 0xba, 0xa8, 0x30, 0xf8, 0xfb, 0xdd, 0xb8, 0x1e, 0x29, 0xb9, + 0xc2, 0x1d, 0x8b, 0x66, 0x47, 0x89, 0xe5, 0xc3, 0xa2, 
0x7a, 0x87, 0x64, 0xaf, 0xad, 0x7e, 0xe5, + 0xc6, 0xc2, 0x96, 0x1a, 0x0b, 0x93, 0x71, 0xf7, 0x82, 0x8c, 0x45, 0xac, 0xb5, 0xe5, 0xb8, 0xfb, + 0x25, 0x19, 0x1b, 0xc7, 0x85, 0x4c, 0x91, 0xaa, 0xa9, 0x9c, 0x43, 0xd0, 0xe6, 0x41, 0xaa, 0x4a, + 0xb5, 0xf8, 0x46, 0x9b, 0x00, 0x2e, 0xb5, 0x1c, 0x71, 0xe7, 0x52, 0xa9, 0xae, 0xd9, 0x73, 0x55, + 0x10, 0x38, 0xc6, 0x9f, 0x32, 0xa9, 0x77, 0xe0, 0x05, 0xa7, 0x37, 0x18, 0x95, 0x59, 0x2b, 0x5a, + 0x39, 0x2b, 0xb2, 0x73, 0x6f, 0x3b, 0x3f, 0xf7, 0x66, 0x92, 0x28, 0xab, 0x8e, 0xba, 0x99, 0x9f, + 0xc1, 0x06, 0x37, 0x58, 0x62, 0x88, 0x2e, 0xb9, 0xf9, 0x24, 0xf1, 0xef, 0x5b, 0x70, 0xbf, 0x9a, + 0xb8, 0xc9, 0x34, 0xf1, 0x73, 0xd0, 0xd3, 0x6e, 0x9d, 0x3f, 0x29, 0x94, 0xe1, 0x51, 0x98, 0x3e, + 0x2a, 0xf2, 0xed, 0x59, 0x57, 0xad, 0xfb, 0x8b, 0xe4, 0x3c, 0x79, 0x59, 0x4a, 0xad, 0x7e, 0xab, + 0xd4, 0xea, 0x73, 0x01, 0x0e, 0x66, 0x75, 0x02, 0x64, 0xef, 0xb2, 0xee, 0x60, 0x56, 0x27, 0x20, + 0x25, 0x16, 0x02, 0x64, 0xd4, 0xf4, 0x15, 0xbe, 0x10, 0xb0, 0x09, 0xa0, 0xda, 0x92, 0xd8, 0x4f, + 0x46, 0x97, 0x9e, 0x6c, 0x4a, 0x62, 0xbf, 0xb6, 0xbb, 0xea, 0xd4, 0x76, 0x57, 0xf9, 0xeb, 0xef, + 0x96, 0x5e, 0x88, 0x6f, 0x01, 0x9e, 0xb9, 0xf4, 0x42, 0x3a, 0x99, 0xb7, 0x73, 0x8e, 0x1b, 0xa9, + 0xd9, 0x97, 0x7f, 0x72, 0x08, 0xf6, 0x3c, 0xe5, 0x3a, 0xfe, 0xc9, 0xc3, 0x37, 0xa6, 0xc4, 0x51, + 0xde, 0x11, 0xdf, 0x1c, 0x76, 0x16, 0x11, 0xa2, 0x1c, 0x20, 0xbe, 0x8d, 0xbf, 0x68, 0xd0, 0xfb, + 0x8a, 0x8c, 0x14, 0xe7, 0x2d, 0x80, 0x57, 0x41, 0x14, 0xc4, 0xcc, 0xf5, 0x89, 0xec, 0x3e, 0xe7, + 0xcd, 0x0c, 0xe4, 0x7f, 0x97, 0x23, 0x52, 0x93, 0x78, 0x67, 0xca, 0x99, 0xe2, 0x9b, 0xc3, 0xce, + 0x09, 0x0e, 0x95, 0xff, 0xc4, 0x37, 0x5a, 0x85, 0x79, 0xca, 0xb0, 0x7d, 0x21, 0x9c, 0xd5, 0x36, + 0xe5, 0x8f, 0x27, 0xff, 0x5a, 0x83, 0x41, 0xb6, 0x5b, 0x40, 0xdf, 0x43, 0x3f, 0xb3, 0xa9, 0x42, + 0x1f, 0x95, 0x17, 0x52, 0xe5, 0xcd, 0x97, 0xfe, 0x83, 0x19, 0x58, 0x2a, 0x31, 0xe6, 0x90, 0x0f, + 0xcb, 0xa5, 0x75, 0x0f, 0xda, 0x2d, 0x53, 0xd7, 0x2d, 0x93, 0xf4, 0x47, 0x8d, 0x70, 0x53, 0x79, + 0x0c, 0x56, 0x2a, 0xf6, 0x37, 0xe8, 0xd3, 0x19, 0x5c, 0x72, 0x3b, 0x24, 0xfd, 0xb3, 0x86, 0xd8, + 0xa9, 0xd4, 0xd7, 0x80, 0xca, 0xcb, 0x1d, 0xf4, 0x68, 0x26, 0x9b, 0xc9, 0xf2, 0x48, 0xff, 0xb4, + 0x19, 0x72, 0xad, 0xa1, 0x72, 0xed, 0x33, 0xd3, 0xd0, 0xdc, 0x62, 0x69, 0xa6, 0xa1, 0x85, 0x5d, + 0xd2, 0x1c, 0xba, 0x80, 0xa5, 0xe2, 0x4a, 0x08, 0x3d, 0xac, 0x5b, 0x61, 0x96, 0x36, 0x4e, 0xfa, + 0x6e, 0x13, 0xd4, 0x54, 0x18, 0x81, 0xdb, 0xf9, 0xb5, 0x0d, 0xfa, 0xa4, 0x4c, 0x5f, 0xb9, 0x84, + 0xd2, 0x77, 0x66, 0x23, 0x66, 0x6d, 0x2a, 0xae, 0x72, 0xaa, 0x6c, 0xaa, 0xd9, 0x13, 0x55, 0xd9, + 0x54, 0xb7, 0x19, 0x32, 0xe6, 0xd0, 0xef, 0x93, 0xfd, 0x40, 0x61, 0xc5, 0x81, 0xf6, 0xea, 0xd8, + 0x54, 0xef, 0x58, 0xf4, 0xfd, 0xc6, 0xf8, 0x89, 0xec, 0xc7, 0x1a, 0xcf, 0xf5, 0xcc, 0xa6, 0xa3, + 0x2a, 0xd7, 0xcb, 0xbb, 0x93, 0xaa, 0x5c, 0xaf, 0x5a, 0x97, 0xcc, 0xa1, 0x53, 0x58, 0xcc, 0xed, + 0x3e, 0xd0, 0xc7, 0x75, 0x94, 0xf9, 0xa6, 0x49, 0xff, 0x64, 0x26, 0x5e, 0x2a, 0xc3, 0x4a, 0xaa, + 0x97, 0x2a, 0x57, 0xb5, 0xca, 0xe5, 0xeb, 0xd5, 0xc7, 0xb3, 0xd0, 0x72, 0xa9, 0x5c, 0xda, 0x90, + 0x54, 0xa6, 0x72, 0xdd, 0x06, 0xa6, 0x32, 0x95, 0xeb, 0x97, 0x2e, 0x73, 0xe8, 0x77, 0x00, 0x93, + 0x2d, 0x06, 0xfa, 0xb0, 0x8e, 0x3a, 0x7b, 0xfb, 0x1f, 0x4d, 0x47, 0x4a, 0x59, 0xbf, 0x81, 0xd5, + 0xaa, 0xe6, 0x02, 0x55, 0x24, 0xfe, 0x94, 0x0e, 0x46, 0xdf, 0x6b, 0x8a, 0x9e, 0x0a, 0xfe, 0x06, + 0xba, 0xc9, 0x06, 0x02, 0x7d, 0x50, 0xa6, 0x2e, 0xec, 0x5c, 0x74, 0x63, 0x1a, 0x4a, 0x26, 0x80, + 0x47, 0x49, 0xae, 0x4e, 0x56, 0x03, 0xf5, 0xb9, 0x5a, 0x5a, 0x62, 0xd4, 0xe7, 
0x6a, 0x79, 0xd3, + 0x20, 0xc4, 0xa5, 0xc1, 0x90, 0x9d, 0xa4, 0xeb, 0x83, 0xa1, 0x62, 0x51, 0x50, 0x1f, 0x0c, 0x95, + 0xc3, 0xf9, 0x1c, 0xfa, 0x03, 0xac, 0x55, 0x0f, 0xd0, 0xa8, 0x36, 0xe3, 0x6b, 0x06, 0x79, 0xfd, + 0x71, 0x73, 0x82, 0x54, 0xfc, 0xbb, 0xa4, 0x3e, 0x15, 0x06, 0xe8, 0xfa, 0xfa, 0x54, 0x3d, 0xc6, + 0xeb, 0xfb, 0x8d, 0xf1, 0xcb, 0xa9, 0x97, 0x9d, 0x54, 0xeb, 0xbd, 0x5d, 0x31, 0x94, 0xd7, 0x7b, + 0xbb, 0x72, 0xf8, 0x15, 0xf9, 0x51, 0x35, 0x85, 0x56, 0xe5, 0xc7, 0x94, 0x31, 0x59, 0xdf, 0x6b, + 0x8a, 0x9e, 0x7b, 0xbe, 0xcb, 0x63, 0x26, 0x9a, 0xa9, 0x7f, 0xae, 0x32, 0x7f, 0xd6, 0x10, 0xbb, + 0xfe, 0x76, 0x93, 0x4a, 0x3d, 0xd3, 0x80, 0x42, 0xc5, 0xde, 0x6f, 0x8c, 0x9f, 0xca, 0x0e, 0x93, + 0xdd, 0x72, 0x66, 0x44, 0x44, 0xbb, 0x33, 0xf8, 0x64, 0x46, 0x5c, 0xfd, 0x51, 0x23, 0xdc, 0xaa, + 0xec, 0xcd, 0x0e, 0x6d, 0xd3, 0xe2, 0xa9, 0x34, 0x69, 0x4e, 0x8b, 0xa7, 0x8a, 0x39, 0x70, 0xee, + 0x74, 0x41, 0xfc, 0x81, 0xfc, 0xf9, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x44, 0x1a, 0xc5, + 0x57, 0x1e, 0x00, 0x00, } diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index ac8235fd5..7353cdc91 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -1,6 +1,8 @@ package replication import ( + "context" + "fmt" "path/filepath" "strings" @@ -29,33 +31,38 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin } } -func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) error { +func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error { if !strings.HasPrefix(key, r.source.Dir) { glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) return nil } - newKey := filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):]) + newKey := filepath.ToSlash(filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])) glog.V(3).Infof("replicate %s => %s", key, newKey) key = newKey if message.OldEntry != nil && message.NewEntry == nil { glog.V(4).Infof("deleting %v", key) - return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks) + return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks) } if message.OldEntry == nil && message.NewEntry != nil { glog.V(4).Infof("creating %v", key) - return r.sink.CreateEntry(key, message.NewEntry) + return r.sink.CreateEntry(ctx, key, message.NewEntry) } if message.OldEntry == nil && message.NewEntry == nil { glog.V(0).Infof("weird message %+v", message) return nil } - foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewEntry, message.DeleteChunks) + foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks) if foundExisting { glog.V(4).Infof("updated %v", key) return err } + err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false) + if err != nil { + return fmt.Errorf("delete old entry %v: %v", key, err) + } + glog.V(4).Infof("creating missing %v", key) - return r.sink.CreateEntry(key, message.NewEntry) + return r.sink.CreateEntry(ctx, key, message.NewEntry) } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 7acf37fa5..6381908a1 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e return nil } -func (g *AzureSink) 
DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -78,8 +78,6 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo key = key + "/" } - ctx := context.Background() - if _, err := g.containerURL.NewBlobURL(key).Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) @@ -89,7 +87,7 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo } -func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -100,8 +98,6 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - ctx := context.Background() - // Create a URL that references a to-be-created blob in your // Azure Storage account's container. appendBlobURL := g.containerURL.NewAppendBlobURL(key) @@ -113,7 +109,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return err } @@ -136,7 +132,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *AzureSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 17f5e39b2..35c2230fa 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -58,7 +58,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { return nil } -func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -66,8 +66,6 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) key = key + "/" } - ctx := context.Background() - bucket, err := g.client.Bucket(ctx, g.bucket) if err != nil { return err @@ -79,7 +77,7 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) } -func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -90,8 +88,6 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - ctx := context.Background() - bucket, err := g.client.Bucket(ctx, g.bucket) if err != nil { return err @@ -102,7 +98,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { for _, chunk := range chunkViews { - fileUrl, err := 
g.filerSource.LookupFileId(chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return err } @@ -128,7 +124,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *B2Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index c14566723..97e9671a3 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -3,16 +3,18 @@ package filersink import ( "context" "fmt" + "google.golang.org/grpc" "strings" "sync" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { +func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { if len(sourceChunks) == 0 { return } @@ -21,7 +23,7 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic wg.Add(1) go func(chunk *filer_pb.FileChunk) { defer wg.Done() - replicatedChunk, e := fs.replicateOneChunk(chunk) + replicatedChunk, e := fs.replicateOneChunk(ctx, chunk) if e != nil { err = e } @@ -33,11 +35,11 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replic return } -func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { +func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { - fileId, err := fs.fetchAndWrite(sourceChunk) + fileId, err := fs.fetchAndWrite(ctx, sourceChunk) if err != nil { - return nil, fmt.Errorf("copy %s: %v", sourceChunk.FileId, err) + return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err) } return &filer_pb.FileChunk{ @@ -46,21 +48,22 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_ Size: sourceChunk.Size, Mtime: sourceChunk.Mtime, ETag: sourceChunk.ETag, - SourceFileId: sourceChunk.FileId, + SourceFileId: sourceChunk.GetFileIdString(), }, nil } -func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId string, err error) { +func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) { - filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.FileId) + filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString()) if err != nil { - return "", fmt.Errorf("read part %s: %v", sourceChunk.FileId, err) + return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err) } defer readCloser.Close() var host string + var auth security.EncodedJwt - if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -70,13 +73,13 
@@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri DataCenter: fs.dataCenter, } - resp, err := client.AssignVolume(context.Background(), request) + resp, err := client.AssignVolume(ctx, request) if err != nil { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } - fileId, host = resp.FileId, resp.Url + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) return nil }); err != nil { @@ -88,7 +91,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) uploadResult, err := operation.Upload(fileUrl, filename, readCloser, - "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, "") + "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) return "", fmt.Errorf("upload data: %v", err) @@ -101,17 +104,13 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri return } -func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(fs.grpcAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.grpcAddress, fs.grpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) } func volumeId(fileId string) string { diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 2e9cc86d1..f99c7fdf6 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -3,6 +3,9 @@ package filersink import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -13,13 +16,14 @@ import ( ) type FilerSink struct { - filerSource *source.FilerSource - grpcAddress string - dir string - replication string - collection string - ttlSec int32 - dataCenter string + filerSource *source.FilerSource + grpcAddress string + dir string + replication string + collection string + ttlSec int32 + dataCenter string + grpcDialOption grpc.DialOption } func init() { @@ -55,11 +59,12 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) + fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") return nil } -func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { + return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -70,7 +75,7 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo } glog.V(1).Infof("delete entry: %v", request) - _, err := 
client.DeleteEntry(context.Background(), request) + _, err := client.DeleteEntry(ctx, request) if err != nil { glog.V(0).Infof("delete entry %s: %v", key, err) return fmt.Errorf("delete entry %s: %v", key, err) @@ -80,12 +85,11 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo }) } -func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() - ctx := context.Background() // look up existing entry lookupRequest := &filer_pb.LookupDirectoryEntryRequest{ @@ -100,7 +104,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { } } - replicatedChunks, err := fs.replicateChunks(entry.Chunks) + replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks) if err != nil { glog.V(0).Infof("replicate entry chunks %s: %v", key, err) @@ -129,15 +133,13 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { }) } -func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { - - ctx := context.Background() +func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { dir, name := filer2.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -177,11 +179,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, // delete the chunks that are deleted from the source if deleteIncludeChunks { // remove the deleted chunks. 
Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks - existingEntry.Chunks = minusChunks(existingEntry.Chunks, deletedChunks) + existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks) } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(newChunks) + replicatedChunks, err := fs.replicateChunks(ctx, newChunks) if err != nil { return true, fmt.Errorf("replicte %s chunks error: %v", key, err) } @@ -189,10 +191,10 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, } // save updated meta data - return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ - Directory: dir, + Directory: newParentPath, Entry: existingEntry, } @@ -205,23 +207,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, } func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) { - deletedChunks = minusChunks(oldEntry.Chunks, newEntry.Chunks) - newChunks = minusChunks(newEntry.Chunks, oldEntry.Chunks) - return -} - -func minusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) { - for _, a := range as { - found := false - for _, b := range bs { - if a.FileId == b.FileId { - found = true - break - } - } - if !found { - delta = append(delta, a) - } - } + deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks) + newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks) return } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index c1beefc33..abd7c49b9 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str return nil } -func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { if isDirectory { key = key + "/" } - if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil { + if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil { return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err) } @@ -83,7 +83,7 @@ func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) } -func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { if entry.IsDirectory { return nil @@ -92,13 +92,11 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - ctx := context.Background() - wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return err } @@ -121,7 +119,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *GcsSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(ctx context.Context, key string, 
oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index 0a86139d3..dd54f0005 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -1,6 +1,7 @@ package sink import ( + "context" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" @@ -9,9 +10,9 @@ import ( type ReplicationSink interface { GetName() string Initialize(configuration util.Configuration) error - DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error - CreateEntry(key string, entry *filer_pb.Entry) error - UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error + CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error + UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) } diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 0a4e78318..d5cad3541 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -1,6 +1,7 @@ package S3Sink import ( + "context" "fmt" "strings" "sync" @@ -76,7 +77,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, buc return nil } -func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { +func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -88,7 +89,7 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b } -func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { +func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -111,7 +112,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { wg.Add(1) go func(chunk *filer2.ChunkView) { defer wg.Done() - if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { + if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr } else { parts = append(parts, part) @@ -125,11 +126,11 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { return err } - return s3sink.completeMultipartUpload(key, uploadId, parts) + return s3sink.completeMultipartUpload(ctx, key, uploadId, parts) } -func (s3sink *S3Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 5c4be7aee..0a190b27d 100644 --- 
a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -2,6 +2,7 @@ package S3Sink import ( "bytes" + "context" "fmt" "io" @@ -81,7 +82,7 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error { } // To complete multipart upload -func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.CompletedPart) error { +func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId string, parts []*s3.CompletedPart) error { input := &s3.CompleteMultipartUploadInput{ Bucket: aws.String(s3sink.bucket), Key: aws.String(key), @@ -102,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3. } // To upload a part -func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { +func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { var readSeeker io.ReadSeeker - readSeeker, err := s3sink.buildReadSeeker(chunk) + readSeeker, err := s3sink.buildReadSeeker(ctx, chunk) if err != nil { glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) @@ -155,8 +156,8 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou return err } -func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) { - fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId) +func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) { + fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId) if err != nil { return nil, err } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index efe71e706..d7b5ebc4d 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -3,6 +3,9 @@ package source import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" + "google.golang.org/grpc" "io" "net/http" "strings" @@ -17,8 +20,9 @@ type ReplicationSource interface { } type FilerSource struct { - grpcAddress string - Dir string + grpcAddress string + grpcDialOption grpc.DialOption + Dir string } func (fs *FilerSource) Initialize(configuration util.Configuration) error { @@ -31,19 +35,20 @@ func (fs *FilerSource) Initialize(configuration util.Configuration) error { func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { fs.grpcAddress = grpcAddress fs.Dir = dir + fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") return nil } -func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) { +func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) { vid2Locations := make(map[string]*filer_pb.Locations) vid := volumeId(part) - err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) - resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -72,9 +77,9 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl 
string, err error) { return } -func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { +func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { - fileUrl, err := fs.LookupFileId(part) + fileUrl, err := fs.LookupFileId(ctx, part) if err != nil { return "", nil, nil, err } @@ -84,17 +89,13 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(fs.grpcAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.grpcAddress, fs.grpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) } func volumeId(fileId string) string { diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go new file mode 100644 index 000000000..9c76e6918 --- /dev/null +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -0,0 +1,50 @@ +package sub + +import ( + "context" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + _ "gocloud.dev/pubsub/azuresb" + _ "gocloud.dev/pubsub/gcppubsub" + _ "gocloud.dev/pubsub/natspubsub" + _ "gocloud.dev/pubsub/rabbitpubsub" +) + +func init() { + NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{}) +} + +type GoCDKPubSubInput struct { + sub *pubsub.Subscription +} + +func (k *GoCDKPubSubInput) GetName() string { + return "gocdk_pub_sub" +} + +func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error { + subURL := config.GetString("sub_url") + glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL) + sub, err := pubsub.OpenSubscription(context.Background(), subURL) + if err != nil { + return err + } + k.sub = sub + return nil +} + +func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { + msg, err := k.sub.Receive(context.Background()) + key = msg.Metadata["key"] + message = &filer_pb.EventNotification{} + err = proto.Unmarshal(msg.Body, message) + if err != nil { + return "", nil, err + } + return key, message, nil +} diff --git a/weed/s3api/custom_types.go b/weed/s3api/custom_types.go new file mode 100644 index 000000000..569dfc3ac --- /dev/null +++ b/weed/s3api/custom_types.go @@ -0,0 +1,3 @@ +package s3api + +const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00" diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 73be496d9..4de1dda36 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,6 +1,8 @@ package s3api import ( + "context" + "encoding/xml" "fmt" "path/filepath" "strconv" @@ -16,14 +18,15 @@ import ( ) type InitiateMultipartUploadResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ 
InitiateMultipartUploadResult"` s3.CreateMultipartUploadOutput } -func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { uploadId, _ := uuid.NewV4() uploadIdString := uploadId.String() - if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { + if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { if entry.Extended == nil { entry.Extended = make(map[string][]byte) } @@ -34,9 +37,9 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp } output = &InitiateMultipartUploadResult{ - s3.CreateMultipartUploadOutput{ + CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{ Bucket: input.Bucket, - Key: input.Key, + Key: objectKey(input.Key), UploadId: aws.String(uploadIdString), }, } @@ -45,14 +48,15 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp } type CompleteMultipartUploadResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"` s3.CompleteMultipartUploadOutput } -func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId - entries, err := s3a.list(uploadDirectory, "", "", false, 0) + entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0) if err != nil { glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload @@ -65,7 +69,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { for _, chunk := range entry.Chunks { p := &filer_pb.FileChunk{ - FileId: chunk.FileId, + FileId: chunk.GetFileIdString(), Offset: offset, Size: chunk.Size, Mtime: chunk.Mtime, @@ -87,7 +91,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa } dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName) - err = s3a.mkFile(dirName, entryName, finalParts) + err = s3a.mkFile(ctx, dirName, entryName, finalParts) if err != nil { glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) @@ -95,29 +99,30 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa } output = &CompleteMultipartUploadResult{ - s3.CompleteMultipartUploadOutput{ + CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{ + Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)), Bucket: input.Bucket, ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""), - Key: input.Key, + Key: objectKey(input.Key), }, } - if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { + if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) } return } -func (s3a 
*S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { +func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { - exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) + exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) if err != nil { glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload } if exists { - err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) @@ -128,13 +133,14 @@ func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput } type ListMultipartUploadsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"` s3.ListMultipartUploadsOutput } -func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { +func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { output = &ListMultipartUploadsResult{ - s3.ListMultipartUploadsOutput{ + ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ Bucket: input.Bucket, Delimiter: input.Delimiter, EncodingType: input.EncodingType, @@ -144,7 +150,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput }, } - entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return @@ -154,7 +160,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput if entry.Extended != nil { key := entry.Extended["key"] output.Uploads = append(output.Uploads, &s3.MultipartUpload{ - Key: aws.String(string(key)), + Key: objectKey(aws.String(string(key))), UploadId: aws.String(entry.Name), }) } @@ -164,21 +170,22 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput } type ListPartsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"` s3.ListPartsOutput } -func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { +func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { output = &ListPartsResult{ - s3.ListPartsOutput{ + ListPartsOutput: s3.ListPartsOutput{ Bucket: input.Bucket, - Key: input.Key, + Key: objectKey(input.Key), UploadId: input.UploadId, MaxParts: input.MaxParts, // the maximum number of parts to return. 
PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive }, } - entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, + entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go new file mode 100644 index 000000000..835665dd6 --- /dev/null +++ b/weed/s3api/filer_multipart_test.go @@ -0,0 +1,26 @@ +package s3api + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "testing" +) + +func TestInitiateMultipartUploadResult(t *testing.T) { + + expected := ` +example-bucketexample-objectVXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA` + response := &InitiateMultipartUploadResult{ + CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{ + Bucket: aws.String("example-bucket"), + Key: aws.String("example-object"), + UploadId: aws.String("VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"), + }, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } + +} diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 40c5a3e26..84e3050cd 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -4,14 +4,15 @@ import ( "context" "fmt" "os" + "strings" "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { + return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: dirName, @@ -35,7 +36,7 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } @@ -43,8 +44,8 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun }) } -func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { + return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: fileName, @@ -65,7 +66,7 @@ func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chun } glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if _, err := client.CreateEntry(ctx, request); err != nil { return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } @@ -73,9 +74,9 @@ func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chun }) 
} -func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { +func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: parentDirectoryPath, @@ -86,7 +87,7 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl } glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(context.Background(), request) + resp, err := client.ListEntries(ctx, request) if err != nil { return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) } @@ -100,11 +101,9 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl } -func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { +func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() + return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: parentDirectoryPath, @@ -123,11 +122,9 @@ func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirec } -func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { +func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() + err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, @@ -147,3 +144,11 @@ func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isD return } + +func objectKey(key *string) *string { + if strings.HasPrefix(*key, "/") { + t := (*key)[1:] + return &t + } + return key +} \ No newline at end of file diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 1d319e354..492d94616 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -2,6 +2,7 @@ package s3api import ( "context" + "encoding/xml" "fmt" "math" "net/http" @@ -21,15 +22,16 @@ var ( ) type ListAllMyBucketsResult struct { - Buckets []*s3.Bucket `xml:"Buckets>Bucket"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` Owner *s3.Owner + Buckets []*s3.Bucket `xml:"Buckets>Bucket"` } func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { var response ListAllMyBucketsResult - entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32) + entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -63,7 +65,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) bucket := vars["bucket"] // create the folder for bucket, but lazily create actual 
collection - if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil { + if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil { writeErrorResponse(w, ErrInternalError, r.URL) return } @@ -76,9 +78,8 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() + ctx := context.Background() + err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -93,7 +94,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque return nil }) - err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -108,7 +109,9 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx := context.Background() + + err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: s3a.option.BucketsPath, @@ -116,7 +119,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request } glog.V(1).Infof("lookup bucket: %v", request) - if _, err := client.LookupDirectoryEntry(context.Background(), request); err != nil { + if _, err := client.LookupDirectoryEntry(ctx, request); err != nil { return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/s3api/s3api_bucket_handlers_test.go b/weed/s3api/s3api_bucket_handlers_test.go new file mode 100644 index 000000000..7ab04830b --- /dev/null +++ b/weed/s3api/s3api_bucket_handlers_test.go @@ -0,0 +1,39 @@ +package s3api + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestListBucketsHandler(t *testing.T) { + + expected := ` +2011-04-09T12:34:49Ztest12011-02-09T12:34:49Ztest2` + var response ListAllMyBucketsResult + + var buckets []*s3.Bucket + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test1"), + CreationDate: aws.Time(time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC)), + }) + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test2"), + CreationDate: aws.Time(time.Date(2011, 2, 9, 12, 34, 49, 0, time.UTC)), + }) + + response = ListAllMyBucketsResult{ + Owner: &s3.Owner{ + ID: aws.String(""), + DisplayName: aws.String(""), + }, + Buckets: buckets, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } +} diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 286398310..127be07e3 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -2,12 +2,14 @@ package s3api import ( "bytes" + "context" "encoding/base64" "encoding/xml" "fmt" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" "net/http" "net/url" "time" @@ -35,17 +37,13 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(fn 
func(filer_pb.SeaweedFilerClient) error) error { +func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err) - } - defer grpcConnection.Close() + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) } // If none of the http routes match respond with MethodNotAllowed diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 267d126c5..72a25e4a5 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -1,6 +1,7 @@ package s3api import ( + "context" "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -25,9 +26,9 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http bucket = vars["bucket"] object = vars["object"] - response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{ + response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), }) if errCode != ErrNone { @@ -50,9 +51,9 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ + response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) @@ -76,9 +77,9 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht // Get upload id. 
uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ + response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) @@ -111,7 +112,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht } } - response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ + response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{ Bucket: aws.String(bucket), Delimiter: aws.String(delimiter), EncodingType: aws.String(encodingType), @@ -148,9 +149,9 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re return } - response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ + response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), MaxParts: aws.Int64(int64(maxParts)), PartNumberMarker: aws.Int64(int64(partNumberMarker)), UploadId: aws.String(uploadID), @@ -174,8 +175,10 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ rAuthType := getRequestAuthType(r) + ctx := context.Background() + uploadID := r.URL.Query().Get("uploadId") - exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true) + exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true) if !exists { writeErrorResponse(w, ErrNoSuchUpload, r.URL) return @@ -197,8 +200,8 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ dataReader = newSignV4ChunkedReader(r) } - uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part", - s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1) + uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index d751a3b1d..4053913fb 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -7,10 +7,9 @@ import ( "net/url" "path/filepath" "strconv" + "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -46,7 +45,9 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ marker = startAfter } - response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) + ctx := context.Background() + + response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -64,6 +65,8 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ vars := mux.Vars(r) bucket := vars["bucket"] + ctx := context.Background() + originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query()) if maxKeys < 0 { @@ -75,7 +78,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ return } - response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(ctx, bucket, 
originalPrefix, maxKeys, marker) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -85,13 +88,16 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response *s3.ListObjectsOutput, err error) { +func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name dir, prefix := filepath.Split(originalPrefix) + if strings.HasPrefix(dir, "/") { + dir = dir[1:] + } // check filer - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), @@ -101,13 +107,13 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys InclusiveStartFrom: false, } - resp, err := client.ListEntries(context.Background(), request) + resp, err := client.ListEntries(ctx, request) if err != nil { return fmt.Errorf("list buckets: %v", err) } - var contents []*s3.Object - var commonPrefixes []*s3.CommonPrefix + var contents []ListEntry + var commonPrefixes []PrefixEntry var counter int var lastEntryName string var isTruncated bool @@ -119,37 +125,37 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys } lastEntryName = entry.Name if entry.IsDirectory { - commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{ - Prefix: aws.String(fmt.Sprintf("%s%s/", dir, entry.Name)), + commonPrefixes = append(commonPrefixes, PrefixEntry{ + Prefix: fmt.Sprintf("%s%s/", dir, entry.Name), }) } else { - contents = append(contents, &s3.Object{ - Key: aws.String(fmt.Sprintf("%s%s", dir, entry.Name)), - LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)), - ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""), - Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))), - Owner: &s3.Owner{ - ID: aws.String("bcaf161ca5fb16fd081034f"), - DisplayName: aws.String("webfile"), + contents = append(contents, ListEntry{ + Key: fmt.Sprintf("%s%s", dir, entry.Name), + LastModified: time.Unix(entry.Attributes.Mtime, 0), + ETag: "\"" + filer2.ETag(entry.Chunks) + "\"", + Size: int64(filer2.TotalSize(entry.Chunks)), + Owner: CanonicalUser{ + ID: fmt.Sprintf("%x", entry.Attributes.Uid), + DisplayName: entry.Attributes.UserName, }, - StorageClass: aws.String("STANDARD"), + StorageClass: "STANDARD", }) } } - response = &s3.ListObjectsOutput{ - Name: aws.String(bucket), - Prefix: aws.String(originalPrefix), - Marker: aws.String(marker), - NextMarker: aws.String(lastEntryName), - MaxKeys: aws.Int64(int64(maxKeys)), - Delimiter: aws.String("/"), - IsTruncated: aws.Bool(isTruncated), + response = ListBucketResult{ + Name: bucket, + Prefix: originalPrefix, + Marker: marker, + NextMarker: lastEntryName, + MaxKeys: maxKeys, + Delimiter: "/", + IsTruncated: isTruncated, Contents: contents, CommonPrefixes: commonPrefixes, } - glog.V(4).Infof("read directory: %v, found: %v", request, counter) + glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, response) return nil }) diff --git a/weed/s3api/s3api_objects_list_handlers_test.go b/weed/s3api/s3api_objects_list_handlers_test.go new file mode 100644 index 
000000000..7b87b32fb --- /dev/null +++ b/weed/s3api/s3api_objects_list_handlers_test.go @@ -0,0 +1,38 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestListObjectsHandler(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html + + expected := ` +test_container1000false1.zip"4397da7a7649e8085de9916c240e8166"123456765a011niqo39cdf8ec533ec3d1ccaafsa932STANDARD2011-04-09T12:34:49Z` + + response := ListBucketResult{ + Name: "test_container", + Prefix: "", + Marker: "", + NextMarker: "", + MaxKeys: 1000, + IsTruncated: false, + Contents: []ListEntry{{ + Key: "1.zip", + LastModified: time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC), + ETag: "\"4397da7a7649e8085de9916c240e8166\"", + Size: 1234567, + Owner: CanonicalUser{ + ID: "65a011niqo39cdf8ec533ec3d1ccaafsa932", + }, + StorageClass: "STANDARD", + }}, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } +} diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index db798a546..24458592d 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -8,6 +8,7 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" "github.com/gorilla/mux" + "google.golang.org/grpc" "net/http" ) @@ -16,6 +17,7 @@ type S3ApiServerOption struct { FilerGrpcAddress string DomainName string BucketsPath string + GrpcDialOption grpc.DialOption } type S3ApiServer struct { diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go index df07f3fea..573c09ede 100644 --- a/weed/s3api/s3api_xsd_generated.go +++ b/weed/s3api/s3api_xsd_generated.go @@ -25,8 +25,8 @@ type BucketLoggingStatus struct { } type CanonicalUser struct { - ID string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ID"` - DisplayName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DisplayName,omitempty"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` } type CopyObject struct { @@ -506,15 +506,15 @@ func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement) } type ListAllMyBucketsEntry struct { - Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` - CreationDate time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + Name string `xml:"Name"` + CreationDate time.Time `xml:"CreationDate"` } func (t *ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListAllMyBucketsEntry var layout struct { *T - CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + CreationDate *xsdDateTime `xml:"CreationDate"` } layout.T = (*T)(t) layout.CreationDate = (*xsdDateTime)(&layout.T.CreationDate) @@ -524,7 +524,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem type T ListAllMyBucketsEntry var overlay struct { *T - CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + CreationDate *xsdDateTime `xml:"CreationDate"` } overlay.T = (*T)(t) overlay.CreationDate = (*xsdDateTime)(&overlay.T.CreationDate) @@ -532,7 +532,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem } type ListAllMyBucketsList struct { - Bucket []ListAllMyBucketsEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket,omitempty"` + Bucket []ListAllMyBucketsEntry `xml:"Bucket,omitempty"` } type ListAllMyBucketsResponse struct { @@ -577,32 +577,33 @@ 
type ListBucketResponse struct { } type ListBucketResult struct { - Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` - Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` - Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"` - Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker"` - NextMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextMarker,omitempty"` - MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"` - Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"` - IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"` - Contents []ListEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Contents,omitempty"` - CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` + Metadata []MetadataEntry `xml:"Metadata,omitempty"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker,omitempty"` + MaxKeys int `xml:"MaxKeys"` + Delimiter string `xml:"Delimiter,omitempty"` + IsTruncated bool `xml:"IsTruncated"` + Contents []ListEntry `xml:"Contents,omitempty"` + CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` } type ListEntry struct { - Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` - LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` - ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` - Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"` - Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"` - StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"` + Key string `xml:"Key"` + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` + Size int64 `xml:"Size"` + Owner CanonicalUser `xml:"Owner,omitempty"` + StorageClass StorageClass `xml:"StorageClass"` } func (t *ListEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListEntry var layout struct { *T - LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + LastModified *xsdDateTime `xml:"LastModified"` } layout.T = (*T)(t) layout.LastModified = (*xsdDateTime)(&layout.T.LastModified) @@ -612,7 +613,7 @@ func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { type T ListEntry var overlay struct { *T - LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + LastModified *xsdDateTime `xml:"LastModified"` } overlay.T = (*T)(t) overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified) @@ -965,10 +966,10 @@ func (b xsdBase64Binary) MarshalText() ([]byte, error) { type xsdDateTime time.Time func (t *xsdDateTime) UnmarshalText(text []byte) error { - return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999") + return _unmarshalTime(text, (*time.Time)(t), s3TimeFormat) } func (t xsdDateTime) MarshalText() ([]byte, error) { - return []byte((time.Time)(t).Format("2006-01-02T15:04:05.999999999")), nil + return []byte((time.Time)(t).Format(s3TimeFormat)), nil } func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if (time.Time)(t).IsZero() { diff --git a/weed/security/guard.go b/weed/security/guard.go index dea3b12f2..17fe2ea9e 100644 --- a/weed/security/guard.go +++ 
b/weed/security/guard.go @@ -41,21 +41,30 @@ https://github.com/pkieltyka/jwtauth/blob/master/jwtauth.go */ type Guard struct { - whiteList []string - SecretKey Secret + whiteList []string + SigningKey SigningKey + ExpiresAfterSec int + ReadSigningKey SigningKey + ReadExpiresAfterSec int - isActive bool + isWriteActive bool } -func NewGuard(whiteList []string, secretKey string) *Guard { - g := &Guard{whiteList: whiteList, SecretKey: Secret(secretKey)} - g.isActive = len(g.whiteList) != 0 || len(g.SecretKey) != 0 +func NewGuard(whiteList []string, signingKey string, expiresAfterSec int, readSigningKey string, readExpiresAfterSec int) *Guard { + g := &Guard{ + whiteList: whiteList, + SigningKey: SigningKey(signingKey), + ExpiresAfterSec: expiresAfterSec, + ReadSigningKey: SigningKey(readSigningKey), + ReadExpiresAfterSec: readExpiresAfterSec, + } + g.isWriteActive = len(g.whiteList) != 0 || len(g.SigningKey) != 0 return g } func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { - if !g.isActive { - //if no security needed, just skip all checkings + if !g.isWriteActive { + //if no security needed, just skip all checking return f } return func(w http.ResponseWriter, r *http.Request) { @@ -67,20 +76,6 @@ func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w } } -func (g *Guard) Secure(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { - if !g.isActive { - //if no security needed, just skip all checkings - return f - } - return func(w http.ResponseWriter, r *http.Request) { - if err := g.checkJwt(w, r); err != nil { - w.WriteHeader(http.StatusUnauthorized) - return - } - f(w, r) - } -} - func GetActualRemoteHost(r *http.Request) (host string, err error) { host = r.Header.Get("HTTP_X_FORWARDED_FOR") if host == "" { @@ -130,33 +125,3 @@ func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error { glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr) return fmt.Errorf("Not in whitelis: %s", r.RemoteAddr) } - -func (g *Guard) checkJwt(w http.ResponseWriter, r *http.Request) error { - if g.checkWhiteList(w, r) == nil { - return nil - } - - if len(g.SecretKey) == 0 { - return nil - } - - tokenStr := GetJwt(r) - - if tokenStr == "" { - return ErrUnauthorized - } - - // Verify the token - token, err := DecodeJwt(g.SecretKey, tokenStr) - if err != nil { - glog.V(1).Infof("Token verification error from %s: %v", r.RemoteAddr, err) - return ErrUnauthorized - } - if !token.Valid { - glog.V(1).Infof("Token invliad from %s: %v", r.RemoteAddr, tokenStr) - return ErrUnauthorized - } - - glog.V(1).Infof("No permission from %s", r.RemoteAddr) - return fmt.Errorf("No write permisson from %s", r.RemoteAddr) -} diff --git a/weed/security/jwt.go b/weed/security/jwt.go index 46b7efaaf..0bd7fa974 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -1,9 +1,9 @@ package security import ( + "fmt" "net/http" "strings" - "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -11,21 +11,29 @@ import ( ) type EncodedJwt string -type Secret string +type SigningKey []byte -func GenJwt(secret Secret, fileId string) EncodedJwt { - if secret == "" { +type SeaweedFileIdClaims struct { + Fid string `json:"fid"` + jwt.StandardClaims +} + +func GenJwt(signingKey SigningKey, expiresAfterSec int, fileId string) EncodedJwt { + if len(signingKey) == 0 { return "" } - t := jwt.New(jwt.GetSigningMethod("HS256")) - t.Claims = &jwt.StandardClaims{ - ExpiresAt: 
time.Now().Add(time.Second * 10).Unix(), - Subject: fileId, + claims := SeaweedFileIdClaims{ + fileId, + jwt.StandardClaims{}, } - encoded, e := t.SignedString(secret) + if expiresAfterSec > 0 { + claims.ExpiresAt = time.Now().Add(time.Second * time.Duration(expiresAfterSec)).Unix() + } + t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + encoded, e := t.SignedString([]byte(signingKey)) if e != nil { - glog.V(0).Infof("Failed to sign claims: %v", t.Claims) + glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e) return "" } return EncodedJwt(encoded) @@ -44,31 +52,15 @@ func GetJwt(r *http.Request) EncodedJwt { } } - // Get token from cookie - if tokenStr == "" { - cookie, err := r.Cookie("jwt") - if err == nil { - tokenStr = cookie.Value - } - } - return EncodedJwt(tokenStr) } -func EncodeJwt(secret Secret, claims *jwt.StandardClaims) (EncodedJwt, error) { - if secret == "" { - return "", nil - } - - t := jwt.New(jwt.GetSigningMethod("HS256")) - t.Claims = claims - encoded, e := t.SignedString(secret) - return EncodedJwt(encoded), e -} - -func DecodeJwt(secret Secret, tokenString EncodedJwt) (token *jwt.Token, err error) { +func DecodeJwt(signingKey SigningKey, tokenString EncodedJwt) (token *jwt.Token, err error) { // check exp, nbf - return jwt.Parse(string(tokenString), func(token *jwt.Token) (interface{}, error) { - return secret, nil + return jwt.ParseWithClaims(string(tokenString), &SeaweedFileIdClaims{}, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unknown token method") + } + return []byte(signingKey), nil }) } diff --git a/weed/security/tls.go b/weed/security/tls.go new file mode 100644 index 000000000..e81ba4831 --- /dev/null +++ b/weed/security/tls.go @@ -0,0 +1,66 @@ +package security + +import ( + "crypto/tls" + "crypto/x509" + "github.com/spf13/viper" + "io/ioutil" + + "github.com/chrislusf/seaweedfs/weed/glog" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { + if config == nil { + return nil + } + + // load cert/key, ca cert + cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + if err != nil { + glog.Errorf("load cert/key error: %v", err) + return nil + } + caCert, err := ioutil.ReadFile(config.GetString("ca")) + if err != nil { + glog.Errorf("read ca cert file error: %v", err) + return nil + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + ta := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + }) + + return grpc.Creds(ta) +} + +func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { + if config == nil { + return grpc.WithInsecure() + } + + // load cert/key, cacert + cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + if err != nil { + glog.Errorf("load cert/key error: %v", err) + return grpc.WithInsecure() + } + caCert, err := ioutil.ReadFile(config.GetString("ca")) + if err != nil { + glog.Errorf("read ca cert file error: %v", err) + return grpc.WithInsecure() + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + ta := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + InsecureSkipVerify: true, + }) + return grpc.WithTransportCredentials(ta) +} 
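Note on the security changes above: the reworked security package drops the old Secret/checkJwt flow in favor of a typed SigningKey, a SeaweedFileIdClaims payload carrying the file id, a configurable expiry, and a DecodeJwt that rejects tokens not signed with an HMAC method. A minimal sketch of the round trip, assuming the exported GenJwt/DecodeJwt signatures shown in this diff; the signing key and file id below are made-up placeholders (in the servers the key comes from the jwt.signing.key setting, with a 10-second default expiry):

    package main

    import (
        "fmt"

        "github.com/chrislusf/seaweedfs/weed/security"
    )

    func main() {
        // Placeholder key; the master reads the real value from configuration.
        signingKey := security.SigningKey("example-signing-key")

        // Master side: sign a file id into a short-lived write token.
        token := security.GenJwt(signingKey, 10, "3,01637037d6")

        // Volume server side: verify the signature and recover the claims.
        parsed, err := security.DecodeJwt(signingKey, token)
        if err != nil || !parsed.Valid {
            fmt.Println("rejected:", err)
            return
        }
        claims, ok := parsed.Claims.(*security.SeaweedFileIdClaims)
        if !ok {
            fmt.Println("unexpected claims type")
            return
        }
        fmt.Println("authorized write to fid:", claims.Fid)
    }

The same token is returned to clients in the Assign response's Auth field and forwarded to volume servers as an "Authorization: BEARER" header, which is how the filer upload paths later in this diff consume it.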
diff --git a/weed/server/common.go b/weed/server/common.go index d88abfdc8..e02ab38a6 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -11,11 +11,12 @@ import ( "strings" "time" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" _ "github.com/chrislusf/seaweedfs/weed/statik" @@ -82,8 +83,7 @@ func debug(params ...interface{}) { glog.V(4).Infoln(params...) } -func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string) { - jwt := security.GetJwt(r) +func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) { m := make(map[string]interface{}) if r.Method != "POST" { writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!")) @@ -91,7 +91,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := storage.ParseUpload(r) + fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return @@ -113,7 +113,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st Collection: r.FormValue("collection"), Ttl: r.FormValue("ttl"), } - assignResult, ae := operation.Assign(masterUrl, ar) + assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar) if ae != nil { writeJsonError(w, r, http.StatusInternalServerError, ae) return @@ -125,7 +125,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, jwt) + uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return diff --git a/weed/server/common_test.go b/weed/server/common_test.go new file mode 100644 index 000000000..2e6c70bfe --- /dev/null +++ b/weed/server/common_test.go @@ -0,0 +1,31 @@ +package weed_server + +import ( + "strings" + "testing" +) + +func TestParseURL(t *testing.T) { + if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684"); true { + if vid != "1" { + t.Errorf("fail to parse vid: %s", vid) + } + if fid != "06dfa8a684" { + t.Errorf("fail to parse fid: %s", fid) + } + } + if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684_1"); true { + if vid != "1" { + t.Errorf("fail to parse vid: %s", vid) + } + if fid != "06dfa8a684_1" { + t.Errorf("fail to parse fid: %s", fid) + } + if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 { + fid = fid[:sepIndex] + } + if fid != "06dfa8a684" { + t.Errorf("fail to parse fid: %s", fid) + } + } +} diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 06589e3c6..8eea2441e 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -14,12 +14,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - 
"github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name))) + entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) if err != nil { return nil, fmt.Errorf("%s not found under %s: %v", req.Name, req.Directory, err) } @@ -45,7 +44,7 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(req.Directory), lastFileName, includeLastFile, 1024) + entries, err := fs.filer.ListDirectoryEntries(ctx, filer2.FullPath(req.Directory), lastFileName, includeLastFile, 1024) if err != nil { return nil, err } @@ -112,22 +111,21 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { - fullpath := filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)) + fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) - fs.filer.DeleteChunks(garbages) - if req.Entry.Attributes == nil { return nil, fmt.Errorf("can not create entry with empty attributes") } - err = fs.filer.CreateEntry(&filer2.Entry{ + err = fs.filer.CreateEntry(ctx, &filer2.Entry{ FullPath: fullpath, Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), Chunks: chunks, }) if err == nil { + fs.filer.DeleteChunks(fullpath, garbages) } return &filer_pb.CreateEntryResponse{}, err @@ -135,19 +133,19 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { - fullpath := filepath.Join(req.Directory, req.Entry.Name) - entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath)) + fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)) + entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(fullpath)) if err != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) } // remove old chunks if not included in the new ones - unusedChunks := filer2.FindUnusedFileChunks(entry.Chunks, req.Entry.Chunks) + unusedChunks := filer2.MinusChunks(entry.Chunks, req.Entry.Chunks) chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) newEntry := &filer2.Entry{ - FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)), + FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), Attr: entry.Attr, Chunks: chunks, } @@ -175,9 +173,9 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr return &filer_pb.UpdateEntryResponse{}, err } - if err = fs.filer.UpdateEntry(entry, newEntry); err == nil { - fs.filer.DeleteChunks(unusedChunks) - fs.filer.DeleteChunks(garbages) + if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { + fs.filer.DeleteChunks(entry.FullPath, unusedChunks) + fs.filer.DeleteChunks(entry.FullPath, garbages) } fs.filer.NotifyUpdateEvent(entry, newEntry, true) @@ -186,7 +184,7 @@ func (fs *FilerServer) UpdateEntry(ctx 
context.Context, req *filer_pb.UpdateEntr } func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsRecursive, req.IsDeleteData) + err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IsDeleteData) return &filer_pb.DeleteEntryResponse{}, err } @@ -220,7 +218,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol DataCenter: "", } } - assignResult, err := operation.Assign(fs.filer.GetMaster(), assignRequest, altRequest) + assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { return nil, fmt.Errorf("assign volume: %v", err) } @@ -233,14 +231,18 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol Count: int32(assignResult.Count), Url: assignResult.Url, PublicUrl: assignResult.PublicUrl, + Auth: string(assignResult.Auth), }, err } func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - for _, master := range fs.option.Masters { - _, err = util.Get(fmt.Sprintf("http://%s/col/delete?collection=%s", master, req.Collection)) - } + err = fs.filer.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + Name: req.GetCollection(), + }) + return err + }) return &filer_pb.DeleteCollectionResponse{}, err } @@ -253,7 +255,7 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR Ttl: req.Ttl, } - output, err := operation.Statistics(fs.filer.GetMaster(), input) + output, err := operation.Statistics(fs.filer.GetMaster(), fs.grpcDialOption, input) if err != nil { return nil, err } @@ -264,3 +266,13 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR FileCount: output.FileCount, }, nil } + +func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) { + + return &filer_pb.GetFilerConfigurationResponse{ + Masters: fs.option.Masters, + Collection: fs.option.Collection, + Replication: fs.option.DefaultReplication, + MaxMb: uint32(fs.option.MaxMB), + }, nil +} diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go new file mode 100644 index 000000000..7142f7606 --- /dev/null +++ b/weed/server/filer_grpc_server_rename.go @@ -0,0 +1,130 @@ +package weed_server + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "path/filepath" +) + +func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { + + glog.V(1).Infof("AtomicRenameEntry %v", req) + + ctx, err := fs.filer.BeginTransaction(ctx) + if err != nil { + return nil, err + } + + oldParent := filer2.FullPath(filepath.ToSlash(req.OldDirectory)) + + oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName)) + if err != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err) + } + + 
var events MoveEvents + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events) + if moveErr != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, err) + } else { + if commitError := fs.filer.CommitTransaction(ctx); commitError != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, err) + } + } + + for _, entry := range events.newEntries { + fs.filer.NotifyUpdateEvent(nil, entry, false) + } + for _, entry := range events.oldEntries { + fs.filer.NotifyUpdateEvent(entry, nil, false) + } + + return &filer_pb.AtomicRenameEntryResponse{}, nil +} + +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { + if entry.IsDirectory() { + if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil { + return err + } + } + return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events) +} + +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { + + currentDirPath := oldParent.Child(entry.Name()) + newDirPath := newParent.Child(newName) + + glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath) + + lastFileName := "" + includeLastFile := false + for { + + entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) + if err != nil { + return err + } + + println("found", len(entries), "entries under", currentDirPath) + + for _, item := range entries { + lastFileName = item.Name() + println("processing", lastFileName) + err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events) + if err != nil { + return err + } + } + if len(entries) < 1024 { + break + } + } + return nil +} + +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { + + oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) + + glog.V(1).Infof("moving entry %s => %s", oldPath, newPath) + + if oldPath == newPath { + glog.V(1).Infof("skip moving entry %s => %s", oldPath, newPath) + return nil + } + + // add to new directory + newEntry := &filer2.Entry{ + FullPath: newPath, + Attr: entry.Attr, + Chunks: entry.Chunks, + } + createErr := fs.filer.CreateEntry(ctx, newEntry) + if createErr != nil { + return createErr + } + + // delete old entry + deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false) + if deleteErr != nil { + return deleteErr + } + + events.oldEntries = append(events.oldEntries, entry) + events.newEntries = append(events.newEntries, newEntry) + return nil + +} + +type MoveEvents struct { + oldEntries []*filer2.Entry + newEntries []*filer2.Entry +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 9d70e4dac..b9e6aa23d 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -1,12 +1,22 @@ package weed_server import ( + "context" + "fmt" "net/http" "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + 
"github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/filer2" _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" + _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2" _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb" _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" @@ -14,6 +24,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" + _ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub" _ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub" _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" @@ -28,85 +39,93 @@ type FilerOption struct { RedirectOnRead bool DisableDirListing bool MaxMB int - SecretKey string DirListingLimit int DataCenter string DefaultLevelDbDir string + DisableHttp bool + Port int } type FilerServer struct { - option *FilerOption - secret security.Secret - filer *filer2.Filer + option *FilerOption + secret security.SigningKey + filer *filer2.Filer + grpcDialOption grpc.DialOption } func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) { fs = &FilerServer{ - option: option, + option: option, + grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), } if len(option.Masters) == 0 { glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters) + fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) go fs.filer.KeepConnectedToMaster() v := viper.GetViper() - if !LoadConfiguration("filer", false) { - v.Set("leveldb.enabled", true) - v.Set("leveldb.dir", option.DefaultLevelDbDir) + if !util.LoadConfiguration("filer", false) { + v.Set("leveldb2.enabled", true) + v.Set("leveldb2.dir", option.DefaultLevelDbDir) _, err := os.Stat(option.DefaultLevelDbDir) if os.IsNotExist(err) { os.MkdirAll(option.DefaultLevelDbDir, 0755) } } - LoadConfiguration("notification", false) + util.LoadConfiguration("notification", false) fs.filer.LoadConfiguration(v) notification.LoadConfiguration(v.Sub("notification")) handleStaticResources(defaultMux) - defaultMux.HandleFunc("/", fs.filerHandler) + if !option.DisableHttp { + defaultMux.HandleFunc("/", fs.filerHandler) + } if defaultMux != readonlyMux { readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } + maybeStartMetrics(fs, option) + return fs, nil } -func (fs *FilerServer) jwt(fileId string) security.EncodedJwt { - return security.GenJwt(fs.secret, fileId) -} - -func LoadConfiguration(configFileName string, required bool) (loaded bool) { - - // find a filer store - viper.SetConfigName(configFileName) // name of config file (without extension) - viper.AddConfigPath(".") // optionally look for config in the working directory - viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths - viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in - - glog.V(0).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) - - if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file - glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) - if required { - glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ - "\n\nPlease 
follow this example and add a filer.toml file to "+ - "current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n"+ - " https://github.com/chrislusf/seaweedfs/blob/master/weed/%s.toml\n"+ - "\nOr use this command to generate the default toml file\n"+ - " weed scaffold -config=%s -output=.\n\n\n", - configFileName, configFileName, configFileName) +func maybeStartMetrics(fs *FilerServer, option *FilerOption) { + isConnected := false + var metricsAddress string + var metricsIntervalSec int + var readErr error + for !isConnected { + metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, option.Masters[0]) + if readErr == nil { + isConnected = true } else { - return false + time.Sleep(7 * time.Second) } } - - return true - + if metricsAddress == "" && metricsIntervalSec <= 0 { + return + } + go stats.LoopPushingMetric("filer", stats.SourceName(option.Port), stats.FilerGather, + func() (addr string, intervalSeconds int) { + return metricsAddress, metricsIntervalSec + }) +} + +func readFilerConfiguration(grpcDialOption grpc.DialOption, masterGrpcAddress string) (metricsAddress string, metricsIntervalSec int, err error) { + err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", masterGrpcAddress, err) + } + metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) + return nil + }) + return } diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index d76d7df8c..b6bfc3b04 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -2,28 +2,47 @@ package weed_server import ( "net/http" + "time" + + "github.com/chrislusf/seaweedfs/weed/stats" ) func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { + start := time.Now() switch r.Method { case "GET": + stats.FilerRequestCounter.WithLabelValues("get").Inc() fs.GetOrHeadHandler(w, r, true) + stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) case "HEAD": + stats.FilerRequestCounter.WithLabelValues("head").Inc() fs.GetOrHeadHandler(w, r, false) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) case "DELETE": + stats.FilerRequestCounter.WithLabelValues("delete").Inc() fs.DeleteHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) case "PUT": + stats.FilerRequestCounter.WithLabelValues("put").Inc() fs.PostHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) case "POST": + stats.FilerRequestCounter.WithLabelValues("post").Inc() fs.PostHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) } } func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) { + start := time.Now() switch r.Method { case "GET": + stats.FilerRequestCounter.WithLabelValues("get").Inc() fs.GetOrHeadHandler(w, r, true) + stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) case "HEAD": + stats.FilerRequestCounter.WithLabelValues("head").Inc() fs.GetOrHeadHandler(w, r, false) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) } } diff --git 
a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 226de640c..0edf501a8 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -1,7 +1,9 @@ package weed_server import ( + "context" "io" + "io/ioutil" "mime" "mime/multipart" "net/http" @@ -12,22 +14,27 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) { + path := r.URL.Path - if strings.HasSuffix(path, "/") && len(path) > 1 { + isForDirectory := strings.HasSuffix(path, "/") + if isForDirectory && len(path) > 1 { path = path[:len(path)-1] } - entry, err := fs.filer.FindEntry(filer2.FullPath(path)) + entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path)) if err != nil { if path == "/" { fs.listDirectoryHandler(w, r) return } glog.V(1).Infof("Not found %s: %v", path, err) + + stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc() w.WriteHeader(http.StatusNotFound) return } @@ -41,8 +48,14 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, return } + if isForDirectory { + w.WriteHeader(http.StatusNotFound) + return + } + if len(entry.Chunks) == 0 { glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr) + stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc() w.WriteHeader(http.StatusNoContent) return } @@ -51,6 +64,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) + setEtag(w, filer2.ETag(entry.Chunks)) return } @@ -65,7 +79,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - fileId := entry.Chunks[0].FileId + fileId := entry.Chunks[0].GetFileIdString() urlString, err := fs.filer.MasterClient.LookupFileId(fileId) if err != nil { @@ -75,6 +89,7 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, } if fs.option.RedirectOnRead { + stats.FilerRequestCounter.WithLabelValues("redirect").Inc() http.Redirect(w, r, urlString, http.StatusFound) return } @@ -105,17 +120,23 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, writeJsonError(w, r, http.StatusInternalServerError, do_err) return } - defer resp.Body.Close() + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() for k, v := range resp.Header { w.Header()[k] = v } + if entry.Attr.Mime != "" { + w.Header().Set("Content-Type", entry.Attr.Mime) + } w.WriteHeader(resp.StatusCode) io.Copy(w, resp.Body) } func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - mimeType := entry.Mime + mimeType := entry.Attr.Mime if mimeType == "" { if ext := path.Ext(entry.Name()); ext != "" { mimeType = mime.TypeByExtension(ext) @@ -222,31 +243,6 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error { - chunkViews := filer2.ViewFromChunks(entry.Chunks, offset, size) - - 
fileId2Url := make(map[string]string) - - for _, chunkView := range chunkViews { - - urlString, err := fs.filer.MasterClient.LookupFileId(chunkView.FileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) - return err - } - fileId2Url[chunkView.FileId] = urlString - } - - for _, chunkView := range chunkViews { - urlString := fileId2Url[chunkView.FileId] - _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) { - w.Write(data) - }) - if err != nil { - glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) - return err - } - } - - return nil + return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size) } diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index bcf7f0eb5..87e864559 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -1,6 +1,7 @@ package weed_server import ( + "context" "net/http" "strconv" "strings" @@ -8,6 +9,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui" + "github.com/chrislusf/seaweedfs/weed/stats" ) // listDirectoryHandler lists directories and folers under a directory @@ -15,6 +17,9 @@ import ( // sub directories are listed on the first page, when "lastFileName" // is empty. func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) { + + stats.FilerRequestCounter.WithLabelValues("list").Inc() + path := r.URL.Path if strings.HasSuffix(path, "/") && len(path) > 1 { path = path[:len(path)-1] @@ -27,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName := r.FormValue("lastFileName") - entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(path), lastFileName, false, limit) + entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 32f481e74..0bf1218ce 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -1,11 +1,17 @@ package weed_server import ( + "context" "encoding/json" "errors" + "fmt" + "io" "io/ioutil" + "mime" "net/http" "net/url" + "os" + filenamePath "path" "strconv" "strings" "time" @@ -14,8 +20,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "os" ) var ( @@ -31,7 +38,12 @@ type FilerPostResult struct { Url string `json:"url,omitempty"` } -func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, err error) { +func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, auth security.EncodedJwt, err error) { + + stats.FilerRequestCounter.WithLabelValues("assign").Inc() + start := time.Now() + defer func() { 
stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }() + ar := &operation.VolumeAssignRequest{ Count: 1, Replication: replication, @@ -50,7 +62,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, } } - assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar, altRequest) + assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest) if ae != nil { glog.Errorf("failing to assign a file id: %v", ae) writeJsonError(w, r, http.StatusInternalServerError, ae) @@ -59,11 +71,14 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, } fileId = assignResult.Fid urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid + auth = assignResult.Auth return } func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + query := r.URL.Query() replication := query.Get("replication") if replication == "" { @@ -78,11 +93,11 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { dataCenter = fs.option.DataCenter } - if autoChunked := fs.autoChunk(w, r, replication, collection, dataCenter); autoChunked { + if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter); autoChunked { return } - fileId, urlLocation, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) if err != nil || fileId == "" || urlLocation == "" { glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) @@ -103,70 +118,47 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { } glog.V(4).Infoln("post to", u) - // send request to volume server - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - resp, do_err := util.Do(request) - if do_err != nil { - glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, do_err, r.Method) - writeJsonError(w, r, http.StatusInternalServerError, do_err) - return - } - defer resp.Body.Close() - etag := resp.Header.Get("ETag") - resp_body, ra_err := ioutil.ReadAll(resp.Body) - if ra_err != nil { - glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, ra_err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, ra_err) - return - } - glog.V(4).Infoln("post result", string(resp_body)) - var ret operation.UploadResult - unmarshal_err := json.Unmarshal(resp_body, &ret) - if unmarshal_err != nil { - glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(resp_body)) - writeJsonError(w, r, http.StatusInternalServerError, unmarshal_err) - return - } - if ret.Error != "" { - glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error) - writeJsonError(w, r, http.StatusInternalServerError, errors.New(ret.Error)) + ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId) + if err != nil { return } - // find correct final path + if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId); err != nil { + return + } + + // send back post result + reply := FilerPostResult{ + Name: ret.Name, + Size: ret.Size, + Error: ret.Error, + Fid: fileId, + Url: urlLocation, + } + setEtag(w, ret.ETag) + writeJsonQuiet(w, r, 
http.StatusCreated, reply) +} + +// update metadata in filer store +func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, + replication string, collection string, ret operation.UploadResult, fileId string) (err error) { + + stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds()) + }() + path := r.URL.Path if strings.HasSuffix(path, "/") { if ret.Name != "" { path += ret.Name - } else { - fs.filer.DeleteFileByFileId(fileId) - glog.V(0).Infoln("Can not to write to folder", path, "without a file name!") - writeJsonError(w, r, http.StatusInternalServerError, - errors.New("Can not to write to folder "+path+" without a file name")) - return } } - - // update metadata in filer store - existingEntry, err := fs.filer.FindEntry(filer2.FullPath(path)) + existingEntry, err := fs.filer.FindEntry(ctx, filer2.FullPath(path)) crTime := time.Now() if err == nil && existingEntry != nil { - // glog.V(4).Infof("existing %s => %+v", path, existingEntry) - if existingEntry.IsDirectory() { - path += "/" + ret.Name - } else { - crTime = existingEntry.Crtime - } + crTime = existingEntry.Crtime } entry := &filer2.Entry{ FullPath: filer2.FullPath(path), @@ -184,27 +176,95 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { FileId: fileId, Size: uint64(ret.Size), Mtime: time.Now().UnixNano(), - ETag: etag, + ETag: ret.ETag, }}, } + if ext := filenamePath.Ext(path); ext != "" { + entry.Attr.Mime = mime.TypeByExtension(ext) + } // glog.V(4).Infof("saving %s => %+v", path, entry) - if db_err := fs.filer.CreateEntry(entry); db_err != nil { - fs.filer.DeleteFileByFileId(fileId) - glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err) - writeJsonError(w, r, http.StatusInternalServerError, db_err) + if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { + fs.filer.DeleteChunks(entry.FullPath, entry.Chunks) + glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, dbErr) + err = dbErr return } - // send back post result - reply := FilerPostResult{ - Name: ret.Name, - Size: ret.Size, - Error: ret.Error, - Fid: fileId, - Url: urlLocation, + return nil +} + +// send request to volume server +func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret operation.UploadResult, err error) { + + stats.FilerRequestCounter.WithLabelValues("postUpload").Inc() + start := time.Now() + defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() + + request := &http.Request{ + Method: r.Method, + URL: u, + Proto: r.Proto, + ProtoMajor: r.ProtoMajor, + ProtoMinor: r.ProtoMinor, + Header: r.Header, + Body: r.Body, + Host: r.Host, + ContentLength: r.ContentLength, } - setEtag(w, etag) - writeJsonQuiet(w, r, http.StatusCreated, reply) + if auth != "" { + request.Header.Set("Authorization", "BEARER "+string(auth)) + } + resp, doErr := util.Do(request) + if doErr != nil { + glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, doErr, r.Method) + writeJsonError(w, r, http.StatusInternalServerError, doErr) + err = doErr + return + } + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + etag := resp.Header.Get("ETag") + respBody, raErr := 
ioutil.ReadAll(resp.Body) + if raErr != nil { + glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error()) + writeJsonError(w, r, http.StatusInternalServerError, raErr) + err = raErr + return + } + glog.V(4).Infoln("post result", string(respBody)) + unmarshalErr := json.Unmarshal(respBody, &ret) + if unmarshalErr != nil { + glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(respBody)) + writeJsonError(w, r, http.StatusInternalServerError, unmarshalErr) + err = unmarshalErr + return + } + if ret.Error != "" { + err = errors.New(ret.Error) + glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error) + writeJsonError(w, r, http.StatusInternalServerError, err) + return + } + // find correct final path + path := r.URL.Path + if strings.HasSuffix(path, "/") { + if ret.Name != "" { + path += ret.Name + } else { + err = fmt.Errorf("can not to write to folder %s without a file name", path) + fs.filer.DeleteFileByFileId(fileId) + glog.V(0).Infoln("Can not to write to folder", path, "without a file name!") + writeJsonError(w, r, http.StatusInternalServerError, err) + return + } + } + if etag != "" { + ret.ETag = etag + } + return } // curl -X DELETE http://localhost:8888/path/to @@ -213,7 +273,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { isRecursive := r.FormValue("recursive") == "true" - err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), isRecursive, true) + err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, true) if err != nil { glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) writeJsonError(w, r, http.StatusInternalServerError, err) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 4b1745aaa..492b55943 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -2,6 +2,7 @@ package weed_server import ( "bytes" + "context" "io" "io/ioutil" "net/http" @@ -14,10 +15,13 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string) bool { +func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, + replication string, collection string, dataCenter string) bool { if r.Method != "POST" { glog.V(4).Infoln("AutoChunking not supported for method", r.Method) return false @@ -53,7 +57,7 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica return false } - reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection, dataCenter) + reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else if reply != nil { @@ -62,7 +66,14 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica return true } -func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, 
replyerr error) { +func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, + contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { + + stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("postAutoChunk").Observe(time.Since(start).Seconds()) + }() multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { @@ -105,14 +116,14 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { writtenChunks = writtenChunks + 1 - fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) if assignErr != nil { return nil, assignErr } // upload the chunk to the volume server chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId) + uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId, auth) if uploadErr != nil { return nil, uploadErr } @@ -165,21 +176,28 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte }, Chunks: fileChunks, } - if db_err := fs.filer.CreateEntry(entry); db_err != nil { - replyerr = db_err - filerResult.Error = db_err.Error() - glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err) + if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { + fs.filer.DeleteChunks(entry.FullPath, entry.Chunks) + replyerr = dbErr + filerResult.Error = dbErr.Error() + glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) return } return } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string) (err error) { - err = nil +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, + chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { + + stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) + }() ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId)) + uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) if uploadResult != nil { glog.V(0).Infoln("Chunk upload result. 
Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) } diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index d056a4b25..55a1909a8 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -16,7 +16,7 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { for i := 0; i < len(parts); i++ { crumbs = append(crumbs, Breadcrumb{ Name: parts[i] + "/", - Link: "/" + filepath.Join(parts[0:i+1]...), + Link: "/" + filepath.ToSlash(filepath.Join(parts[0:i+1]...)), }) } diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index e31685ea0..884798936 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -162,7 +162,7 @@ function uploadFile(file, i) { var url = window.location.href var xhr = new XMLHttpRequest() var formData = new FormData() - xhr.open('POST', url, true) + xhr.open('POST', url, false) formData.append('file', file) xhr.send(formData) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 93dce59d8..1a17327a0 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -9,6 +9,7 @@ import ( "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" "google.golang.org/grpc/peer" ) @@ -30,6 +31,9 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ for _, v := range dn.GetVolumes() { message.DeletedVids = append(message.DeletedVids, uint32(v.Id)) } + for _, s := range dn.GetEcShards() { + message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId)) + } if len(message.DeletedVids) > 0 { ms.clientChansLock.RLock() @@ -63,39 +67,84 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ rack := dc.GetOrCreateRack(rackName) dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), heartbeat.PublicUrl, - int(heartbeat.MaxVolumeCount)) + int64(heartbeat.MaxVolumeCount)) glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) if err := stream.Send(&master_pb.HeartbeatResponse{ - VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024, - SecretKey: string(ms.guard.SecretKey), + VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, }); err != nil { return err } } + glog.V(4).Infof("master received heartbeat %s", heartbeat.String()) message := &master_pb.VolumeLocation{ Url: dn.Url(), PublicUrl: dn.PublicUrl, } - if len(heartbeat.NewVids) > 0 || len(heartbeat.DeletedVids) > 0 { + if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 { // process delta volume ids if exists for fast volume id updates - message.NewVids = append(message.NewVids, heartbeat.NewVids...) - message.DeletedVids = append(message.DeletedVids, heartbeat.DeletedVids...) 
- } else { + for _, volInfo := range heartbeat.NewVolumes { + message.NewVids = append(message.NewVids, volInfo.Id) + } + for _, volInfo := range heartbeat.DeletedVolumes { + message.DeletedVids = append(message.DeletedVids, volInfo.Id) + } + // update master internal volume layouts + t.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn) + } + + if len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes { // process heartbeat.Volumes newVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn) for _, v := range newVolumes { + glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url()) message.NewVids = append(message.NewVids, uint32(v.Id)) } for _, v := range deletedVolumes { + glog.V(0).Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url()) message.DeletedVids = append(message.DeletedVids, uint32(v.Id)) } } + if len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 { + + // update master internal volume layouts + t.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn) + + for _, s := range heartbeat.NewEcShards { + message.NewVids = append(message.NewVids, s.Id) + } + for _, s := range heartbeat.DeletedEcShards { + if dn.HasVolumesById(needle.VolumeId(s.Id)) { + continue + } + message.DeletedVids = append(message.DeletedVids, s.Id) + } + + } + + if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards { + glog.V(1).Infof("master recieved ec shards from %s: %+v", dn.Url(), heartbeat.EcShards) + newShards, deletedShards := t.SyncDataNodeEcShards(heartbeat.EcShards, dn) + + // broadcast the ec vid changes to master clients + for _, s := range newShards { + message.NewVids = append(message.NewVids, uint32(s.VolumeId)) + } + for _, s := range deletedShards { + if dn.HasVolumesById(s.VolumeId) { + continue + } + message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId)) + } + + } + if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 { ms.clientChansLock.RLock() - for _, ch := range ms.clientChans { + for host, ch := range ms.clientChans { + glog.V(0).Infof("master send to %s: %s", host, message.String()) ch <- message } ms.clientChansLock.RUnlock() @@ -103,12 +152,15 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ // tell the volume servers about the leader newLeader, err := t.Leader() - if err == nil { - if err := stream.Send(&master_pb.HeartbeatResponse{ - Leader: newLeader, - }); err != nil { - return err - } + if err != nil { + return err + } + if err := stream.Send(&master_pb.HeartbeatResponse{ + Leader: newLeader, + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + }); err != nil { + return err } } } diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go new file mode 100644 index 000000000..a50cfa192 --- /dev/null +++ b/weed/server/master_grpc_server_collection.go @@ -0,0 +1,94 @@ +package weed_server + +import ( + "context" + + "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" +) + +func (ms *MasterServer) CollectionList(ctx context.Context, req *master_pb.CollectionListRequest) (*master_pb.CollectionListResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.CollectionListResponse{} + collections := 
ms.Topo.ListCollections(req.IncludeNormalVolumes, req.IncludeEcVolumes) + for _, c := range collections { + resp.Collections = append(resp.Collections, &master_pb.Collection{ + Name: c, + }) + } + + return resp, nil +} + +func (ms *MasterServer) CollectionDelete(ctx context.Context, req *master_pb.CollectionDeleteRequest) (*master_pb.CollectionDeleteResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.CollectionDeleteResponse{} + + err := ms.doDeleteNormalCollection(req.Name) + + if err != nil { + return nil, err + } + + err = ms.doDeleteEcCollection(req.Name) + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error { + + collection, ok := ms.Topo.FindCollection(collectionName) + if !ok { + return nil + } + + for _, server := range collection.ListVolumeServers() { + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + Collection: collectionName, + }) + return deleteErr + }) + if err != nil { + return err + } + } + ms.Topo.DeleteCollection(collectionName) + + return nil +} + +func (ms *MasterServer) doDeleteEcCollection(collectionName string) error { + + listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName) + + for _, server := range listOfEcServers { + err := operation.WithVolumeServerClient(server, ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + Collection: collectionName, + }) + return deleteErr + }) + if err != nil { + return err + } + } + + ms.Topo.DeleteEcCollection(collectionName) + + return nil +} diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index ae0819d2d..19064bcde 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -6,7 +6,9 @@ import ( "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -48,13 +50,13 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } if req.Replication == "" { - req.Replication = ms.defaultReplicaPlacement + req.Replication = ms.option.DefaultReplicaPlacement } replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } - ttl, err := storage.ReadTTL(req.Ttl) + ttl, err := needle.ReadTTL(req.Ttl) if err != nil { return nil, err } @@ -63,7 +65,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest Collection: req.Collection, ReplicaPlacement: replicaPlacement, Ttl: ttl, - Prealloacte: ms.preallocate, + Prealloacte: ms.preallocateSize, DataCenter: req.DataCenter, Rack: req.Rack, DataNode: req.DataNode, @@ -75,7 +77,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } ms.vgLock.Lock() if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil { ms.vgLock.Unlock() return nil, 
fmt.Errorf("Cannot grow volume group! %v", err) } @@ -92,6 +94,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest Url: dn.Url(), PublicUrl: dn.PublicUrl, Count: count, + Auth: string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)), }, nil } @@ -102,13 +105,13 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic } if req.Replication == "" { - req.Replication = ms.defaultReplicaPlacement + req.Replication = ms.option.DefaultReplicaPlacement } replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } - ttl, err := storage.ReadTTL(req.Ttl) + ttl, err := needle.ReadTTL(req.Ttl) if err != nil { return nil, err } @@ -124,3 +127,60 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic return resp, nil } + +func (ms *MasterServer) VolumeList(ctx context.Context, req *master_pb.VolumeListRequest) (*master_pb.VolumeListResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.VolumeListResponse{ + TopologyInfo: ms.Topo.ToTopologyInfo(), + VolumeSizeLimitMb: uint64(ms.option.VolumeSizeLimitMB), + } + + return resp, nil +} + +func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.LookupEcVolumeRequest) (*master_pb.LookupEcVolumeResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.LookupEcVolumeResponse{} + + ecLocations, found := ms.Topo.LookupEcShards(needle.VolumeId(req.VolumeId)) + + if !found { + return resp, fmt.Errorf("ec volume %d not found", req.VolumeId) + } + + resp.VolumeId = req.VolumeId + + for shardId, shardLocations := range ecLocations.Locations { + var locations []*master_pb.Location + for _, dn := range shardLocations { + locations = append(locations, &master_pb.Location{ + Url: string(dn.Id()), + PublicUrl: dn.PublicUrl, + }) + } + resp.ShardIdLocations = append(resp.ShardIdLocations, &master_pb.LookupEcVolumeResponse_EcShardIdLocation{ + ShardId: uint32(shardId), + Locations: locations, + }) + } + + return resp, nil +} + +func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) { + + resp := &master_pb.GetMasterConfigurationResponse{ + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + } + + return resp, nil +} diff --git a/weed/server/master_server.go b/weed/server/master_server.go index f22925e56..3689b5495 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -2,10 +2,17 @@ package weed_server import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/shell" + "google.golang.org/grpc" "net/http" "net/http/httputil" "net/url" + "os" + "regexp" + "strconv" + "strings" "sync" + "time" "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" @@ -15,17 +22,28 @@ import ( "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" + "github.com/spf13/viper" ) +type MasterOption struct { + Port int + MetaFolder string + VolumeSizeLimitMB uint + VolumePreallocate bool + PulseSeconds int + DefaultReplicaPlacement string + GarbageThreshold float64 + WhiteList []string + DisableHttp bool + MetricsAddress string + MetricsIntervalSec int +} + type MasterServer struct { - port int - metaFolder string - volumeSizeLimitMB uint - preallocate int64 - 
pulseSeconds int - defaultReplicaPlacement string - garbageThreshold float64 - guard *security.Guard + option *MasterOption + guard *security.Guard + + preallocateSize int64 Topo *topology.Topology vg *topology.VolumeGrowth @@ -36,56 +54,60 @@ type MasterServer struct { // notifying clients clientChansLock sync.RWMutex clientChans map[string]chan *master_pb.VolumeLocation + + grpcDialOpiton grpc.DialOption } -func NewMasterServer(r *mux.Router, port int, metaFolder string, - volumeSizeLimitMB uint, - preallocate bool, - pulseSeconds int, - defaultReplicaPlacement string, - garbageThreshold float64, - whiteList []string, - secureKey string, -) *MasterServer { +func NewMasterServer(r *mux.Router, option *MasterOption) *MasterServer { + + v := viper.GetViper() + signingKey := v.GetString("jwt.signing.key") + v.SetDefault("jwt.signing.expires_after_seconds", 10) + expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") + + readSigningKey := v.GetString("jwt.signing.read.key") + v.SetDefault("jwt.signing.read.expires_after_seconds", 60) + readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds") var preallocateSize int64 - if preallocate { - preallocateSize = int64(volumeSizeLimitMB) * (1 << 20) + if option.VolumePreallocate { + preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20) } ms := &MasterServer{ - port: port, - volumeSizeLimitMB: volumeSizeLimitMB, - preallocate: preallocateSize, - pulseSeconds: pulseSeconds, - defaultReplicaPlacement: defaultReplicaPlacement, - garbageThreshold: garbageThreshold, - clientChans: make(map[string]chan *master_pb.VolumeLocation), + option: option, + preallocateSize: preallocateSize, + clientChans: make(map[string]chan *master_pb.VolumeLocation), + grpcDialOpiton: security.LoadClientTLS(v.Sub("grpc"), "master"), } ms.bounedLeaderChan = make(chan int, 16) seq := sequence.NewMemorySequencer() - ms.Topo = topology.NewTopology("topo", seq, uint64(volumeSizeLimitMB)*1024*1024, pulseSeconds) + ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, ms.option.PulseSeconds) ms.vg = topology.NewDefaultVolumeGrowth() - glog.V(0).Infoln("Volume Size Limit is", volumeSizeLimitMB, "MB") + glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB") - ms.guard = security.NewGuard(whiteList, secureKey) + ms.guard = security.NewGuard(ms.option.WhiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) - handleStaticResources2(r) - r.HandleFunc("/", ms.uiStatusHandler) - r.HandleFunc("/ui/index.html", ms.uiStatusHandler) - r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) - r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) - r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) - r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler))) - r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler))) - r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) - r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) - r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) - r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) - r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) - r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) - r.HandleFunc("/{fileId}", 
ms.proxyToLeader(ms.redirectHandler)) + if !ms.option.DisableHttp { + handleStaticResources2(r) + r.HandleFunc("/", ms.proxyToLeader(ms.uiStatusHandler)) + r.HandleFunc("/ui/index.html", ms.uiStatusHandler) + r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) + r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) + r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) + r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler))) + r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler))) + r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) + r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) + r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) + r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) + r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) + r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler)) + } - ms.Topo.StartRefreshWritableVolumes(garbageThreshold, ms.preallocate) + ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOpiton, ms.option.GarbageThreshold, ms.preallocateSize) + + ms.startAdminScripts() return ms } @@ -98,6 +120,9 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.") } }) + ms.Topo.RaftServer.AddEventListener(raft.StateChangeEventType, func(e raft.Event) { + glog.V(0).Infof("state change: %+v", e) + }) if ms.Topo.IsLeader() { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!") } else { @@ -138,3 +163,63 @@ func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Requ } } } + +func (ms *MasterServer) startAdminScripts() { + v := viper.GetViper() + adminScripts := v.GetString("master.maintenance.scripts") + v.SetDefault("master.maintenance.sleep_minutes", 17) + sleepMinutes := v.GetInt("master.maintenance.sleep_minutes") + + glog.V(0).Infof("adminScripts:\n%v", adminScripts) + if adminScripts == "" { + return + } + + scriptLines := strings.Split(adminScripts, "\n") + + masterAddress := "localhost:" + strconv.Itoa(ms.option.Port) + + var shellOptions shell.ShellOptions + shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "master") + shellOptions.Masters = &masterAddress + shellOptions.FilerHost = "localhost" + shellOptions.FilerPort = 8888 + shellOptions.Directory = "/" + + commandEnv := shell.NewCommandEnv(shellOptions) + + reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`) + + go commandEnv.MasterClient.KeepConnectedToMaster() + + go func() { + commandEnv.MasterClient.WaitUntilConnected() + + c := time.Tick(time.Duration(sleepMinutes) * time.Minute) + for _ = range c { + if ms.Topo.IsLeader() { + for _, line := range scriptLines { + + cmds := reg.FindAllString(line, -1) + if len(cmds) == 0 { + continue + } + args := make([]string, len(cmds[1:])) + for i := range args { + args[i] = strings.Trim(string(cmds[1+i]), "\"'") + } + cmd := strings.ToLower(cmds[0]) + + for _, c := range shell.Commands { + if c.Name() == cmd { + glog.V(0).Infof("executing: %s %v", cmd, args) + if err := c.Do(args, commandEnv, os.Stdout); err != nil { + glog.V(0).Infof("error: %v", err) + } + } + } + } + } + } + }() +} diff --git 
a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go index a797dddfc..5c7ff41cf 100644 --- a/weed/server/master_server_handlers.go +++ b/weed/server/master_server_handlers.go @@ -7,8 +7,9 @@ import ( "strings" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volumeLocations map[string]operation.LookupResult) { @@ -21,7 +22,7 @@ func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volume if _, ok := volumeLocations[vid]; ok { continue } - volumeId, err := storage.NewVolumeId(vid) + volumeId, err := needle.NewVolumeId(vid) if err == nil { machines := ms.Topo.Lookup(collection, volumeId) if machines != nil { @@ -40,12 +41,23 @@ func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volume return } -// Takes one volumeId only, can not do batch lookup +// If "fileId" is provided, this returns the fileId location and a JWT to update or delete the file. +// If "volumeId" is provided, this only returns the volumeId location func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request) { vid := r.FormValue("volumeId") - commaSep := strings.Index(vid, ",") - if commaSep > 0 { - vid = vid[0:commaSep] + if vid != "" { + // backward compatible + commaSep := strings.Index(vid, ",") + if commaSep > 0 { + vid = vid[0:commaSep] + } + } + fileId := r.FormValue("fileId") + if fileId != "" { + commaSep := strings.Index(fileId, ",") + if commaSep > 0 { + vid = fileId[0:commaSep] + } } vids := []string{vid} collection := r.FormValue("collection") //optional, but can be faster if too many collections @@ -54,6 +66,10 @@ func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request) httpStatus := http.StatusOK if location.Error != "" { httpStatus = http.StatusNotFound + } else { + forRead := r.FormValue("read") + isRead := forRead == "yes" + ms.maybeAddJwtAuthorization(w, fileId, !isRead) } writeJsonQuiet(w, r, httpStatus, location) } @@ -79,7 +95,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) ms.vgLock.Lock() defer ms.vgLock.Unlock() if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Cannot grow volume group! 
%v", err)) return @@ -88,8 +104,23 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) } fid, count, dn, err := ms.Topo.PickForWrite(requestedCount, option) if err == nil { + ms.maybeAddJwtAuthorization(w, fid, true) writeJsonQuiet(w, r, http.StatusOK, operation.AssignResult{Fid: fid, Url: dn.Url(), PublicUrl: dn.PublicUrl, Count: count}) } else { writeJsonQuiet(w, r, http.StatusNotAcceptable, operation.AssignResult{Error: err.Error()}) } } + +func (ms *MasterServer) maybeAddJwtAuthorization(w http.ResponseWriter, fileId string, isWrite bool) { + var encodedJwt security.EncodedJwt + if isWrite { + encodedJwt = security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fileId) + } else { + encodedJwt = security.GenJwt(ms.guard.ReadSigningKey, ms.guard.ReadExpiresAfterSec, fileId) + } + if encodedJwt == "" { + return + } + + w.Header().Set("Authorization", "BEARER "+string(encodedJwt)) +} diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 3a2662908..343bcb8da 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -7,12 +7,12 @@ import ( "math/rand" "net/http" "strconv" - "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -24,11 +24,8 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R return } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), func(client volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - - _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ Collection: collection.Name, }) return deleteErr @@ -50,7 +47,7 @@ func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Request) { gcString := r.FormValue("garbageThreshold") - gcThreshold := ms.garbageThreshold + gcThreshold := ms.option.GarbageThreshold if gcString != "" { var err error gcThreshold, err = strconv.ParseFloat(gcString, 32) @@ -60,7 +57,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque } } glog.Infoln("garbageThreshold =", gcThreshold) - ms.Topo.Vacuum(gcThreshold, ms.preallocate) + ms.Topo.Vacuum(ms.grpcDialOpiton, gcThreshold, ms.preallocateSize) ms.dirStatusHandler(w, r) } @@ -73,10 +70,10 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request } if err == nil { if count, err = strconv.Atoi(r.FormValue("count")); err == nil { - if ms.Topo.FreeSpace() < count*option.ReplicaPlacement.GetCopyCount() { - err = errors.New("Only " + strconv.Itoa(ms.Topo.FreeSpace()) + " volumes left! 
Not enough for " + strconv.Itoa(count*option.ReplicaPlacement.GetCopyCount())) + if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) { + err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount()) } else { - count, err = ms.vg.GrowByCountAndType(count, option, ms.Topo) + count, err = ms.vg.GrowByCountAndType(ms.grpcDialOpiton, count, option, ms.Topo) } } else { err = errors.New("parameter count is not found") @@ -98,7 +95,7 @@ func (ms *MasterServer) volumeStatusHandler(w http.ResponseWriter, r *http.Reque func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request) { vid, _, _, _, _ := parseURLPath(r.URL.Path) - volumeId, err := storage.NewVolumeId(vid) + volumeId, err := needle.NewVolumeId(vid) if err != nil { debug("parsing error:", err, r.URL.Path) return @@ -122,17 +119,17 @@ func (ms *MasterServer) selfUrl(r *http.Request) string { if r.Host != "" { return r.Host } - return "localhost:" + strconv.Itoa(ms.port) + return "localhost:" + strconv.Itoa(ms.option.Port) } func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) { if ms.Topo.IsLeader() { - submitForClientHandler(w, r, ms.selfUrl(r)) + submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOpiton) } else { masterUrl, err := ms.Topo.Leader() if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else { - submitForClientHandler(w, r, masterUrl) + submitForClientHandler(w, r, masterUrl, ms.grpcDialOpiton) } } } @@ -145,17 +142,17 @@ func (ms *MasterServer) HasWritableVolume(option *topology.VolumeGrowOption) boo func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGrowOption, error) { replicationString := r.FormValue("replication") if replicationString == "" { - replicationString = ms.defaultReplicaPlacement + replicationString = ms.option.DefaultReplicaPlacement } replicaPlacement, err := storage.NewReplicaPlacementFromString(replicationString) if err != nil { return nil, err } - ttl, err := storage.ReadTTL(r.FormValue("ttl")) + ttl, err := needle.ReadTTL(r.FormValue("ttl")) if err != nil { return nil, err } - preallocate := ms.preallocate + preallocate := ms.preallocateSize if r.FormValue("preallocate") != "" { preallocate, err = strconv.ParseInt(r.FormValue("preallocate"), 10, 64) if err != nil { diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go index f32e8e61b..b674e3f82 100644 --- a/weed/server/master_ui/templates.go +++ b/weed/server/master_ui/templates.go @@ -41,7 +41,7 @@ var StatusTpl = template.Must(template.New("status").Parse(`

@@ -76,6 +76,7 @@ var StatusTpl = template.Must(template.New("status").Parse(` Rack RemoteAddr #Volumes + #ErasureCodingShards Max @@ -88,6 +89,7 @@ var StatusTpl = template.Must(template.New("status").Parse(` {{ $rack.Id }} {{ $dn.Url }} {{ $dn.Volumes }} + {{ $dn.EcShards }} {{ $dn.Max }} {{ end }} diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 68042da54..88320ed98 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -2,37 +2,35 @@ package weed_server import ( "encoding/json" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" "io/ioutil" - "math/rand" "os" "path" "reflect" "sort" - "strings" "time" "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" - "github.com/gorilla/mux" ) type RaftServer struct { peers []string // initial peers to join with raftServer raft.Server dataDir string - httpAddr string - router *mux.Router + serverAddr string topo *topology.Topology + *raft.GrpcServer } -func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer { +func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer { s := &RaftServer{ - peers: peers, - httpAddr: httpAddr, - dataDir: dataDir, - router: r, - topo: topo, + peers: peers, + serverAddr: serverAddr, + dataDir: dataDir, + topo: topo, } if glog.V(4) { @@ -42,42 +40,39 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin raft.RegisterCommand(&topology.MaxVolumeIdCommand{}) var err error - transporter := raft.NewHTTPTransporter("/cluster", time.Second) - transporter.Transport.MaxIdleConnsPerHost = 1024 - transporter.Transport.IdleConnTimeout = time.Second - glog.V(0).Infof("Starting RaftServer with %v", httpAddr) + transporter := raft.NewGrpcTransporter(grpcDialOption) + glog.V(0).Infof("Starting RaftServer with %v", serverAddr) // Clear old cluster configurations if peers are changed - if oldPeers, changed := isPeersChanged(s.dataDir, httpAddr, s.peers); changed { + if oldPeers, changed := isPeersChanged(s.dataDir, serverAddr, s.peers); changed { glog.V(0).Infof("Peers Change: %v => %v", oldPeers, s.peers) os.RemoveAll(path.Join(s.dataDir, "conf")) os.RemoveAll(path.Join(s.dataDir, "log")) os.RemoveAll(path.Join(s.dataDir, "snapshot")) } - s.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, topo, "") + s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, nil, topo, "") if err != nil { glog.V(0).Infoln(err) return nil } - transporter.Install(s.raftServer, s) s.raftServer.SetHeartbeatInterval(500 * time.Millisecond) s.raftServer.SetElectionTimeout(time.Duration(pulseSeconds) * 500 * time.Millisecond) s.raftServer.Start() - s.router.HandleFunc("/cluster/status", s.statusHandler).Methods("GET") - for _, peer := range s.peers { - s.raftServer.AddPeer(peer, "http://"+peer) + s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer)) } - time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond) - if s.raftServer.IsLogEmpty() { + + s.GrpcServer = raft.NewGrpcServer(s.raftServer) + + if s.raftServer.IsLogEmpty() && isTheFirstOne(serverAddr, s.peers) { // Initialize the server by joining itself. 
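+		// With an empty log, only the first peer in sorted order bootstraps a new cluster by
+		// joining itself (see isTheFirstOne below); peers that are not first skip this step.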
glog.V(0).Infoln("Initializing new cluster") _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ Name: s.raftServer.Name(), - ConnectionString: "http://" + s.httpAddr, + ConnectionString: util.ServerToGrpcAddress(s.serverAddr), }) if err != nil { @@ -95,7 +90,7 @@ func (s *RaftServer) Peers() (members []string) { peers := s.raftServer.Peers() for _, p := range peers { - members = append(members, strings.TrimPrefix(p.ConnectionString, "http://")) + members = append(members, p.Name) } return @@ -114,7 +109,7 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, } for _, p := range conf.Peers { - oldPeers = append(oldPeers, strings.TrimPrefix(p.ConnectionString, "http://")) + oldPeers = append(oldPeers, p.Name) } oldPeers = append(oldPeers, self) @@ -128,3 +123,11 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, return oldPeers, !reflect.DeepEqual(peers, oldPeers) } + +func isTheFirstOne(self string, peers []string) bool { + sort.Strings(peers) + if len(peers) <= 0 { + return true + } + return self == peers[0] +} diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go index 627fe354e..fd38cb977 100644 --- a/weed/server/raft_server_handlers.go +++ b/weed/server/raft_server_handlers.go @@ -1,16 +1,17 @@ package weed_server import ( - "github.com/chrislusf/seaweedfs/weed/operation" "net/http" ) -func (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) { - s.router.HandleFunc(pattern, handler) +type ClusterStatusResult struct { + IsLeader bool `json:"IsLeader,omitempty"` + Leader string `json:"Leader,omitempty"` + Peers []string `json:"Peers,omitempty"` } -func (s *RaftServer) statusHandler(w http.ResponseWriter, r *http.Request) { - ret := operation.ClusterStatusResult{ +func (s *RaftServer) StatusHandler(w http.ResponseWriter, r *http.Request) { + ret := ClusterStatusResult{ IsLeader: s.topo.IsLeader(), Peers: s.Peers(), } diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index 429ca9b68..35c2508a6 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -5,7 +5,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) { @@ -24,12 +24,12 @@ func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server } -func (vs *VolumeServer) AssignVolume(ctx context.Context, req *volume_server_pb.AssignVolumeRequest) (*volume_server_pb.AssignVolumeResponse, error) { +func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_pb.AllocateVolumeRequest) (*volume_server_pb.AllocateVolumeResponse, error) { - resp := &volume_server_pb.AssignVolumeResponse{} + resp := &volume_server_pb.AllocateVolumeResponse{} err := vs.store.AddVolume( - storage.VolumeId(req.VolumdId), + needle.VolumeId(req.VolumeId), req.Collection, vs.needleMapKind, req.Replication, @@ -51,7 +51,7 @@ func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.V resp := &volume_server_pb.VolumeMountResponse{} - err := vs.store.MountVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume mount %v: 
%v", req, err) @@ -67,7 +67,7 @@ func (vs *VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb resp := &volume_server_pb.VolumeUnmountResponse{} - err := vs.store.UnmountVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume unmount %v: %v", req, err) @@ -83,7 +83,7 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. resp := &volume_server_pb.VolumeDeleteResponse{} - err := vs.store.DeleteVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume delete %v: %v", req, err) @@ -94,3 +94,19 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. return resp, err } + +func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) { + + resp := &volume_server_pb.VolumeMarkReadonlyResponse{} + + err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId)) + + if err != nil { + glog.Errorf("volume mark readonly %v: %v", req, err) + } else { + glog.V(2).Infof("volume mark readonly %v", req) + } + + return resp, err + +} diff --git a/weed/server/volume_grpc_batch_delete.go b/weed/server/volume_grpc_batch_delete.go index 3554d97ae..d7fbb6edf 100644 --- a/weed/server/volume_grpc_batch_delete.go +++ b/weed/server/volume_grpc_batch_delete.go @@ -7,7 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.BatchDeleteRequest) (*volume_server_pb.BatchDeleteResponse, error) { @@ -26,8 +26,8 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B continue } - n := new(storage.Needle) - volumeId, _ := storage.NewVolumeId(vid) + n := new(needle.Needle) + volumeId, _ := needle.NewVolumeId(vid) n.ParsePath(id_cookie) cookie := n.Cookie diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index bd3ffd7b3..731675b48 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -2,11 +2,16 @@ package weed_server import ( "fmt" + "net" "time" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/spf13/viper" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "golang.org/x/net/context" ) @@ -16,41 +21,46 @@ func (vs *VolumeServer) GetMaster() string { } func (vs *VolumeServer) heartbeat() { - glog.V(0).Infof("Volume server start with masters: %v", vs.MasterNodes) + glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes) vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) + grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "volume") + var err error var newLeader string for { - for _, master := range vs.MasterNodes { + for _, master := range vs.SeedMasterNodes { if newLeader != "" { master = newLeader } - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0) + masterGrpcAddress, parseErr := 
util.ParseServerToGrpcAddress(master) if parseErr != nil { - glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress) + glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr) continue } - newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, time.Duration(vs.pulseSeconds)*time.Second) + vs.store.MasterAddress = master + newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) + newLeader = "" + vs.store.MasterAddress = "" } } } } -func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepInterval time.Duration) (newLeader string, err error) { +func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { - grpcConection, err := util.GrpcDial(masterGrpcAddress) + grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } defer grpcConection.Close() client := master_pb.NewSeaweedClient(grpcConection) - stream, err := client.SendHeartbeat(context.Background()) + stream, err := client.SendHeartbeat(ctx) if err != nil { glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err) return "", err @@ -58,9 +68,6 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI glog.V(0).Infof("Heartbeat to: %v", masterNode) vs.currentMaster = masterNode - vs.store.Client = stream - defer func() { vs.store.Client = nil }() - doneChan := make(chan error, 1) go func() { @@ -71,17 +78,18 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI return } if in.GetVolumeSizeLimit() != 0 { - vs.store.VolumeSizeLimit = in.GetVolumeSizeLimit() + vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit()) } - if in.GetSecretKey() != "" { - vs.guard.SecretKey = security.Secret(in.GetSecretKey()) - } - if in.GetLeader() != "" && masterNode != in.GetLeader() { + if in.GetLeader() != "" && masterNode != in.GetLeader() && !isSameIP(in.GetLeader(), masterNode) { glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode) newLeader = in.GetLeader() doneChan <- nil return } + if in.GetMetricsAddress() != "" && vs.MetricsAddress != in.GetMetricsAddress() { + vs.MetricsAddress = in.GetMetricsAddress() + vs.MetricsIntervalSec = int(in.GetMetricsIntervalSeconds()) + } } }() @@ -90,33 +98,89 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI return "", err } - tickChan := time.Tick(sleepInterval) + if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + return "", err + } + + volumeTickChan := time.Tick(sleepInterval) + ecShardTickChan := time.Tick(17 * sleepInterval) for { select { - case vid := <-vs.store.NewVolumeIdChan: + case volumeMessage := <-vs.store.NewVolumesChan: deltaBeat := &master_pb.Heartbeat{ - NewVids: []uint32{uint32(vid)}, + NewVolumes: []*master_pb.VolumeShortInformationMessage{ + &volumeMessage, + }, } + glog.V(1).Infof("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) if err = stream.Send(deltaBeat); err != nil { glog.V(0).Infof("Volume Server Failed to update 
to master %s: %v", masterNode, err) return "", err } - case vid := <-vs.store.DeletedVolumeIdChan: + case ecShardMessage := <-vs.store.NewEcShardsChan: deltaBeat := &master_pb.Heartbeat{ - DeletedVids: []uint32{uint32(vid)}, + NewEcShards: []*master_pb.VolumeEcShardInformationMessage{ + &ecShardMessage, + }, } + glog.V(1).Infof("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, + erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds()) if err = stream.Send(deltaBeat); err != nil { glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) return "", err } - case <-tickChan: + case volumeMessage := <-vs.store.DeletedVolumesChan: + deltaBeat := &master_pb.Heartbeat{ + DeletedVolumes: []*master_pb.VolumeShortInformationMessage{ + &volumeMessage, + }, + } + glog.V(1).Infof("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) + if err = stream.Send(deltaBeat); err != nil { + glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) + return "", err + } + case ecShardMessage := <-vs.store.DeletedEcShardsChan: + deltaBeat := &master_pb.Heartbeat{ + DeletedEcShards: []*master_pb.VolumeEcShardInformationMessage{ + &ecShardMessage, + }, + } + glog.V(1).Infof("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, + erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds()) + if err = stream.Send(deltaBeat); err != nil { + glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) + return "", err + } + case <-volumeTickChan: + glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port) if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) return "", err } + case <-ecShardTickChan: + glog.V(4).Infof("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port) + if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + return "", err + } case err = <-doneChan: return } } } + +func isSameIP(ip string, host string) bool { + ips, err := net.LookupIP(host) + if err != nil { + return false + } + for _, t := range ips { + if ip == t.String() { + return true + } + } + return false +} diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go new file mode 100644 index 000000000..8b39146ee --- /dev/null +++ b/weed/server/volume_grpc_copy.go @@ -0,0 +1,263 @@ +package weed_server + +import ( + "context" + "fmt" + "io" + "math" + "os" + "path" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const BufferSizeLimit = 1024 * 1024 * 2 + +// VolumeCopy copy the .idx .dat files, and mount the volume +func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v != nil { + return nil, fmt.Errorf("volume %d already exists", req.VolumeId) + } + + location := vs.store.FindFreeLocation() + if location == nil { + 
return nil, fmt.Errorf("no space left") + } + + // the master will not start compaction for read-only volumes, so it is safe to just copy files directly + // copy .dat and .idx files + // read .idx .dat file size and timestamp + // send .idx file + // send .dat file + // confirm size and timestamp + var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse + var volumeFileName, idxFileName, datFileName string + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + var err error + volFileInfoResp, err = client.ReadVolumeFileStatus(ctx, + &volume_server_pb.ReadVolumeFileStatusRequest{ + VolumeId: req.VolumeId, + }) + if nil != err { + return fmt.Errorf("read volume file status failed, %v", err) + } + + volumeFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId)) + + // println("source:", volFileInfoResp.String()) + // copy ecx file + if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false); err != nil { + return err + } + + if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false); err != nil { + return err + } + + return nil + }) + + idxFileName = volumeFileName + ".idx" + datFileName = volumeFileName + ".dat" + + if err != nil && volumeFileName != "" { + if idxFileName != "" { + os.Remove(idxFileName) + } + if datFileName != "" { + os.Remove(datFileName) + } + return nil, err + } + + if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16 + return nil, err + } + + // mount the volume + err = vs.store.MountVolume(needle.VolumeId(req.VolumeId)) + if err != nil { + return nil, fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err) + } + + return &volume_server_pb.VolumeCopyResponse{ + LastAppendAtNs: volFileInfoResp.DatFileTimestampSeconds * uint64(time.Second), + }, err +} + +func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32, + compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool) error { + + copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ + VolumeId: vid, + Ext: ext, + CompactionRevision: compactRevision, + StopOffset: stopOffset, + Collection: collection, + IsEcVolume: isEcVolume, + }) + if err != nil { + return fmt.Errorf("failed to start copying volume %d %s file: %v", vid, ext, err) + } + + err = writeToFile(copyFileClient, baseFileName+ext, util.NewWriteThrottler(vs.compactionBytePerSecond), isAppend) + if err != nil { + return fmt.Errorf("failed to copy %s file: %v", baseFileName+ext, err) + } + + return nil + +} + +/** +only check the the differ of the file size +todo: maybe should check the received count and deleted count of the volume +*/ +func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) error { + stat, err := os.Stat(idxFileName) + if err != nil { + return fmt.Errorf("stat idx file %s failed, %v", idxFileName, err) + } + if originFileInf.IdxFileSize != uint64(stat.Size()) { + return fmt.Errorf("idx file %s size [%v] is not same as origin file size [%v]", + idxFileName, stat.Size(), originFileInf.IdxFileSize) + } + + stat, err = os.Stat(datFileName) + 
if err != nil { + return fmt.Errorf("get dat file info failed, %v", err) + } + if originFileInf.DatFileSize != uint64(stat.Size()) { + return fmt.Errorf("the dat file size [%v] is not same as origin file size [%v]", + stat.Size(), originFileInf.DatFileSize) + } + return nil +} + +func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool) error { + glog.V(4).Infof("writing to %s", fileName) + flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC + if isAppend { + flags = os.O_WRONLY | os.O_CREATE + } + dst, err := os.OpenFile(fileName, flags, 0644) + if err != nil { + return nil + } + defer dst.Close() + + for { + resp, receiveErr := client.Recv() + if receiveErr == io.EOF { + break + } + if receiveErr != nil { + return fmt.Errorf("receiving %s: %v", fileName, receiveErr) + } + dst.Write(resp.FileContent) + wt.MaybeSlowdown(int64(len(resp.FileContent))) + } + return nil +} + +func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_server_pb.ReadVolumeFileStatusRequest) (*volume_server_pb.ReadVolumeFileStatusResponse, error) { + resp := &volume_server_pb.ReadVolumeFileStatusResponse{} + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp.VolumeId = req.VolumeId + datSize, idxSize, modTime := v.FileStat() + resp.DatFileSize = datSize + resp.IdxFileSize = idxSize + resp.DatFileTimestampSeconds = uint64(modTime.Unix()) + resp.IdxFileTimestampSeconds = uint64(modTime.Unix()) + resp.FileCount = v.FileCount() + resp.CompactionRevision = uint32(v.CompactionRevision) + resp.Collection = v.Collection + return resp, nil +} + +// CopyFile client pulls the volume related file from the source server. +// if req.CompactionRevision != math.MaxUint32, it ensures the compact revision is as expected +// The copying still stop at req.StopOffset, but you can set it to math.MaxUint64 in order to read all data. 
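+// A minimal caller sketch (illustrative only; assumes a connected volume_server_pb.VolumeServerClient
+// named "client" and a context "ctx" — doCopyFile above is the real in-tree caller):
+//
+//	stream, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
+//		VolumeId:           vid,
+//		Ext:                ".dat",
+//		CompactionRevision: math.MaxUint32, // skip the compaction revision check
+//		StopOffset:         math.MaxUint64, // stream the file to the end
+//	})
+//	if err != nil { /* handle error */ }
+//	for {
+//		resp, recvErr := stream.Recv()
+//		if recvErr == io.EOF {
+//			break
+//		}
+//		if recvErr != nil { /* handle stream error */ }
+//		// resp.FileContent holds the next chunk of the file
+//	}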
+func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) error { + + var fileName string + if !req.IsEcVolume { + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + if uint32(v.CompactionRevision) != req.CompactionRevision && req.CompactionRevision != math.MaxUint32 { + return fmt.Errorf("volume %d is compacted", req.VolumeId) + } + fileName = v.FileName() + req.Ext + } else { + baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext + for _, location := range vs.store.Locations { + tName := path.Join(location.Directory, baseFileName) + if util.FileExists(tName) { + fileName = tName + } + } + if fileName == "" { + return fmt.Errorf("CopyFile not found ec volume id %d", req.VolumeId) + } + } + + bytesToRead := int64(req.StopOffset) + + file, err := os.Open(fileName) + if err != nil { + return err + } + defer file.Close() + + buffer := make([]byte, BufferSizeLimit) + + for bytesToRead > 0 { + bytesread, err := file.Read(buffer) + + // println(fileName, "read", bytesread, "bytes, with target", bytesToRead) + + if err != nil { + if err != io.EOF { + return err + } + // println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error()) + break + } + + if int64(bytesread) > bytesToRead { + bytesread = int(bytesToRead) + } + err = stream.Send(&volume_server_pb.CopyFileResponse{ + FileContent: buffer[:bytesread], + }) + if err != nil { + // println("sending", bytesread, "bytes err", err.Error()) + return err + } + + bytesToRead -= int64(bytesread) + + } + + return nil +} + +func (vs *VolumeServer) findVolumeOrEcVolumeLocation(volumeId needle.VolumeId) { + +} diff --git a/weed/server/volume_grpc_copy_incremental.go b/weed/server/volume_grpc_copy_incremental.go new file mode 100644 index 000000000..f56fbeef4 --- /dev/null +++ b/weed/server/volume_grpc_copy_incremental.go @@ -0,0 +1,66 @@ +package weed_server + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrementalCopyRequest, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + stopOffset, _, _ := v.FileStat() + foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(req.SinceNs) + if err != nil { + return fmt.Errorf("fail to locate by appendAtNs %d: %s", req.SinceNs, err) + } + + if isLastOne { + return nil + } + + startOffset := foundOffset.ToAcutalOffset() + + buf := make([]byte, 1024*1024*2) + return sendFileContent(v.DataFile(), buf, startOffset, int64(stopOffset), stream) + +} + +func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp := v.GetVolumeSyncStatus() + + return resp, nil + +} + +func sendFileContent(datFile *os.File, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { + var blockSizeLimit = int64(len(buf)) + for i := int64(0); i < 
stopOffset-startOffset; i += blockSizeLimit { + n, readErr := datFile.ReadAt(buf, startOffset+i) + if readErr == nil || readErr == io.EOF { + resp := &volume_server_pb.VolumeIncrementalCopyResponse{} + resp.FileContent = buf[:int64(n)] + sendErr := stream.Send(resp) + if sendErr != nil { + return sendErr + } + } else { + return readErr + } + } + return nil +} diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go new file mode 100644 index 000000000..8140a06f6 --- /dev/null +++ b/weed/server/volume_grpc_erasure_coding.go @@ -0,0 +1,313 @@ +package weed_server + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" +) + +/* + +Steps to apply erasure coding to .dat .idx files +0. ensure the volume is readonly +1. client call VolumeEcShardsGenerate to generate the .ecx and .ec01~.ec14 files +2. client ask master for possible servers to hold the ec files, at least 4 servers +3. client call VolumeEcShardsCopy on above target servers to copy ec files from the source server +4. target servers report the new ec files to the master +5. master stores vid -> [14]*DataNode +6. client checks master. If all 14 slices are ready, delete the original .idx, .idx files + +*/ + +// VolumeEcShardsGenerate generates the .ecx and .ec01 ~ .ec14 files +func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("volume %d not found", req.VolumeId) + } + baseFileName := v.FileName() + + if v.Collection != req.Collection { + return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // write .ecx file + if err := erasure_coding.WriteSortedEcxFile(baseFileName); err != nil { + return nil, fmt.Errorf("WriteSortedEcxFile %s: %v", baseFileName, err) + } + + // write .ec01 ~ .ec14 files + if err := erasure_coding.WriteEcFiles(baseFileName); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + } + + return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil +} + +// VolumeEcShardsRebuild generates the any of the missing .ec01 ~ .ec14 files +func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) { + + baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + + var rebuiltShardIds []uint32 + + for _, location := range vs.store.Locations { + if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) { + // write .ec01 ~ .ec14 files + baseFileName = path.Join(location.Directory, baseFileName) + if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil { + return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err) + } else { + rebuiltShardIds = generatedShardIds + } + + if err := erasure_coding.RebuildEcxFile(baseFileName); err != nil { + return 
nil, fmt.Errorf("RebuildEcxFile %s: %v", baseFileName, err) + } + + break + } + } + + return &volume_server_pb.VolumeEcShardsRebuildResponse{ + RebuiltShardIds: rebuiltShardIds, + }, nil +} + +// VolumeEcShardsCopy copy the .ecx and some ec data slices +func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) { + + location := vs.store.FindFreeLocation() + if location == nil { + return nil, fmt.Errorf("no space left") + } + + baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId)) + + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + + // copy ec data slices + for _, shardId := range req.ShardIds { + if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false); err != nil { + return err + } + } + + if !req.CopyEcxFile { + return nil + } + + // copy ecx file + if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false); err != nil { + return err + } + + // copy ecj file + if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true); err != nil { + return err + } + + return nil + }) + if err != nil { + return nil, fmt.Errorf("VolumeEcShardsCopy volume %d: %v", req.VolumeId, err) + } + + return &volume_server_pb.VolumeEcShardsCopyResponse{}, nil +} + +// VolumeEcShardsDelete local delete the .ecx and some ec data slices if not needed +// the shard should not be mounted before calling this. 
+func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) { + + baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + + found := false + for _, location := range vs.store.Locations { + if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) { + found = true + baseFilename = path.Join(location.Directory, baseFilename) + for _, shardId := range req.ShardIds { + os.Remove(baseFilename + erasure_coding.ToExt(int(shardId))) + } + break + } + } + + if !found { + return nil, nil + } + + // check whether to delete the ecx file also + hasEcxFile := false + existingShardCount := 0 + + for _, location := range vs.store.Locations { + fileInfos, err := ioutil.ReadDir(location.Directory) + if err != nil { + continue + } + for _, fileInfo := range fileInfos { + if fileInfo.Name() == baseFilename+".ecx" { + hasEcxFile = true + continue + } + if strings.HasPrefix(fileInfo.Name(), baseFilename+".ec") { + existingShardCount++ + } + } + } + + if hasEcxFile && existingShardCount == 0 { + if err := os.Remove(baseFilename + ".ecx"); err != nil { + return nil, err + } + if err := os.Remove(baseFilename + ".ecj"); err != nil { + return nil, err + } + } + + return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil +} + +func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) { + + for _, shardId := range req.ShardIds { + err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) + + if err != nil { + glog.Errorf("ec shard mount %v: %v", req, err) + } else { + glog.V(2).Infof("ec shard mount %v", req) + } + + if err != nil { + return nil, fmt.Errorf("mount %d.%d: %v", req.VolumeId, shardId, err) + } + } + + return &volume_server_pb.VolumeEcShardsMountResponse{}, nil +} + +func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) { + + for _, shardId := range req.ShardIds { + err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) + + if err != nil { + glog.Errorf("ec shard unmount %v: %v", req, err) + } else { + glog.V(2).Infof("ec shard unmount %v", req) + } + + if err != nil { + return nil, fmt.Errorf("unmount %d.%d: %v", req.VolumeId, shardId, err) + } + } + + return &volume_server_pb.VolumeEcShardsUnmountResponse{}, nil +} + +func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardReadRequest, stream volume_server_pb.VolumeServer_VolumeEcShardReadServer) error { + + ecVolume, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId)) + if !found { + return fmt.Errorf("VolumeEcShardRead not found ec volume id %d", req.VolumeId) + } + ecShard, found := ecVolume.FindEcVolumeShard(erasure_coding.ShardId(req.ShardId)) + if !found { + return fmt.Errorf("not found ec shard %d.%d", req.VolumeId, req.ShardId) + } + + if req.FileKey != 0 { + _, size, _ := ecVolume.FindNeedleFromEcx(types.Uint64ToNeedleId(req.FileKey)) + if size == types.TombstoneFileSize { + return stream.Send(&volume_server_pb.VolumeEcShardReadResponse{ + IsDeleted: true, + }) + } + } + + bufSize := req.Size + if bufSize > BufferSizeLimit { + bufSize = BufferSizeLimit + } + buffer := make([]byte, bufSize) + + startOffset, bytesToRead := req.Offset, 
req.Size + + for bytesToRead > 0 { + bytesread, err := ecShard.ReadAt(buffer, startOffset) + + // println(fileName, "read", bytesread, "bytes, with target", bytesToRead) + if bytesread > 0 { + + if int64(bytesread) > bytesToRead { + bytesread = int(bytesToRead) + } + err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{ + Data: buffer[:bytesread], + }) + if err != nil { + // println("sending", bytesread, "bytes err", err.Error()) + return err + } + + bytesToRead -= int64(bytesread) + + } + + if err != nil { + if err != io.EOF { + return err + } + return nil + } + + } + + return nil + +} + +func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) { + + resp := &volume_server_pb.VolumeEcBlobDeleteResponse{} + + for _, location := range vs.store.Locations { + if localEcVolume, found := location.FindEcVolume(needle.VolumeId(req.VolumeId)); found { + + _, size, _, err := localEcVolume.LocateEcShardNeedle(types.NeedleId(req.FileKey), needle.Version(req.Version)) + if err != nil { + return nil, fmt.Errorf("locate in local ec volume: %v", err) + } + if size == types.TombstoneFileSize { + return resp, nil + } + + err = localEcVolume.DeleteNeedleFromEcx(types.NeedleId(req.FileKey)) + if err != nil { + return nil, err + } + + break + } + } + + return resp, nil +} diff --git a/weed/server/volume_grpc_sync.go b/weed/server/volume_grpc_sync.go deleted file mode 100644 index 5f56ec17d..000000000 --- a/weed/server/volume_grpc_sync.go +++ /dev/null @@ -1,101 +0,0 @@ -package weed_server - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/chrislusf/seaweedfs/weed/storage/types" -) - -func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) - if v == nil { - return nil, fmt.Errorf("Not Found Volume Id %d", req.VolumdId) - } - - resp := v.GetVolumeSyncStatus() - - glog.V(2).Infof("volume sync status %d", req.VolumdId) - - return resp, nil - -} - -func (vs *VolumeServer) VolumeSyncIndex(req *volume_server_pb.VolumeSyncIndexRequest, stream volume_server_pb.VolumeServer_VolumeSyncIndexServer) error { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) - if v == nil { - return fmt.Errorf("Not Found Volume Id %d", req.VolumdId) - } - - content, err := v.IndexFileContent() - - if err != nil { - glog.Errorf("sync volume %d index: %v", req.VolumdId, err) - } else { - glog.V(2).Infof("sync volume %d index", req.VolumdId) - } - - const blockSizeLimit = 1024 * 1024 * 2 - for i := 0; i < len(content); i += blockSizeLimit { - blockSize := len(content) - i - if blockSize > blockSizeLimit { - blockSize = blockSizeLimit - } - resp := &volume_server_pb.VolumeSyncIndexResponse{} - resp.IndexFileContent = content[i : i+blockSize] - stream.Send(resp) - } - - return nil - -} - -func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataRequest, stream volume_server_pb.VolumeServer_VolumeSyncDataServer) error { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) - if v == nil { - return fmt.Errorf("Not Found Volume Id %d", req.VolumdId) - } - - if uint32(v.SuperBlock.CompactRevision) != req.Revision { - return fmt.Errorf("Requested Volume Revision is %d, but current 
revision is %d", req.Revision, v.SuperBlock.CompactRevision) - } - - content, err := storage.ReadNeedleBlob(v.DataFile(), int64(req.Offset)*types.NeedlePaddingSize, req.Size, v.Version()) - if err != nil { - return fmt.Errorf("read offset:%d size:%d", req.Offset, req.Size) - } - - id, err := types.ParseNeedleId(req.NeedleId) - if err != nil { - return fmt.Errorf("parsing needle id %s: %v", req.NeedleId, err) - } - n := new(storage.Needle) - n.ParseNeedleHeader(content) - if id != n.Id { - return fmt.Errorf("Expected file entry id %d, but found %d", id, n.Id) - } - - if err != nil { - glog.Errorf("sync volume %d data: %v", req.VolumdId, err) - } - - const blockSizeLimit = 1024 * 1024 * 2 - for i := 0; i < len(content); i += blockSizeLimit { - blockSize := len(content) - i - if blockSize > blockSizeLimit { - blockSize = blockSizeLimit - } - resp := &volume_server_pb.VolumeSyncDataResponse{} - resp.FileContent = content[i : i+blockSize] - stream.Send(resp) - } - - return nil - -} diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go new file mode 100644 index 000000000..698bad5b8 --- /dev/null +++ b/weed/server/volume_grpc_tail.go @@ -0,0 +1,117 @@ +package weed_server + +import ( + "context" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderRequest, stream volume_server_pb.VolumeServer_VolumeTailSenderServer) error { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + defer glog.V(1).Infof("tailing volume %d finished", v.Id) + + lastTimestampNs := req.SinceNs + drainingSeconds := req.IdleTimeoutSeconds + + for { + lastProcessedTimestampNs, err := sendNeedlesSince(stream, v, lastTimestampNs) + if err != nil { + glog.Infof("sendNeedlesSince: %v", err) + return fmt.Errorf("streamFollow: %v", err) + } + time.Sleep(2 * time.Second) + + if req.IdleTimeoutSeconds == 0 { + lastTimestampNs = lastProcessedTimestampNs + continue + } + if lastProcessedTimestampNs == lastTimestampNs { + drainingSeconds-- + if drainingSeconds <= 0 { + return nil + } + glog.V(1).Infof("tailing volume %d drains requests with %d seconds remaining", v.Id, drainingSeconds) + } else { + lastTimestampNs = lastProcessedTimestampNs + drainingSeconds = req.IdleTimeoutSeconds + glog.V(1).Infof("tailing volume %d resets draining wait time to %d seconds", v.Id, drainingSeconds) + } + + } + +} + +func sendNeedlesSince(stream volume_server_pb.VolumeServer_VolumeTailSenderServer, v *storage.Volume, lastTimestampNs uint64) (lastProcessedTimestampNs uint64, err error) { + + foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(lastTimestampNs) + if err != nil { + return 0, fmt.Errorf("fail to locate by appendAtNs %d: %s", lastTimestampNs, err) + } + + // log.Printf("reading ts %d offset %d isLast %v", lastTimestampNs, foundOffset, isLastOne) + + if isLastOne { + // need to heart beat to the client to ensure the connection health + sendErr := stream.Send(&volume_server_pb.VolumeTailSenderResponse{IsLastChunk: true}) + return lastTimestampNs, sendErr + } + + err = storage.ScanVolumeFileNeedleFrom(v.Version(), v.DataFile(), foundOffset.ToAcutalOffset(), func(needleHeader, needleBody []byte, needleAppendAtNs uint64) 
error { + + isLastChunk := false + + // need to send body by chunks + for i := 0; i < len(needleBody); i += BufferSizeLimit { + stopOffset := i + BufferSizeLimit + if stopOffset >= len(needleBody) { + isLastChunk = true + stopOffset = len(needleBody) + } + + sendErr := stream.Send(&volume_server_pb.VolumeTailSenderResponse{ + NeedleHeader: needleHeader, + NeedleBody: needleBody[i:stopOffset], + IsLastChunk: isLastChunk, + }) + if sendErr != nil { + return sendErr + } + } + + lastProcessedTimestampNs = needleAppendAtNs + return nil + + }) + + return + +} + +func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_server_pb.VolumeTailReceiverRequest) (*volume_server_pb.VolumeTailReceiverResponse, error) { + + resp := &volume_server_pb.VolumeTailReceiverResponse{} + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return resp, fmt.Errorf("receiver not found volume id %d", req.VolumeId) + } + + defer glog.V(1).Infof("receive tailing volume %d finished", v.Id) + + return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error { + _, _, err := vs.store.Write(v.Id, n) + return err + }) + +} diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go index f0c87b582..24f982241 100644 --- a/weed/server/volume_grpc_vacuum.go +++ b/weed/server/volume_grpc_vacuum.go @@ -5,19 +5,19 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_server_pb.VacuumVolumeCheckRequest) (*volume_server_pb.VacuumVolumeCheckResponse, error) { resp := &volume_server_pb.VacuumVolumeCheckResponse{} - garbageRatio, err := vs.store.CheckCompactVolume(storage.VolumeId(req.VolumdId)) + garbageRatio, err := vs.store.CheckCompactVolume(needle.VolumeId(req.VolumeId)) resp.GarbageRatio = garbageRatio if err != nil { - glog.V(3).Infof("check volume %d: %v", req.VolumdId, err) + glog.V(3).Infof("check volume %d: %v", req.VolumeId, err) } return resp, err @@ -28,12 +28,12 @@ func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_ser resp := &volume_server_pb.VacuumVolumeCompactResponse{} - err := vs.store.CompactVolume(storage.VolumeId(req.VolumdId), req.Preallocate) + err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond) if err != nil { - glog.Errorf("compact volume %d: %v", req.VolumdId, err) + glog.Errorf("compact volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("compact volume %d", req.VolumdId) + glog.V(1).Infof("compact volume %d", req.VolumeId) } return resp, err @@ -44,12 +44,12 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv resp := &volume_server_pb.VacuumVolumeCommitResponse{} - err := vs.store.CommitCompactVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("commit volume %d: %v", req.VolumdId, err) + glog.Errorf("commit volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("commit volume %d", req.VolumdId) + glog.V(1).Infof("commit volume %d", req.VolumeId) } return resp, err @@ -60,12 +60,12 @@ func (vs *VolumeServer) VacuumVolumeCleanup(ctx context.Context, req *volume_ser resp := 
&volume_server_pb.VacuumVolumeCleanupResponse{} - err := vs.store.CommitCleanupVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.CommitCleanupVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("cleanup volume %d: %v", req.VolumdId, err) + glog.Errorf("cleanup volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("cleanup volume %d", req.VolumdId) + glog.V(1).Infof("cleanup volume %d", req.VolumeId) } return resp, err diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 0914e81b0..6cf654738 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -1,25 +1,34 @@ package weed_server import ( + "fmt" "net/http" + "github.com/chrislusf/seaweedfs/weed/stats" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/spf13/viper" ) type VolumeServer struct { - MasterNodes []string - currentMaster string - pulseSeconds int - dataCenter string - rack string - store *storage.Store - guard *security.Guard + SeedMasterNodes []string + currentMaster string + pulseSeconds int + dataCenter string + rack string + store *storage.Store + guard *security.Guard + grpcDialOption grpc.DialOption - needleMapKind storage.NeedleMapType - FixJpgOrientation bool - ReadRedirect bool + needleMapKind storage.NeedleMapType + FixJpgOrientation bool + ReadRedirect bool + compactionBytePerSecond int64 + MetricsAddress string + MetricsIntervalSec int } func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, @@ -30,26 +39,44 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, dataCenter string, rack string, whiteList []string, fixJpgOrientation bool, - readRedirect bool) *VolumeServer { - vs := &VolumeServer{ - pulseSeconds: pulseSeconds, - dataCenter: dataCenter, - rack: rack, - needleMapKind: needleMapKind, - FixJpgOrientation: fixJpgOrientation, - ReadRedirect: readRedirect, - } - vs.MasterNodes = masterNodes - vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) + readRedirect bool, + compactionMBPerSecond int, +) *VolumeServer { - vs.guard = security.NewGuard(whiteList, "") + v := viper.GetViper() + signingKey := v.GetString("jwt.signing.key") + v.SetDefault("jwt.signing.expires_after_seconds", 10) + expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") + enableUiAccess := v.GetBool("access.ui") + + readSigningKey := v.GetString("jwt.signing.read.key") + v.SetDefault("jwt.signing.read.expires_after_seconds", 60) + readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds") + + vs := &VolumeServer{ + pulseSeconds: pulseSeconds, + dataCenter: dataCenter, + rack: rack, + needleMapKind: needleMapKind, + FixJpgOrientation: fixJpgOrientation, + ReadRedirect: readRedirect, + grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"), + compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024, + } + vs.SeedMasterNodes = masterNodes + vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) + + vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) handleStaticResources(adminMux) - adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) - adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) - adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) - adminMux.HandleFunc("/stats/memory", 
vs.guard.WhiteList(statsMemoryHandler)) - adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + if signingKey == "" || enableUiAccess { + // only expose the volume server details for safe environments + adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) + adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) + adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) + adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) + adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + } adminMux.HandleFunc("/", vs.privateStoreHandler) if publicMux != adminMux { // separated admin and public port @@ -58,6 +85,11 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, } go vs.heartbeat() + hostAddress := fmt.Sprintf("%s:%d", ip, port) + go stats.LoopPushingMetric("volumeServer", hostAddress, stats.VolumeServerGather, + func() (addr string, intervalSeconds int) { + return vs.MetricsAddress, vs.MetricsIntervalSec + }) return vs } @@ -67,7 +99,3 @@ func (vs *VolumeServer) Shutdown() { vs.store.Close() glog.V(0).Infoln("Shut down successfully!") } - -func (vs *VolumeServer) jwt(fileId string) security.EncodedJwt { - return security.GenJwt(vs.guard.SecretKey, fileId) -} diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go index 77b1274fd..14ad27d42 100644 --- a/weed/server/volume_server_handlers.go +++ b/weed/server/volume_server_handlers.go @@ -2,7 +2,10 @@ package weed_server import ( "net/http" + "strings" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" ) @@ -45,3 +48,47 @@ func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Req vs.GetOrHeadHandler(w, r) } } + +func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid string, isWrite bool) bool { + + var signingKey security.SigningKey + + if isWrite { + if len(vs.guard.SigningKey) == 0 { + return true + } else { + signingKey = vs.guard.SigningKey + } + } else { + if len(vs.guard.ReadSigningKey) == 0 { + return true + } else { + signingKey = vs.guard.ReadSigningKey + } + } + + tokenStr := security.GetJwt(r) + if tokenStr == "" { + glog.V(1).Infof("missing jwt from %s", r.RemoteAddr) + return false + } + + token, err := security.DecodeJwt(signingKey, tokenStr) + if err != nil { + glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err) + return false + } + if !token.Valid { + glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) + return false + } + + if sc, ok := token.Claims.(*security.SeaweedFileIdClaims); ok { + if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 { + fid = fid[:sepIndex] + } + return sc.Fid == vid+","+fid + } + glog.V(1).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr) + return false +} diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 92c728141..f30ffefaf 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -2,6 +2,8 @@ package weed_server import ( "bytes" + "context" + "errors" "io" "mime" "mime/multipart" @@ -17,16 +19,28 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/stats" + 
"github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) { - n := new(storage.Needle) + + stats.VolumeServerRequestCounter.WithLabelValues("get").Inc() + start := time.Now() + defer func() { stats.VolumeServerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) }() + + n := new(needle.Needle) vid, fid, filename, ext, _ := parseURLPath(r.URL.Path) - volumeId, err := storage.NewVolumeId(vid) + + if !vs.maybeCheckJwtAuthorization(r, vid, fid, false) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + + volumeId, err := needle.NewVolumeId(vid) if err != nil { glog.V(2).Infoln("parsing error:", err, r.URL.Path) w.WriteHeader(http.StatusBadRequest) @@ -40,7 +54,9 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } glog.V(4).Infoln("volume", volumeId, "reading", n) - if !vs.store.HasVolume(volumeId) { + hasVolume := vs.store.HasVolume(volumeId) + _, hasEcVolume := vs.store.FindEcVolume(volumeId) + if !hasVolume && !hasEcVolume { if !vs.ReadRedirect { glog.V(2).Infoln("volume is not local:", err, r.URL.Path) w.WriteHeader(http.StatusNotFound) @@ -65,10 +81,15 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } cookie := n.Cookie - count, e := vs.store.ReadVolumeNeedle(volumeId, n) - glog.V(4).Infoln("read bytes", count, "error", e) - if e != nil || count < 0 { - glog.V(0).Infof("read %s error: %v", r.URL.Path, e) + var count int + if hasVolume { + count, err = vs.store.ReadVolumeNeedle(volumeId, n) + } else if hasEcVolume { + count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) + } + glog.V(4).Infoln("read bytes", count, "error", err) + if err != nil || count < 0 { + glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) w.WriteHeader(http.StatusNotFound) return } @@ -132,7 +153,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { w.Header().Set("Content-Encoding", "gzip") } else { - if n.Data, err = operation.UnGzipData(n.Data); err != nil { + if n.Data, err = util.UnGzipData(n.Data); err != nil { glog.V(0).Infoln("ungzip error:", err, r.URL.Path) } } @@ -146,7 +167,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } -func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) { +func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) { if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" { return false } diff --git a/weed/server/volume_server_handlers_ui.go b/weed/server/volume_server_handlers_ui.go index b3d9a21fd..852f0b751 100644 --- a/weed/server/volume_server_handlers_ui.go +++ b/weed/server/volume_server_handlers_ui.go @@ -24,13 +24,15 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) Version string Masters []string Volumes interface{} + EcVolumes interface{} DiskStatuses interface{} Stats interface{} Counters *stats.ServerStats }{ util.VERSION, - vs.MasterNodes, + vs.SeedMasterNodes, vs.store.Status(), + vs.store.EcVolumes(), ds, infos, serverStats, diff --git 
a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index fd93142e1..db8fcb555 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -1,6 +1,7 @@ package weed_server import ( + "context" "errors" "fmt" "net/http" @@ -10,36 +11,53 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" ) func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { + + stats.VolumeServerRequestCounter.WithLabelValues("post").Inc() + start := time.Now() + defer func() { + stats.VolumeServerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) + }() + if e := r.ParseForm(); e != nil { glog.V(0).Infoln("form parse error:", e) writeJsonError(w, r, http.StatusBadRequest, e) return } - vid, _, _, _, _ := parseURLPath(r.URL.Path) - volumeId, ve := storage.NewVolumeId(vid) + + vid, fid, _, _, _ := parseURLPath(r.URL.Path) + volumeId, ve := needle.NewVolumeId(vid) if ve != nil { glog.V(0).Infoln("NewVolumeId error:", ve) writeJsonError(w, r, http.StatusBadRequest, ve) return } - needle, originalSize, ne := storage.CreateNeedleFromRequest(r, vs.FixJpgOrientation) + + if !vs.maybeCheckJwtAuthorization(r, vid, fid, true) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + + needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation) if ne != nil { writeJsonError(w, r, http.StatusBadRequest, ne) return } ret := operation.UploadResult{} - _, errorStatus := topology.ReplicatedWrite(vs.GetMaster(), - vs.store, volumeId, needle, r) + _, isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) httpStatus := http.StatusCreated - if errorStatus != "" { + if isUnchanged { + httpStatus = http.StatusNotModified + } + if writeError != nil { httpStatus = http.StatusInternalServerError - ret.Error = errorStatus + ret.Error = writeError.Error() } if needle.HasName() { ret.Name = string(needle.Name) @@ -51,15 +69,35 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { } func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { - n := new(storage.Needle) + + stats.VolumeServerRequestCounter.WithLabelValues("delete").Inc() + start := time.Now() + defer func() { + stats.VolumeServerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) + }() + + n := new(needle.Needle) vid, fid, _, _, _ := parseURLPath(r.URL.Path) - volumeId, _ := storage.NewVolumeId(vid) + volumeId, _ := needle.NewVolumeId(vid) n.ParsePath(fid) + if !vs.maybeCheckJwtAuthorization(r, vid, fid, true) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + // glog.V(2).Infof("volume %s deleting %s", vid, n) cookie := n.Cookie + ecVolume, hasEcVolume := vs.store.FindEcVolume(volumeId) + + if hasEcVolume { + count, err := vs.store.DeleteEcShardNeedle(context.Background(), ecVolume, n, cookie) + writeDeleteResult(err, count, w, r) + return + } + _, ok := vs.store.ReadVolumeNeedle(volumeId, n) if ok != nil { m := make(map[string]uint32) @@ -83,7 +121,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { return } // make sure all chunks had deleted before delete 
manifest - if e := chunkManifest.DeleteChunks(vs.GetMaster()); e != nil { + if e := chunkManifest.DeleteChunks(vs.GetMaster(), vs.grpcDialOption); e != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e)) return } @@ -100,6 +138,11 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { _, err := topology.ReplicatedDelete(vs.GetMaster(), vs.store, volumeId, n, r) + writeDeleteResult(err, count, w, r) + +} + +func writeDeleteResult(err error, count int64, w http.ResponseWriter, r *http.Request) { if err == nil { m := make(map[string]int64) m["size"] = count @@ -107,7 +150,6 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { } else { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Deletion Failed: %v", err)) } - } func setEtag(w http.ResponseWriter, etag string) { diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index b9740510f..eafc0aaeb 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -128,6 +128,32 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` +
+        <div class="row">
+          <h2>Erasure Coding Shards</h2>
+          <table class="table table-striped">
+            <thead>
+              <tr>
+                <th>Id</th>
+                <th>Collection</th>
+                <th>Shard Size</th>
+                <th>Shards</th>
+                <th>CreatedAt</th>
+              </tr>
+            </thead>
+            <tbody>
+              {{ range .EcVolumes }}
+              <tr>
+                <td>{{ .VolumeId }}</td>
+                <td>{{ .Collection }}</td>
+                <td>{{ .ShardSize }} Bytes</td>
+                <td>{{ .ShardIdList }}</td>
+                <td>{{ .CreatedAt.Format "02 Jan 06 15:04 -0700" }}</td>
+              </tr>
+              {{ end }}
+            </tbody>
+          </table>
+        </div>
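The ShardIdList and ShardSize columns above come from the per-volume shard bitmask (EcIndexBits) that the ec.* shell commands later in this change manipulate through erasure_coding.ShardBits (AddShardId, ShardIds, ShardIdCount). Below is a minimal standalone sketch of that bitmask bookkeeping, assuming the 14-shard total mentioned in the ec.balance help text; it is an illustration only, not the erasure_coding package itself.

package main

import "fmt"

// ShardBits mirrors the idea behind erasure_coding.ShardBits: one bit per shard id.
type ShardBits uint32

// totalShardsCount is assumed to be 14 here, per the ec.balance help text below.
const totalShardsCount = 14

// AddShardId sets the bit for the given shard id.
func (b ShardBits) AddShardId(id uint32) ShardBits { return b | (1 << id) }

// ShardIdCount counts the set bits, i.e. how many shards of this volume a node holds.
func (b ShardBits) ShardIdCount() (count int) {
	for ; b > 0; count++ {
		b &= b - 1 // clear the lowest set bit
	}
	return
}

// ShardIds expands the bitmask into the explicit shard id list shown in the UI.
func (b ShardBits) ShardIds() (ids []uint32) {
	for i := uint32(0); i < totalShardsCount; i++ {
		if b&(1<<i) != 0 {
			ids = append(ids, i)
		}
	}
	return
}

func main() {
	var b ShardBits
	b = b.AddShardId(0).AddShardId(3).AddShardId(7)
	fmt.Println(b.ShardIds(), b.ShardIdCount()) // prints [0 3 7] 3
}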
+ diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go new file mode 100644 index 000000000..151b48a78 --- /dev/null +++ b/weed/server/webdav_server.go @@ -0,0 +1,578 @@ +package weed_server + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "golang.org/x/net/webdav" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/spf13/viper" +) + +type WebDavOption struct { + Filer string + FilerGrpcAddress string + DomainName string + BucketsPath string + GrpcDialOption grpc.DialOption + Collection string + Uid uint32 + Gid uint32 +} + +type WebDavServer struct { + option *WebDavOption + secret security.SigningKey + filer *filer2.Filer + grpcDialOption grpc.DialOption + Handler *webdav.Handler +} + +func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) { + + fs, _ := NewWebDavFileSystem(option) + + ws = &WebDavServer{ + option: option, + grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + Handler: &webdav.Handler{ + FileSystem: fs, + LockSystem: webdav.NewMemLS(), + }, + } + + return ws, nil +} + +// adapted from https://github.com/mattn/davfs/blob/master/plugin/mysql/mysql.go + +type WebDavFileSystem struct { + option *WebDavOption + secret security.SigningKey + filer *filer2.Filer + grpcDialOption grpc.DialOption +} + +type FileInfo struct { + name string + size int64 + mode os.FileMode + modifiledTime time.Time + isDirectory bool +} + +func (fi *FileInfo) Name() string { return fi.name } +func (fi *FileInfo) Size() int64 { return fi.size } +func (fi *FileInfo) Mode() os.FileMode { return fi.mode } +func (fi *FileInfo) ModTime() time.Time { return fi.modifiledTime } +func (fi *FileInfo) IsDir() bool { return fi.isDirectory } +func (fi *FileInfo) Sys() interface{} { return nil } + +type WebDavFile struct { + fs *WebDavFileSystem + name string + isDirectory bool + off int64 + entry *filer_pb.Entry + entryViewCache []filer2.VisibleInterval +} + +func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { + return &WebDavFileSystem{ + option: option, + }, nil +} + +func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { + + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) + +} + +func clearName(name string) (string, error) { + slashed := strings.HasSuffix(name, "/") + name = path.Clean(name) + if !strings.HasSuffix(name, "/") && slashed { + name += "/" + } + if !strings.HasPrefix(name, "/") { + return "", os.ErrInvalid + } + return name, nil +} + +func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm os.FileMode) error { + + glog.V(2).Infof("WebDavFileSystem.Mkdir %v", fullDirPath) + + if !strings.HasSuffix(fullDirPath, "/") { + fullDirPath += "/" + } + + var err error + if fullDirPath, err = clearName(fullDirPath); err != nil { + return err + } + + _, err = fs.stat(ctx, fullDirPath) + if err == nil { + return os.ErrExist + } + + return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + dir, name := 
filer2.FullPath(fullDirPath).DirAndName() + request := &filer_pb.CreateEntryRequest{ + Directory: dir, + Entry: &filer_pb.Entry{ + Name: name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(perm | os.ModeDir), + Uid: fs.option.Uid, + Gid: fs.option.Gid, + }, + }, + } + + glog.V(1).Infof("mkdir: %v", request) + if _, err := client.CreateEntry(ctx, request); err != nil { + return fmt.Errorf("mkdir %s/%s: %v", dir, name, err) + } + + return nil + }) +} + +func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) { + + glog.V(2).Infof("WebDavFileSystem.OpenFile %v", fullFilePath) + + var err error + if fullFilePath, err = clearName(fullFilePath); err != nil { + return nil, err + } + + if flag&os.O_CREATE != 0 { + // file should not have / suffix. + if strings.HasSuffix(fullFilePath, "/") { + return nil, os.ErrInvalid + } + // based directory should be exists. + dir, _ := path.Split(fullFilePath) + _, err := fs.stat(ctx, dir) + if err != nil { + return nil, os.ErrInvalid + } + _, err = fs.stat(ctx, fullFilePath) + if err == nil { + if flag&os.O_EXCL != 0 { + return nil, os.ErrExist + } + fs.removeAll(ctx, fullFilePath) + } + + dir, name := filer2.FullPath(fullFilePath).DirAndName() + err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if _, err := client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + Directory: dir, + Entry: &filer_pb.Entry{ + Name: name, + IsDirectory: perm&os.ModeDir > 0, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(perm), + Uid: fs.option.Uid, + Gid: fs.option.Gid, + Collection: fs.option.Collection, + Replication: "000", + TtlSec: 0, + }, + }, + }); err != nil { + return fmt.Errorf("create %s: %v", fullFilePath, err) + } + return nil + }) + if err != nil { + return nil, err + } + return &WebDavFile{ + fs: fs, + name: fullFilePath, + isDirectory: false, + }, nil + } + + fi, err := fs.stat(ctx, fullFilePath) + if err != nil { + return nil, os.ErrNotExist + } + if !strings.HasSuffix(fullFilePath, "/") && fi.IsDir() { + fullFilePath += "/" + } + + return &WebDavFile{ + fs: fs, + name: fullFilePath, + isDirectory: false, + }, nil + +} + +func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) error { + var err error + if fullFilePath, err = clearName(fullFilePath); err != nil { + return err + } + + fi, err := fs.stat(ctx, fullFilePath) + if err != nil { + return err + } + + if fi.IsDir() { + //_, err = fs.db.Exec(`delete from filesystem where fullFilePath like $1 escape '\'`, strings.Replace(fullFilePath, `%`, `\%`, -1)+`%`) + } else { + //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) + } + dir, name := filer2.FullPath(fullFilePath).DirAndName() + err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.DeleteEntryRequest{ + Directory: dir, + Name: name, + IsDeleteData: true, + } + + glog.V(3).Infof("removing entry: %v", request) + _, err := client.DeleteEntry(ctx, request) + if err != nil { + return fmt.Errorf("remove %s: %v", fullFilePath, err) + } + + return nil + }) + return err +} + +func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error { + + glog.V(2).Infof("WebDavFileSystem.RemoveAll %v", name) + + return fs.removeAll(ctx, name) +} + +func (fs *WebDavFileSystem) Rename(ctx 
context.Context, oldName, newName string) error { + + glog.V(2).Infof("WebDavFileSystem.Rename %v to %v", oldName, newName) + + var err error + if oldName, err = clearName(oldName); err != nil { + return err + } + if newName, err = clearName(newName); err != nil { + return err + } + + of, err := fs.stat(ctx, oldName) + if err != nil { + return os.ErrExist + } + if of.IsDir() { + if strings.HasSuffix(oldName, "/") { + oldName = strings.TrimRight(oldName, "/") + } + if strings.HasSuffix(newName, "/") { + newName = strings.TrimRight(newName, "/") + } + } + + _, err = fs.stat(ctx, newName) + if err == nil { + return os.ErrExist + } + + oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName() + newDir, newBaseName := filer2.FullPath(newName).DirAndName() + + return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AtomicRenameEntryRequest{ + OldDirectory: oldDir, + OldName: oldBaseName, + NewDirectory: newDir, + NewName: newBaseName, + } + + _, err := client.AtomicRenameEntry(ctx, request) + if err != nil { + return fmt.Errorf("renaming %s/%s => %s/%s: %v", oldDir, oldBaseName, newDir, newBaseName, err) + } + + return nil + + }) +} + +func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.FileInfo, error) { + var err error + if fullFilePath, err = clearName(fullFilePath); err != nil { + return nil, err + } + + var fi FileInfo + entry, err := filer2.GetEntry(ctx, fs, fullFilePath) + if entry == nil { + return nil, os.ErrNotExist + } + if err != nil { + return nil, err + } + fi.size = int64(filer2.TotalSize(entry.GetChunks())) + fi.name = fullFilePath + fi.mode = os.FileMode(entry.Attributes.FileMode) + fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0) + fi.isDirectory = entry.IsDirectory + + _, fi.name = path.Split(path.Clean(fi.name)) + if fi.name == "" { + fi.name = "/" + fi.modifiledTime = time.Now() + fi.isDirectory = true + } + return &fi, nil +} + +func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) { + + glog.V(2).Infof("WebDavFileSystem.Stat %v", name) + + return fs.stat(ctx, name) +} + +func (f *WebDavFile) Write(buf []byte) (int, error) { + + glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) + + var err error + ctx := context.Background() + if f.entry == nil { + f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + } + + if f.entry == nil { + return 0, err + } + if err != nil { + return 0, err + } + + var fileId, host string + var auth security.EncodedJwt + + if err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: "000", + Collection: f.fs.option.Collection, + } + + resp, err := client.AssignVolume(ctx, request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + + return nil + }); err != nil { + return 0, fmt.Errorf("filerGrpcAddress assign volume: %v", err) + } + + fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) + bufReader := bytes.NewReader(buf) + uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, false, "application/octet-stream", nil, auth) + if err != nil { + glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err) + return 0, fmt.Errorf("upload data: %v", err) + } + if uploadResult.Error != "" { + glog.V(0).Infof("upload failure %v to %s: %v", f.name, fileUrl, err) + return 0, fmt.Errorf("upload result: %v", 
uploadResult.Error) + } + + chunk := &filer_pb.FileChunk{ + FileId: fileId, + Offset: f.off, + Size: uint64(len(buf)), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + } + + f.entry.Chunks = append(f.entry.Chunks, chunk) + dir, _ := filer2.FullPath(f.name).DirAndName() + + err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + f.entry.Attributes.Mtime = time.Now().Unix() + + request := &filer_pb.UpdateEntryRequest{ + Directory: dir, + Entry: f.entry, + } + + if _, err := client.UpdateEntry(ctx, request); err != nil { + return fmt.Errorf("update %s: %v", f.name, err) + } + + return nil + }) + + if err != nil { + f.off += int64(len(buf)) + } + return len(buf), err +} + +func (f *WebDavFile) Close() error { + + glog.V(2).Infof("WebDavFileSystem.Close %v", f.name) + + if f.entry != nil { + f.entry = nil + f.entryViewCache = nil + } + + return nil +} + +func (f *WebDavFile) Read(p []byte) (readSize int, err error) { + + glog.V(2).Infof("WebDavFileSystem.Read %v", f.name) + ctx := context.Background() + + if f.entry == nil { + f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + } + if f.entry == nil { + return 0, err + } + if err != nil { + return 0, err + } + if len(f.entry.Chunks) == 0 { + return 0, io.EOF + } + if f.entryViewCache == nil { + f.entryViewCache = filer2.NonOverlappingVisibleIntervals(f.entry.Chunks) + } + chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p)) + + totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, f.name, p, chunkViews, f.off) + if err != nil { + return 0, err + } + readSize = int(totalRead) + + f.off += totalRead + if readSize == 0 { + return 0, io.EOF + } + return +} + +func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { + + glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) + ctx := context.Background() + + dir := f.name + if dir != "/" && strings.HasSuffix(dir, "/") { + dir = dir[:len(dir)-1] + } + + err = filer2.ReadDirAllEntries(ctx, f.fs, dir, func(entry *filer_pb.Entry) { + fi := FileInfo{ + size: int64(filer2.TotalSize(entry.GetChunks())), + name: entry.Name, + mode: os.FileMode(entry.Attributes.FileMode), + modifiledTime: time.Unix(entry.Attributes.Mtime, 0), + isDirectory: entry.IsDirectory, + } + + if !strings.HasSuffix(fi.name, "/") && fi.IsDir() { + fi.name += "/" + } + glog.V(4).Infof("entry: %v", fi.name) + ret = append(ret, &fi) + }) + + old := f.off + if old >= int64(len(ret)) { + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.off += int64(count) + if f.off > int64(len(ret)) { + f.off = int64(len(ret)) + } + } else { + f.off = int64(len(ret)) + old = 0 + } + + return ret[old:f.off], nil +} + +func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) { + + glog.V(2).Infof("WebDavFile.Seek %v %v %v", f.name, offset, whence) + + ctx := context.Background() + + var err error + switch whence { + case 0: + f.off = 0 + case 2: + if fi, err := f.fs.stat(ctx, f.name); err != nil { + return 0, err + } else { + f.off = fi.Size() + } + } + f.off += offset + return f.off, err +} + +func (f *WebDavFile) Stat() (os.FileInfo, error) { + + glog.V(2).Infof("WebDavFile.Stat %v", f.name) + + ctx := context.Background() + + return f.fs.stat(ctx, f.name) +} diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go new file mode 100644 index 000000000..fbaddcd51 --- /dev/null +++ b/weed/shell/command_collection_delete.go @@ -0,0 +1,51 @@ +package shell + +import ( + "context" + "fmt" 
+ "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "io" +) + +func init() { + Commands = append(Commands, &commandCollectionDelete{}) +} + +type commandCollectionDelete struct { +} + +func (c *commandCollectionDelete) Name() string { + return "collection.delete" +} + +func (c *commandCollectionDelete) Help() string { + return `delete specified collection + + collection.delete + +` +} + +func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if len(args) == 0 { + return nil + } + + collectionName := args[0] + + ctx := context.Background() + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + _, err = client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + Name: collectionName, + }) + return err + }) + if err != nil { + return + } + + fmt.Fprintf(writer, "collection %s is deleted.\n", collectionName) + + return nil +} diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go new file mode 100644 index 000000000..c4325c66f --- /dev/null +++ b/weed/shell/command_collection_list.go @@ -0,0 +1,59 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "io" +) + +func init() { + Commands = append(Commands, &commandCollectionList{}) +} + +type commandCollectionList struct { +} + +func (c *commandCollectionList) Name() string { + return "collection.list" +} + +func (c *commandCollectionList) Help() string { + return `list all collections` +} + +func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + collections, err := ListCollectionNames(commandEnv, true, true) + + if err != nil { + return err + } + + for _, c := range collections { + fmt.Fprintf(writer, "collection:\"%s\"\n", c) + } + + fmt.Fprintf(writer, "Total %d collections.\n", len(collections)) + + return nil +} + +func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) { + var resp *master_pb.CollectionListResponse + ctx := context.Background() + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{ + IncludeNormalVolumes: includeNormalVolumes, + IncludeEcVolumes: includeEcVolumes, + }) + return err + }) + if err != nil { + return + } + for _, c := range resp.Collections { + collections = append(collections, c.Name) + } + return +} diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go new file mode 100644 index 000000000..47ae7bad3 --- /dev/null +++ b/weed/shell/command_ec_balance.go @@ -0,0 +1,517 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "sort" + + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandEcBalance{}) +} + +type commandEcBalance struct { +} + +func (c *commandEcBalance) Name() string { + return "ec.balance" +} + +func (c *commandEcBalance) Help() string { + return `balance all ec shards among all racks and volume servers + + ec.balance [-c EACH_COLLECTION|] [-force] [-dataCenter ] + + Algorithm: + + For each type of volume server (different max volume count limit){ + for each collection: + balanceEcVolumes(collectionName) + for each rack: + balanceEcRack(rack) + } + + func balanceEcVolumes(collectionName){ + for each volume: + 
doDeduplicateEcShards(volumeId) + + tracks rack~shardCount mapping + for each volume: + doBalanceEcShardsAcrossRacks(volumeId) + + for each volume: + doBalanceEcShardsWithinRacks(volumeId) + } + + // spread ec shards into more racks + func doBalanceEcShardsAcrossRacks(volumeId){ + tracks rack~volumeIdShardCount mapping + averageShardsPerEcRack = totalShardNumber / numRacks // totalShardNumber is 14 for now, later could varies for each dc + ecShardsToMove = select overflown ec shards from racks with ec shard counts > averageShardsPerEcRack + for each ecShardsToMove { + destRack = pickOneRack(rack~shardCount, rack~volumeIdShardCount, averageShardsPerEcRack) + destVolumeServers = volume servers on the destRack + pickOneEcNodeAndMoveOneShard(destVolumeServers) + } + } + + func doBalanceEcShardsWithinRacks(volumeId){ + racks = collect all racks that the volume id is on + for rack, shards := range racks + doBalanceEcShardsWithinOneRack(volumeId, shards, rack) + } + + // move ec shards + func doBalanceEcShardsWithinOneRack(volumeId, shards, rackId){ + tracks volumeServer~volumeIdShardCount mapping + averageShardCount = len(shards) / numVolumeServers + volumeServersOverAverage = volume servers with volumeId's ec shard counts > averageShardsPerEcRack + ecShardsToMove = select overflown ec shards from volumeServersOverAverage + for each ecShardsToMove { + destVolumeServer = pickOneVolumeServer(volumeServer~shardCount, volumeServer~volumeIdShardCount, averageShardCount) + pickOneEcNodeAndMoveOneShard(destVolumeServers) + } + } + + // move ec shards while keeping shard distribution for the same volume unchanged or more even + func balanceEcRack(rack){ + averageShardCount = total shards / numVolumeServers + for hasMovedOneEcShard { + sort all volume servers ordered by the number of local ec shards + pick the volume server A with the lowest number of ec shards x + pick the volume server B with the highest number of ec shards y + if y > averageShardCount and x +1 <= averageShardCount { + if B has a ec shard with volume id v that A does not have { + move one ec shard v from B to A + hasMovedOneEcShard = true + } + } + } + } + +` +} + +func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection") + dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter") + applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan") + if err = balanceCommand.Parse(args); err != nil { + return nil + } + + ctx := context.Background() + + // collect all ec nodes + allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, *dc) + if err != nil { + return err + } + if totalFreeEcSlots < 1 { + return fmt.Errorf("no free ec shard slots. 
only %d left", totalFreeEcSlots) + } + + racks := collectRacks(allEcNodes) + + if *collection == "EACH_COLLECTION" { + collections, err := ListCollectionNames(commandEnv, false, true) + if err != nil { + return err + } + fmt.Printf("balanceEcVolumes collections %+v\n", len(collections)) + for _, c := range collections { + fmt.Printf("balanceEcVolumes collection %+v\n", c) + if err = balanceEcVolumes(commandEnv, c, allEcNodes, racks, *applyBalancing); err != nil { + return err + } + } + } else { + if err = balanceEcVolumes(commandEnv, *collection, allEcNodes, racks, *applyBalancing); err != nil { + return err + } + } + + if err := balanceEcRacks(ctx, commandEnv, racks, *applyBalancing); err != nil { + return fmt.Errorf("balance ec racks: %v", err) + } + + return nil +} + +func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack { + // collect racks info + racks := make(map[RackId]*EcRack) + for _, ecNode := range allEcNodes { + if racks[ecNode.rack] == nil { + racks[ecNode.rack] = &EcRack{ + ecNodes: make(map[EcNodeId]*EcNode), + } + } + racks[ecNode.rack].ecNodes[EcNodeId(ecNode.info.Id)] = ecNode + racks[ecNode.rack].freeEcSlot += ecNode.freeEcSlot + } + return racks +} + +func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { + + ctx := context.Background() + + fmt.Printf("balanceEcVolumes %s\n", collection) + + if err := deleteDuplicatedEcShards(ctx, commandEnv, allEcNodes, collection, applyBalancing); err != nil { + return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err) + } + + if err := balanceEcShardsAcrossRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) + } + + if err := balanceEcShardsWithinRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) + } + + return nil +} + +func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { + // vid => []ecNode + vidLocations := collectVolumeIdToEcNodes(allEcNodes) + // deduplicate ec shards + for vid, locations := range vidLocations { + if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil { + return err + } + } + return nil +} + +func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error { + + // check whether this volume has ecNodes that are over average + shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount) + for _, ecNode := range locations { + shardBits := findEcVolumeShards(ecNode, vid) + for _, shardId := range shardBits.ShardIds() { + shardToLocations[shardId] = append(shardToLocations[shardId], ecNode) + } + } + for shardId, ecNodes := range shardToLocations { + if len(ecNodes) <= 1 { + continue + } + sortEcNodes(ecNodes) + fmt.Printf("ec shard %d.%d has %d copies, keeping %v\n", vid, shardId, len(ecNodes), ecNodes[0].info.Id) + if !applyBalancing { + continue + } + + duplicatedShardIds := []uint32{uint32(shardId)} + for _, ecNode := range ecNodes[1:] { + if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + return err + } + if err := 
sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + return err + } + ecNode.deleteEcVolumeShards(vid, duplicatedShardIds) + } + } + return nil +} + +func balanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { + // collect vid => []ecNode, since previous steps can change the locations + vidLocations := collectVolumeIdToEcNodes(allEcNodes) + // spread the ec shards evenly + for vid, locations := range vidLocations { + if err := doBalanceEcShardsAcrossRacks(ctx, commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { + return err + } + } + return nil +} + +func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { + + // calculate average number of shards an ec rack should have for one volume + averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks)) + + // see the volume's shards are in how many racks, and how many in each rack + rackToShardCount := groupByCount(locations, func(ecNode *EcNode) (id string, count int) { + shardBits := findEcVolumeShards(ecNode, vid) + return string(ecNode.rack), shardBits.ShardIdCount() + }) + rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string { + return string(ecNode.rack) + }) + + // ecShardsToMove = select overflown ec shards from racks with ec shard counts > averageShardsPerEcRack + ecShardsToMove := make(map[erasure_coding.ShardId]*EcNode) + for rackId, count := range rackToShardCount { + if count > averageShardsPerEcRack { + possibleEcNodes := rackEcNodesWithVid[rackId] + for shardId, ecNode := range pickNEcShardsToMoveFrom(possibleEcNodes, vid, count-averageShardsPerEcRack) { + ecShardsToMove[shardId] = ecNode + } + } + } + + for shardId, ecNode := range ecShardsToMove { + rackId := pickOneRack(racks, rackToShardCount, averageShardsPerEcRack) + var possibleDestinationEcNodes []*EcNode + for _, n := range racks[rackId].ecNodes { + possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) + } + err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + if err != nil { + return err + } + rackToShardCount[string(rackId)] += 1 + rackToShardCount[string(ecNode.rack)] -= 1 + racks[rackId].freeEcSlot -= 1 + racks[ecNode.rack].freeEcSlot += 1 + } + + return nil +} + +func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]int, averageShardsPerEcRack int) RackId { + + // TODO later may need to add some randomness + + for rackId, rack := range rackToEcNodes { + if rackToShardCount[string(rackId)] >= averageShardsPerEcRack { + continue + } + + if rack.freeEcSlot <= 0 { + continue + } + + return rackId + } + + return "" +} + +func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { + // collect vid => []ecNode, since previous steps can change the locations + vidLocations := collectVolumeIdToEcNodes(allEcNodes) + + // spread the ec shards evenly + for vid, locations := range vidLocations { + + // see the volume's shards are in how many racks, and how many in each rack + rackToShardCount := groupByCount(locations, func(ecNode *EcNode) (id 
string, count int) { + shardBits := findEcVolumeShards(ecNode, vid) + return string(ecNode.rack), shardBits.ShardIdCount() + }) + rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string { + return string(ecNode.rack) + }) + + for rackId, _ := range rackToShardCount { + + var possibleDestinationEcNodes []*EcNode + for _, n := range racks[RackId(rackId)].ecNodes { + possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) + } + sourceEcNodes := rackEcNodesWithVid[rackId] + averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes)) + if err := doBalanceEcShardsWithinOneRack(ctx, commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { + return err + } + } + } + return nil +} + +func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { + + for _, ecNode := range existingLocations { + + shardBits := findEcVolumeShards(ecNode, vid) + overLimitCount := shardBits.ShardIdCount() - averageShardsPerEcNode + + for _, shardId := range shardBits.ShardIds() { + + if overLimitCount <= 0 { + break + } + + fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId) + + err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + if err != nil { + return err + } + + overLimitCount-- + } + } + + return nil +} + +func balanceEcRacks(ctx context.Context, commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { + + // balance one rack for all ec shards + for _, ecRack := range racks { + if err := doBalanceEcRack(ctx, commandEnv, ecRack, applyBalancing); err != nil { + return err + } + } + return nil +} + +func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { + + if len(ecRack.ecNodes) <= 1 { + return nil + } + + var rackEcNodes []*EcNode + for _, node := range ecRack.ecNodes { + rackEcNodes = append(rackEcNodes, node) + } + + ecNodeIdToShardCount := groupByCount(rackEcNodes, func(node *EcNode) (id string, count int) { + for _, ecShardInfo := range node.info.EcShardInfos { + count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount() + } + return node.info.Id, count + }) + + var totalShardCount int + for _, count := range ecNodeIdToShardCount { + totalShardCount += count + } + + averageShardCount := ceilDivide(totalShardCount, len(rackEcNodes)) + + hasMove := true + for hasMove { + hasMove = false + sort.Slice(rackEcNodes, func(i, j int) bool { + return rackEcNodes[i].freeEcSlot > rackEcNodes[j].freeEcSlot + }) + emptyNode, fullNode := rackEcNodes[0], rackEcNodes[len(rackEcNodes)-1] + emptyNodeShardCount, fullNodeShardCount := ecNodeIdToShardCount[emptyNode.info.Id], ecNodeIdToShardCount[fullNode.info.Id] + if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 <= averageShardCount { + + emptyNodeIds := make(map[uint32]bool) + for _, shards := range emptyNode.info.EcShardInfos { + emptyNodeIds[shards.Id] = true + } + for _, shards := range fullNode.info.EcShardInfos { + if _, found := emptyNodeIds[shards.Id]; !found { + for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() { + + fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, 
shards.Id, shardId, emptyNode.info.Id) + + err := moveMountedShardToEcNode(ctx, commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) + if err != nil { + return err + } + + ecNodeIdToShardCount[emptyNode.info.Id]++ + ecNodeIdToShardCount[fullNode.info.Id]-- + hasMove = true + break + } + break + } + } + } + } + + return nil +} + +func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, expectedTotalEcShards int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { + + sortEcNodes(possibleDestinationEcNodes) + averageShardsPerEcNode := ceilDivide(expectedTotalEcShards, len(possibleDestinationEcNodes)) + + for _, destEcNode := range possibleDestinationEcNodes { + if destEcNode.info.Id == existingLocation.info.Id { + continue + } + + if destEcNode.freeEcSlot <= 0 { + continue + } + if findEcVolumeShards(destEcNode, vid).ShardIdCount() >= averageShardsPerEcNode { + continue + } + + fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id) + + err := moveMountedShardToEcNode(ctx, commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) + if err != nil { + return err + } + + return nil + } + + return nil +} + +func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[erasure_coding.ShardId]*EcNode { + picked := make(map[erasure_coding.ShardId]*EcNode) + var candidateEcNodes []*CandidateEcNode + for _, ecNode := range ecNodes { + shardBits := findEcVolumeShards(ecNode, vid) + if shardBits.ShardIdCount() > 0 { + candidateEcNodes = append(candidateEcNodes, &CandidateEcNode{ + ecNode: ecNode, + shardCount: shardBits.ShardIdCount(), + }) + } + } + sort.Slice(candidateEcNodes, func(i, j int) bool { + return candidateEcNodes[i].shardCount > candidateEcNodes[j].shardCount + }) + for i := 0; i < n; i++ { + selectedEcNodeIndex := -1 + for i, candidateEcNode := range candidateEcNodes { + shardBits := findEcVolumeShards(candidateEcNode.ecNode, vid) + if shardBits > 0 { + selectedEcNodeIndex = i + for _, shardId := range shardBits.ShardIds() { + candidateEcNode.shardCount-- + picked[shardId] = candidateEcNode.ecNode + candidateEcNode.ecNode.deleteEcVolumeShards(vid, []uint32{uint32(shardId)}) + break + } + break + } + } + if selectedEcNodeIndex >= 0 { + ensureSortedEcNodes(candidateEcNodes, selectedEcNodeIndex, func(i, j int) bool { + return candidateEcNodes[i].shardCount > candidateEcNodes[j].shardCount + }) + } + + } + return picked +} + +func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode { + vidLocations := make(map[needle.VolumeId][]*EcNode) + for _, ecNode := range allEcNodes { + for _, shardInfo := range ecNode.info.EcShardInfos { + vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode) + } + } + return vidLocations +} diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go new file mode 100644 index 000000000..d0fe16a68 --- /dev/null +++ b/weed/shell/command_ec_common.go @@ -0,0 +1,336 @@ +package shell + +import ( + "context" + "fmt" + "math" + "sort" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + 
"github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" +) + +func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { + + copiedShardIds := []uint32{uint32(shardId)} + + if applyBalancing { + + // ask destination node to copy shard and the ecx file from source node, and mount it + copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, uint32(shardId), 1, vid, collection, existingLocation.info.Id) + if err != nil { + return err + } + + // unmount the to be deleted shards + err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) + if err != nil { + return err + } + + // ask source node to delete the shard, and maybe the ecx file + err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) + if err != nil { + return err + } + + fmt.Printf("moved ec shard %d.%d %s => %s\n", vid, shardId, existingLocation.info.Id, destinationEcNode.info.Id) + + } + + destinationEcNode.addEcVolumeShards(vid, collection, copiedShardIds) + existingLocation.deleteEcVolumeShards(vid, copiedShardIds) + + return nil + +} + +func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, + targetServer *EcNode, startFromShardId uint32, shardCount int, + volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) { + + var shardIdsToCopy []uint32 + for shardId := startFromShardId; shardId < startFromShardId+uint32(shardCount); shardId++ { + shardIdsToCopy = append(shardIdsToCopy, shardId) + } + fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) + + err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + + if targetServer.info.Id != existingLocation { + + fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) + _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: shardIdsToCopy, + CopyEcxFile: true, + SourceDataNode: existingLocation, + }) + if copyErr != nil { + return fmt.Errorf("copy %d.%v %s => %s : %v\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id, copyErr) + } + } + + fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id) + _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: shardIdsToCopy, + }) + if mountErr != nil { + return fmt.Errorf("mount %d.%v on %s : %v\n", volumeId, shardIdsToCopy, targetServer.info.Id, mountErr) + } + + if targetServer.info.Id != existingLocation { + copiedShardIds = shardIdsToCopy + glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds) + } + + return nil + }) + + if err != nil { + return + } + + return +} + +func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, dn *master_pb.DataNodeInfo)) { + for _, dc := range topo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, dn 
:= range rack.DataNodeInfos { + fn(dc.Id, RackId(rack.Id), dn) + } + } + } +} + +func sortEcNodes(ecNodes []*EcNode) { + sort.Slice(ecNodes, func(i, j int) bool { + return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot + }) +} + +type CandidateEcNode struct { + ecNode *EcNode + shardCount int +} + +// if the index node changed the freeEcSlot, need to keep every EcNode still sorted +func ensureSortedEcNodes(data []*CandidateEcNode, index int, lessThan func(i, j int) bool) { + for i := index - 1; i >= 0; i-- { + if lessThan(i+1, i) { + swap(data, i, i+1) + } else { + break + } + } + for i := index + 1; i < len(data); i++ { + if lessThan(i, i-1) { + swap(data, i, i-1) + } else { + break + } + } +} + +func swap(data []*CandidateEcNode, i, j int) { + t := data[i] + data[i] = data[j] + data[j] = t +} + +func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) { + for _, ecShardInfo := range ecShardInfos { + shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + count += shardBits.ShardIdCount() + } + return +} + +func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) { + return int(dn.FreeVolumeCount)*10 - countShards(dn.EcShardInfos) +} + +type RackId string +type EcNodeId string + +type EcNode struct { + info *master_pb.DataNodeInfo + dc string + rack RackId + freeEcSlot int +} + +type EcRack struct { + ecNodes map[EcNodeId]*EcNode + freeEcSlot int +} + +func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { + + // list all possible locations + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return nil, 0, err + } + + // find out all volume servers with one slot left. 
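countFreeShardSlots above treats each free volume slot as room for roughly ten EC shards: a 10+4 encoded volume yields 14 shards, each about one tenth of the original volume size, so a server that can still take one more volume can take about ten more shards. A minimal standalone sketch of that accounting and of the descending order used by sortEcNodes (hypothetical node type and numbers, not part of the patch):

    package main

    import (
        "fmt"
        "sort"
    )

    // Hypothetical stand-in for master_pb.DataNodeInfo plus its EC shard count.
    type node struct {
        id          string
        freeVolumes int // FreeVolumeCount
        ecShards    int // shards already hosted, i.e. countShards(dn.EcShardInfos)
    }

    // Mirrors countFreeShardSlots: every free volume slot counts as ~10 EC shard slots.
    func freeEcSlots(n node) int { return n.freeVolumes*10 - n.ecShards }

    func main() {
        nodes := []node{
            {"dn1", 7, 25}, // 7*10 - 25 = 45
            {"dn2", 2, 3},  // 2*10 - 3  = 17
            {"dn3", 10, 0}, // 100
        }
        // Like sortEcNodes: nodes with the most free EC slots come first.
        sort.Slice(nodes, func(i, j int) bool { return freeEcSlots(nodes[i]) > freeEcSlots(nodes[j]) })
        for _, n := range nodes {
            fmt.Println(n.id, freeEcSlots(n))
        }
    }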
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + if selectedDataCenter != "" && selectedDataCenter != dc { + return + } + if freeEcSlots := countFreeShardSlots(dn); freeEcSlots > 0 { + ecNodes = append(ecNodes, &EcNode{ + info: dn, + dc: dc, + rack: rack, + freeEcSlot: int(freeEcSlots), + }) + totalFreeEcSlots += freeEcSlots + } + }) + + sortEcNodes(ecNodes) + + return +} + +func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, + collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { + + fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation) + + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: toBeDeletedShardIds, + }) + return deleteErr + }) + +} + +func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, + volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { + + fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation) + + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{ + VolumeId: uint32(volumeId), + ShardIds: toBeUnmountedhardIds, + }) + return deleteErr + }) +} + +func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, + collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { + + fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation) + + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: toBeMountedhardIds, + }) + return mountErr + }) +} + +func ceilDivide(total, n int) int { + return int(math.Ceil(float64(total) / float64(n))) +} + +func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits { + + for _, shardInfo := range ecNode.info.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + return erasure_coding.ShardBits(shardInfo.EcIndexBits) + } + } + + return 0 +} + +func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode { + + foundVolume := false + for _, shardInfo := range ecNode.info.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) + newShardBits := oldShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId)) + } + shardInfo.EcIndexBits = uint32(newShardBits) + ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() + foundVolume = true + break + } + } + + if !foundVolume { + var newShardBits erasure_coding.ShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId)) + } + ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, 
&master_pb.VolumeEcShardInformationMessage{ + Id: uint32(vid), + Collection: collection, + EcIndexBits: uint32(newShardBits), + }) + ecNode.freeEcSlot -= len(shardIds) + } + + return ecNode +} + +func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode { + + for _, shardInfo := range ecNode.info.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) + newShardBits := oldShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId)) + } + shardInfo.EcIndexBits = uint32(newShardBits) + ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() + } + } + + return ecNode +} + +func groupByCount(data []*EcNode, identifierFn func(*EcNode) (id string, count int)) map[string]int { + countMap := make(map[string]int) + for _, d := range data { + id, count := identifierFn(d) + countMap[id] += count + } + return countMap +} + +func groupBy(data []*EcNode, identifierFn func(*EcNode) (id string)) map[string][]*EcNode { + groupMap := make(map[string][]*EcNode) + for _, d := range data { + id := identifierFn(d) + groupMap[id] = append(groupMap[id], d) + } + return groupMap +} diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go new file mode 100644 index 000000000..8ad0d51c8 --- /dev/null +++ b/weed/shell/command_ec_encode.go @@ -0,0 +1,289 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "google.golang.org/grpc" +) + +func init() { + Commands = append(Commands, &commandEcEncode{}) +} + +type commandEcEncode struct { +} + +func (c *commandEcEncode) Name() string { + return "ec.encode" +} + +func (c *commandEcEncode) Help() string { + return `apply erasure coding to a volume + + ec.encode [-collection=""] [-fullPercent=95] [-quietFor=1h] + ec.encode [-collection=""] [-volumeId=] + + This command will: + 1. freeze one volume + 2. apply erasure coding to the volume + 3. move the encoded shards to multiple volume servers + + The erasure coding is 10.4. So ideally you have more than 14 volume servers, and you can afford + to lose 4 volume servers. + + If the number of volumes are not high, the worst case is that you only have 4 volume servers, + and the shards are spread as 4,4,3,3, respectively. You can afford to lose one volume server. + + If you only have less than 4 volume servers, with erasure coding, at least you can afford to + have 4 corrupted shard files. 
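addEcVolumeShards and deleteEcVolumeShards above keep each volume's shard set as a bit mask (EcIndexBits), setting or clearing one bit per shard id and adjusting freeEcSlot by the change in bit count. A simplified standalone sketch of that bookkeeping, assuming a 32-bit mask with one bit per shard id (this is not the actual erasure_coding package):

    package main

    import (
        "fmt"
        "math/bits"
    )

    type shardBits uint32 // bit i set <=> shard i is present on this node

    func (b shardBits) add(id uint32) shardBits    { return b | (1 << id) }  // like AddShardId
    func (b shardBits) remove(id uint32) shardBits { return b &^ (1 << id) } // like RemoveShardId
    func (b shardBits) count() int                 { return bits.OnesCount32(uint32(b)) }

    func main() {
        var b shardBits
        for _, id := range []uint32{0, 1, 2, 3} {
            b = b.add(id)
        }
        fmt.Printf("bits=%014b shards=%d\n", b, b.count()) // 4 shards mounted
        b = b.remove(2)
        fmt.Printf("bits=%014b shards=%d\n", b, b.count()) // 3 shards left
    }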
+ +` +} + +func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := encodeCommand.Int("volumeId", 0, "the volume id") + collection := encodeCommand.String("collection", "", "the collection name") + fullPercentage := encodeCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size") + quietPeriod := encodeCommand.Duration("quietFor", time.Hour, "select volumes without no writes for this period") + if err = encodeCommand.Parse(args); err != nil { + return nil + } + + ctx := context.Background() + vid := needle.VolumeId(*volumeId) + + // volumeId is provided + if vid != 0 { + return doEcEncode(ctx, commandEnv, *collection, vid) + } + + // apply to all volumes in the collection + volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod) + if err != nil { + return err + } + fmt.Printf("ec encode volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doEcEncode(ctx, commandEnv, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { + // find volume location + locations := commandEnv.MasterClient.GetLocations(uint32(vid)) + if len(locations) == 0 { + return fmt.Errorf("volume %d not found", vid) + } + + // mark the volume as readonly + err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) + if err != nil { + return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) + } + + // generate ec shards + err = generateEcShards(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url) + if err != nil { + return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) + } + + // balance the ec shards to current cluster + err = spreadEcShards(ctx, commandEnv, vid, collection, locations) + if err != nil { + return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err) + } + + return nil +} + +func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { + + for _, location := range locations { + + err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{ + VolumeId: uint32(volumeId), + }) + return markErr + }) + + if err != nil { + return err + } + + } + + return nil +} + +func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { + + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + }) + return genErr + }) + + return err + +} + +func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { + + allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, "") + if err != nil 
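The -fullPercent and -quietFor flags parsed above drive the volume selection in collectVolumeIdsForEcEncode further down: a volume is picked for encoding only when it is close to the volume size limit and has had no writes for the quiet period. A small sketch of that rule with hypothetical numbers (not the patch's code):

    package main

    import (
        "fmt"
        "time"
    )

    // shouldEncode mirrors the predicate used when scanning the topology:
    // size above fullPercent% of the limit, and no modification for quietFor.
    func shouldEncode(sizeBytes, volumeSizeLimitMB uint64, fullPercent float64,
        modifiedAt time.Time, quietFor time.Duration, now time.Time) bool {
        fullEnough := float64(sizeBytes) > fullPercent/100*float64(volumeSizeLimitMB)*1024*1024
        quietEnough := now.Sub(modifiedAt) >= quietFor
        return fullEnough && quietEnough
    }

    func main() {
        now := time.Now()
        limitMB := uint64(30 * 1024) // assume a 30GB volume size limit

        fmt.Println(shouldEncode(29<<30, limitMB, 95, now.Add(-2*time.Hour), time.Hour, now))    // true
        fmt.Println(shouldEncode(10<<30, limitMB, 95, now.Add(-2*time.Hour), time.Hour, now))    // false: not full enough
        fmt.Println(shouldEncode(29<<30, limitMB, 95, now.Add(-10*time.Minute), time.Hour, now)) // false: written recently
    }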
{ + return err + } + + if totalFreeEcSlots < erasure_coding.TotalShardsCount { + return fmt.Errorf("not enough free ec shard slots. only %d left", totalFreeEcSlots) + } + allocatedDataNodes := allEcNodes + if len(allocatedDataNodes) > erasure_coding.TotalShardsCount { + allocatedDataNodes = allocatedDataNodes[:erasure_coding.TotalShardsCount] + } + + // calculate how many shards to allocate for these servers + allocated := balancedEcDistribution(allocatedDataNodes) + + // ask the data nodes to copy from the source volume server + copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocated, volumeId, collection, existingLocations[0]) + if err != nil { + return err + } + + // unmount the to be deleted shards + err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) + if err != nil { + return err + } + + // ask the source volume server to clean up copied ec shards + err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) + if err != nil { + return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err) + } + + // ask the source volume server to delete the original volume + for _, location := range existingLocations { + err = deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, location.Url) + if err != nil { + return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err) + } + } + + return err + +} + +func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, + targetServers []*EcNode, allocated []int, + volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { + + // parallelize + shardIdChan := make(chan []uint32, len(targetServers)) + var wg sync.WaitGroup + startFromShardId := uint32(0) + for i, server := range targetServers { + if allocated[i] <= 0 { + continue + } + + wg.Add(1) + go func(server *EcNode, startFromShardId uint32, shardCount int) { + defer wg.Done() + copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server, + startFromShardId, shardCount, volumeId, collection, existingLocation.Url) + if copyErr != nil { + err = copyErr + } else { + shardIdChan <- copiedShardIds + server.addEcVolumeShards(volumeId, collection, copiedShardIds) + } + }(server, startFromShardId, allocated[i]) + startFromShardId += uint32(allocated[i]) + } + wg.Wait() + close(shardIdChan) + + if err != nil { + return nil, err + } + + for shardIds := range shardIdChan { + actuallyCopied = append(actuallyCopied, shardIds...) 
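One caveat in parallelCopyEcShardsFromSource above: the shared err variable is assigned from several goroutines without synchronization, so two concurrent failures race with each other and only one of them is reported. A possible alternative, sketched here with golang.org/x/sync/errgroup and one result slot per goroutine (stand-in copy function and names, not the patch's API):

    package main

    import (
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    // copyShards is a stand-in for oneServerCopyAndMountEcShardsFromSource.
    func copyShards(server string, startShard uint32, count int) ([]uint32, error) {
        ids := make([]uint32, 0, count)
        for i := 0; i < count; i++ {
            ids = append(ids, startShard+uint32(i))
        }
        return ids, nil
    }

    func main() {
        servers := []string{"dn1", "dn2", "dn3"}
        allocated := []int{5, 5, 4}

        var g errgroup.Group
        results := make([][]uint32, len(servers)) // one slot per goroutine, no shared error variable
        start := uint32(0)
        for i, server := range servers {
            i, server, from, count := i, server, start, allocated[i]
            start += uint32(count)
            g.Go(func() error {
                ids, err := copyShards(server, from, count)
                if err != nil {
                    return fmt.Errorf("copy to %s: %v", server, err)
                }
                results[i] = ids
                return nil
            })
        }
        if err := g.Wait(); err != nil { // Wait joins all goroutines and returns the first error
            fmt.Println("copy failed:", err)
            return
        }
        fmt.Println(results)
    }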
+ } + + return +} + +func balancedEcDistribution(servers []*EcNode) (allocated []int) { + allocated = make([]int, len(servers)) + allocatedCount := 0 + for allocatedCount < erasure_coding.TotalShardsCount { + for i, server := range servers { + if server.freeEcSlot-allocated[i] > 0 { + allocated[i] += 1 + allocatedCount += 1 + } + if allocatedCount >= erasure_coding.TotalShardsCount { + break + } + } + } + + return allocated +} + +func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { + + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return + } + + quietSeconds := int64(quietPeriod / time.Second) + nowUnixSeconds := time.Now().Unix() + + fmt.Printf("ec encode volumes quiet for: %d seconds\n", quietSeconds) + + vidMap := make(map[uint32]bool) + eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.VolumeInfos { + if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds { + if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 { + vidMap[v.Id] = true + } + } + } + }) + + for vid, _ := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go new file mode 100644 index 000000000..63b7c4088 --- /dev/null +++ b/weed/shell/command_ec_rebuild.go @@ -0,0 +1,268 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" +) + +func init() { + Commands = append(Commands, &commandEcRebuild{}) +} + +type commandEcRebuild struct { +} + +func (c *commandEcRebuild) Name() string { + return "ec.rebuild" +} + +func (c *commandEcRebuild) Help() string { + return `find and rebuild missing ec shards among volume servers + + ec.rebuild [-c EACH_COLLECTION|] [-force] + + Algorithm: + + For each type of volume server (different max volume count limit){ + for each collection { + rebuildEcVolumes() + } + } + + func rebuildEcVolumes(){ + idealWritableVolumes = totalWritableVolumes / numVolumeServers + for { + sort all volume servers ordered by the number of local writable volumes + pick the volume server A with the lowest number of writable volumes x + pick the volume server B with the highest number of writable volumes y + if y > idealWritableVolumes and x +1 <= idealWritableVolumes { + if B has a writable volume id v that A does not have { + move writable volume v from A to B + } + } + } + } + +` +} + +func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + fixCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection") + applyChanges := fixCommand.Bool("force", false, "apply the changes") + if err = fixCommand.Parse(args); err != nil { + return nil + } + + // collect all ec nodes + allEcNodes, _, err := collectEcNodes(context.Background(), 
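balancedEcDistribution above hands out the 14 shards round-robin, one shard per pass to every server that still has free slots, which is what produces the 4,4,3,3 spread mentioned in the ec.encode help text when only four servers are available. The same loop over hypothetical capacities (it assumes total capacity is sufficient, which spreadEcShards has already verified):

    package main

    import "fmt"

    func main() {
        free := []int{3, 5, 10, 200} // free EC slots per candidate server
        const totalShards = 14       // 10 data + 4 parity

        allocated := make([]int, len(free))
        count := 0
        for count < totalShards {
            for i := range free {
                if free[i]-allocated[i] > 0 {
                    allocated[i]++
                    count++
                }
                if count >= totalShards {
                    break
                }
            }
        }
        fmt.Println(allocated) // [3 4 4 3]: the first server runs out of slots after three shards
    }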
commandEnv, "") + if err != nil { + return err + } + + if *collection == "EACH_COLLECTION" { + collections, err := ListCollectionNames(commandEnv, false, true) + if err != nil { + return err + } + fmt.Printf("rebuildEcVolumes collections %+v\n", len(collections)) + for _, c := range collections { + fmt.Printf("rebuildEcVolumes collection %+v\n", c) + if err = rebuildEcVolumes(commandEnv, allEcNodes, c, writer, *applyChanges); err != nil { + return err + } + } + } else { + if err = rebuildEcVolumes(commandEnv, allEcNodes, *collection, writer, *applyChanges); err != nil { + return err + } + } + + return nil +} + +func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error { + + ctx := context.Background() + + fmt.Printf("rebuildEcVolumes %s\n", collection) + + // collect vid => each shard locations, similar to ecShardMap in topology.go + ecShardMap := make(EcShardMap) + for _, ecNode := range allEcNodes { + ecShardMap.registerEcNode(ecNode, collection) + } + + for vid, locations := range ecShardMap { + shardCount := locations.shardCount() + if shardCount == erasure_coding.TotalShardsCount { + continue + } + if shardCount < erasure_coding.DataShardsCount { + return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount) + } + + sortEcNodes(allEcNodes) + + if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount { + return fmt.Errorf("disk space is not enough") + } + + if err := rebuildOneEcVolume(ctx, commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { + return err + } + } + + return nil +} + +func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { + + fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId) + + // collect shard files to rebuilder local disk + var generatedShardIds []uint32 + copiedShardIds, _, err := prepareDataToRecover(ctx, commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) + if err != nil { + return err + } + defer func() { + // clean up working files + + // ask the rebuilder to delete the copied shards + err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) + if err != nil { + fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds) + } + + }() + + if !applyChanges { + return nil + } + + // generate ec shards, and maybe ecx file + generatedShardIds, err = generateMissingShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) + if err != nil { + return err + } + + // mount the generated shards + err = mountEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) + if err != nil { + return err + } + + rebuilder.addEcVolumeShards(volumeId, collection, generatedShardIds) + + return nil +} + +func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, + collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { + + err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{ + VolumeId: 
uint32(volumeId), + Collection: collection, + }) + if rebultErr == nil { + rebuiltShardIds = resp.RebuiltShardIds + } + return rebultErr + }) + return +} + +func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { + + needEcxFile := true + var localShardBits erasure_coding.ShardBits + for _, ecShardInfo := range rebuilder.info.EcShardInfos { + if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId { + needEcxFile = false + localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + } + } + + for shardId, ecNodes := range locations { + + if len(ecNodes) == 0 { + fmt.Fprintf(writer, "missing shard %d.%d\n", volumeId, shardId) + continue + } + + if localShardBits.HasShardId(erasure_coding.ShardId(shardId)) { + localShardIds = append(localShardIds, uint32(shardId)) + fmt.Fprintf(writer, "use existing shard %d.%d\n", volumeId, shardId) + continue + } + + var copyErr error + if applyBalancing { + copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: []uint32{uint32(shardId)}, + CopyEcxFile: needEcxFile, + SourceDataNode: ecNodes[0].info.Id, + }) + return copyErr + }) + if copyErr == nil && needEcxFile { + needEcxFile = false + } + } + if copyErr != nil { + fmt.Fprintf(writer, "%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id, copyErr) + } else { + fmt.Fprintf(writer, "%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id) + copiedShardIds = append(copiedShardIds, uint32(shardId)) + } + + } + + if len(copiedShardIds)+len(localShardIds) >= erasure_coding.DataShardsCount { + return copiedShardIds, localShardIds, nil + } + + return nil, nil, fmt.Errorf("%d shards are not enough to recover volume %d", len(copiedShardIds)+len(localShardIds), volumeId) + +} + +type EcShardMap map[needle.VolumeId]EcShardLocations +type EcShardLocations [][]*EcNode + +func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) { + for _, shardInfo := range ecNode.info.EcShardInfos { + if shardInfo.Collection == collection { + existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)] + if !found { + existing = make([][]*EcNode, erasure_coding.TotalShardsCount) + ecShardMap[needle.VolumeId(shardInfo.Id)] = existing + } + for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() { + existing[shardId] = append(existing[shardId], ecNode) + } + } + } +} + +func (ecShardLocations EcShardLocations) shardCount() (count int) { + for _, locations := range ecShardLocations { + if len(locations) > 0 { + count++ + } + } + return +} diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go new file mode 100644 index 000000000..9e578ed28 --- /dev/null +++ b/weed/shell/command_ec_test.go @@ -0,0 +1,127 @@ +package shell + +import ( + "context" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func TestCommandEcBalanceSmall(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", 
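EcShardLocations above keeps, for every EC volume, one slice of holding nodes per shard id; shardCount counts the slots that have at least one holder, and rebuildEcVolumes treats a volume as repairable only when that count is at least DataShardsCount (10 of the 14 shards). A standalone sketch of that index with hypothetical node ids:

    package main

    import "fmt"

    const totalShards = 14 // 10 data + 4 parity

    // One slice of holder node ids per shard id, like EcShardLocations.
    type shardLocations [totalShards][]string

    // shardCount reports how many distinct shard ids are present somewhere.
    func (l shardLocations) shardCount() (count int) {
        for _, holders := range l {
            if len(holders) > 0 {
                count++
            }
        }
        return
    }

    func main() {
        var locs shardLocations
        for shardID := 0; shardID < 12; shardID++ { // shards 12 and 13 are missing
            locs[shardID] = append(locs[shardID], "dn1")
        }
        locs[3] = append(locs[3], "dn2") // a shard may have more than one holder
        fmt.Println("present shards:", locs.shardCount()) // 12, so the volume is still repairable (>= 10)
    }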
"dn1", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack2", "dn2", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceNothingToMove(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack1", "dn2", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceAddNewServers(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack1", "dn2", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}), + newEcNode("dc1", "rack1", "dn3", 100), + newEcNode("dc1", "rack1", "dn4", 100), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceAddNewRacks(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack1", "dn2", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}), + newEcNode("dc1", "rack2", "dn3", 100), + newEcNode("dc1", "rack2", "dn4", 100), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn_shared", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0}). 
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{0}), + + newEcNode("dc1", "rack1", "dn_a1", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{1}), + newEcNode("dc1", "rack1", "dn_a2", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{2}), + newEcNode("dc1", "rack1", "dn_a3", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{3}), + newEcNode("dc1", "rack1", "dn_a4", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{4}), + newEcNode("dc1", "rack1", "dn_a5", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{5}), + newEcNode("dc1", "rack1", "dn_a6", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{6}), + newEcNode("dc1", "rack1", "dn_a7", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{7}), + newEcNode("dc1", "rack1", "dn_a8", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{8}), + newEcNode("dc1", "rack1", "dn_a9", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{9}), + newEcNode("dc1", "rack1", "dn_a10", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{10}), + newEcNode("dc1", "rack1", "dn_a11", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{11}), + newEcNode("dc1", "rack1", "dn_a12", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{12}), + newEcNode("dc1", "rack1", "dn_a13", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{13}), + + newEcNode("dc1", "rack1", "dn_b1", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{1}), + newEcNode("dc1", "rack1", "dn_b2", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{2}), + newEcNode("dc1", "rack1", "dn_b3", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{3}), + newEcNode("dc1", "rack1", "dn_b4", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{4}), + newEcNode("dc1", "rack1", "dn_b5", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{5}), + newEcNode("dc1", "rack1", "dn_b6", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{6}), + newEcNode("dc1", "rack1", "dn_b7", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{7}), + newEcNode("dc1", "rack1", "dn_b8", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{8}), + newEcNode("dc1", "rack1", "dn_b9", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{9}), + newEcNode("dc1", "rack1", "dn_b10", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{10}), + newEcNode("dc1", "rack1", "dn_b11", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{11}), + newEcNode("dc1", "rack1", "dn_b12", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{12}), + newEcNode("dc1", "rack1", "dn_b13", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{13}), + + newEcNode("dc1", "rack1", "dn3", 100), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) + balanceEcRacks(context.Background(), nil, racks, false) +} + +func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode { + return &EcNode{ + info: &master_pb.DataNodeInfo{ + Id: dataNodeId, + }, + dc: dc, + rack: RackId(rack), + freeEcSlot: freeEcSlot, + } +} + +func (ecNode *EcNode) addEcVolumeAndShardsForTest(vid uint32, collection string, shardIds []uint32) *EcNode { + return ecNode.addEcVolumeShards(needle.VolumeId(vid), collection, shardIds) +} diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go new file mode 100644 index 000000000..66ced46c5 --- /dev/null +++ b/weed/shell/command_fs_cat.go @@ -0,0 +1,68 @@ +package shell + +import ( + "context" + "fmt" + "io" + "math" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandFsCat{}) +} + +type commandFsCat 
struct { +} + +func (c *commandFsCat) Name() string { + return "fs.cat" +} + +func (c *commandFsCat) Help() string { + return `stream the file content on to the screen + + fs.cat /dir/ + fs.cat /dir/file_name + fs.cat /dir/file_prefix + fs.cat http://:/dir/ + fs.cat http://:/dir/file_name + fs.cat http://:/dir/file_prefix +` +} + +func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + input := findInputDirectory(args) + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) + if err != nil { + return err + } + + ctx := context.Background() + + if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + return fmt.Errorf("%s is a directory", path) + } + + dir, name := filer2.FullPath(path).DirAndName() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + if err != nil { + return err + } + + return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32) + + }) + +} diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go new file mode 100644 index 000000000..408ec86c8 --- /dev/null +++ b/weed/shell/command_fs_cd.go @@ -0,0 +1,59 @@ +package shell + +import ( + "context" + "io" +) + +func init() { + Commands = append(Commands, &commandFsCd{}) +} + +type commandFsCd struct { +} + +func (c *commandFsCd) Name() string { + return "fs.cd" +} + +func (c *commandFsCd) Help() string { + return `change directory to http://:/dir/ + + The full path can be too long to type. For example, + fs.ls http://:/some/path/to/file_name + + can be simplified as + + fs.cd http://:/some/path + fs.ls to/file_name +` +} + +func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + input := findInputDirectory(args) + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) + if err != nil { + return err + } + + if path == "/" { + commandEnv.option.FilerHost = filerServer + commandEnv.option.FilerPort = filerPort + commandEnv.option.Directory = "/" + return nil + } + + ctx := context.Background() + + err = commandEnv.checkDirectory(ctx, filerServer, filerPort, path) + + if err == nil { + commandEnv.option.FilerHost = filerServer + commandEnv.option.FilerPort = filerPort + commandEnv.option.Directory = path + } + + return err +} diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go new file mode 100644 index 000000000..5e634c82a --- /dev/null +++ b/weed/shell/command_fs_du.go @@ -0,0 +1,117 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + "io" +) + +func init() { + Commands = append(Commands, &commandFsDu{}) +} + +type commandFsDu struct { +} + +func (c *commandFsDu) Name() string { + return "fs.du" +} + +func (c *commandFsDu) Help() string { + return `show disk usage + + fs.du http://:/dir + fs.du http://:/dir/file_name + fs.du http://:/dir/file_prefix +` +} + +func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + ctx := context.Background() + + if commandEnv.isDirectory(ctx, 
filerServer, filerPort, path) { + path = path + "/" + } + + dir, name := filer2.FullPath(path).DirAndName() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + _, _, err = paginateDirectory(ctx, writer, client, dir, name, 1000) + + return err + + }) + +} + +func paginateDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int) (blockCount uint64, byteCount uint64, err error) { + + paginatedCount := -1 + startFromFileName := "" + + for paginatedCount == -1 || paginatedCount == paginateSize { + resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: name, + StartFromFileName: startFromFileName, + InclusiveStartFrom: false, + Limit: uint32(paginateSize), + }) + if listErr != nil { + err = listErr + return + } + + paginatedCount = len(resp.Entries) + + for _, entry := range resp.Entries { + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name + } + numBlock, numByte, err := paginateDirectory(ctx, writer, client, subDir, "", paginateSize) + if err == nil { + blockCount += numBlock + byteCount += numByte + } + } else { + blockCount += uint64(len(entry.Chunks)) + byteCount += filer2.TotalSize(entry.Chunks) + } + startFromFileName = entry.Name + + if name != "" && !entry.IsDirectory { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + } + } + } + + if name == "" { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) + } + + return + +} + +func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) + return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, env.option.GrpcDialOption) + +} diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go new file mode 100644 index 000000000..6979635e1 --- /dev/null +++ b/weed/shell/command_fs_ls.go @@ -0,0 +1,148 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "io" + "os" + "os/user" + "strconv" + "strings" +) + +func init() { + Commands = append(Commands, &commandFsLs{}) +} + +type commandFsLs struct { +} + +func (c *commandFsLs) Name() string { + return "fs.ls" +} + +func (c *commandFsLs) Help() string { + return `list all files under a directory + + fs.ls [-l] [-a] /dir/ + fs.ls [-l] [-a] /dir/file_name + fs.ls [-l] [-a] /dir/file_prefix + fs.ls [-l] [-a] http://:/dir/ + fs.ls [-l] [-a] http://:/dir/file_name + fs.ls [-l] [-a] http://:/dir/file_prefix +` +} + +func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + var isLongFormat, showHidden bool + for _, arg := range args { + if !strings.HasPrefix(arg, "-") { + break + } + for _, t := range arg { + switch t { + case 'a': + showHidden = true + case 'l': + isLongFormat = true + } + } + } + + input := findInputDirectory(args) + + filerServer, filerPort, path, err := commandEnv.parseUrl(input) + if err != nil { + return err + } + + ctx := context.Background() + + if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + path = path + "/" + } + + dir, 
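paginateDirectory above (and paginateOneDirectory in fs.ls below) walk a directory in pages: request up to paginateSize entries starting after the last name seen, and stop as soon as a page comes back shorter than the page size. A standalone sketch of that loop against an in-memory list (mock lister, not the filer ListEntries API):

    package main

    import "fmt"

    // listPage returns up to limit names that come after startAfter,
    // standing in for a paged ListEntries call.
    func listPage(all []string, startAfter string, limit int) []string {
        var out []string
        started := startAfter == ""
        for _, name := range all {
            if started {
                out = append(out, name)
                if len(out) == limit {
                    break
                }
            }
            if name == startAfter {
                started = true
            }
        }
        return out
    }

    func main() {
        entries := []string{"a", "b", "c", "d", "e", "f", "g"}
        const paginateSize = 3

        startFrom := ""
        lastPage := -1
        for lastPage == -1 || lastPage == paginateSize { // same stop condition as paginateDirectory
            page := listPage(entries, startFrom, paginateSize)
            lastPage = len(page)
            for _, name := range page {
                fmt.Println(name)
                startFrom = name // resume after the last entry seen
            }
        }
    }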
name := filer2.FullPath(path).DirAndName() + + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + + return paginateOneDirectory(ctx, writer, client, dir, name, 1000, isLongFormat, showHidden) + + }) + +} + +func paginateOneDirectory(ctx context.Context, writer io.Writer, client filer_pb.SeaweedFilerClient, dir, name string, paginateSize int, isLongFormat, showHidden bool) (err error) { + + entryCount := 0 + paginatedCount := -1 + startFromFileName := "" + + for paginatedCount == -1 || paginatedCount == paginateSize { + resp, listErr := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: name, + StartFromFileName: startFromFileName, + InclusiveStartFrom: false, + Limit: uint32(paginateSize), + }) + if listErr != nil { + err = listErr + return + } + + paginatedCount = len(resp.Entries) + + for _, entry := range resp.Entries { + + if !showHidden && strings.HasPrefix(entry.Name, ".") { + continue + } + + entryCount++ + + if isLongFormat { + fileMode := os.FileMode(entry.Attributes.FileMode) + userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName + if userName == "" { + if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { + userName = user.Username + } + } + groupName := "" + if len(groupNames) > 0 { + groupName = groupNames[0] + } + if groupName == "" { + if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { + groupName = group.Name + } + } + + if dir == "/" { + // just for printing + dir = "" + } + fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", + fileMode, len(entry.Chunks), + userName, groupName, + filer2.TotalSize(entry.Chunks), dir, entry.Name) + } else { + fmt.Fprintf(writer, "%s\n", entry.Name) + } + + startFromFileName = entry.Name + + } + } + + if isLongFormat { + fmt.Fprintf(writer, "total %d\n", entryCount) + } + + return + +} diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go new file mode 100644 index 000000000..5ea8de9f5 --- /dev/null +++ b/weed/shell/command_fs_meta_load.go @@ -0,0 +1,108 @@ +package shell + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" +) + +func init() { + Commands = append(Commands, &commandFsMetaLoad{}) +} + +type commandFsMetaLoad struct { +} + +func (c *commandFsMetaLoad) Name() string { + return "fs.meta.load" +} + +func (c *commandFsMetaLoad) Help() string { + return `load saved filer meta data to restore the directory and file structure + + fs.meta.load --