diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0681f3dac..dba704800 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -40,6 +40,7 @@ jobs: goarch: ${{ matrix.goarch }} release_tag: dev overwrite: true + pre_command: export CGO_ENABLED=0 build_flags: -tags 5BytesOffset # optional, default is ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` @@ -55,8 +56,9 @@ jobs: goarch: ${{ matrix.goarch }} release_tag: dev overwrite: true + pre_command: export CGO_ENABLED=0 ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed - binary_name: weed- + binary_name: weed asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" diff --git a/README.md b/README.md index 58885eb61..e463092bc 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed) [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki) [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/) +[![SeaweedFS on Maven Central](https://img.shields.io/maven-central/v/com.github.chrislusf/seaweedfs-client)](https://search.maven.org/search?q=g:com.github.chrislusf) ![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png) @@ -42,6 +43,7 @@ Your support will be really appreciated by me and other supporters! - [SeaweedFS on Twitter](https://twitter.com/SeaweedFS) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) +- [SeaweedFS White Paper](https://github.com/chrislusf/seaweedfs/wiki/SeaweedFS_Architecture.pdf) - [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction) Table of Contents @@ -70,7 +72,7 @@ Table of Contents * Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe` * Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway. -Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver=":9333" -port=8081` locally, or on a different machine, or on thoudsands of machines. That is it! +Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver=":9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it! ## Introduction ## @@ -79,17 +81,34 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two 1. to store billions of files! 2. to serve the files fast! -SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages volumes on volume servers, and these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation). +SeaweedFS started as an Object Store to handle small files efficiently. 
+Instead of managing all file metadata in a central master, +the central master only manages volumes on volume servers, +and these volume servers manage files and their metadata. +This relieves concurrency pressure from the central master and spreads file metadata into volume servers, +allowing faster file access (O(1), usually just one disk read operation). -SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity. What's more, the cloud storage access API cost is minimized. Faster and Cheaper than direct cloud storage! +SeaweedFS can transparently integrate with the cloud. +With hot data on local cluster, and warm data on the cloud with O(1) access time, +SeaweedFS can achieve both fast local access time and elastic cloud storage capacity. +What's more, the cloud storage access API cost is minimized. +Faster and Cheaper than direct cloud storage! +Signup for future managed SeaweedFS cluster offering at "seaweedfilesystem at gmail dot com". -There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. +There is only 40 bytes of disk storage overhead for each file's metadata. +It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. -SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf) +SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). +Also, SeaweedFS implements erasure coding with ideas from +[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf) -On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, MemSql, TiDB, Etcd, CockroachDB, etc. +On top of the object store, optional [Filer] can support directories and POSIX attributes. +Filer is a separate linearly-scalable stateless server with customizable metadata stores, +e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, MemSql, TiDB, Etcd, CockroachDB, etc. -For any distributed key value stores, the large values can be offloaded to SeaweedFS. With the fast access speed and linearly scalable capacity, SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore]. +For any distributed key value stores, the large values can be offloaded to SeaweedFS. +With the fast access speed and linearly scalable capacity, +SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore]. [Back to TOC](#table-of-contents) @@ -105,6 +124,7 @@ For any distributed key value stores, the large values can be offloaded to Seawe * Support ETag, Accept-Range, Last-Modified, etc. 
* Support in-memory/leveldb/readonly mode tuning for memory/performance balance. * Support rebalancing the writable and readonly volumes. +* [Customizable Multiple Storage Tiers][TieredStorage]: Customizable storage disk types to balance performance and cost. * [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data. * [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. @@ -135,6 +155,7 @@ For any distributed key value stores, the large values can be offloaded to Seawe [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System [WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage +[TieredStorage]: https://github.com/chrislusf/seaweedfs/wiki/Tiered-Storage [CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier [FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption [FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores diff --git a/backers.md b/backers.md index 725615237..dd8c69e67 100644 --- a/backers.md +++ b/backers.md @@ -1,3 +1,4 @@ +

<h1 align="center">Sponsors &amp; Backers</h1>

- [Become a backer or sponsor on Patreon](https://www.patreon.com/seaweedfs). @@ -12,3 +13,4 @@ - [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/) - [Haravan - Ecommerce Platform](https://www.haravan.com) +- PeterCxy - Creator of Shelter App diff --git a/docker/Dockerfile b/docker/Dockerfile index 6309e7e2e..2165466ca 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,6 +1,7 @@ FROM alpine -ARG RELEASE=latest # 'latest' or 'dev' +# 'latest' or 'dev' +ARG RELEASE=latest RUN \ ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \ diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index 64105ee29..1adf0f5ef 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -6,7 +6,7 @@ ARG BRANCH=${BRANCH:-master} RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ - && go install -ldflags "${LDFLAGS}" + && CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}" FROM alpine AS final LABEL author="Chris Lu" diff --git a/docker/Dockerfile.go_build_large b/docker/Dockerfile.go_build_large index 39f45cbde..48af3381d 100644 --- a/docker/Dockerfile.go_build_large +++ b/docker/Dockerfile.go_build_large @@ -6,7 +6,7 @@ ARG BRANCH=${BRANCH:-master} RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ - && go install -tags 5BytesOffset -ldflags "${LDFLAGS}" + && CGO_ENABLED=0 go install -tags 5BytesOffset -ldflags "-extldflags -static ${LDFLAGS}" FROM alpine AS final LABEL author="Chris Lu" diff --git a/docker/Dockerfile.s3tests b/docker/Dockerfile.s3tests index 18d57fad1..5b6d762de 100644 --- a/docker/Dockerfile.s3tests +++ b/docker/Dockerfile.s3tests @@ -28,4 +28,4 @@ ENV \ S3TEST_CONF="/s3test.conf" ENTRYPOINT ["/bin/bash", "-c"] -CMD ["exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"] \ No newline at end of file +CMD ["sleep 10 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"] \ No newline at end of file diff --git a/docker/Makefile b/docker/Makefile index 67ee9acdf..a933956b7 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -4,8 +4,10 @@ all: gen gen: dev -build: - cd ../weed; GOOS=linux go build; mv weed ../docker/ +binary: + cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static"; mv weed ../docker/ + +build: binary docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local . 
rm ./weed @@ -15,9 +17,15 @@ s3tests_build: dev: build docker-compose -f compose/local-dev-compose.yml -p seaweedfs up +dev_tls: build certstrap + ENV_FILE="tls.env" docker-compose -f compose/local-dev-compose.yml -p seaweedfs up + dev_mount: build docker-compose -f compose/local-mount-compose.yml -p seaweedfs up +profile_mount: build + docker-compose -f compose/local-mount-profile-compose.yml -p seaweedfs up + k8s: build docker-compose -f compose/local-k8s-compose.yml -p seaweedfs up @@ -41,3 +49,15 @@ filer_etcd: build clean: rm ./weed + +certstrap: + go get github.com/square/certstrap + certstrap --depot-path compose/tls init --passphrase "" --common-name "SeaweedFS CA" || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name volume01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name master01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name filer01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name client01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" client01.dev || true \ No newline at end of file diff --git a/docker/compose/dev.env b/docker/compose/dev.env new file mode 100644 index 000000000..e69de29bb diff --git a/docker/compose/local-clusters-compose.yml b/docker/compose/local-clusters-compose.yml index 7bd16aa3f..f9e9a1589 100644 --- a/docker/compose/local-clusters-compose.yml +++ b/docker/compose/local-clusters-compose.yml @@ -11,6 +11,10 @@ services: - 8888:8888 - 18888:18888 command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1" + volumes: + - ./master-cloud.toml:/etc/seaweedfs/master.toml + depends_on: + - server2 server2: image: chrislusf/seaweedfs:local ports: diff --git a/docker/compose/local-dev-compose.yml b/docker/compose/local-dev-compose.yml index 18cccab3e..01d0594a6 100644 --- a/docker/compose/local-dev-compose.yml +++ b/docker/compose/local-dev-compose.yml @@ -6,33 +6,50 @@ services: ports: - 9333:9333 - 19333:19333 - command: "master -ip=master" + command: "-v=1 master -ip=master" + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} volume: image: chrislusf/seaweedfs:local ports: - 8080:8080 - 18080:18080 - command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1" + command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1" depends_on: - master + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} filer: image: chrislusf/seaweedfs:local ports: + - 8111:8111 - 8888:8888 - 18888:18888 - command: 'filer -master="master:9333"' + command: '-v=1 filer -master="master:9333" -iam' depends_on: - master - volume + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} s3: image: chrislusf/seaweedfs:local ports: - 8333:8333 - command: 's3 -filer="filer:8888"' + command: '-v=1 s3 -filer="filer:8888"' depends_on: - master - volume - filer + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} mount: image: chrislusf/seaweedfs:local privileged: true @@ -40,6 +57,10 @@ services: - SYS_ADMIN mem_limit: 4096m command: '-v=4 mount -filer="filer:8888" -dirAutoCreate 
-dir=/mnt/seaweedfs -cacheCapacityMB=100 -concurrentWriters=128' + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} depends_on: - master - volume diff --git a/docker/compose/local-minio-gateway-compose.yml b/docker/compose/local-minio-gateway-compose.yml new file mode 100644 index 000000000..fafee59c8 --- /dev/null +++ b/docker/compose/local-minio-gateway-compose.yml @@ -0,0 +1,50 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master -volumeSizeLimitMB=1024" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1" + depends_on: + - master + s3: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + - 8333:8333 + command: '-v 1 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333' + volumes: + - ./s3.json:/etc/seaweedfs/s3.json + depends_on: + - master + - volume + minio-gateway-s3: + image: minio/minio + ports: + - 9000:9000 + command: 'minio gateway s3 http://s3:8333' + restart: on-failure + environment: + MINIO_ACCESS_KEY: "some_access_key1" + MINIO_SECRET_KEY: "some_secret_key1" + depends_on: + - s3 + minio-warp: + image: minio/warp + command: 'mixed --duration=5m --obj.size=3mb --autoterm' + restart: on-failure + environment: + WARP_HOST: "minio-gateway-s3:9000" + WARP_ACCESS_KEY: "some_access_key1" + WARP_SECRET_KEY: "some_secret_key1" + depends_on: + - minio-gateway-s3 \ No newline at end of file diff --git a/docker/compose/local-mount-profile-compose.yml b/docker/compose/local-mount-profile-compose.yml new file mode 100644 index 000000000..4682591c4 --- /dev/null +++ b/docker/compose/local-mount-profile-compose.yml @@ -0,0 +1,47 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:local + ports: + - 7455:8080 + - 9325:9325 + volumes: + - /Volumes/mobile_disk/99:/data + command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + - 9326:9326 + volumes: + - /Volumes/mobile_disk/99:/data + command: 'filer -master="master:9333" -metricsPort=9326' + tty: true + stdin_open: true + depends_on: + - master + - volume + mount: + image: chrislusf/seaweedfs:local + privileged: true + cap_add: + - SYS_ADMIN + devices: + - fuse + volumes: + - /Volumes/mobile_disk/99:/data + entrypoint: '/bin/sh -c "mkdir -p t1 && weed mount -filer=filer:8888 -dir=./t1 -cacheCapacityMB=0 -memprofile=/data/mount.mem.pprof"' + depends_on: + - master + - volume + - filer diff --git a/docker/compose/local-registry-compose.yml b/docker/compose/local-registry-compose.yml index 7f3672cd0..b61278d66 100644 --- a/docker/compose/local-registry-compose.yml +++ b/docker/compose/local-registry-compose.yml @@ -15,24 +15,18 @@ services: command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1" depends_on: - master - filer: + s3: image: chrislusf/seaweedfs:local ports: - 8888:8888 - 18888:18888 - command: 'filer -master="master:9333"' - depends_on: - - master - - volume - s3: - image: chrislusf/seaweedfs:local - ports: - 8333:8333 - command: '-v 9 s3 -filer="filer:8888"' + command: '-v 9 filer -master="master:9333" -s3 
-s3.config=/etc/seaweedfs/s3.json -s3.port=8333' + volumes: + - ./s3.json:/etc/seaweedfs/s3.json depends_on: - master - volume - - filer minio: image: minio/minio ports: diff --git a/docker/compose/local-s3tests-compose.yml b/docker/compose/local-s3tests-compose.yml index f78dfb3ad..381e3eb97 100644 --- a/docker/compose/local-s3tests-compose.yml +++ b/docker/compose/local-s3tests-compose.yml @@ -38,7 +38,7 @@ services: S3TEST_CONF: "s3tests.conf" NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3" NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy" - NOSETESTS_EXCLUDE: "(bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_delimiter_not_skip_special|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_head_zero_bytes|object_write_cache_control|object_write_expires|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifmatch_failed|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_authenticated|object_raw_response_headers|object_raw_authenticated_bucket_acl|object_raw_authenticated_object_acl|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_not_expired|object_raw_get_x_amz_expires_out_max_range|o
bject_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_create_exists|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|bucket_create_special_key_names|object_copy_zero_size|object_copy_verify_contenttype|object_copy_to_itself|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)" + NOSETESTS_EXCLUDE: "(bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_write_cache_control|object_write_expires|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_spe
cified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifmatch_failed|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)" depends_on: - master - volume diff --git a/docker/compose/master-cloud.toml b/docker/compose/master-cloud.toml new file mode 100644 index 000000000..17289c114 --- /dev/null +++ b/docker/compose/master-cloud.toml @@ -0,0 +1,30 @@ + +# Put this file to one of the location, with descending priority +# ./master.toml +# $HOME/.seaweedfs/master.toml +# /etc/seaweedfs/master.toml +# this file is read by master + +[master.maintenance] +# periodically run these scripts are the same as running them from 'weed shell' +scripts = """ + lock + ec.encode -fullPercent=95 -quietFor=1h + ec.rebuild -force + ec.balance -force + volume.balance -force + volume.fix.replication + unlock +""" +sleep_minutes = 17 # sleep minutes between each script execution + +# configurations for tiered cloud storage +# old volumes are transparently moved to cloud for cost efficiency +[storage.backend] + [storage.backend.s3.default] + enabled = true + aws_access_key_id = "any" # if empty, loads from the shared credentials file (~/.aws/credentials). + aws_secret_access_key = "any" # if empty, loads from the shared credentials file (~/.aws/credentials). 
+ region = "us-east-2" + bucket = "volume_bucket" # an existing bucket + endpoint = "http://server2:8333" diff --git a/docker/compose/tls.env b/docker/compose/tls.env new file mode 100644 index 000000000..a82954c4f --- /dev/null +++ b/docker/compose/tls.env @@ -0,0 +1,14 @@ +WEED_GRPC_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt +WEED_GRPC_ALLOWED_WILDCARD_DOMAIN=".dev" +WEED_GRPC_MASTER_CERT=/etc/seaweedfs/tls/master01.dev.crt +WEED_GRPC_MASTER_KEY=/etc/seaweedfs/tls/master01.dev.key +WEED_GRPC_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt +WEED_GRPC_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key +WEED_GRPC_FILER_CERT=/etc/seaweedfs/tls/filer01.dev.crt +WEED_GRPC_FILER_KEY=/etc/seaweedfs/tls/filer01.dev.key +WEED_GRPC_CLIENT_CERT=/etc/seaweedfs/tls/client01.dev.crt +WEED_GRPC_CLIENT_KEY=/etc/seaweedfs/tls/client01.dev.key +WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" +WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" +WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" +WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" \ No newline at end of file diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 5a858d993..a5a240575 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -60,9 +60,9 @@ case "$1" in 'cronjob') MASTER=${WEED_MASTER-localhost:9333} FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *} - echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab + echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "lock; volume.fix.replication; unlock" | weed shell -master='$MASTER > /crontab BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *} - echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab + echo "$BALANCING_CRON_SCHEDULE" 'echo "lock; volume.balance -collection ALL_COLLECTIONS -force; unlock" | weed shell -master='$MASTER >> /crontab echo "Running Crontab:" cat /crontab exec supercronic /crontab diff --git a/go.mod b/go.mod index a200370fd..70bc33070 100644 --- a/go.mod +++ b/go.mod @@ -13,8 +13,9 @@ require ( github.com/Shopify/sarama v1.23.1 github.com/aws/aws-sdk-go v1.34.30 github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 + github.com/bwmarrin/snowflake v0.3.0 github.com/cespare/xxhash v1.1.0 - github.com/chrislusf/raft v1.0.4 + github.com/chrislusf/raft v1.0.6 github.com/coreos/go-semver v0.3.0 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/disintegration/imaging v1.6.2 @@ -38,8 +39,10 @@ require ( github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jinzhu/copier v0.2.8 github.com/json-iterator/go v1.1.10 github.com/karlseguin/ccache v2.0.3+incompatible // indirect github.com/karlseguin/ccache/v2 v2.0.7 @@ -48,7 +51,7 @@ require ( github.com/klauspost/crc32 v1.2.0 github.com/klauspost/reedsolomon v1.9.2 github.com/kurin/blazer v0.5.3 - github.com/lib/pq v1.2.0 + github.com/lib/pq v1.10.0 github.com/lunixbochs/vtclean v1.0.0 // indirect github.com/magiconair/properties v1.8.1 // indirect github.com/mattn/go-colorable v0.1.2 // indirect @@ -59,7 +62,7 @@ require ( github.com/prometheus/client_golang 
v1.3.0 github.com/rakyll/statik v0.1.7 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect - github.com/seaweedfs/fuse v1.1.1 + github.com/seaweedfs/fuse v1.1.4 github.com/seaweedfs/goexif v1.0.2 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -87,7 +90,7 @@ require ( gocloud.dev/pubsub/rabbitpubsub v0.20.0 golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb - golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd golang.org/x/tools v0.0.0-20200608174601-1b747fd94509 google.golang.org/api v0.26.0 diff --git a/go.sum b/go.sum index cf4ef4f98..0409b1ae1 100644 --- a/go.sum +++ b/go.sum @@ -141,6 +141,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA= github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg= +github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0= +github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= @@ -153,6 +155,10 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chrislusf/raft v1.0.4 h1:THhbsVik2hxdE0/VXX834f64Wn9RzgVPp+E+XCWZdKM= github.com/chrislusf/raft v1.0.4/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= +github.com/chrislusf/raft v1.0.5 h1:g8GxKCSStfm0/bGBDpNEbmEXL6MJkpXX+NI0ksbX5D4= +github.com/chrislusf/raft v1.0.5/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= +github.com/chrislusf/raft v1.0.6 h1:wunb85WWhMKhNRn7EmdIw35D4Lmew0ZJv8oYDizR/+Y= +github.com/chrislusf/raft v1.0.6/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -433,6 +439,8 @@ github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjL github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jinzhu/copier v0.2.8 h1:N8MbL5niMwE3P4dOwurJixz5rMkKfujmMRFmAanSzWE= +github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath 
v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -496,6 +504,8 @@ github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= @@ -681,6 +691,10 @@ github.com/seaweedfs/fuse v1.1.0 h1:cL1qPHFNtFv0UuJTLjKKgWDzfJ4iZzTa4Y7ipc2acGw= github.com/seaweedfs/fuse v1.1.0/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8= github.com/seaweedfs/fuse v1.1.1 h1:WD51YFJcBViOx8I89jeqPD+vAKl4EowzBy9GUw0plb0= github.com/seaweedfs/fuse v1.1.1/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8= +github.com/seaweedfs/fuse v1.1.3 h1:0DddotXwSRGbYG2kynoJyr8GHCy30Z2SpdhP3vdyijY= +github.com/seaweedfs/fuse v1.1.3/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8= +github.com/seaweedfs/fuse v1.1.4 h1:YYqkK86agMhXRSwR+wFbRI8ikMgk3kL6PNTna1MAHyQ= +github.com/seaweedfs/fuse v1.1.4/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8= github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E= github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk= github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw= @@ -955,6 +969,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0= golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 424307ff6..0025c9760 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -appVersion: "2.28" -version: 2.28 +appVersion: "2.41" +version: 2.41 diff --git a/k8s/seaweedfs/templates/volume-service.yaml b/k8s/seaweedfs/templates/volume-service.yaml index a2b0b540e..0a9173fde 100644 --- a/k8s/seaweedfs/templates/volume-service.yaml +++ b/k8s/seaweedfs/templates/volume-service.yaml @@ -23,6 +23,6 @@ spec: targetPort: {{ 
.Values.volume.metricsPort }} protocol: TCP {{- end }} -selector: + selector: app: {{ template "seaweedfs.name" . }} component: volume \ No newline at end of file diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 8768b32f6..a4abaccf3 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - # imageTag: "2.28" - started using {.Chart.appVersion} + # imageTag: "2.41" - started using {.Chart.appVersion} imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/note/SeaweedFS_Architecture.png b/note/SeaweedFS_Architecture.png new file mode 100644 index 000000000..f960dc3e1 Binary files /dev/null and b/note/SeaweedFS_Architecture.png differ diff --git a/note/SeaweedFS_Cluster_Backup.png b/note/SeaweedFS_Cluster_Backup.png new file mode 100644 index 000000000..d8f2e19ff Binary files /dev/null and b/note/SeaweedFS_Cluster_Backup.png differ diff --git a/note/SeaweedFS_XDR.png b/note/SeaweedFS_XDR.png new file mode 100644 index 000000000..24e468a62 Binary files /dev/null and b/note/SeaweedFS_XDR.png differ diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index a7da16a93..f4e522a3e 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -5,7 +5,7 @@ com.github.chrislusf seaweedfs-client - 1.6.2 + 1.6.4 org.sonatype.oss @@ -17,7 +17,7 @@ 3.9.1 1.23.0 - 28.0-jre + 30.0-jre diff --git a/other/java/client/pom.xml.deploy b/other/java/client/pom.xml.deploy index b45c193ec..9c8c4f45e 100644 --- a/other/java/client/pom.xml.deploy +++ b/other/java/client/pom.xml.deploy @@ -5,7 +5,7 @@ com.github.chrislusf seaweedfs-client - 1.6.2 + 1.6.4 org.sonatype.oss diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml index 217f708e9..12ea860c2 100644 --- a/other/java/client/pom_debug.xml +++ b/other/java/client/pom_debug.xml @@ -5,7 +5,7 @@ com.github.chrislusf seaweedfs-client - 1.6.2 + 1.6.4 org.sonatype.oss diff --git a/other/java/client/src/main/java/seaweedfs/client/ByteBufferPool.java b/other/java/client/src/main/java/seaweedfs/client/ByteBufferPool.java index 994bcaa2b..51053becd 100644 --- a/other/java/client/src/main/java/seaweedfs/client/ByteBufferPool.java +++ b/other/java/client/src/main/java/seaweedfs/client/ByteBufferPool.java @@ -3,6 +3,7 @@ package seaweedfs.client; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.nio.Buffer; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -34,7 +35,7 @@ public class ByteBufferPool { } public static synchronized void release(ByteBuffer obj) { - obj.clear(); + ((Buffer)obj).clear(); bufferList.add(0, obj); } diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index c2ffe0ac6..257a9873d 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -94,7 +94,7 @@ public class FilerClient extends FilerGrpcClient { return true; } File pathFile = new File(path); - String parent = pathFile.getParent(); + String parent = pathFile.getParent().replace('\\','/'); String name = pathFile.getName(); mkdirs(parent, mode, uid, gid, userName, groupNames); @@ -115,11 +115,11 @@ public class FilerClient extends FilerGrpcClient { public boolean mv(String oldPath, String newPath) { File oldPathFile = new File(oldPath); - String 
oldParent = oldPathFile.getParent(); + String oldParent = oldPathFile.getParent().replace('\\','/'); String oldName = oldPathFile.getName(); File newPathFile = new File(newPath); - String newParent = newPathFile.getParent(); + String newParent = newPathFile.getParent().replace('\\','/'); String newName = newPathFile.getName(); return atomicRenameEntry(oldParent, oldName, newParent, newName); @@ -129,7 +129,7 @@ public class FilerClient extends FilerGrpcClient { public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) { File pathFile = new File(path); - String parent = pathFile.getParent(); + String parent = pathFile.getParent().replace('\\','/'); String name = pathFile.getName(); return deleteEntry( @@ -148,7 +148,7 @@ public class FilerClient extends FilerGrpcClient { public boolean touch(String path, int mode, int uid, int gid, String userName, String[] groupNames) { File pathFile = new File(path); - String parent = pathFile.getParent(); + String parent = pathFile.getParent().replace('\\','/'); String name = pathFile.getName(); FilerProto.Entry entry = lookupEntry(parent, name); diff --git a/other/java/examples/pom.xml b/other/java/examples/pom.xml index d0f22a44d..950a3f494 100644 --- a/other/java/examples/pom.xml +++ b/other/java/examples/pom.xml @@ -11,13 +11,13 @@ com.github.chrislusf seaweedfs-client - 1.6.2 + 1.6.4 compile com.github.chrislusf seaweedfs-hadoop2-client - 1.6.2 + 1.6.4 compile diff --git a/other/java/hdfs-over-ftp/pom.xml b/other/java/hdfs-over-ftp/pom.xml new file mode 100644 index 000000000..0db422db5 --- /dev/null +++ b/other/java/hdfs-over-ftp/pom.xml @@ -0,0 +1,120 @@ + + + 4.0.0 + + hdfs-over-ftp + hdfs-over-ftp + 1.0 + + + org.springframework.boot + spring-boot-starter-parent + 2.4.3 + + + + + org.springframework.boot + spring-boot-starter + + + org.springframework.boot + spring-boot-starter-web + + + io.springfox + springfox-swagger2 + 2.9.2 + + + io.springfox + springfox-swagger-ui + 2.9.2 + + + org.apache.hadoop + hadoop-common + 3.2.1 + + + org.apache.hadoop + hadoop-client + 3.2.1 + + + org.apache.ftpserver + ftpserver-core + 1.1.1 + + + com.github.chrislusf + seaweedfs-hadoop3-client + 1.6.2 + + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.8 + 1.8 + UTF-8 + + + ${java.home}/lib/rt.jar + + + + + + org.apache.maven.plugins + maven-jar-plugin + 2.6 + + + + org.apache.hadoop.seaweed.ftp.ApplicationServer + true + lib/ + + + ./ + + + + + + + maven-assembly-plugin + + false + + src/main/resources/assembly.xml + + + + + make-assembly + package + + single + + + + + + + + \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/ApplicationServer.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/ApplicationServer.java new file mode 100644 index 000000000..b8ef1d840 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/ApplicationServer.java @@ -0,0 +1,14 @@ +package org.apache.hadoop.seaweed.ftp; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + + +@SpringBootApplication +public class ApplicationServer { + + public static void main(String[] args) { + SpringApplication.run(ApplicationServer.class, args); + } + +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/config/SwaggerConfig.java 
b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/config/SwaggerConfig.java new file mode 100644 index 000000000..3c395493d --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/config/SwaggerConfig.java @@ -0,0 +1,27 @@ +package org.apache.hadoop.seaweed.ftp.config; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import springfox.documentation.builders.ApiInfoBuilder; +import springfox.documentation.builders.PathSelectors; +import springfox.documentation.builders.RequestHandlerSelectors; +import springfox.documentation.spi.DocumentationType; +import springfox.documentation.spring.web.plugins.Docket; +import springfox.documentation.swagger2.annotations.EnableSwagger2; + +@Configuration +@EnableSwagger2 +public class SwaggerConfig { + @Bean + public Docket createRestApi() { + return new Docket(DocumentationType.SWAGGER_2) + .pathMapping("/") + .select() + .apis(RequestHandlerSelectors.basePackage("org.apache.hadoop.seaweed.ftp")) + .paths(PathSelectors.any()) + .build().apiInfo(new ApiInfoBuilder() + .title("FTP API Doc") + .version("1.0") + .build()); + } +} \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/FtpManagerController.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/FtpManagerController.java new file mode 100644 index 000000000..7a5a4e74d --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/FtpManagerController.java @@ -0,0 +1,71 @@ +package org.apache.hadoop.seaweed.ftp.controller; + +import io.swagger.annotations.Api; +import io.swagger.annotations.ApiOperation; +import org.apache.hadoop.seaweed.ftp.service.HFtpService; +import org.apache.hadoop.seaweed.ftp.controller.vo.Result; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.HashMap; +import java.util.Map; + +@RestController +@RequestMapping("/manager") +@Api(tags = "FTP操作管理") +public class FtpManagerController { + + private static Logger log = Logger.getLogger(FtpManagerController.class); + + @Autowired + private HFtpService hdfsOverFtpServer; + + @GetMapping("/status") + @ApiOperation("查看FTP服务状态") + public Result status() { + Map map = new HashMap<>(); + try { + boolean status = hdfsOverFtpServer.statusServer(); + map.put("is_running", status); + return new Result(true, map, "FTP 服务状态获取成功"); + }catch (Exception e) { + log.error(e); + map.put("is_running", false); + return new Result(true, map, "FTP 服务状态获取成功"); + } + } + + @PutMapping("/start") + @ApiOperation("启动FTP服务") + public Result start() { + try { + boolean status = hdfsOverFtpServer.statusServer(); + if(!status) { + hdfsOverFtpServer.startServer(); + } + return new Result(true, "FTP 服务启动成功"); + }catch (Exception e) { + log.error(e); + return new Result(false, "FTP 服务启动失败"); + } + } + + @PutMapping("/stop") + @ApiOperation("停止FTP服务") + public Result stop() { + try { + boolean status = hdfsOverFtpServer.statusServer(); + if(status) { + hdfsOverFtpServer.stopServer(); + } + return new Result(true, "FTP 服务停止成功"); + }catch (Exception e) { + log.error(e); + return new Result(false, "FTP 
服务停止失败"); + } + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/UserController.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/UserController.java new file mode 100644 index 000000000..c4d2261b3 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/UserController.java @@ -0,0 +1,98 @@ +package org.apache.hadoop.seaweed.ftp.controller; + +import io.swagger.annotations.Api; +import io.swagger.annotations.ApiOperation; +import org.apache.ftpserver.ftplet.User; +import org.apache.ftpserver.usermanager.Md5PasswordEncryptor; +import org.apache.ftpserver.usermanager.UserFactory; +import org.apache.hadoop.seaweed.ftp.controller.vo.FtpUser; +import org.apache.hadoop.seaweed.ftp.controller.vo.Result; +import org.apache.hadoop.seaweed.ftp.users.HdfsUserManager; +import org.apache.log4j.Logger; +import org.springframework.web.bind.annotation.*; + +import java.io.File; + +@RestController +@RequestMapping("/user") +@Api(tags = "FTP用户管理") +public class UserController { + + private static Logger log = Logger.getLogger(UserController.class); + + /*** + * { + * "name": "test", + * "password": "test", + * "homeDirectory": "/buckets/test/" + * } + * @param ftpUser + * @return + */ + @PostMapping("/add") + @ApiOperation("新增/编辑用户") + public Result add(@RequestBody FtpUser ftpUser) { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new File(System.getProperty("user.dir") + File.separator + "users.properties")); + userManagerFactory.setPasswordEncryptor(new Md5PasswordEncryptor()); + + UserFactory userFactory = new UserFactory(); + userFactory.setHomeDirectory(ftpUser.getHomeDirectory()); + userFactory.setName(ftpUser.getName()); + userFactory.setPassword(ftpUser.getPassword()); + userFactory.setEnabled(ftpUser.isEnabled()); + userFactory.setMaxIdleTime(ftpUser.getMaxIdleTime()); + + User user = userFactory.createUser(); + userManagerFactory.save(user, ftpUser.isRenamePush()); + return new Result(true, "新建用户成功"); + }catch (Exception e) { + log.error(e); + return new Result(false, "新建用户失败"); + } + } + + @DeleteMapping("/delete/{user}") + @ApiOperation("删除用户") + public Result delete(@PathVariable(value = "user") String user) { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new File(System.getProperty("user.dir") + File.separator + "users.properties")); + userManagerFactory.delete(user); + return new Result(true, "删除用户成功"); + }catch (Exception e) { + log.error(e); + return new Result(false, "删除用户失败"); + } + } + + @GetMapping("/show/{userName}") + @ApiOperation("查看用户") + public Result show(@PathVariable(value = "userName") String userName) { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new File(System.getProperty("user.dir") + File.separator + "users.properties")); + User user = userManagerFactory.getUserByName(userName); + FtpUser ftpUser = new FtpUser(user.getHomeDirectory(), user.getPassword(), user.getEnabled(), user.getName(), user.getMaxIdleTime(), HdfsUserManager.getUserRenamePush(userName)); + return new Result(true, ftpUser, "获取用户信息成功"); + }catch (Exception e) { + log.error(e); + return new Result(false, "获取用户信息失败"); + } + } + + @GetMapping("/list") + @ApiOperation("列举用户") + public Result list() { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new 
File(System.getProperty("user.dir") + File.separator + "users.properties")); + String[] allUserNames = userManagerFactory.getAllUserNames(); + return new Result(true, allUserNames, "列举用户成功"); + }catch (Exception e) { + log.error(e); + return new Result(false, "列举用户失败"); + } + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/FtpUser.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/FtpUser.java new file mode 100644 index 000000000..953d08603 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/FtpUser.java @@ -0,0 +1,71 @@ +package org.apache.hadoop.seaweed.ftp.controller.vo; + +public class FtpUser { + + private String homeDirectory; + private String password; + private boolean enabled; + private String name; + private int maxIdleTime; + private boolean renamePush; + + public FtpUser() { + } + + public FtpUser(String homeDirectory, String password, boolean enabled, String name, int maxIdleTime, boolean renamePush) { + this.homeDirectory = homeDirectory; + this.password = password; + this.enabled = enabled; + this.name = name; + this.maxIdleTime = maxIdleTime; + this.renamePush = renamePush; + } + + public String getHomeDirectory() { + return homeDirectory; + } + + public void setHomeDirectory(String homeDirectory) { + this.homeDirectory = homeDirectory; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public int getMaxIdleTime() { + return maxIdleTime; + } + + public void setMaxIdleTime(int maxIdleTime) { + this.maxIdleTime = maxIdleTime; + } + + public boolean isRenamePush() { + return renamePush; + } + + public void setRenamePush(boolean renamePush) { + this.renamePush = renamePush; + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/Result.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/Result.java new file mode 100644 index 000000000..b6a480ba7 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/Result.java @@ -0,0 +1,43 @@ +package org.apache.hadoop.seaweed.ftp.controller.vo; + +public class Result { + + private boolean status; + private Object data; + private String message; + + public Result(boolean status, String message) { + this.status = status; + this.message = message; + } + + public Result(boolean status, Object data, String message) { + this.status = status; + this.message = message; + this.data = data; + } + + public boolean isStatus() { + return status; + } + + public void setStatus(boolean status) { + this.status = status; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public Object getData() { + return data; + } + + public void setData(Object data) { + this.data = data; + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java new file mode 100644 index 000000000..c3fa31872 --- /dev/null +++ 
b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java @@ -0,0 +1,102 @@ +package org.apache.hadoop.seaweed.ftp.service; + +import org.apache.ftpserver.DataConnectionConfiguration; +import org.apache.ftpserver.DataConnectionConfigurationFactory; +import org.apache.ftpserver.FtpServer; +import org.apache.ftpserver.FtpServerFactory; +import org.apache.ftpserver.command.CommandFactoryFactory; +import org.apache.ftpserver.listener.ListenerFactory; +import org.apache.hadoop.seaweed.ftp.service.filesystem.HdfsFileSystemManager; +import org.apache.hadoop.seaweed.ftp.service.filesystem.HdfsOverFtpSystem; +import org.apache.hadoop.seaweed.ftp.users.HdfsUserManager; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; + +import java.io.File; + +/** + * reference: https://github.com/iponweb/hdfs-over-ftp + */ +@Component +public class HFtpService { + + private static Logger log = Logger.getLogger(HFtpService.class); + + @Value("${ftp.port}") + private int port = 0; + + @Value("${ftp.passive-address}") + private String passiveAddress; + + @Value("${ftp.passive-ports}") + private String passivePorts; + + @Value("${hdfs.uri}") + private String hdfsUri; + + @Value("${seaweedFs.enable}") + private boolean seaweedFsEnable; + + @Value("${seaweedFs.access}") + private String seaweedFsAccess; + + @Value("${seaweedFs.replication}") + private String seaweedFsReplication; + + private FtpServer ftpServer = null; + + public void startServer() throws Exception { + log.info("Starting HDFS-Over-Ftp server. port: " + port + " passive-address: " + passiveAddress + " passive-ports: " + passivePorts + " hdfs-uri: " + hdfsUri); + + HdfsOverFtpSystem.setHdfsUri(hdfsUri); + HdfsOverFtpSystem.setSeaweedFsEnable(seaweedFsEnable); + HdfsOverFtpSystem.setSeaweedFsAccess(seaweedFsAccess); + HdfsOverFtpSystem.setSeaweedFsReplication(seaweedFsReplication); + + FtpServerFactory server = new FtpServerFactory(); + server.setFileSystem(new HdfsFileSystemManager()); + + ListenerFactory factory = new ListenerFactory(); + factory.setPort(port); + + DataConnectionConfigurationFactory dccFactory = new DataConnectionConfigurationFactory(); + dccFactory.setPassiveAddress("0.0.0.0"); + dccFactory.setPassivePorts(passivePorts); + dccFactory.setPassiveExternalAddress(passiveAddress); + DataConnectionConfiguration dcc = dccFactory.createDataConnectionConfiguration(); + factory.setDataConnectionConfiguration(dcc); + + server.addListener("default", factory.createListener()); + + HdfsUserManager userManager = new HdfsUserManager(); + final File file = loadResource("/users.properties"); + userManager.setFile(file); + server.setUserManager(userManager); + + CommandFactoryFactory cmFact = new CommandFactoryFactory(); + cmFact.setUseDefaultCommands(true); + server.setCommandFactory(cmFact.createCommandFactory()); + + // start the server + ftpServer = server.createServer(); + ftpServer.start(); + } + + public void stopServer() { + log.info("Stopping Hdfs-Over-Ftp server. 
port: " + port + " passive-address: " + passiveAddress + " passive-ports: " + passivePorts + " hdfs-uri: " + hdfsUri); + ftpServer.stop(); + } + + public boolean statusServer() { + try { + return !ftpServer.isStopped(); + }catch (Exception e) { + return false; + } + } + + private static File loadResource(String resourceName) { + return new File(System.getProperty("user.dir") + resourceName); + } +} \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileObject.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileObject.java new file mode 100644 index 000000000..e97c2dc14 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileObject.java @@ -0,0 +1,333 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.ftpserver.ftplet.FtpFile; +import org.apache.ftpserver.ftplet.User; +import org.apache.hadoop.fs.*; +import org.apache.hadoop.seaweed.ftp.users.HdfsUser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +/** + * This class implements all actions to HDFS + */ +public class HdfsFileObject implements FtpFile { + + private final Logger log = LoggerFactory.getLogger(HdfsFileObject.class); + + private Path homePath; + private Path path; + private Path fullPath; + private HdfsUser user; + + /** + * Constructs HdfsFileObject from path + * + * @param path path to represent object + * @param user accessor of the object + */ + public HdfsFileObject(String homePath, String path, User user) { + this.homePath = new Path(homePath); + this.path = new Path(path); + this.fullPath = new Path(homePath + path); + this.user = (HdfsUser) user; + } + + public String getAbsolutePath() { + // strip the last '/' if necessary + String fullName = path.toString(); + int filelen = fullName.length(); + if ((filelen != 1) && (fullName.charAt(filelen - 1) == '/')) { + fullName = fullName.substring(0, filelen - 1); + } + + return fullName; + } + + public String getName() { + return path.getName(); + } + + /** + * HDFS has no hidden objects + * + * @return always false + */ + public boolean isHidden() { + return false; + } + + /** + * Checks if the object is a directory + * + * @return true if the object is a directory + */ + public boolean isDirectory() { + try { + log.debug("is directory? 
: " + fullPath); + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + return fs.isDir(); + } catch (IOException e) { + log.debug(fullPath + " is not dir", e); + return false; + } + } + + /** + * Checks if the object is a file + * + * @return true if the object is a file + */ + public boolean isFile() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + return dfs.isFile(fullPath); + } catch (IOException e) { + log.debug(fullPath + " is not file", e); + return false; + } + } + + /** + * Checks if the object does exist + * + * @return true if the object does exist + */ + public boolean doesExist() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + dfs.getFileStatus(fullPath); + return true; + } catch (IOException e) { + // log.debug(path + " does not exist", e); + return false; + } + } + + public boolean isReadable() { + return true; + } + + public boolean isWritable() { + return true; + } + + public boolean isRemovable() { + return true; + } + + /** + * Get owner of the object + * + * @return owner of the object + */ + public String getOwnerName() { + return "root"; + /* + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + String owner = fs.getOwner(); + if(owner.length() == 0) { + return "root"; + } + return owner; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + */ + } + + /** + * Get group of the object + * + * @return group of the object + */ + public String getGroupName() { + return "root"; + /* + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + String group = fs.getGroup(); + if(group.length() == 0) { + return "root"; + } + return group; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + */ + } + + /** + * Get link count + * + * @return 3 is for a directory and 1 is for a file + */ + public int getLinkCount() { + return isDirectory() ? 
3 : 1; + } + + /** + * Get last modification date + * + * @return last modification date as a long + */ + public long getLastModified() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + return fs.getModificationTime(); + } catch (IOException e) { + e.printStackTrace(); + return 0; + } + } + + public boolean setLastModified(long l) { + return false; + } + + /** + * Get a size of the object + * + * @return size of the object in bytes + */ + public long getSize() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + log.debug("getSize(): " + fullPath + " : " + fs.getLen()); + return fs.getLen(); + } catch (IOException e) { + e.printStackTrace(); + return 0; + } + } + + public Object getPhysicalFile() { + return null; + } + + /** + * Create a new dir from the object + * + * @return true if dir is created + */ + public boolean mkdir() { + try { + FileSystem fs = HdfsOverFtpSystem.getDfs(); + fs.mkdirs(fullPath); +// fs.setOwner(path, user.getName(), user.getMainGroup()); + return true; + } catch (IOException e) { + e.printStackTrace(); + return false; + } + } + + /** + * Delete object from the HDFS filesystem + * + * @return true if the object is deleted + */ + public boolean delete() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + dfs.delete(fullPath, true); + return true; + } catch (IOException e) { + e.printStackTrace(); + return false; + } + } + + public boolean move(FtpFile ftpFile) { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + dfs.rename(fullPath, new Path(fullPath.getParent() + File.separator + ftpFile.getName())); + return true; + } catch (IOException e) { + e.printStackTrace(); + return false; + } + } + + + /** + * List files of the directory + * + * @return List of files in the directory + */ + public List listFiles() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fileStats[] = dfs.listStatus(fullPath); + + // get the virtual name of the base directory + String virtualFileStr = getAbsolutePath(); + if (virtualFileStr.charAt(virtualFileStr.length() - 1) != '/') { + virtualFileStr += '/'; + } + + FtpFile[] virtualFiles = new FtpFile[fileStats.length]; + for (int i = 0; i < fileStats.length; i++) { + File fileObj = new File(fileStats[i].getPath().toString()); + String fileName = virtualFileStr + fileObj.getName(); + virtualFiles[i] = new HdfsFileObject(homePath.toString(), fileName, user); + } + return Collections.unmodifiableList(Arrays.asList(virtualFiles)); + } catch (IOException e) { + log.debug("", e); + return null; + } + } + + /** + * Creates output stream to write to the object + * + * @param l is not used here + * @return OutputStream + * @throws IOException + */ + public OutputStream createOutputStream(long l) { + try { + FileSystem fs = HdfsOverFtpSystem.getDfs(); + FSDataOutputStream out = fs.create(fullPath); +// fs.setOwner(fullPath, user.getName(), user.getMainGroup()); + return out; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + } + + /** + * Creates input stream to read from the object + * + * @param l is not used here + * @return OutputStream + * @throws IOException + */ + public InputStream createInputStream(long l) { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FSDataInputStream in = dfs.open(fullPath); + return in; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + } +} diff --git 
a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemManager.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemManager.java new file mode 100644 index 000000000..533c2c3aa --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemManager.java @@ -0,0 +1,14 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.ftpserver.ftplet.FileSystemFactory; +import org.apache.ftpserver.ftplet.FileSystemView; +import org.apache.ftpserver.ftplet.User; + +/** + * Impelented FileSystemManager to use HdfsFileSystemView + */ +public class HdfsFileSystemManager implements FileSystemFactory { + public FileSystemView createFileSystemView(User user) { + return new HdfsFileSystemView(user); + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemView.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemView.java new file mode 100644 index 000000000..8b910e775 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemView.java @@ -0,0 +1,104 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.ftpserver.ftplet.FileSystemView; +import org.apache.ftpserver.ftplet.FtpFile; +import org.apache.ftpserver.ftplet.User; +import org.apache.hadoop.fs.Path; + +import java.io.File; + +/** + * Implemented FileSystemView to use HdfsFileObject + */ +public class HdfsFileSystemView implements FileSystemView { + + private String homePath; + private String currPath = File.separator; + private User user; + + /** + * Constructor - set the user object. 
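+	 * Stores the user's home directory as the root path of this view and
+	 * rejects a null user or null home directory with an IllegalArgumentException.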
+ */ + protected HdfsFileSystemView(User user) { + if (user == null) { + throw new IllegalArgumentException("user can not be null"); + } + if (user.getHomeDirectory() == null) { + throw new IllegalArgumentException( + "User home directory can not be null"); + } + + this.homePath = user.getHomeDirectory(); + this.user = user; + } + + public FtpFile getHomeDirectory() { + return new HdfsFileObject(homePath, File.separator, user); + } + + public FtpFile getWorkingDirectory() { + FtpFile fileObj; + if (currPath.equals(File.separator)) { + fileObj = new HdfsFileObject(homePath, File.separator, user); + } else { + fileObj = new HdfsFileObject(homePath, currPath, user); + + } + return fileObj; + } + + public boolean changeWorkingDirectory(String dir) { + + Path path; + if (dir.startsWith(File.separator) || new Path(currPath).equals(new Path(dir))) { + path = new Path(dir); + } else if (currPath.length() > 1) { + path = new Path(currPath + File.separator + dir); + } else { + if(dir.startsWith("/")) { + path = new Path(dir); + }else { + path = new Path(File.separator + dir); + } + } + + // 防止退回根目录 + if (path.getName().equals("..")) { + path = new Path(File.separator); + } + + HdfsFileObject file = new HdfsFileObject(homePath, path.toString(), user); + if (file.isDirectory()) { + currPath = path.toString(); + return true; + } else { + return false; + } + } + + public FtpFile getFile(String file) { + String path; + if (file.startsWith(File.separator)) { + path = file; + } else if (currPath.length() > 1) { + path = currPath + File.separator + file; + } else { + path = File.separator + file; + } + return new HdfsFileObject(homePath, path, user); + } + + /** + * Is the file content random accessible? + */ + public boolean isRandomAccessible() { + return true; + } + + /** + * Dispose file system view - does nothing. 
+ */ + public void dispose() { + } + +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsOverFtpSystem.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsOverFtpSystem.java new file mode 100644 index 000000000..149fd6857 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsOverFtpSystem.java @@ -0,0 +1,72 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +/** + * Class to store DFS connection + */ +public class HdfsOverFtpSystem { + + private static FileSystem fs = null; + + private static String hdfsUri; + + private static boolean seaweedFsEnable; + + private static String seaweedFsAccess; + + private static String seaweedFsReplication; + + private final static Logger log = LoggerFactory.getLogger(HdfsOverFtpSystem.class); + + private static void hdfsInit() throws IOException { + Configuration configuration = new Configuration(); + + configuration.set("fs.defaultFS", hdfsUri); + if(seaweedFsEnable) { + configuration.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem"); + configuration.set("fs.seaweed.volume.server.access", seaweedFsAccess); + configuration.set("fs.seaweed.replication", seaweedFsReplication); + } + fs = FileSystem.get(configuration); + log.info("HDFS load success"); + } + + /** + * Get dfs + * + * @return dfs + * @throws IOException + */ + public static FileSystem getDfs() throws IOException { + if (fs == null) { + hdfsInit(); + } + return fs; + } + + public static void setHdfsUri(String hdfsUri) { + HdfsOverFtpSystem.hdfsUri = hdfsUri; + } + + public static String getHdfsUri() { + return hdfsUri; + } + + public static void setSeaweedFsEnable(boolean seaweedFsEnable) { + HdfsOverFtpSystem.seaweedFsEnable = seaweedFsEnable; + } + + public static void setSeaweedFsAccess(String seaweedFsAccess) { + HdfsOverFtpSystem.seaweedFsAccess = seaweedFsAccess; + } + + public static void setSeaweedFsReplication(String seaweedFsReplication) { + HdfsOverFtpSystem.seaweedFsReplication = seaweedFsReplication; + } +} \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUser.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUser.java new file mode 100644 index 000000000..c82f6516f --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUser.java @@ -0,0 +1,239 @@ +package org.apache.hadoop.seaweed.ftp.users; + +import org.apache.ftpserver.ftplet.Authority; +import org.apache.ftpserver.ftplet.AuthorizationRequest; +import org.apache.ftpserver.ftplet.User; +import org.apache.log4j.Logger; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class HdfsUser implements User, Serializable { + + private static final long serialVersionUID = -47371353779731294L; + + private String name = null; + + private String password = null; + + private int maxIdleTimeSec = 0; // no limit + + private String homeDir = null; + + private boolean isEnabled = true; + + private List authorities = new ArrayList(); + + private ArrayList groups = new ArrayList(); + + private Logger log = Logger.getLogger(HdfsUser.class); + + /** + * Default constructor. 
+ */ + public HdfsUser() { + } + + /** + * Copy constructor. + */ + public HdfsUser(User user) { + name = user.getName(); + password = user.getPassword(); + authorities = user.getAuthorities(); + maxIdleTimeSec = user.getMaxIdleTime(); + homeDir = user.getHomeDirectory(); + isEnabled = user.getEnabled(); + } + + public ArrayList getGroups() { + return groups; + } + + /** + * Get the main group of the user + * + * @return main group of the user + */ + public String getMainGroup() { + if (groups.size() > 0) { + return groups.get(0); + } else { + log.error("User " + name + " is not a memer of any group"); + return "error"; + } + } + + /** + * Checks if user is a member of the group + * + * @param group to check + * @return true if the user id a member of the group + */ + public boolean isGroupMember(String group) { + for (String userGroup : groups) { + if (userGroup.equals(group)) { + return true; + } + } + return false; + } + + /** + * Set users' groups + * + * @param groups to set + */ + public void setGroups(ArrayList groups) { + if (groups.size() < 1) { + log.error("User " + name + " is not a memer of any group"); + } + this.groups = groups; + } + + /** + * Get the user name. + */ + public String getName() { + return name; + } + + /** + * Set user name. + */ + public void setName(String name) { + this.name = name; + } + + /** + * Get the user password. + */ + public String getPassword() { + return password; + } + + /** + * Set user password. + */ + public void setPassword(String pass) { + password = pass; + } + + public List getAuthorities() { + if (authorities != null) { + return Collections.unmodifiableList(authorities); + } else { + return null; + } + } + + public void setAuthorities(List authorities) { + if (authorities != null) { + this.authorities = Collections.unmodifiableList(authorities); + } else { + this.authorities = null; + } + } + + /** + * Get the maximum idle time in second. + */ + public int getMaxIdleTime() { + return maxIdleTimeSec; + } + + /** + * Set the maximum idle time in second. + */ + public void setMaxIdleTime(int idleSec) { + maxIdleTimeSec = idleSec; + if (maxIdleTimeSec < 0) { + maxIdleTimeSec = 0; + } + } + + /** + * Get the user enable status. + */ + public boolean getEnabled() { + return isEnabled; + } + + /** + * Set the user enable status. + */ + public void setEnabled(boolean enb) { + isEnabled = enb; + } + + /** + * Get the user home directory. + */ + public String getHomeDirectory() { + return homeDir; + } + + /** + * Set the user home directory. + */ + public void setHomeDirectory(String home) { + homeDir = home; + } + + /** + * String representation. 
+ */ + public String toString() { + return name; + } + + /** + * {@inheritDoc} + */ + public AuthorizationRequest authorize(AuthorizationRequest request) { + List authorities = getAuthorities(); + + // check for no authorities at all + if (authorities == null) { + return null; + } + + boolean someoneCouldAuthorize = false; + for (Authority authority : authorities) { + if (authority.canAuthorize(request)) { + someoneCouldAuthorize = true; + + request = authority.authorize(request); + + // authorization failed, return null + if (request == null) { + return null; + } + } + + } + + if (someoneCouldAuthorize) { + return request; + } else { + return null; + } + } + + /** + * {@inheritDoc} + */ + public List getAuthorities(Class clazz) { + List selected = new ArrayList(); + + for (Authority authority : authorities) { + if (authority.getClass().equals(clazz)) { + selected.add(authority); + } + } + + return selected; + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUserManager.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUserManager.java new file mode 100644 index 000000000..7eb296160 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUserManager.java @@ -0,0 +1,453 @@ +package org.apache.hadoop.seaweed.ftp.users; + +import org.apache.ftpserver.FtpServerConfigurationException; +import org.apache.ftpserver.ftplet.*; +import org.apache.ftpserver.usermanager.*; +import org.apache.ftpserver.usermanager.impl.*; +import org.apache.ftpserver.util.BaseProperties; +import org.apache.ftpserver.util.IoUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.*; + +public class HdfsUserManager extends AbstractUserManager { + + private final Logger LOG = LoggerFactory + .getLogger(HdfsUserManager.class); + + private final static String DEPRECATED_PREFIX = "FtpServer.user."; + + private final static String PREFIX = "ftpserver.user."; + + private static BaseProperties userDataProp; + + private File userDataFile = new File("users.conf"); + + private boolean isConfigured = false; + + private PasswordEncryptor passwordEncryptor = new Md5PasswordEncryptor(); + + + /** + * Retrieve the file used to load and store users + * + * @return The file + */ + public File getFile() { + return userDataFile; + } + + /** + * Set the file used to store and read users. Must be set before + * {@link #configure()} is called. + * + * @param propFile A file containing users + */ + public void setFile(File propFile) { + if (isConfigured) { + throw new IllegalStateException("Must be called before configure()"); + } + + this.userDataFile = propFile; + } + + + /** + * Retrieve the password encryptor used for this user manager + * + * @return The password encryptor. Default to {@link Md5PasswordEncryptor} + * if no other has been provided + */ + public PasswordEncryptor getPasswordEncryptor() { + return passwordEncryptor; + } + + + /** + * Set the password encryptor to use for this user manager + * + * @param passwordEncryptor The password encryptor + */ + public void setPasswordEncryptor(PasswordEncryptor passwordEncryptor) { + this.passwordEncryptor = passwordEncryptor; + } + + + /** + * Lazy init the user manager + */ + private void lazyInit() { + if (!isConfigured) { + configure(); + } + } + + /** + * Configure user manager. 
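+	 * Loads user definitions from the configured properties file, if it exists,
+	 * and migrates any deprecated "FtpServer.user." keys to the "ftpserver.user." prefix.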
+ */ + public void configure() { + isConfigured = true; + try { + userDataProp = new BaseProperties(); + + if (userDataFile != null && userDataFile.exists()) { + FileInputStream fis = null; + try { + fis = new FileInputStream(userDataFile); + userDataProp.load(fis); + } finally { + IoUtils.close(fis); + } + } + } catch (IOException e) { + throw new FtpServerConfigurationException( + "Error loading user data file : " + + userDataFile.getAbsolutePath(), e); + } + + convertDeprecatedPropertyNames(); + } + + private void convertDeprecatedPropertyNames() { + Enumeration keys = userDataProp.propertyNames(); + + boolean doSave = false; + + while (keys.hasMoreElements()) { + String key = (String) keys.nextElement(); + + if (key.startsWith(DEPRECATED_PREFIX)) { + String newKey = PREFIX + + key.substring(DEPRECATED_PREFIX.length()); + userDataProp.setProperty(newKey, userDataProp.getProperty(key)); + userDataProp.remove(key); + + doSave = true; + } + } + + if (doSave) { + try { + saveUserData(); + } catch (FtpException e) { + throw new FtpServerConfigurationException( + "Failed to save updated user data", e); + } + } + } + + public synchronized void save(User usr, boolean renamePush) throws FtpException { + lazyInit(); + userDataProp.setProperty(PREFIX + usr.getName() + ".rename.push", renamePush); + save(usr); + } + + /** + * Save user data. Store the properties. + */ + public synchronized void save(User usr) throws FtpException { + lazyInit(); + + // null value check + if (usr.getName() == null) { + throw new NullPointerException("User name is null."); + } + String thisPrefix = PREFIX + usr.getName() + '.'; + + // set other properties + userDataProp.setProperty(thisPrefix + ATTR_PASSWORD, getPassword(usr)); + + String home = usr.getHomeDirectory(); + if (home == null) { + home = "/"; + } + userDataProp.setProperty(thisPrefix + ATTR_HOME, home); + userDataProp.setProperty(thisPrefix + ATTR_ENABLE, usr.getEnabled()); + userDataProp.setProperty(thisPrefix + ATTR_WRITE_PERM, usr + .authorize(new WriteRequest()) != null); + userDataProp.setProperty(thisPrefix + ATTR_MAX_IDLE_TIME, usr + .getMaxIdleTime()); + + TransferRateRequest transferRateRequest = new TransferRateRequest(); + transferRateRequest = (TransferRateRequest) usr + .authorize(transferRateRequest); + + if (transferRateRequest != null) { + userDataProp.setProperty(thisPrefix + ATTR_MAX_UPLOAD_RATE, + transferRateRequest.getMaxUploadRate()); + userDataProp.setProperty(thisPrefix + ATTR_MAX_DOWNLOAD_RATE, + transferRateRequest.getMaxDownloadRate()); + } else { + userDataProp.remove(thisPrefix + ATTR_MAX_UPLOAD_RATE); + userDataProp.remove(thisPrefix + ATTR_MAX_DOWNLOAD_RATE); + } + + // request that always will succeed + ConcurrentLoginRequest concurrentLoginRequest = new ConcurrentLoginRequest( + 0, 0); + concurrentLoginRequest = (ConcurrentLoginRequest) usr + .authorize(concurrentLoginRequest); + + if (concurrentLoginRequest != null) { + userDataProp.setProperty(thisPrefix + ATTR_MAX_LOGIN_NUMBER, + concurrentLoginRequest.getMaxConcurrentLogins()); + userDataProp.setProperty(thisPrefix + ATTR_MAX_LOGIN_PER_IP, + concurrentLoginRequest.getMaxConcurrentLoginsPerIP()); + } else { + userDataProp.remove(thisPrefix + ATTR_MAX_LOGIN_NUMBER); + userDataProp.remove(thisPrefix + ATTR_MAX_LOGIN_PER_IP); + } + + saveUserData(); + } + + /** + * @throws FtpException + */ + private void saveUserData() throws FtpException { + File dir = userDataFile.getAbsoluteFile().getParentFile(); + if (dir != null && !dir.exists() && !dir.mkdirs()) { + String dirName = 
dir.getAbsolutePath(); + throw new FtpServerConfigurationException( + "Cannot create directory for user data file : " + dirName); + } + + // save user data + FileOutputStream fos = null; + try { + fos = new FileOutputStream(userDataFile); + userDataProp.store(fos, "Generated file - don't edit (please)"); + } catch (IOException ex) { + LOG.error("Failed saving user data", ex); + throw new FtpException("Failed saving user data", ex); + } finally { + IoUtils.close(fos); + } + } + + + public synchronized void list() throws FtpException { + lazyInit(); + + Map dataMap = new HashMap(); + Enumeration propNames = (Enumeration) userDataProp.propertyNames(); + ArrayList a = Collections.list(propNames); + a.remove("i18nMap");//去除i18nMap + for(String attrName : a){ +// dataMap.put(attrName, propNames.); + } + + } + + /** + * Delete an user. Removes all this user entries from the properties. After + * removing the corresponding from the properties, save the data. + */ + public synchronized void delete(String usrName) throws FtpException { + lazyInit(); + + // remove entries from properties + String thisPrefix = PREFIX + usrName + '.'; + Enumeration propNames = userDataProp.propertyNames(); + ArrayList remKeys = new ArrayList(); + while (propNames.hasMoreElements()) { + String thisKey = propNames.nextElement().toString(); + if (thisKey.startsWith(thisPrefix)) { + remKeys.add(thisKey); + } + } + Iterator remKeysIt = remKeys.iterator(); + while (remKeysIt.hasNext()) { + userDataProp.remove(remKeysIt.next()); + } + + saveUserData(); + } + + /** + * Get user password. Returns the encrypted value. + *

+	 * <pre>
+	 * If the password value is not null
+	 *    password = new password
+	 * else
+	 *   if user does exist
+	 *     password = old password
+	 *   else
+	 *     password = ""
+	 * </pre>
+ */ + private String getPassword(User usr) { + String name = usr.getName(); + String password = usr.getPassword(); + + if (password != null) { + password = passwordEncryptor.encrypt(password); + } else { + String blankPassword = passwordEncryptor.encrypt(""); + + if (doesExist(name)) { + String key = PREFIX + name + '.' + ATTR_PASSWORD; + password = userDataProp.getProperty(key, blankPassword); + } else { + password = blankPassword; + } + } + return password; + } + + /** + * Get all user names. + */ + public synchronized String[] getAllUserNames() { + lazyInit(); + + // get all user names + String suffix = '.' + ATTR_HOME; + ArrayList ulst = new ArrayList(); + Enumeration allKeys = userDataProp.propertyNames(); + int prefixlen = PREFIX.length(); + int suffixlen = suffix.length(); + while (allKeys.hasMoreElements()) { + String key = (String) allKeys.nextElement(); + if (key.endsWith(suffix)) { + String name = key.substring(prefixlen); + int endIndex = name.length() - suffixlen; + name = name.substring(0, endIndex); + ulst.add(name); + } + } + + Collections.sort(ulst); + return ulst.toArray(new String[0]); + } + + private ArrayList parseGroups(String groupsLine) { + String groupsArray[] = groupsLine.split(","); + return new ArrayList(Arrays.asList(groupsArray)); + } + + public static synchronized boolean getUserRenamePush(String userName) { + return userDataProp.getBoolean(PREFIX + userName + ".rename.push", false); + } + + /** + * Load user data. + */ + public synchronized User getUserByName(String userName) { + lazyInit(); + + if (!doesExist(userName)) { + return null; + } + + String baseKey = PREFIX + userName + '.'; + HdfsUser user = new HdfsUser(); + user.setName(userName); + user.setEnabled(userDataProp.getBoolean(baseKey + ATTR_ENABLE, true)); + user.setHomeDirectory(userDataProp + .getProperty(baseKey + ATTR_HOME, "/")); + +// user.setGroups(parseGroups(userDataProp +// .getProperty(baseKey + "groups"))); + + List authorities = new ArrayList(); + + if (userDataProp.getBoolean(baseKey + ATTR_WRITE_PERM, false)) { + authorities.add(new WritePermission()); + } + + int maxLogin = userDataProp.getInteger(baseKey + ATTR_MAX_LOGIN_NUMBER, + 0); + int maxLoginPerIP = userDataProp.getInteger(baseKey + + ATTR_MAX_LOGIN_PER_IP, 0); + + authorities.add(new ConcurrentLoginPermission(maxLogin, maxLoginPerIP)); + + int uploadRate = userDataProp.getInteger( + baseKey + ATTR_MAX_UPLOAD_RATE, 0); + int downloadRate = userDataProp.getInteger(baseKey + + ATTR_MAX_DOWNLOAD_RATE, 0); + + authorities.add(new TransferRatePermission(downloadRate, uploadRate)); + + user.setAuthorities(authorities); + + user.setMaxIdleTime(userDataProp.getInteger(baseKey + + ATTR_MAX_IDLE_TIME, 0)); + + return user; + } + + /** + * User existance check + */ + public synchronized boolean doesExist(String name) { + lazyInit(); + + String key = PREFIX + name + '.' 
+ ATTR_HOME; + return userDataProp.containsKey(key); + } + + /** + * User authenticate method + */ + public synchronized User authenticate(Authentication authentication) + throws AuthenticationFailedException { + lazyInit(); + + if (authentication instanceof UsernamePasswordAuthentication) { + UsernamePasswordAuthentication upauth = (UsernamePasswordAuthentication) authentication; + + String user = upauth.getUsername(); + String password = upauth.getPassword(); + + if (user == null) { + throw new AuthenticationFailedException("Authentication failed"); + } + + if (password == null) { + password = ""; + } + + String storedPassword = userDataProp.getProperty(PREFIX + user + '.' + + ATTR_PASSWORD); + + if (storedPassword == null) { + // user does not exist + throw new AuthenticationFailedException("Authentication failed"); + } + + if (passwordEncryptor.matches(password, storedPassword)) { + return getUserByName(user); + } else { + throw new AuthenticationFailedException("Authentication failed"); + } + + } else if (authentication instanceof AnonymousAuthentication) { + if (doesExist("anonymous")) { + return getUserByName("anonymous"); + } else { + throw new AuthenticationFailedException("Authentication failed"); + } + } else { + throw new IllegalArgumentException( + "Authentication not supported by this user manager"); + } + } + + /** + * Close the user manager - remove existing entries. + */ + public synchronized void dispose() { + if (userDataProp != null) { + userDataProp.clear(); + userDataProp = null; + } + } +} diff --git a/other/java/hdfs-over-ftp/src/main/resources/application.yml b/other/java/hdfs-over-ftp/src/main/resources/application.yml new file mode 100644 index 000000000..128bab1f9 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/resources/application.yml @@ -0,0 +1,15 @@ +server: + port: 8080 + +ftp: + port: 2222 + passive-address: localhost + passive-ports: 30000-30999 + +hdfs: + uri: seaweedfs://localhost:8888 + +seaweedFs: + enable: true + access: direct # direct/filerProxy/publicUrl + replication: "000" \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/resources/assembly.xml b/other/java/hdfs-over-ftp/src/main/resources/assembly.xml new file mode 100644 index 000000000..84fef56f8 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/resources/assembly.xml @@ -0,0 +1,39 @@ + + + + package + + + tar.gz + + false + + + + src/main/resources + / + + application.yml + logback-spring.xml + users.properties + kafka-producer.properties + + + + ${project.build.directory} + / + + *.jar + + + + + + false + lib + runtime + false + + + diff --git a/other/java/hdfs-over-ftp/src/main/resources/logback-spring.xml b/other/java/hdfs-over-ftp/src/main/resources/logback-spring.xml new file mode 100644 index 000000000..96b4c1d71 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/resources/logback-spring.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + + + + + + + ${LOG_HOME}/fileLog.log + + ${LOG_HOME}/fileLog.log.%d.%i + + 100 MB + + + + + %d %p (%file:%line\)- %m%n + + UTF-8 + + + + + + + + + + \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/users.properties b/other/java/hdfs-over-ftp/users.properties new file mode 100644 index 000000000..aeeab8e35 --- /dev/null +++ b/other/java/hdfs-over-ftp/users.properties @@ -0,0 +1,12 @@ +#Generated file - don't edit (please) +#Thu Mar 11 19:11:12 CST 2021 +ftpserver.user.test.idletime=0 +ftpserver.user.test.maxloginperip=0 
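+# Illustrative note, not part of the generated file: the userpassword value below
+# is an MD5 hash of the plain-text password, matching the default Md5PasswordEncryptor
+# in HdfsUserManager, and homedirectory is the filer path served as this user's FTP root.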
+ftpserver.user.test.userpassword=44664D4D827C740293D2AA244FB60445 +ftpserver.user.test.enableflag=true +ftpserver.user.test.maxloginnumber=0 +ftpserver.user.test.rename.push=true +ftpserver.user.test.homedirectory=/buckets/test/ +ftpserver.user.test.downloadrate=0 +ftpserver.user.test.writepermission=true +ftpserver.user.test.uploadrate=0 diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml index 64fb4da75..503e5fbdf 100644 --- a/other/java/hdfs2/dependency-reduced-pom.xml +++ b/other/java/hdfs2/dependency-reduced-pom.xml @@ -301,7 +301,7 @@ - 1.6.2 + 1.6.4 2.9.2 diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml index 3bc9709a7..6eeba912e 100644 --- a/other/java/hdfs2/pom.xml +++ b/other/java/hdfs2/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.6.2 + 1.6.4 2.9.2 diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml index ea7c08c17..590d725d0 100644 --- a/other/java/hdfs3/dependency-reduced-pom.xml +++ b/other/java/hdfs3/dependency-reduced-pom.xml @@ -309,7 +309,7 @@ - 1.6.2 + 1.6.4 3.1.1 diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml index 3e0f373f7..2b11035f5 100644 --- a/other/java/hdfs3/pom.xml +++ b/other/java/hdfs3/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.6.2 + 1.6.4 3.1.1 diff --git a/test/random_access/pom.xml b/test/random_access/pom.xml index 44a3fd9df..36fe6b256 100644 --- a/test/random_access/pom.xml +++ b/test/random_access/pom.xml @@ -8,7 +8,7 @@ 1.0-SNAPSHOT - 28.0-jre + 30.0-jre diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index c1bc80c42..4fedb55f1 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -41,6 +41,7 @@ type BenchmarkOptions struct { grpcDialOption grpc.DialOption masterClient *wdclient.MasterClient fsync *bool + useTcp *bool } var ( @@ -67,6 +68,7 @@ func init() { b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 
0 means all available CPUs") b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write") + b.useTcp = cmdBenchmark.Flag.Bool("useTcp", false, "send data via tcp") sharedBytes = make([]byte, 1024) } @@ -223,6 +225,8 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { random := rand.New(rand.NewSource(time.Now().UnixNano())) + volumeTcpClient := wdclient.NewVolumeTcpClient() + for id := range idChan { start := time.Now() fileSize := int64(*b.fileSize + random.Intn(64)) @@ -243,7 +247,15 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { if !isSecure && assignResult.Auth != "" { isSecure = true } - if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil { + if *b.useTcp { + if uploadByTcp(volumeTcpClient, fp) { + fileIdLineChan <- fp.Fid + s.completed++ + s.transferred += fileSize + } else { + s.failed++ + } + } else if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} @@ -293,7 +305,7 @@ func readFiles(fileIdLineChan chan string, s *stat) { } var bytes []byte for _, url := range urls { - bytes, _, err = util.FastGet(url) + bytes, _, err = util.Get(url) if err == nil { break } @@ -329,6 +341,17 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b } } +func uploadByTcp(volumeTcpClient *wdclient.VolumeTcpClient, fp *operation.FilePart) bool { + + err := volumeTcpClient.PutFileChunk(fp.Server, fp.Fid, uint32(fp.FileSize), fp.Reader) + if err != nil { + glog.Errorf("upload chunk err: %v", err) + return false + } + + return true +} + func readFileIds(fileName string, fileIdLineChan chan string) { file, err := os.Open(fileName) // For read access. 
if err != nil { diff --git a/weed/command/command.go b/weed/command/command.go index 5506e6969..b6efcead2 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -17,13 +17,16 @@ var Commands = []*Command{ cmdFiler, cmdFilerBackup, cmdFilerCat, + cmdFilerMetaBackup, cmdFilerMetaTail, cmdFilerReplicate, cmdFilerSynchronize, cmdFix, + cmdGateway, cmdMaster, cmdMount, cmdS3, + cmdIam, cmdMsgBroker, cmdScaffold, cmdServer, diff --git a/weed/command/export.go b/weed/command/export.go index f100f3af5..1c32e1050 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool { err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner) if err != nil && err != io.EOF { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + glog.Errorf("Export Volume File [ERROR] %s\n", err) } return true } diff --git a/weed/command/filer.go b/weed/command/filer.go index 534bd9e04..a723b4d8a 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -25,6 +25,8 @@ var ( filerS3Options S3Options filerStartWebDav *bool filerWebDavOptions WebDavOption + filerStartIam *bool + filerIamOptions IamOptions ) type FilerOptions struct { @@ -47,6 +49,7 @@ type FilerOptions struct { metricsHttpPort *int saveToFilerLimit *int defaultLevelDbDirectory *string + concurrentUploadLimitMB *int } func init() { @@ -54,12 +57,12 @@ func init() { f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers") f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection") f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address") - f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + f.bindIp = cmdFiler.Flag.String("ip.bind", "", "ip address to bind to") f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public") f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. 
If not specified, use master setting.") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") - f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") + f.maxMB = cmdFiler.Flag.Int("maxMB", 4, "split files larger than the limit") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center") f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack") @@ -69,6 +72,7 @@ func init() { f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store") f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory") + f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size") // start s3 on filer filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway") @@ -89,6 +93,10 @@ func init() { filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file") filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks") filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB") + + // start iam on filer + filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service") + filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port") } var cmdFiler = &Command{ @@ -106,7 +114,7 @@ var cmdFiler = &Command{ GET /path/to/ The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order. - If the "filer.toml" is not found, an embedded filer store will be craeted under "-defaultStoreDir". + If the "filer.toml" is not found, an embedded filer store will be created under "-defaultStoreDir". 
The example filer.toml configuration file can be generated by "weed scaffold -config=filer" @@ -119,22 +127,33 @@ func runFiler(cmd *Command, args []string) bool { go stats_collect.StartMetricsServer(*f.metricsHttpPort) + filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port) + startDelay := time.Duration(2) if *filerStartS3 { - filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port) filerS3Options.filer = &filerAddress go func() { - time.Sleep(2 * time.Second) + time.Sleep(startDelay * time.Second) filerS3Options.startS3Server() }() + startDelay++ } if *filerStartWebDav { - filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port) filerWebDavOptions.filer = &filerAddress go func() { - time.Sleep(2 * time.Second) + time.Sleep(startDelay * time.Second) filerWebDavOptions.startWebDav() }() + startDelay++ + } + + if *filerStartIam { + filerIamOptions.filer = &filerAddress + filerIamOptions.masters = f.masters + go func() { + time.Sleep(startDelay * time.Second) + filerIamOptions.startIamServer() + }() } f.startFiler() @@ -159,21 +178,22 @@ func (fo *FilerOptions) startFiler() { } fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{ - Masters: strings.Split(*fo.masters, ","), - Collection: *fo.collection, - DefaultReplication: *fo.defaultReplicaPlacement, - DisableDirListing: *fo.disableDirListing, - MaxMB: *fo.maxMB, - DirListingLimit: *fo.dirListingLimit, - DataCenter: *fo.dataCenter, - Rack: *fo.rack, - DefaultLevelDbDir: defaultLevelDbDirectory, - DisableHttp: *fo.disableHttp, - Host: *fo.ip, - Port: uint32(*fo.port), - Cipher: *fo.cipher, - SaveToFilerLimit: *fo.saveToFilerLimit, - Filers: peers, + Masters: strings.Split(*fo.masters, ","), + Collection: *fo.collection, + DefaultReplication: *fo.defaultReplicaPlacement, + DisableDirListing: *fo.disableDirListing, + MaxMB: *fo.maxMB, + DirListingLimit: *fo.dirListingLimit, + DataCenter: *fo.dataCenter, + Rack: *fo.rack, + DefaultLevelDbDir: defaultLevelDbDirectory, + DisableHttp: *fo.disableHttp, + Host: *fo.ip, + Port: uint32(*fo.port), + Cipher: *fo.cipher, + SaveToFilerLimit: int64(*fo.saveToFilerLimit), + Filers: peers, + ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024, }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go index 8cb7441f6..888b46fe7 100644 --- a/weed/command/filer_backup.go +++ b/weed/command/filer_backup.go @@ -155,4 +155,3 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti }) } - diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go index a46098b04..c4281feba 100644 --- a/weed/command/filer_cat.go +++ b/weed/command/filer_cat.go @@ -110,7 +110,7 @@ func runFilerCat(cmd *Command, args []string) bool { filerCat.filerClient = client - return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64) + return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false) }) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 42f5ec4c3..e7a9b107f 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -56,7 +56,7 @@ func init() { copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name") copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") - copy.maxMB = 
cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit") + copy.maxMB = cmdCopy.Flag.Int("maxMB", 4, "split files larger than the limit") copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines") copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file") } @@ -207,16 +207,6 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi } mode := fi.Mode() - if mode.IsDir() { - files, _ := ioutil.ReadDir(fileOrDir) - for _, subFileOrDir := range files { - if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil { - return err - } - } - return nil - } - uid, gid := util.GetFileUidGid(fi) fileCopyTaskChan <- FileCopyTask{ @@ -228,6 +218,16 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi gid: gid, } + if mode.IsDir() { + files, _ := ioutil.ReadDir(fileOrDir) + println("checking directory", fileOrDir) + for _, subFileOrDir := range files { + if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil { + return err + } + } + } + return nil } @@ -293,20 +293,22 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err // upload the file content fileName := filepath.Base(f.Name()) - mimeType := detectMimeType(f) - data, err := ioutil.ReadAll(f) - if err != nil { - return err - } + var mimeType string var chunks []*filer_pb.FileChunk var assignResult *filer_pb.AssignVolumeResponse var assignError error - if task.fileSize > 0 { + if task.fileMode & os.ModeDir == 0 && task.fileSize > 0 { + + mimeType = detectMimeType(f) + data, err := ioutil.ReadAll(f) + if err != nil { + return err + } // assign a volume - err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go new file mode 100644 index 000000000..ba0b44659 --- /dev/null +++ b/weed/command/filer_meta_backup.go @@ -0,0 +1,268 @@ +package command + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/spf13/viper" + "google.golang.org/grpc" + "io" + "reflect" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + metaBackup FilerMetaBackupOptions +) + +type FilerMetaBackupOptions struct { + grpcDialOption grpc.DialOption + filerAddress *string + filerDirectory *string + restart *bool + backupFilerConfig *string + + store filer.FilerStore +} + +func init() { + cmdFilerMetaBackup.Run = runFilerMetaBackup // break init cycle + metaBackup.filerAddress = cmdFilerMetaBackup.Flag.String("filer", "localhost:8888", "filer hostname:port") + metaBackup.filerDirectory = cmdFilerMetaBackup.Flag.String("filerDir", "/", "a folder on the filer") + metaBackup.restart = cmdFilerMetaBackup.Flag.Bool("restart", false, "copy the full metadata before async incremental backup") + metaBackup.backupFilerConfig = cmdFilerMetaBackup.Flag.String("config", "", "path to filer.toml specifying backup filer store") +} + 
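+// Illustrative usage sketch, not part of the original change: the backup target is a
+// regular filer store, so the backup_filer.toml passed via -config enables exactly one
+// store section, for example (paths here are placeholders):
+//
+//	[leveldb2]
+//	enabled = true
+//	dir = "./backup_filerldb2"
+//
+// The tool is then started with
+//
+//	weed filer.meta.backup -config=/path/to/backup_filer.toml -filer=localhost:8888
+//
+// and -restart forces a full copy of the metadata tree before tailing incremental changes.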
+var cmdFilerMetaBackup = &Command{ + UsageLine: "filer.meta.backup [-filer=localhost:8888] [-filerDir=/] [-restart] -config=/path/to/backup_filer.toml", + Short: "continuously backup filer meta data changes to anther filer store specified in a backup_filer.toml", + Long: `continuously backup filer meta data changes. +The backup writes to another filer store specified in a backup_filer.toml. + + weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888" + weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888" -restart + + `, +} + +func runFilerMetaBackup(cmd *Command, args []string) bool { + + metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + // load backup_filer.toml + v := viper.New() + v.SetConfigFile(*metaBackup.backupFilerConfig) + + if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file + glog.Fatalf("Failed to load %s file.\nPlease use this command to generate the a %s.toml file\n"+ + " weed scaffold -config=%s -output=.\n\n\n", + *metaBackup.backupFilerConfig, "backup_filer", "filer") + } + + if err := metaBackup.initStore(v); err != nil { + glog.V(0).Infof("init backup filer store: %v", err) + return true + } + + missingPreviousBackup := false + _, err := metaBackup.getOffset() + if err != nil { + missingPreviousBackup = true + } + + if *metaBackup.restart || missingPreviousBackup { + glog.V(0).Infof("traversing metadata tree...") + startTime := time.Now() + if err := metaBackup.traverseMetadata(); err != nil { + glog.Errorf("traverse meta data: %v", err) + return true + } + glog.V(0).Infof("metadata copied up to %v", startTime) + if err := metaBackup.setOffset(startTime); err != nil { + startTime = time.Now() + } + } + + for { + err := metaBackup.streamMetadataBackup() + if err != nil { + glog.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err) + time.Sleep(1747 * time.Millisecond) + } + } + + return true +} + +func (metaBackup *FilerMetaBackupOptions) initStore(v *viper.Viper) error { + // load configuration for default filer store + hasDefaultStoreConfigured := false + for _, store := range filer.Stores { + if v.GetBool(store.GetName() + ".enabled") { + store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(filer.FilerStore) + if err := store.Initialize(v, store.GetName()+"."); err != nil { + glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) + } + glog.V(0).Infof("configured filer store to %s", store.GetName()) + hasDefaultStoreConfigured = true + metaBackup.store = filer.NewFilerStoreWrapper(store) + break + } + } + if !hasDefaultStoreConfigured { + return fmt.Errorf("no filer store enabled in %s", v.ConfigFileUsed()) + } + + return nil +} + +func (metaBackup *FilerMetaBackupOptions) traverseMetadata() (err error) { + var saveErr error + + traverseErr := filer_pb.TraverseBfs(metaBackup, util.FullPath(*metaBackup.filerDirectory), func(parentPath util.FullPath, entry *filer_pb.Entry) { + + println("+", parentPath.Child(entry.Name)) + if err := metaBackup.store.InsertEntry(context.Background(), filer.FromPbEntry(string(parentPath), entry)); err != nil { + saveErr = fmt.Errorf("insert entry error: %v\n", err) + return + } + + }) + + if traverseErr != nil { + return fmt.Errorf("traverse: %v", traverseErr) + } + return saveErr +} + +var ( + MetaBackupKey = []byte("metaBackup") +) + +func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error { + + startTime, err := metaBackup.getOffset() + if err != 
nil { + startTime = time.Now() + } + glog.V(0).Infof("streaming from %v", startTime) + + store := metaBackup.store + + eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error { + + ctx := context.Background() + message := resp.EventNotification + + if message.OldEntry == nil && message.NewEntry == nil { + return nil + } + if message.OldEntry == nil && message.NewEntry != nil { + println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry) + return store.InsertEntry(ctx, entry) + } + if message.OldEntry != nil && message.NewEntry == nil { + println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + return store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + } + if message.OldEntry != nil && message.NewEntry != nil { + if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name { + println("~", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry) + return store.UpdateEntry(ctx, entry) + } + println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + if err := store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name)); err != nil { + return err + } + println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + return store.InsertEntry(ctx, filer.FromPbEntry(message.NewParentPath, message.NewEntry)) + } + + return nil + } + + tailErr := pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "meta_backup", + PathPrefix: *metaBackup.filerDirectory, + SinceNs: startTime.UnixNano(), + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + var counter int64 + var lastWriteTime time.Time + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + if err = eachEntryFunc(resp); err != nil { + return err + } + + counter++ + if lastWriteTime.Add(3 * time.Second).Before(time.Now()) { + glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, time.Unix(0, resp.TsNs), float64(counter)/float64(3)) + counter = 0 + lastWriteTime = time.Now() + if err2 := metaBackup.setOffset(lastWriteTime); err2 != nil { + return err2 + } + } + + } + + }) + return tailErr +} + +func (metaBackup *FilerMetaBackupOptions) getOffset() (lastWriteTime time.Time, err error) { + value, err := metaBackup.store.KvGet(context.Background(), MetaBackupKey) + if err != nil { + return + } + tsNs := util.BytesToUint64(value) + + return time.Unix(0, int64(tsNs)), nil +} + +func (metaBackup *FilerMetaBackupOptions) setOffset(lastWriteTime time.Time) error { + valueBuf := make([]byte, 8) + util.Uint64toBytes(valueBuf, uint64(lastWriteTime.UnixNano())) + + if err := metaBackup.store.KvPut(context.Background(), MetaBackupKey, valueBuf); err != nil { + return err + } + return nil +} + +var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{}) + +func (metaBackup *FilerMetaBackupOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client 
filer_pb.SeaweedFilerClient) error { + return fn(client) + }) + +} + +func (metaBackup *FilerMetaBackupOptions) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go index 189cacefc..8451ffd78 100644 --- a/weed/command/filer_meta_tail.go +++ b/weed/command/filer_meta_tail.go @@ -23,7 +23,7 @@ func init() { } var cmdFilerMetaTail = &Command{ - UsageLine: "filer.meta.tail [-filer=localhost:8888] [-target=/]", + UsageLine: "filer.meta.tail [-filer=localhost:8888] [-pathPrefix=/]", Short: "see continuous changes on a filer", Long: `See continuous changes on a filer. @@ -36,7 +36,7 @@ var cmdFilerMetaTail = &Command{ var ( tailFiler = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port") - tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer") + tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or common prefix for the folders or files on filer") tailStart = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"") tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ") esServers = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://") diff --git a/weed/command/gateway.go b/weed/command/gateway.go new file mode 100644 index 000000000..8a6f852a5 --- /dev/null +++ b/weed/command/gateway.go @@ -0,0 +1,93 @@ +package command + +import ( + "net/http" + "strconv" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + gatewayOptions GatewayOptions +) + +type GatewayOptions struct { + masters *string + filers *string + bindIp *string + port *int + maxMB *int +} + +func init() { + cmdGateway.Run = runGateway // break init cycle + gatewayOptions.masters = cmdGateway.Flag.String("master", "localhost:9333", "comma-separated master servers") + gatewayOptions.filers = cmdGateway.Flag.String("filer", "localhost:8888", "comma-separated filer servers") + gatewayOptions.bindIp = cmdGateway.Flag.String("ip.bind", "localhost", "ip address to bind to") + gatewayOptions.port = cmdGateway.Flag.Int("port", 5647, "gateway http listen port") + gatewayOptions.maxMB = cmdGateway.Flag.Int("maxMB", 4, "split files larger than the limit") +} + +var cmdGateway = &Command{ + UsageLine: "gateway -port=8888 -master=[,]* -filer=[,]*", + Short: "start a gateway server that points to a list of master servers or a list of filers", + Long: `start a gateway server which accepts REST operation to write any blobs, files, or topic messages. 
+ + POST /blobs/ + upload the blob and return a chunk id + DELETE /blobs/ + delete a chunk id + + /* + POST /files/path/to/a/file + save /path/to/a/file on filer + DELETE /files/path/to/a/file + delete /path/to/a/file on filer + + POST /topics/topicName + save on filer to /topics/topicName//ts.json + */ +`, +} + +func runGateway(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + gatewayOptions.startGateway() + + return true +} + +func (gw *GatewayOptions) startGateway() { + + defaultMux := http.NewServeMux() + + _, gws_err := weed_server.NewGatewayServer(defaultMux, &weed_server.GatewayOption{ + Masters: strings.Split(*gw.masters, ","), + Filers: strings.Split(*gw.filers, ","), + MaxMB: *gw.maxMB, + }) + if gws_err != nil { + glog.Fatalf("Gateway startup error: %v", gws_err) + } + + glog.V(0).Infof("Start Seaweed Gateway %s at %s:%d", util.Version(), *gw.bindIp, *gw.port) + gatewayListener, e := util.NewListener( + *gw.bindIp+":"+strconv.Itoa(*gw.port), + time.Duration(10)*time.Second, + ) + if e != nil { + glog.Fatalf("Filer listener error: %v", e) + } + + httpS := &http.Server{Handler: defaultMux} + if err := httpS.Serve(gatewayListener); err != nil { + glog.Fatalf("Gateway Fail to serve: %v", e) + } + +} diff --git a/weed/command/iam.go b/weed/command/iam.go new file mode 100644 index 000000000..17d0832cb --- /dev/null +++ b/weed/command/iam.go @@ -0,0 +1,97 @@ +package command + +import ( + "context" + "fmt" + "net/http" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/iamapi" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/gorilla/mux" + "time" +) + +var ( + iamStandaloneOptions IamOptions +) + +type IamOptions struct { + filer *string + masters *string + port *int +} + +func init() { + cmdIam.Run = runIam // break init cycle + iamStandaloneOptions.filer = cmdIam.Flag.String("filer", "localhost:8888", "filer server address") + iamStandaloneOptions.masters = cmdIam.Flag.String("master", "localhost:9333", "comma-separated master servers") + iamStandaloneOptions.port = cmdIam.Flag.Int("port", 8111, "iam server http listen port") +} + +var cmdIam = &Command{ + UsageLine: "iam [-port=8111] [-filer=] [-masters=,]", + Short: "start a iam API compatible server", + Long: "start a iam API compatible server.", +} + +func runIam(cmd *Command, args []string) bool { + return iamStandaloneOptions.startIamServer() +} + +func (iamopt *IamOptions) startIamServer() bool { + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + glog.V(0).Infof("IAM read filer configuration: %s", resp) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress) + break + } + } + + router := mux.NewRouter().SkipClean(true) + _, 
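Going by the usage text above, the gateway exposes plain HTTP endpoints: POST /blobs/ uploads a blob and returns a chunk id, DELETE removes it. A minimal client sketch under those assumptions (the address, path, and response handling are illustrative only, not taken from the gateway handler code):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed gateway address: the default -port above is 5647 on localhost.
	resp, err := http.Post("http://localhost:5647/blobs/", "application/octet-stream",
		bytes.NewReader([]byte("hello blob")))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The usage text says the response carries a chunk id; print whatever comes back.
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}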
iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{ + Filer: *iamopt.filer, + Port: *iamopt.port, + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: grpcDialOption, + }) + glog.V(0).Info("NewIamApiServer created") + if iamApiServer_err != nil { + glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err) + } + + httpS := &http.Server{Handler: router} + + listenAddress := fmt.Sprintf(":%d", *iamopt.port) + iamApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) + if err != nil { + glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err) + } + + glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port) + if err = httpS.Serve(iamApiListener); err != nil { + glog.Fatalf("IAM API Server Fail to serve: %v", err) + } + + return true +} diff --git a/weed/command/master.go b/weed/command/master.go index d569919cd..0f5e2156d 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -6,7 +6,6 @@ import ( "google.golang.org/grpc/reflection" "net/http" "os" - "runtime" "sort" "strconv" "strings" @@ -48,8 +47,8 @@ type MasterOptions struct { func init() { cmdMaster.Run = runMaster // break init cycle m.port = cmdMaster.Flag.Int("port", 9333, "http listen port") - m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master | address") - m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master | address, also used as identifier") + m.ipBind = cmdMaster.Flag.String("ip.bind", "", "ip address to bind to") m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095") m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") @@ -86,7 +85,6 @@ func runMaster(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) - runtime.GOMAXPROCS(runtime.NumCPU()) grace.SetupProfiling(*masterCpuProfile, *masterMemProfile) parent, _ := util.FullPath(*m.metaFolder).DirAndName() @@ -138,7 +136,6 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { if err != nil { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } - // Create your protocol servers. 
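The iam startup code above (like the mount retry loop later in this diff) keeps probing the filer until it answers before serving traffic. A generic sketch of that wait-until-ready pattern, with a hypothetical ping function standing in for the GetFilerConfiguration call:

package main

import (
	"fmt"
	"time"
)

// waitUntilReady retries ping with a growing pause, mirroring the startup loops above.
func waitUntilReady(ping func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = ping(); err == nil {
			return nil
		}
		fmt.Printf("not ready yet (%v), waiting %ds ...\n", err, i+1)
		time.Sleep(time.Duration(i+1) * time.Second)
	}
	return err
}

func main() {
	calls := 0
	err := waitUntilReady(func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("filer not up")
		}
		return nil
	}, 10)
	fmt.Println("ready:", err == nil)
}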
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) protobuf.RegisterRaftServer(grpcS, raftServer) diff --git a/weed/command/mount.go b/weed/command/mount.go index ea439af7c..5811f0b99 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -25,6 +25,7 @@ type MountOptions struct { volumeServerAccess *string uidMap *string gidMap *string + readOnly *bool } var ( @@ -45,7 +46,7 @@ func init() { mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files") - mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0") + mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers if not 0") mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data") mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") @@ -55,6 +56,7 @@ func init() { mountOptions.volumeServerAccess = cmdMount.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl|filerProxy]") mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated :") mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated :") + mountOptions.readOnly = cmdMount.Flag.Bool("readOnly", false, "read only") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index a6d562d40..2474cf7dd 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -53,7 +53,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { filer := *option.filer // parse filer grpc address - filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer) + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(filer) if err != nil { glog.V(0).Infof("ParseFilerGrpcAddress: %v", err) return true @@ -63,16 +63,23 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { // try to connect to filer, filerBucketsPath may be useful later grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") var cipher bool - err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + for i := 0; i < 10; i++ { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) if err != nil { - return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err) + glog.V(0).Infof("failed to talk to 
filer %s: %v", filerGrpcAddress, err) + glog.V(0).Infof("wait for %d seconds ...", i+1) + time.Sleep(time.Duration(i+1) * time.Second) } - cipher = resp.Cipher - return nil - }) + } if err != nil { - glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err) + glog.Errorf("failed to talk to filer %s: %v", filerGrpcAddress, err) return true } @@ -142,8 +149,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { fuse.Subtype("seaweedfs"), // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders fuse.NoAppleXattr(), - fuse.NoBrowse(), - fuse.AutoXattr(), fuse.ExclCreate(), fuse.DaemonTimeout("3600"), fuse.AllowSUID(), @@ -162,6 +167,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { if *option.nonempty { options = append(options, fuse.AllowNonEmptyMount()) } + if *option.readOnly { + options = append(options, fuse.ReadOnly()) + } // find mount point mountRoot := filerMountRootPath @@ -186,7 +194,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { CacheDir: *option.cacheDir, CacheSizeMB: *option.cacheSizeMB, DataCenter: *option.dataCenter, - EntryCacheTtl: 3 * time.Second, MountUid: uid, MountGid: gid, MountMode: mountMode, diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go index b4b5855ff..db0b4148d 100644 --- a/weed/command/msg_broker.go +++ b/weed/command/msg_broker.go @@ -63,7 +63,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool { grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile) - filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer) + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*msgBrokerOpt.filer) if err != nil { glog.Fatal(err) return false diff --git a/weed/command/s3.go b/weed/command/s3.go index d8e3e306b..c8292a7d5 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -137,7 +137,7 @@ func runS3(cmd *Command, args []string) bool { func (s3opt *S3Options) startS3Server() bool { - filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer) + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*s3opt.filer) if err != nil { glog.Fatal(err) return false diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index c2d53e4bd..88dc94df1 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -103,9 +103,9 @@ dir = "./filerrdb" # directory to store rocksdb files [mysql] # or memsql, tidb # CREATE TABLE IF NOT EXISTS filemeta ( -# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', -# name VARCHAR(1000) COMMENT 'directory or file name', -# directory TEXT COMMENT 'full path to parent directory', +# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', +# name VARCHAR(1000) BINARY COMMENT 'directory or file name', +# directory TEXT COMMENT 'full path to parent directory', # meta LONGBLOB, # PRIMARY KEY (dirhash, name) # ) DEFAULT CHARSET=utf8; @@ -120,13 +120,16 @@ connection_max_idle = 2 connection_max_open = 100 connection_max_lifetime_seconds = 0 interpolateParams = false +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) 
ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" [mysql2] # or memsql, tidb enabled = false createTable = """ CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` ( dirhash BIGINT, - name VARCHAR(1000), + name VARCHAR(1000) BINARY, directory TEXT, meta LONGBLOB, PRIMARY KEY (dirhash, name) @@ -141,6 +144,9 @@ connection_max_idle = 2 connection_max_open = 100 connection_max_lifetime_seconds = 0 interpolateParams = false +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" [postgres] # or cockroachdb, YugabyteDB # CREATE TABLE IF NOT EXISTS filemeta ( @@ -161,6 +167,9 @@ sslmode = "disable" connection_max_idle = 100 connection_max_open = 100 connection_max_lifetime_seconds = 0 +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" [postgres2] enabled = false @@ -183,6 +192,9 @@ sslmode = "disable" connection_max_idle = 100 connection_max_open = 100 connection_max_lifetime_seconds = 0 +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" [cassandra] # CREATE TABLE filemeta ( @@ -269,7 +281,7 @@ index.max_result_window = 10000 # Make sure they are not the same if using the same store type! # 4. Set enabled to true # -# The following is just using cassandra as an example +# The following is just using redis as an example ########################## [redis2.tmp] enabled = false @@ -440,22 +452,28 @@ expires_after_seconds = 10 # seconds # the host name is not checked, so the PERM files can be shared. [grpc] ca = "" +# Set wildcard domain for enable TLS authentication by common names +allowed_wildcard_domain = "" # .mycompany.com [grpc.volume] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names [grpc.master] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names [grpc.filer] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names [grpc.msg_broker] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names # use this for any place needs a grpc client # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" @@ -463,7 +481,6 @@ key = "" cert = "" key = "" - # volume server https options # Note: work in progress! # this does not work with other clients, e.g., "weed filer|mount" etc, yet. 
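The new enableUpsert/upsertQuery settings above are plain format templates: the store substitutes the table (or bucket) name at query time, and the Postgres variant uses the indexed verb %[1]s so the name can appear twice. A quick illustration of that substitution with the sample templates from the scaffold:

package main

import "fmt"

func main() {
	mysqlTpl := "INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"
	pgTpl := `INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta`

	// Substituting a table name yields the statement actually executed.
	fmt.Println(fmt.Sprintf(mysqlTpl, "filemeta"))
	fmt.Println(fmt.Sprintf(pgTpl, "filemeta"))
}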
@@ -501,7 +518,7 @@ default = "localhost:8888" # used by maintenance scripts if the scripts needs [master.sequencer] -type = "raft" # Choose [raft|etcd] type for storing the file id sequence +type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence # when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence # example : http://127.0.0.1:2379,http://127.0.0.1:2389 sequencer_etcd_urls = "http://127.0.0.1:2379" diff --git a/weed/command/server.go b/weed/command/server.go index 611578953..6eb3bf97c 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -2,9 +2,8 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/util/grace" "os" - "runtime" - "runtime/pprof" "strings" "time" @@ -16,6 +15,7 @@ import ( type ServerOptions struct { cpuprofile *string + memprofile *string v VolumeServerOptions } @@ -49,8 +49,8 @@ var cmdServer = &Command{ } var ( - serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name") - serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier") + serverBindIp = cmdServer.Flag.String("ip.bind", "", "ip address to bind to") serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") @@ -76,6 +76,7 @@ var ( func init() { serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file") + serverOptions.memprofile = cmdServer.Flag.String("memprofile", "", "memory profile output file") masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") @@ -93,11 +94,12 @@ func init() { filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. 
If not specified, use master setting.") filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") - filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") + filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 4, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list") filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.") + filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") @@ -107,10 +109,12 @@ func init() { serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") + serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. 
precludes --memprofile and --cpuprofile") serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files") + serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, " enable tcp port") s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") @@ -137,14 +141,7 @@ func runServer(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) - if *serverOptions.cpuprofile != "" { - f, err := os.Create(*serverOptions.cpuprofile) - if err != nil { - glog.Fatal(err) - } - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } + grace.SetupProfiling(*serverOptions.cpuprofile, *serverOptions.memprofile) if *isStartingS3 { *isStartingFiler = true @@ -156,19 +153,21 @@ func runServer(cmd *Command, args []string) bool { *isStartingFiler = true } - _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) - peers := strings.Join(peerList, ",") - masterOptions.peers = &peers + if *isStartingMasterServer { + _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) + peers := strings.Join(peerList, ",") + masterOptions.peers = &peers + } // ip address masterOptions.ip = serverIp masterOptions.ipBind = serverBindIp - filerOptions.masters = &peers + filerOptions.masters = masterOptions.peers filerOptions.ip = serverIp filerOptions.bindIp = serverBindIp serverOptions.v.ip = serverIp serverOptions.v.bindIp = serverBindIp - serverOptions.v.masters = &peers + serverOptions.v.masters = masterOptions.peers serverOptions.v.idleConnectionTimeout = serverTimeout serverOptions.v.dataCenter = serverDataCenter serverOptions.v.rack = serverRack @@ -189,7 +188,6 @@ func runServer(cmd *Command, args []string) bool { webdavOptions.filer = &filerAddress msgBrokerOptions.filer = &filerAddress - runtime.GOMAXPROCS(runtime.NumCPU()) go stats_collect.StartMetricsServer(*serverMetricsHttpPort) folders := strings.Split(*volumeDataFolders, ",") diff --git a/weed/command/upload.go b/weed/command/upload.go index 67fde2185..0f9361b40 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -43,7 +43,7 @@ func init() { upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") upload.diskType = cmdUpload.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit") + upload.maxMB = cmdUpload.Flag.Int("maxMB", 4, "split files larger than the limit") upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server") } diff --git a/weed/command/volume.go b/weed/command/volume.go index 659c93d96..9df500178 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -6,7 +6,6 @@ import ( "net/http" httppprof "net/http/pprof" "os" - "runtime" "runtime/pprof" "strconv" "strings" @@ -36,41 +35,43 @@ var ( ) type VolumeServerOptions struct { - port *int - publicPort *int - folders []string - folderMaxLimits []int - idxFolder *string - ip *string - publicUrl *string - bindIp *string - masters *string - idleConnectionTimeout *int - dataCenter *string - rack *string - whiteList []string - indexType *string - diskType *string - fixJpgOrientation *bool 
- readRedirect *bool - cpuProfile *string - memProfile *string - compactionMBPerSecond *int - fileSizeLimitMB *int - minFreeSpacePercents []float32 - pprof *bool - preStopSeconds *int - metricsHttpPort *int + port *int + publicPort *int + folders []string + folderMaxLimits []int + idxFolder *string + ip *string + publicUrl *string + bindIp *string + masters *string + idleConnectionTimeout *int + dataCenter *string + rack *string + whiteList []string + indexType *string + diskType *string + fixJpgOrientation *bool + readRedirect *bool + cpuProfile *string + memProfile *string + compactionMBPerSecond *int + fileSizeLimitMB *int + concurrentUploadLimitMB *int + minFreeSpacePercents []float32 + pprof *bool + preStopSeconds *int + metricsHttpPort *int // pulseSeconds *int + enableTcp *bool } func init() { cmdVolume.Run = runVolume // break init cycle v.port = cmdVolume.Flag.Int("port", 8080, "http listen port") v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public") - v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name") + v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier") v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address") - v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + v.bindIp = cmdVolume.Flag.String("ip.bind", "", "ip address to bind to") v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers") v.preStopSeconds = cmdVolume.Flag.Int("preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") // v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") @@ -85,9 +86,11 @@ func init() { v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory") + v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size") v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. 
precludes --memprofile and --cpuprofile") v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files") + v.enableTcp = cmdVolume.Flag.Bool("tcp", false, " enable tcp port") } var cmdVolume = &Command{ @@ -109,8 +112,6 @@ func runVolume(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - runtime.GOMAXPROCS(runtime.NumCPU()) - // If --pprof is set we assume the caller wants to be able to collect // cpu and memory profiles via go tool pprof if !*v.pprof { @@ -238,6 +239,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v *v.fixJpgOrientation, *v.readRedirect, *v.compactionMBPerSecond, *v.fileSizeLimitMB, + int64(*v.concurrentUploadLimitMB)*1024*1024, ) // starting grpc server grpcS := v.startGrpcService(volumeServer) @@ -251,6 +253,11 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } + // starting tcp server + if *v.enableTcp { + go v.startTcpService(volumeServer) + } + // starting the cluster http server clusterHttpServer := v.startClusterHttpService(volumeMux) @@ -368,3 +375,22 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd }() return clusterHttpServer } + +func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) { + listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port+20000) + glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress) + listener, e := util.NewListener(listeningAddress, 0) + if e != nil { + glog.Fatalf("Volume server listener error on %s:%v", listeningAddress, e) + } + defer listener.Close() + + for { + c, err := listener.Accept() + if err != nil { + fmt.Println(err) + return + } + go volumeServer.HandleTcpConnection(c) + } +} diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 2bd4a3c61..781ea1e36 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -78,7 +78,7 @@ func (wo *WebDavOption) startWebDav() bool { } // parse filer grpc address - filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer) + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*wo.filer) if err != nil { glog.Fatal(err) return false diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go index 07ce56145..ab8f6bcbd 100644 --- a/weed/filer/abstract_sql/abstract_sql_store.go +++ b/weed/filer/abstract_sql/abstract_sql_store.go @@ -13,15 +13,15 @@ import ( ) type SqlGenerator interface { - GetSqlInsert(bucket string) string - GetSqlUpdate(bucket string) string - GetSqlFind(bucket string) string - GetSqlDelete(bucket string) string - GetSqlDeleteFolderChildren(bucket string) string - GetSqlListExclusive(bucket string) string - GetSqlListInclusive(bucket string) string - GetSqlCreateTable(bucket string) string - GetSqlDropTable(bucket string) string + GetSqlInsert(tableName string) string + GetSqlUpdate(tableName string) string + GetSqlFind(tableName string) string + GetSqlDelete(tableName string) string + GetSqlDeleteFolderChildren(tableName string) string + GetSqlListExclusive(tableName string) string + GetSqlListInclusive(tableName string) string + GetSqlCreateTable(tableName string) string + GetSqlDropTable(tableName string) string } type AbstractSqlStore struct { @@ -32,6 +32,29 @@ type AbstractSqlStore struct { dbsLock sync.Mutex } +func (store *AbstractSqlStore) OnBucketCreation(bucket string) { + 
store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + store.CreateTable(context.Background(), bucket) + + if store.dbs == nil { + return + } + store.dbs[bucket] = true +} +func (store *AbstractSqlStore) OnBucketDeletion(bucket string) { + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + store.deleteTable(context.Background(), bucket) + + if store.dbs == nil { + return + } + delete(store.dbs, bucket) +} + const ( DEFAULT_TABLE = "filemeta" ) @@ -253,7 +276,9 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat } } - res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), fullpath) + glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) + + res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath)) if err != nil { return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) } diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go index 99a62c90c..c709dc819 100644 --- a/weed/filer/filechunk_manifest.go +++ b/weed/filer/filechunk_manifest.go @@ -97,20 +97,20 @@ func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) ([]byte, error) { var err error - var buffer bytes.Buffer var shouldRetry bool + receivedData := make([]byte, 0, size) for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 { for _, urlString := range urlStrings { - shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) { - buffer.Write(data) + receivedData = receivedData[:0] + shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) { + receivedData = append(receivedData, data...) 
}) if !shouldRetry { break } if err != nil { glog.V(0).Infof("read %s failed, err: %v", urlString, err) - buffer.Reset() } else { break } @@ -123,7 +123,8 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool } } - return buffer.Bytes(), err + return receivedData, err + } func MaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk) (chunks []*filer_pb.FileChunk, err error) { diff --git a/weed/filer/filer.go b/weed/filer/filer.go index e59887763..effdc0e4e 100644 --- a/weed/filer/filer.go +++ b/weed/filer/filer.go @@ -241,12 +241,12 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er if oldEntry != nil { entry.Attr.Crtime = oldEntry.Attr.Crtime if oldEntry.IsDirectory() && !entry.IsDirectory() { - glog.Errorf("existing %s is a directory", entry.FullPath) - return fmt.Errorf("existing %s is a directory", entry.FullPath) + glog.Errorf("existing %s is a directory", oldEntry.FullPath) + return fmt.Errorf("existing %s is a directory", oldEntry.FullPath) } if !oldEntry.IsDirectory() && entry.IsDirectory() { - glog.Errorf("existing %s is a file", entry.FullPath) - return fmt.Errorf("existing %s is a file", entry.FullPath) + glog.Errorf("existing %s is a file", oldEntry.FullPath) + return fmt.Errorf("existing %s is a file", oldEntry.FullPath) } } return f.Store.UpdateEntry(ctx, entry) diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go index ba170f02e..43fb000c9 100644 --- a/weed/filer/filer_buckets.go +++ b/weed/filer/filer_buckets.go @@ -29,7 +29,7 @@ func (f *Filer) LoadBuckets() { limit := int64(math.MaxInt32) - entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "") + entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "", "") if err != nil { glog.V(1).Infof("no buckets found: %v", err) diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go index 8e549f5ad..ab5afc5cc 100644 --- a/weed/filer/filer_conf.go +++ b/weed/filer/filer_conf.go @@ -18,6 +18,7 @@ const ( FilerConfName = "filer.conf" IamConfigDirecotry = "/etc/iam" IamIdentityFile = "identity.json" + IamPoliciesFile = "policies.json" ) type FilerConf struct { diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go index 50a669f40..3ef3cfff9 100644 --- a/weed/filer/filer_delete_entry.go +++ b/weed/filer/filer_delete_entry.go @@ -11,6 +11,10 @@ import ( type HardLinkId []byte +const ( + MsgFailDelNonEmptyFolder = "fail to delete non-empty folder" +) + func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) { if p == "/" { return nil @@ -69,7 +73,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry includeLastFile := false if !isDeletingBucket { for { - entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "") + entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "") if err != nil { glog.Errorf("list folder %s: %v", entry.FullPath, err) return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) @@ -77,7 +81,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry if lastFileName == "" && !isRecursive && len(entries) > 0 { // only for 
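retriedFetchChunkData above now reuses one preallocated byte slice and cycles through the candidate URLs, with a wait between rounds that grows by half each time. A simplified sketch of that retry shape, with a hypothetical fetch callback in place of util.ReadUrlAsStream:

package main

import (
	"fmt"
	"time"
)

// fetchWithRetry tries each url in turn; between full rounds the wait grows by 50%.
// fetch reports whether a failure is worth retrying.
func fetchWithRetry(urls []string, maxWait time.Duration,
	fetch func(url string) (data []byte, shouldRetry bool, err error)) ([]byte, error) {

	var data []byte
	var err error
	for wait := time.Second; wait < maxWait; wait += wait / 2 {
		for _, u := range urls {
			var shouldRetry bool
			data, shouldRetry, err = fetch(u)
			if err == nil {
				return data, nil
			}
			if !shouldRetry {
				return nil, err
			}
			fmt.Printf("read %s failed: %v\n", u, err)
		}
		time.Sleep(wait)
	}
	return nil, err
}

func main() {
	calls := 0
	data, err := fetchWithRetry([]string{"replica-a", "replica-b"}, 10*time.Second,
		func(url string) ([]byte, bool, error) {
			calls++
			if calls < 3 {
				return nil, true, fmt.Errorf("simulated failure")
			}
			return []byte("chunk-bytes"), false, nil
		})
	fmt.Println(string(data), err)
}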
first iteration in the loop glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) - return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) + return nil, nil, fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath) } for _, sub := range entries { diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go index c461a82b8..7ab101102 100644 --- a/weed/filer/filer_notify.go +++ b/weed/filer/filer_notify.go @@ -116,13 +116,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func( sizeBuf := make([]byte, 4) startTsNs := startTime.UnixNano() - dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "") + dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "") if listDayErr != nil { return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr) } for _, dayEntry := range dayEntries { // println("checking day", dayEntry.FullPath) - hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "") + hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "", "") if listHourMinuteErr != nil { return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr) } diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go index 295a5039e..a91faeb24 100644 --- a/weed/filer/filer_on_meta_event.go +++ b/weed/filer/filer_on_meta_event.go @@ -11,6 +11,28 @@ import ( // onMetadataChangeEvent is triggered after filer processed change events from local or remote filers func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) { + f.maybeReloadFilerConfiguration(event) + f.onBucketEvents(event) +} + +func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) { + message := event.EventNotification + for _, sig := range message.Signatures { + if sig == f.Signature { + return + } + } + if f.DirBucketsPath == event.Directory { + if message.OldEntry == nil && message.NewEntry != nil { + f.Store.OnBucketCreation(message.NewEntry.Name) + } + if message.OldEntry != nil && message.NewEntry == nil { + f.Store.OnBucketDeletion(message.OldEntry.Name) + } + } +} + +func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataResponse) { if DirectoryEtcSeaweedFS != event.Directory { if DirectoryEtcSeaweedFS != event.EventNotification.NewParentPath { return @@ -26,12 +48,11 @@ func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) if entry.Name == FilerConfName { f.reloadFilerConfiguration(entry) } - } func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) { var buf bytes.Buffer - err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64) + err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64, false) if err != nil { return nil, err } diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go index 0a14d3756..f43312cfa 100644 --- a/weed/filer/filer_search.go +++ b/weed/filer/filer_search.go @@ -20,9 +20,9 @@ func splitPattern(pattern string) (prefix string, restPattern string) { } // For now, prefix and namePattern are mutually exclusive -func (f *Filer) ListDirectoryEntries(ctx context.Context, p 
util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string) (entries []*Entry, hasMore bool, err error) { +func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string) (entries []*Entry, hasMore bool, err error) { - _, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, func(entry *Entry) bool { + _, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, namePatternExclude, func(entry *Entry) bool { entries = append(entries, entry) return true }) @@ -36,7 +36,7 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, start } // For now, prefix and namePattern are mutually exclusive -func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { +func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { if strings.HasSuffix(string(p), "/") && len(p) > 1 { p = p[0 : len(p)-1] } @@ -47,30 +47,38 @@ func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, } var missedCount int64 - missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, eachEntryFunc) + missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, namePatternExclude, eachEntryFunc) for missedCount > 0 && err == nil { - missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, eachEntryFunc) + missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, namePatternExclude, eachEntryFunc) } return } -func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) { +func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) { - if len(restNamePattern) == 0 { + if len(restNamePattern) == 0 && len(namePatternExclude) == 0{ lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc) return 0, lastFileName, err } lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool { - nameToTest := strings.ToLower(entry.Name()) - if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && matched { - if !eachEntryFunc(entry) { - return false + nameToTest := entry.Name() + if len(namePatternExclude) > 0 { + if matched, matchErr := filepath.Match(namePatternExclude, nameToTest); matchErr == nil && matched { + missedCount++ + return true } - } else { - missedCount++ + 
} + if len(restNamePattern) > 0 { + if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && !matched { + missedCount++ + return true + } + } + if !eachEntryFunc(entry) { + return false } return true }) diff --git a/weed/filer/filerstore.go b/weed/filer/filerstore.go index 8955a25c7..a5b2f25de 100644 --- a/weed/filer/filerstore.go +++ b/weed/filer/filerstore.go @@ -39,3 +39,8 @@ type FilerStore interface { Shutdown() } + +type BucketAware interface { + OnBucketCreation(bucket string) + OnBucketDeletion(bucket string) +} diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go index 64baac371..cd7c0bea3 100644 --- a/weed/filer/filerstore_wrapper.go +++ b/weed/filer/filerstore_wrapper.go @@ -21,6 +21,8 @@ type VirtualFilerStore interface { DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error DeleteOneEntry(ctx context.Context, entry *Entry) error AddPathSpecificStore(path string, storeId string, store FilerStore) + OnBucketCreation(bucket string) + OnBucketDeletion(bucket string) } type FilerStoreWrapper struct { @@ -40,6 +42,27 @@ func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { } } +func (fsw *FilerStoreWrapper) OnBucketCreation(bucket string) { + for _, store := range fsw.storeIdToStore { + if ba, ok := store.(BucketAware); ok { + ba.OnBucketCreation(bucket) + } + } + if ba, ok := fsw.defaultStore.(BucketAware); ok { + ba.OnBucketCreation(bucket) + } +} +func (fsw *FilerStoreWrapper) OnBucketDeletion(bucket string) { + for _, store := range fsw.storeIdToStore { + if ba, ok := store.(BucketAware); ok { + ba.OnBucketDeletion(bucket) + } + } + if ba, ok := fsw.defaultStore.(BucketAware); ok { + ba.OnBucketDeletion(bucket) + } +} + func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string, store FilerStore) { fsw.storeIdToStore[storeId] = NewFilerStorePathTranlator(path, store) err := fsw.pathToStore.Put([]byte(path), storeId) @@ -126,8 +149,8 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) ( stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "find").Observe(time.Since(start).Seconds()) }() - glog.V(4).Infof("FindEntry %s", fp) entry, err = actualStore.FindEntry(ctx, fp) + glog.V(4).Infof("FindEntry %s: %v", fp, err) if err != nil { return nil, err } diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go index ac0ad4ba6..ce454f36a 100644 --- a/weed/filer/leveldb/leveldb_store.go +++ b/weed/filer/leveldb/leveldb_store.go @@ -8,6 +8,7 @@ import ( leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "os" "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" @@ -38,6 +39,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre func (store *LevelDBStore) initialize(dir string) (err error) { glog.Infof("filer store dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go index 9c342605e..d437895f5 100644 --- a/weed/filer/leveldb/leveldb_store_test.go +++ b/weed/filer/leveldb/leveldb_store_test.go @@ -51,14 +51,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _, _ := 
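The listing path above now takes both an inclusion pattern and a namePatternExclude; an entry is skipped (and counted as missed so the caller can fetch more) when it matches the exclude pattern or fails the include pattern. A small sketch of that filter using filepath.Match (prefix trimming from the real code is omitted here):

package main

import (
	"fmt"
	"path/filepath"
)

// keep reports whether a name survives the include/exclude patterns,
// mirroring the check inside doListPatternMatchedEntries.
func keep(name, includePattern, excludePattern string) bool {
	if excludePattern != "" {
		if matched, err := filepath.Match(excludePattern, name); err == nil && matched {
			return false
		}
	}
	if includePattern != "" {
		if matched, err := filepath.Match(includePattern, name); err == nil && !matched {
			return false
		}
	}
	return true
}

func main() {
	names := []string{"a.pdf", "b.txt", "notes.pdf.bak"}
	for _, n := range names {
		fmt.Println(n, keep(n, "*.pdf", "*.bak"))
	}
}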
testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "") + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -77,7 +77,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go index c76340da4..124d61c1c 100644 --- a/weed/filer/leveldb2/leveldb2_store.go +++ b/weed/filer/leveldb2/leveldb2_store.go @@ -38,6 +38,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { glog.Infof("filer store leveldb2 dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } diff --git a/weed/filer/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go index 495c73fdd..fd0ad18a3 100644 --- a/weed/filer/leveldb2/leveldb2_store_test.go +++ b/weed/filer/leveldb2/leveldb2_store_test.go @@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "") + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go index c81ea7bbe..d1cdfbbf6 100644 --- a/weed/filer/leveldb3/leveldb3_store.go +++ b/weed/filer/leveldb3/leveldb3_store.go @@ -45,6 +45,7 @@ func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, pr func (store *LevelDB3Store) initialize(dir string) (err error) { glog.Infof("filer store leveldb3 dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } diff --git 
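The BucketAware interface added in filerstore.go lets a store react when a bucket is created or deleted, and FilerStoreWrapper fans the event out to every registered store that implements it. A toy implementation sketch (the memStore type and its backing map are hypothetical):

package main

import "fmt"

// BucketAware matches the optional interface a filer store can implement.
type BucketAware interface {
	OnBucketCreation(bucket string)
	OnBucketDeletion(bucket string)
}

// memStore is a stand-in store that just tracks which buckets exist.
type memStore struct {
	buckets map[string]bool
}

func (s *memStore) OnBucketCreation(bucket string) { s.buckets[bucket] = true }
func (s *memStore) OnBucketDeletion(bucket string) { delete(s.buckets, bucket) }

func main() {
	var store BucketAware = &memStore{buckets: map[string]bool{}}
	store.OnBucketCreation("photos")
	store.OnBucketDeletion("photos")
	fmt.Println("bucket-aware store handled create+delete")
}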
a/weed/filer/leveldb3/leveldb3_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go index 53b0e927f..0b970a539 100644 --- a/weed/filer/leveldb3/leveldb3_store_test.go +++ b/weed/filer/leveldb3/leveldb3_store_test.go @@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "") + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go index 9437e9992..5c368a57e 100644 --- a/weed/filer/meta_aggregator.go +++ b/weed/filer/meta_aggregator.go @@ -69,6 +69,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string peerSignature, err = ma.readFilerStoreSignature(peer) } + // when filer store is not shared by multiple filers if peerSignature != f.Signature { if prevTsNs, err := ma.readOffset(f, peer, peerSignature); err == nil { lastTsNs = prevTsNs diff --git a/weed/filer/mysql/mysql_sql_gen.go b/weed/filer/mysql/mysql_sql_gen.go index 4213cf965..93d3e3f9e 100644 --- a/weed/filer/mysql/mysql_sql_gen.go +++ b/weed/filer/mysql/mysql_sql_gen.go @@ -2,6 +2,7 @@ package mysql import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" _ "github.com/go-sql-driver/mysql" ) @@ -9,44 +10,49 @@ import ( type SqlGenMysql struct { CreateTableSqlTemplate string DropTableSqlTemplate string + UpsertQueryTemplate string } var ( _ = abstract_sql.SqlGenerator(&SqlGenMysql{}) ) -func (gen *SqlGenMysql) GetSqlInsert(bucket string) string { - return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", bucket) +func (gen *SqlGenMysql) GetSqlInsert(tableName string) string { + if gen.UpsertQueryTemplate != "" { + return fmt.Sprintf(gen.UpsertQueryTemplate, tableName) + } else { + return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", tableName) + } } -func (gen *SqlGenMysql) GetSqlUpdate(bucket string) string { - return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", bucket) +func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string { + return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", tableName) } -func (gen *SqlGenMysql) GetSqlFind(bucket string) string { - return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket) +func (gen *SqlGenMysql) GetSqlFind(tableName string) string { + return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? 
AND directory=?", tableName) } -func (gen *SqlGenMysql) GetSqlDelete(bucket string) string { - return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", bucket) +func (gen *SqlGenMysql) GetSqlDelete(tableName string) string { + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName) } -func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(bucket string) string { - return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", bucket) +func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string { + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", tableName) } -func (gen *SqlGenMysql) GetSqlListExclusive(bucket string) string { - return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket) +func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string { + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName) } -func (gen *SqlGenMysql) GetSqlListInclusive(bucket string) string { - return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", bucket) +func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string { + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName) } -func (gen *SqlGenMysql) GetSqlCreateTable(bucket string) string { - return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket) +func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string { + return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName) } -func (gen *SqlGenMysql) GetSqlDropTable(bucket string) string { - return fmt.Sprintf(gen.DropTableSqlTemplate, bucket) +func (gen *SqlGenMysql) GetSqlDropTable(tableName string) string { + return fmt.Sprintf(gen.DropTableSqlTemplate, tableName) } diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go index 501ab1d39..fbaa4d5f9 100644 --- a/weed/filer/mysql/mysql_store.go +++ b/weed/filer/mysql/mysql_store.go @@ -3,9 +3,10 @@ package mysql import ( "database/sql" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer" "time" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" "github.com/chrislusf/seaweedfs/weed/util" _ "github.com/go-sql-driver/mysql" @@ -29,6 +30,8 @@ func (store *MysqlStore) GetName() string { func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), configuration.GetString(prefix+"username"), configuration.GetString(prefix+"password"), configuration.GetString(prefix+"hostname"), @@ -41,13 +44,17 @@ func (store *MysqlStore) Initialize(configuration util.Configuration, prefix str ) } -func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen, +func (store *MysqlStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen, maxLifetimeSeconds int, interpolateParams bool) (err error) { store.SupportBucketTable = false + if !enableUpsert { + upsertQuery = "" + } store.SqlGenerator = &SqlGenMysql{ CreateTableSqlTemplate: "", DropTableSqlTemplate: "drop 
table `%s`", + UpsertQueryTemplate: upsertQuery, } sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go index c796cd6aa..a1f54455a 100644 --- a/weed/filer/mysql2/mysql2_store.go +++ b/weed/filer/mysql2/mysql2_store.go @@ -32,6 +32,8 @@ func (store *MysqlStore2) GetName() string { func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( configuration.GetString(prefix+"createTable"), + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), configuration.GetString(prefix+"username"), configuration.GetString(prefix+"password"), configuration.GetString(prefix+"hostname"), @@ -44,13 +46,17 @@ func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix st ) } -func (store *MysqlStore2) initialize(createTable, user, password, hostname string, port int, database string, maxIdle, maxOpen, +func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen, maxLifetimeSeconds int, interpolateParams bool) (err error) { store.SupportBucketTable = true + if !enableUpsert { + upsertQuery = "" + } store.SqlGenerator = &mysql.SqlGenMysql{ CreateTableSqlTemplate: createTable, DropTableSqlTemplate: "drop table `%s`", + UpsertQueryTemplate: upsertQuery, } sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) diff --git a/weed/filer/postgres/postgres_sql_gen.go b/weed/filer/postgres/postgres_sql_gen.go index e13070c3d..6cee3d2da 100644 --- a/weed/filer/postgres/postgres_sql_gen.go +++ b/weed/filer/postgres/postgres_sql_gen.go @@ -10,44 +10,49 @@ import ( type SqlGenPostgres struct { CreateTableSqlTemplate string DropTableSqlTemplate string + UpsertQueryTemplate string } var ( _ = abstract_sql.SqlGenerator(&SqlGenPostgres{}) ) -func (gen *SqlGenPostgres) GetSqlInsert(bucket string) string { - return fmt.Sprintf(`INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)`, bucket) +func (gen *SqlGenPostgres) GetSqlInsert(tableName string) string { + if gen.UpsertQueryTemplate != "" { + return fmt.Sprintf(gen.UpsertQueryTemplate, tableName) + } else { + return fmt.Sprintf(`INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)`, tableName) + } } -func (gen *SqlGenPostgres) GetSqlUpdate(bucket string) string { - return fmt.Sprintf(`UPDATE "%s" SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4`, bucket) +func (gen *SqlGenPostgres) GetSqlUpdate(tableName string) string { + return fmt.Sprintf(`UPDATE "%s" SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4`, tableName) } -func (gen *SqlGenPostgres) GetSqlFind(bucket string) string { - return fmt.Sprintf(`SELECT meta FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, bucket) +func (gen *SqlGenPostgres) GetSqlFind(tableName string) string { + return fmt.Sprintf(`SELECT meta FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName) } -func (gen *SqlGenPostgres) GetSqlDelete(bucket string) string { - return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, bucket) +func (gen *SqlGenPostgres) GetSqlDelete(tableName string) string { + return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName) } -func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(bucket string) string { - return 
fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND directory=$2`, bucket) +func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(tableName string) string { + return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND directory=$2`, tableName) } -func (gen *SqlGenPostgres) GetSqlListExclusive(bucket string) string { - return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, bucket) +func (gen *SqlGenPostgres) GetSqlListExclusive(tableName string) string { + return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName) } -func (gen *SqlGenPostgres) GetSqlListInclusive(bucket string) string { - return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, bucket) +func (gen *SqlGenPostgres) GetSqlListInclusive(tableName string) string { + return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName) } -func (gen *SqlGenPostgres) GetSqlCreateTable(bucket string) string { - return fmt.Sprintf(gen.CreateTableSqlTemplate, bucket) +func (gen *SqlGenPostgres) GetSqlCreateTable(tableName string) string { + return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName) } -func (gen *SqlGenPostgres) GetSqlDropTable(bucket string) string { - return fmt.Sprintf(gen.DropTableSqlTemplate, bucket) +func (gen *SqlGenPostgres) GetSqlDropTable(tableName string) string { + return fmt.Sprintf(gen.DropTableSqlTemplate, tableName) } diff --git a/weed/filer/postgres/postgres_store.go b/weed/filer/postgres/postgres_store.go index 9e4ff7c32..a1e16a92a 100644 --- a/weed/filer/postgres/postgres_store.go +++ b/weed/filer/postgres/postgres_store.go @@ -29,6 +29,8 @@ func (store *PostgresStore) GetName() string { func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), configuration.GetString(prefix+"username"), configuration.GetString(prefix+"password"), configuration.GetString(prefix+"hostname"), @@ -42,12 +44,16 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix ) } -func (store *PostgresStore) initialize(user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { +func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { store.SupportBucketTable = false + if !enableUpsert { + upsertQuery = "" + } store.SqlGenerator = &SqlGenPostgres{ CreateTableSqlTemplate: "", DropTableSqlTemplate: `drop table "%s"`, + UpsertQueryTemplate: upsertQuery, } sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) diff --git a/weed/filer/postgres2/postgres2_store.go b/weed/filer/postgres2/postgres2_store.go index 92893bf7a..0f573d8d0 100644 --- a/weed/filer/postgres2/postgres2_store.go +++ b/weed/filer/postgres2/postgres2_store.go @@ -32,6 +32,8 @@ func (store *PostgresStore2) GetName() string { func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( configuration.GetString(prefix+"createTable"), + 
configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), configuration.GetString(prefix+"username"), configuration.GetString(prefix+"password"), configuration.GetString(prefix+"hostname"), @@ -45,12 +47,16 @@ func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix ) } -func (store *PostgresStore2) initialize(createTable, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { +func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { store.SupportBucketTable = true + if !enableUpsert { + upsertQuery = "" + } store.SqlGenerator = &postgres.SqlGenPostgres{ CreateTableSqlTemplate: createTable, DropTableSqlTemplate: `drop table "%s"`, + UpsertQueryTemplate: upsertQuery, } sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go index 7a6da3beb..d92d526d5 100644 --- a/weed/filer/read_write.go +++ b/weed/filer/read_write.go @@ -27,7 +27,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed return err } - return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64) + return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false) } @@ -35,14 +35,18 @@ func ReadContent(filerAddress string, dir, name string) ([]byte, error) { target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name) - data, _, err := util.FastGet(target) + data, _, err := util.Get(target) return data, err } func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error { - - target := fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name) + var target string + if port == 0 { + target = fmt.Sprintf("http://%s%s/%s", host, dir, name) + } else { + target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name) + } // set the HTTP method, url, and request body req, err := http.NewRequest(http.MethodPut, target, byteBuffer) diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go index 70c301725..379a18c62 100644 --- a/weed/filer/rocksdb/rocksdb_store.go +++ b/weed/filer/rocksdb/rocksdb_store.go @@ -8,6 +8,7 @@ import ( "crypto/md5" "fmt" "io" + "os" "github.com/tecbot/gorocksdb" @@ -56,6 +57,7 @@ func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, pre func (store *RocksDBStore) initialize(dir string) (err error) { glog.Infof("filer store rocksdb dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go index 439663524..f6e755b4b 100644 --- a/weed/filer/rocksdb/rocksdb_store_test.go +++ b/weed/filer/rocksdb/rocksdb_store_test.go @@ -53,14 +53,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "") + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // 
checking one upper directory - entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -79,7 +79,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "") + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/stream.go b/weed/filer/stream.go index 6a87a2b7d..661a210ea 100644 --- a/weed/filer/stream.go +++ b/weed/filer/stream.go @@ -3,6 +3,7 @@ package filer import ( "bytes" "fmt" + "golang.org/x/sync/errgroup" "io" "math" "strings" @@ -13,9 +14,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/wdclient" ) -func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error { +func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error { - // fmt.Printf("start to stream content for chunks: %+v\n", chunks) + glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks) chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size) fileId2Url := make(map[string][]string) @@ -26,19 +27,33 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c if err != nil { glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err + } else if len(urlStrings) == 0 { + glog.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + return fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) } fileId2Url[chunkView.FileId] = urlStrings } + if isCheck { + // Pre-check all chunkViews urls + gErr := new(errgroup.Group) + CheckAllChunkViews(chunkViews, &fileId2Url, gErr) + if err := gErr.Wait(); err != nil { + glog.Errorf("check all chunks: %v", err) + return fmt.Errorf("check all chunks: %v", err) + } + return nil + } + for _, chunkView := range chunkViews { urlStrings := fileId2Url[chunkView.FileId] - data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) if err != nil { glog.Errorf("read chunk: %v", err) return fmt.Errorf("read chunk: %v", err) } + _, err = w.Write(data) if err != nil { glog.Errorf("write chunk: %v", err) @@ -50,6 +65,17 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c } +func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) { + for _, chunkView := range chunkViews { + urlStrings := (*fileId2Url)[chunkView.FileId] + glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings) + gErr.Go(func() error { + _, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + return err + }) + } +} + // ---------------- ReadAllReader ---------------------------------- func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) { @@ -181,7 +207,7 @@ func (c *ChunkStreamReader) 
fetchChunkToBuffer(chunkView *ChunkView) error { var buffer bytes.Buffer var shouldRetry bool for _, urlString := range urlStrings { - shouldRetry, err = util.FastReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { + shouldRetry, err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { buffer.Write(data) }) if !shouldRetry { diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 10a0a2b44..6ee20974b 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -24,9 +24,12 @@ type Dir struct { wfs *WFS entry *filer_pb.Entry parent *Dir + id uint64 } var _ = fs.Node(&Dir{}) + +//var _ = fs.NodeIdentifier(&Dir{}) var _ = fs.NodeCreater(&Dir{}) var _ = fs.NodeMknoder(&Dir{}) var _ = fs.NodeMkdirer(&Dir{}) @@ -42,6 +45,10 @@ var _ = fs.NodeRemovexattrer(&Dir{}) var _ = fs.NodeListxattrer(&Dir{}) var _ = fs.NodeForgetter(&Dir{}) +func (dir *Dir) xId() uint64 { + return dir.id +} + func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { // https://github.com/bazil/fuse/issues/196 @@ -53,17 +60,18 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { return nil } - if err := dir.maybeLoadEntry(); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err) return err } - // attr.Inode = util.FullPath(dir.FullPath()).AsInode() - attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir - attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) - attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0) - attr.Gid = dir.entry.Attributes.Gid - attr.Uid = dir.entry.Attributes.Uid + // attr.Inode = dir.Id() + attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir + attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) + attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) + attr.Gid = entry.Attributes.Gid + attr.Uid = entry.Attributes.Uid glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr) @@ -74,16 +82,18 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f glog.V(4).Infof("dir Getxattr %s", dir.FullPath()) - if err := dir.maybeLoadEntry(); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - return getxattr(dir.entry, req, resp) + return getxattr(entry, req, resp) } func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { // attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode() attr.Valid = time.Second + attr.Inode = 1 // dir.Id() attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid attr.Mode = dir.wfs.option.MountMode @@ -102,45 +112,67 @@ func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { return nil } -func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node { - f := dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node { - return &File{ - Name: name, - dir: dir, - wfs: dir.wfs, - entry: entry, - entryViewCache: nil, - } - }) - f.(*File).dir = dir // in case dir node was created later - return f +func (dir *Dir) newFile(name string) fs.Node { + + fileFullPath := util.NewFullPath(dir.FullPath(), name) + fileId := fileFullPath.AsInode() + dir.wfs.handlesLock.Lock() + existingHandle, found := dir.wfs.handles[fileId] + dir.wfs.handlesLock.Unlock() + + if found { + glog.V(4).Infof("newFile found opened file handle: %+v", fileFullPath) 
+ return existingHandle.f + } + return &File{ + Name: name, + dir: dir, + wfs: dir.wfs, + id: fileId, + } } -func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node { +func (dir *Dir) newDirectory(fullpath util.FullPath) fs.Node { + + return &Dir{name: fullpath.Name(), wfs: dir.wfs, parent: dir, id: fullpath.AsInode()} - d := dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node { - return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir} - }) - d.(*Dir).parent = dir // in case dir node was created later - return d } func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { - request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, req.Flags&fuse.OpenExclusive != 0) + exclusive := req.Flags&fuse.OpenExclusive != 0 + isDirectory := req.Mode&os.ModeDir > 0 - if err != nil { - return nil, nil, err + if exclusive || isDirectory { + _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, exclusive) + if err != nil { + return nil, nil, err + } } var node fs.Node - if request.Entry.IsDirectory { - node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry) + if isDirectory { + node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name)) return node, nil, nil } - node = dir.newFile(req.Name, request.Entry) + node = dir.newFile(req.Name) file := node.(*File) + file.entry = &filer_pb.Entry{ + Name: req.Name, + IsDirectory: req.Mode&os.ModeDir > 0, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + Collection: dir.wfs.option.Collection, + Replication: dir.wfs.option.Replication, + TtlSec: dir.wfs.option.TtlSec, + }, + } + file.dirtyMetadata = true fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) return file, fh, nil @@ -148,19 +180,20 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) { - request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false) + _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false) if err != nil { return nil, err } var node fs.Node - node = dir.newFile(req.Name, request.Entry) + node = dir.newFile(req.Name) return node, nil } -func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exlusive bool) (*filer_pb.CreateEntryRequest, error) { +func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exclusive bool) (*filer_pb.CreateEntryRequest, error) { + dirFullPath := dir.FullPath() request := &filer_pb.CreateEntryRequest{ - Directory: dir.FullPath(), + Directory: dirFullPath, Entry: &filer_pb.Entry{ Name: name, IsDirectory: mode&os.ModeDir > 0, @@ -175,10 +208,10 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex TtlSec: dir.wfs.option.TtlSec, }, }, - OExcl: exlusive, + OExcl: exclusive, Signatures: []int32{dir.wfs.signature}, } - glog.V(1).Infof("create %s/%s", dir.FullPath(), name) + glog.V(1).Infof("create %s/%s", dirFullPath, name) err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -189,11 +222,14 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex if strings.Contains(err.Error(), "EEXIST") { return fuse.EEXIST } - glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), name, err) + 
glog.V(0).Infof("create %s/%s: %v", dirFullPath, name, err) return fuse.EIO } - dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("local InsertEntry dir %s/%s: %v", dirFullPath, name, err) + return fuse.EIO + } return nil }) @@ -216,84 +252,89 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err }, } + dirFullPath := dir.FullPath() + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir.wfs.mapPbIdFromLocalToFiler(newEntry) defer dir.wfs.mapPbIdFromFilerToLocal(newEntry) request := &filer_pb.CreateEntryRequest{ - Directory: dir.FullPath(), + Directory: dirFullPath, Entry: newEntry, Signatures: []int32{dir.wfs.signature}, } glog.V(1).Infof("mkdir: %v", request) if err := filer_pb.CreateEntry(client, request); err != nil { - glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err) + glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err) return err } - dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("local mkdir dir %s/%s: %v", dirFullPath, req.Name, err) + return fuse.EIO + } return nil }) if err == nil { - node := dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), newEntry) + node := dir.newDirectory(util.NewFullPath(dirFullPath, req.Name)) return node, nil } - glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err) + glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err) return nil, fuse.EIO } func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { - glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String()) - - fullFilePath := util.NewFullPath(dir.FullPath(), req.Name) dirPath := util.FullPath(dir.FullPath()) + glog.V(4).Infof("dir Lookup %s: %s by %s", dirPath, req.Name, req.Header.String()) + + fullFilePath := dirPath.Child(req.Name) visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath) if visitErr != nil { glog.Errorf("dir Lookup %s: %v", dirPath, visitErr) return nil, fuse.EIO } - cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath) + localEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath) if cacheErr == filer_pb.ErrNotFound { return nil, fuse.ENOENT } - entry := cachedEntry.ToProtoEntry() - if entry == nil { + if localEntry == nil { // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) - entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath) + entry, err := filer_pb.GetEntry(dir.wfs, fullFilePath) if err != nil { glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } + localEntry = filer.FromPbEntry(string(dirPath), entry) } else { glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) } - if entry != nil { - if entry.IsDirectory { - node = dir.newDirectory(fullFilePath, entry) + if localEntry != nil { + if localEntry.IsDirectory() { + node = dir.newDirectory(fullFilePath) } else { - node = dir.newFile(req.Name, entry) + node = dir.newFile(req.Name) } // resp.EntryValid = time.Second - // resp.Attr.Inode = fullFilePath.AsInode() + resp.Attr.Inode = 
fullFilePath.AsInode() resp.Attr.Valid = time.Second - resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) - resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) - resp.Attr.Gid = entry.Attributes.Gid - resp.Attr.Uid = entry.Attributes.Uid - if entry.HardLinkCounter > 0 { - resp.Attr.Nlink = uint32(entry.HardLinkCounter) + resp.Attr.Mtime = localEntry.Attr.Mtime + resp.Attr.Crtime = localEntry.Attr.Crtime + resp.Attr.Mode = localEntry.Attr.Mode + resp.Attr.Gid = localEntry.Attr.Gid + resp.Attr.Uid = localEntry.Attr.Uid + if localEntry.HardLinkCounter > 0 { + resp.Attr.Nlink = uint32(localEntry.HardLinkCounter) } return node, nil @@ -305,26 +346,25 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath()) + dirPath := util.FullPath(dir.FullPath()) + glog.V(4).Infof("dir ReadDirAll %s", dirPath) - processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error { - if entry.IsDirectory { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir} + processEachEntryFn := func(entry *filer.Entry, isLast bool) { + if entry.IsDirectory() { + dirent := fuse.Dirent{Name: entry.Name(), Type: fuse.DT_Dir, Inode: dirPath.Child(entry.Name()).AsInode()} ret = append(ret, dirent) } else { - dirent := fuse.Dirent{Name: entry.Name, Type: findFileType(uint16(entry.Attributes.FileMode))} + dirent := fuse.Dirent{Name: entry.Name(), Type: findFileType(uint16(entry.Attr.Mode)), Inode: dirPath.Child(entry.Name()).AsInode()} ret = append(ret, dirent) } - return nil } - dirPath := util.FullPath(dir.FullPath()) if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil { glog.Errorf("dir ReadDirAll %s: %v", dirPath, err) return nil, fuse.EIO } - listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool { - processEachEntryFn(entry.ToProtoEntry(), false) + listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool { + processEachEntryFn(entry, false) return true }) if listErr != nil { @@ -366,40 +406,32 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { - filePath := util.NewFullPath(dir.FullPath(), req.Name) + dirFullPath := dir.FullPath() + filePath := util.NewFullPath(dirFullPath, req.Name) entry, err := filer_pb.GetEntry(dir.wfs, filePath) if err != nil { return err } - if entry == nil { - return nil - } // first, ensure the filer store can correctly delete glog.V(3).Infof("remove file: %v", req) - isDeleteData := entry.HardLinkCounter <= 1 - err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature}) + isDeleteData := entry != nil && entry.HardLinkCounter <= 1 + err = filer_pb.Remove(dir.wfs, dirFullPath, req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature}) if err != nil { - glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err) + glog.V(3).Infof("not found remove file %s: %v", filePath, err) return fuse.ENOENT } // then, delete meta cache and fsNode cache - dir.wfs.metaCache.DeleteEntry(context.Background(), filePath) - - // clear entry inside the file - fsNode := 
dir.wfs.fsNodeCache.GetFsNode(filePath) - if fsNode != nil { - if file, ok := fsNode.(*File); ok { - file.clearEntry() - } + if err = dir.wfs.metaCache.DeleteEntry(context.Background(), filePath); err != nil { + glog.V(3).Infof("local DeleteEntry %s: %v", filePath, err) + return fuse.ESTALE } - dir.wfs.fsNodeCache.DeleteFsNode(filePath) // remove current file handle if any dir.wfs.handlesLock.Lock() defer dir.wfs.handlesLock.Unlock() - inodeId := util.NewFullPath(dir.FullPath(), req.Name).AsInode() + inodeId := filePath.AsInode() delete(dir.wfs.handles, inodeId) return nil @@ -408,20 +440,20 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { + dirFullPath := dir.FullPath() glog.V(3).Infof("remove directory entry: %v", req) ignoreRecursiveErr := true // ignore recursion error since the OS should manage it - err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature}) + err := filer_pb.Remove(dir.wfs, dirFullPath, req.Name, true, true, ignoreRecursiveErr, false, []int32{dir.wfs.signature}) if err != nil { - glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err) + glog.V(0).Infof("remove %s/%s: %v", dirFullPath, req.Name, err) if strings.Contains(err.Error(), "non-empty") { return fuse.EEXIST } return fuse.ENOENT } - t := util.NewFullPath(dir.FullPath(), req.Name) + t := util.NewFullPath(dirFullPath, req.Name) dir.wfs.metaCache.DeleteEntry(context.Background(), t) - dir.wfs.fsNodeCache.DeleteFsNode(t) return nil @@ -431,27 +463,28 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req) - if err := dir.maybeLoadEntry(); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } if req.Valid.Mode() { - dir.entry.Attributes.FileMode = uint32(req.Mode) + entry.Attributes.FileMode = uint32(req.Mode) } if req.Valid.Uid() { - dir.entry.Attributes.Uid = req.Uid + entry.Attributes.Uid = req.Uid } if req.Valid.Gid() { - dir.entry.Attributes.Gid = req.Gid + entry.Attributes.Gid = req.Gid } if req.Valid.Mtime() { - dir.entry.Attributes.Mtime = req.Mtime.Unix() + entry.Attributes.Mtime = req.Mtime.Unix() } - return dir.saveEntry() + return dir.saveEntry(entry) } @@ -459,15 +492,16 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name) - if err := dir.maybeLoadEntry(); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := setxattr(dir.entry, req); err != nil { + if err := setxattr(entry, req); err != nil { return err } - return dir.saveEntry() + return dir.saveEntry(entry) } @@ -475,15 +509,16 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name) - if err := dir.maybeLoadEntry(); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := removexattr(dir.entry, req); err != nil { + if err := removexattr(entry, req); err != nil { return err } - return dir.saveEntry() + return dir.saveEntry(entry) } @@ -491,11 +526,12 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp glog.V(4).Infof("dir Listxattr %s", dir.FullPath()) - if err := dir.maybeLoadEntry(); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := 
listxattr(dir.entry, req, resp); err != nil { + if err := listxattr(entry, req, resp); err != nil { return err } @@ -505,34 +541,25 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp func (dir *Dir) Forget() { glog.V(4).Infof("Forget dir %s", dir.FullPath()) - - dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath())) } -func (dir *Dir) maybeLoadEntry() error { - if dir.entry == nil { - parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName() - entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name) - if err != nil { - return err - } - dir.entry = entry - } - return nil +func (dir *Dir) maybeLoadEntry() (*filer_pb.Entry, error) { + parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName() + return dir.wfs.maybeLoadEntry(parentDirPath, name) } -func (dir *Dir) saveEntry() error { +func (dir *Dir) saveEntry(entry *filer_pb.Entry) error { parentDir, name := util.FullPath(dir.FullPath()).DirAndName() return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir.wfs.mapPbIdFromLocalToFiler(dir.entry) - defer dir.wfs.mapPbIdFromFilerToLocal(dir.entry) + dir.wfs.mapPbIdFromLocalToFiler(entry) + defer dir.wfs.mapPbIdFromFilerToLocal(entry) request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, - Entry: dir.entry, + Entry: entry, Signatures: []int32{dir.wfs.signature}, } @@ -543,7 +570,10 @@ func (dir *Dir) saveEntry() error { return fuse.EIO } - dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + if err := dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err) + return fuse.ESTALE + } return nil }) diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index ba3280f03..acdcd2de4 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -31,19 +31,24 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName) - if _, err := oldFile.maybeLoadEntry(ctx); err != nil { + oldEntry, err := oldFile.maybeLoadEntry(ctx) + if err != nil { return nil, err } - // update old file to hardlink mode - if len(oldFile.entry.HardLinkId) == 0 { - oldFile.entry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER) - oldFile.entry.HardLinkCounter = 1 + if oldEntry == nil { + return nil, fuse.EIO } - oldFile.entry.HardLinkCounter++ + + // update old file to hardlink mode + if len(oldEntry.HardLinkId) == 0 { + oldEntry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER) + oldEntry.HardLinkCounter = 1 + } + oldEntry.HardLinkCounter++ updateOldEntryRequest := &filer_pb.UpdateEntryRequest{ Directory: oldFile.dir.FullPath(), - Entry: oldFile.entry, + Entry: oldEntry, Signatures: []int32{dir.wfs.signature}, } @@ -53,17 +58,17 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f Entry: &filer_pb.Entry{ Name: req.NewName, IsDirectory: false, - Attributes: oldFile.entry.Attributes, - Chunks: oldFile.entry.Chunks, - Extended: oldFile.entry.Extended, - HardLinkId: oldFile.entry.HardLinkId, - HardLinkCounter: oldFile.entry.HardLinkCounter, + Attributes: oldEntry.Attributes, + Chunks: oldEntry.Chunks, + Extended: oldEntry.Extended, + HardLinkId: oldEntry.HardLinkId, + HardLinkCounter: oldEntry.HardLinkCounter, }, Signatures: []int32{dir.wfs.signature}, } // apply 
changes to the filer, and also apply to local metaCache - err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir.wfs.mapPbIdFromLocalToFiler(request.Entry) defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) @@ -83,13 +88,14 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f return nil }) - // create new file node - newNode := dir.newFile(req.NewName, request.Entry) - newFile := newNode.(*File) - if _, err := newFile.maybeLoadEntry(ctx); err != nil { - return nil, err + if err != nil { + return nil, fuse.EIO } + // create new file node + newNode := dir.newFile(req.NewName) + newFile := newNode.(*File) + return newFile, err } @@ -130,7 +136,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, return nil }) - symlink := dir.newFile(req.NewName, request.Entry) + symlink := dir.newFile(req.NewName) return symlink, err diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index d2acad4b2..b07710d17 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -64,19 +64,28 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector return fuse.EIO } - // fmt.Printf("rename path: %v => %v\n", oldPath, newPath) - dir.wfs.fsNodeCache.Move(oldPath, newPath) + oldFsNode := NodeWithId(oldPath.AsInode()) + newFsNode := NodeWithId(newPath.AsInode()) + dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) { + if file, ok := internalNode.(*File); ok { + glog.V(4).Infof("internal node %s", file.Name) + file.Name = req.NewName + file.id = uint64(newFsNode) + } + }) // change file handle dir.wfs.handlesLock.Lock() defer dir.wfs.handlesLock.Unlock() inodeId := oldPath.AsInode() existingHandle, found := dir.wfs.handles[inodeId] + glog.V(4).Infof("has open filehandle %s: %v", oldPath, found) if !found || existingHandle == nil { - return err + return nil } + glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath) delete(dir.wfs.handles, inodeId) dir.wfs.handles[newPath.AsInode()] = existingHandle - return err + return nil } diff --git a/weed/filesys/dir_test.go b/weed/filesys/dir_test.go deleted file mode 100644 index 49c76eb5e..000000000 --- a/weed/filesys/dir_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package filesys - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDirPath(t *testing.T) { - - p := &Dir{name: "/some"} - p = &Dir{name: "path", parent: p} - p = &Dir{name: "to", parent: p} - p = &Dir{name: "a", parent: p} - p = &Dir{name: "file", parent: p} - - assert.Equal(t, "/some/path/to/a/file", p.FullPath()) - - p = &Dir{name: "/some"} - assert.Equal(t, "/some", p.FullPath()) - - p = &Dir{name: "/"} - assert.Equal(t, "/", p.FullPath()) - - p = &Dir{name: "/"} - p = &Dir{name: "path", parent: p} - assert.Equal(t, "/path", p.FullPath()) - - p = &Dir{name: "/"} - p = &Dir{name: "path", parent: p} - p = &Dir{name: "to", parent: p} - assert.Equal(t, "/path/to", p.FullPath()) - -} diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index f05a3a56a..8888cff96 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -30,7 +30,7 @@ func newDirtyPages(file *File) *ContinuousDirtyPages { func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) { - glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), 
pages.f.entry.Attributes.FileSize) + glog.V(4).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data))) if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. @@ -69,7 +69,12 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (hasSavedD return false } - fileSize := int64(pages.f.entry.Attributes.FileSize) + entry := pages.f.getEntry() + if entry == nil { + return false + } + + fileSize := int64(entry.Attributes.FileSize) chunkSize := min(maxList.Size(), fileSize-maxList.Offset()) if chunkSize == 0 { diff --git a/weed/filesys/file.go b/weed/filesys/file.go index a210c5152..bb57988cd 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -2,7 +2,6 @@ package filesys import ( "context" - "io" "os" "sort" "time" @@ -19,6 +18,7 @@ import ( const blockSize = 512 var _ = fs.Node(&File{}) +var _ = fs.NodeIdentifier(&File{}) var _ = fs.NodeOpener(&File{}) var _ = fs.NodeFsyncer(&File{}) var _ = fs.NodeSetattrer(&File{}) @@ -29,32 +29,37 @@ var _ = fs.NodeListxattrer(&File{}) var _ = fs.NodeForgetter(&File{}) type File struct { - Name string - dir *Dir - wfs *WFS - entry *filer_pb.Entry - entryViewCache []filer.VisibleInterval - isOpen int - reader io.ReaderAt - dirtyMetadata bool + Name string + dir *Dir + wfs *WFS + entry *filer_pb.Entry + isOpen int + dirtyMetadata bool + id uint64 } func (file *File) fullpath() util.FullPath { return util.NewFullPath(file.dir.FullPath(), file.Name) } +func (file *File) Id() uint64 { + return file.id +} + func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) { glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr) - entry := file.entry - if file.isOpen <= 0 || entry == nil { - if entry, err = file.maybeLoadEntry(ctx); err != nil { - return err - } + entry, err := file.maybeLoadEntry(ctx) + if err != nil { + return err } - // attr.Inode = file.fullpath().AsInode() + if entry == nil { + return fuse.ENOENT + } + + attr.Inode = file.Id() attr.Valid = time.Second attr.Mode = os.FileMode(entry.Attributes.FileMode) attr.Size = filer.FileSize(entry) @@ -106,13 +111,13 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req) - _, err := file.maybeLoadEntry(ctx) + entry, err := file.maybeLoadEntry(ctx) if err != nil { return err } if file.isOpen > 0 { file.wfs.handlesLock.Lock() - fileHandle := file.wfs.handles[file.fullpath().AsInode()] + fileHandle := file.wfs.handles[file.Id()] file.wfs.handlesLock.Unlock() if fileHandle != nil { @@ -123,12 +128,12 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f if req.Valid.Size() { - glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks)) - if req.Size < filer.FileSize(file.entry) { + glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(entry.Chunks)) + if req.Size < filer.FileSize(entry) { // fmt.Printf("truncate %v \n", fullPath) var chunks []*filer_pb.FileChunk var truncatedChunks []*filer_pb.FileChunk - for _, chunk := range file.entry.Chunks { + for _, chunk := range entry.Chunks { int64Size := int64(chunk.Size) if chunk.Offset+int64Size > int64(req.Size) { // this chunk is truncated @@ -143,36 +148,34 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f } } } - file.entry.Chunks = chunks - file.entryViewCache, _ = 
filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), chunks) - file.reader = nil + entry.Chunks = chunks } - file.entry.Attributes.FileSize = req.Size + entry.Attributes.FileSize = req.Size file.dirtyMetadata = true } if req.Valid.Mode() { - file.entry.Attributes.FileMode = uint32(req.Mode) + entry.Attributes.FileMode = uint32(req.Mode) file.dirtyMetadata = true } if req.Valid.Uid() { - file.entry.Attributes.Uid = req.Uid + entry.Attributes.Uid = req.Uid file.dirtyMetadata = true } if req.Valid.Gid() { - file.entry.Attributes.Gid = req.Gid + entry.Attributes.Gid = req.Gid file.dirtyMetadata = true } if req.Valid.Crtime() { - file.entry.Attributes.Crtime = req.Crtime.Unix() + entry.Attributes.Crtime = req.Crtime.Unix() file.dirtyMetadata = true } if req.Valid.Mtime() { - file.entry.Attributes.Mtime = req.Mtime.Unix() + entry.Attributes.Mtime = req.Mtime.Unix() file.dirtyMetadata = true } @@ -188,7 +191,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f return nil } - return file.saveEntry(file.entry) + return file.saveEntry(entry) } @@ -254,14 +257,20 @@ func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { func (file *File) Forget() { t := util.NewFullPath(file.dir.FullPath(), file.Name) glog.V(4).Infof("Forget file %s", t) - file.wfs.fsNodeCache.DeleteFsNode(t) + file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode())) } func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) { + + file.wfs.handlesLock.Lock() + handle, found := file.wfs.handles[file.Id()] + file.wfs.handlesLock.Unlock() entry = file.entry - if file.isOpen > 0 { - return entry, nil + if found { + glog.V(4).Infof("maybeLoadEntry found opened file %s/%s: %v %v", file.dir.FullPath(), file.Name, handle.f.entry, entry) + entry = handle.f.entry } + if entry != nil { if len(entry.HardLinkId) == 0 { // only always reload hard link @@ -274,7 +283,7 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er return entry, err } if entry != nil { - file.setEntry(entry) + // file.entry = entry } else { glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err) } @@ -299,8 +308,13 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) { } } + entry := file.getEntry() + if entry == nil { + return + } + // pick out-of-order chunks from existing chunks - for _, chunk := range file.entry.Chunks { + for _, chunk := range entry.Chunks { if lessThan(earliestChunk, chunk) { chunks = append(chunks, chunk) } @@ -311,28 +325,9 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) { return lessThan(chunks[i], chunks[j]) }) - // add to entry view cache - for _, chunk := range chunks { - file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk) - } + glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks)) - file.reader = nil - - glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks)) - - file.entry.Chunks = append(file.entry.Chunks, newChunks...) -} - -func (file *File) setEntry(entry *filer_pb.Entry) { - file.entry = entry - file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), entry.Chunks) - file.reader = nil -} - -func (file *File) clearEntry() { - file.entry = nil - file.entryViewCache = nil - file.reader = nil + entry.Chunks = append(entry.Chunks, newChunks...) 
} func (file *File) saveEntry(entry *filer_pb.Entry) error { @@ -359,3 +354,7 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error { return nil }) } + +func (file *File) getEntry() *filer_pb.Entry { + return file.entry +} diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index fb073c9cd..27ffab6e1 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -20,10 +20,12 @@ import ( type FileHandle struct { // cache file has been written to - dirtyPages *ContinuousDirtyPages - contentType string - handle uint64 - sync.RWMutex + dirtyPages *ContinuousDirtyPages + entryViewCache []filer.VisibleInterval + reader io.ReaderAt + contentType string + handle uint64 + sync.Mutex f *File RequestId fuse.RequestID // unique ID for request @@ -40,8 +42,9 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle { Uid: uid, Gid: gid, } - if fh.f.entry != nil { - fh.f.entry.Attributes.FileSize = filer.FileSize(fh.f.entry) + entry := fh.f.getEntry() + if entry != nil { + entry.Attributes.FileSize = filer.FileSize(entry) } return fh @@ -58,8 +61,8 @@ var _ = fs.HandleReleaser(&FileHandle{}) func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data)) - fh.RLock() - defer fh.RUnlock() + fh.Lock() + defer fh.Unlock() if req.Size <= 0 { return nil @@ -104,42 +107,48 @@ func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxSto func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { - fileSize := int64(filer.FileSize(fh.f.entry)) - - if fileSize == 0 { - glog.V(1).Infof("empty fh %v", fh.f.fullpath()) + entry := fh.f.getEntry() + if entry == nil { return 0, io.EOF } - if offset+int64(len(buff)) <= int64(len(fh.f.entry.Content)) { - totalRead := copy(buff, fh.f.entry.Content[offset:]) - glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead) + fileSize := int64(filer.FileSize(entry)) + fileFullPath := fh.f.fullpath() + + if fileSize == 0 { + glog.V(1).Infof("empty fh %v", fileFullPath) + return 0, io.EOF + } + + if offset+int64(len(buff)) <= int64(len(entry.Content)) { + totalRead := copy(buff, entry.Content[offset:]) + glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) return int64(totalRead), nil } var chunkResolveErr error - if fh.f.entryViewCache == nil { - fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), fh.f.entry.Chunks) + if fh.entryViewCache == nil { + fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks) if chunkResolveErr != nil { return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr) } - fh.f.reader = nil + fh.reader = nil } - reader := fh.f.reader + reader := fh.reader if reader == nil { - chunkViews := filer.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64) + chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64) reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize) } - fh.f.reader = reader + fh.reader = reader totalRead, err := reader.ReadAt(buff, offset) if err != nil && err != io.EOF { - glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + glog.Errorf("file 
handle read %s: %v", fileFullPath, err) } - glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err) + glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err) return int64(totalRead), err } @@ -158,8 +167,13 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f copy(data, req.Data) } - fh.f.entry.Content = nil - fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize))) + entry := fh.f.getEntry() + if entry == nil { + return fuse.EIO + } + + entry.Content = nil + entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(entry.Attributes.FileSize))) glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data)) fh.dirtyPages.AddPage(req.Offset, data) @@ -179,30 +193,27 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error { - glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle) + glog.V(4).Infof("Release %v fh %d open=%d", fh.f.fullpath(), fh.handle, fh.f.isOpen) fh.Lock() defer fh.Unlock() + fh.f.isOpen-- + if fh.f.isOpen <= 0 { + fh.f.entry = nil + fh.entryViewCache = nil + fh.reader = nil + + fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + } + + if fh.f.isOpen < 0 { glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0) fh.f.isOpen = 0 return nil } - if fh.f.isOpen == 1 { - - fh.f.isOpen-- - - fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) - if closer, ok := fh.f.reader.(io.Closer); ok { - if closer != nil { - closer.Close() - } - } - fh.f.reader = nil - } - return nil } @@ -242,35 +253,40 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error { err := fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - if fh.f.entry.Attributes != nil { - fh.f.entry.Attributes.Mime = fh.contentType - if fh.f.entry.Attributes.Uid == 0 { - fh.f.entry.Attributes.Uid = header.Uid + entry := fh.f.getEntry() + if entry == nil { + return nil + } + + if entry.Attributes != nil { + entry.Attributes.Mime = fh.contentType + if entry.Attributes.Uid == 0 { + entry.Attributes.Uid = header.Uid } - if fh.f.entry.Attributes.Gid == 0 { - fh.f.entry.Attributes.Gid = header.Gid + if entry.Attributes.Gid == 0 { + entry.Attributes.Gid = header.Gid } - if fh.f.entry.Attributes.Crtime == 0 { - fh.f.entry.Attributes.Crtime = time.Now().Unix() + if entry.Attributes.Crtime == 0 { + entry.Attributes.Crtime = time.Now().Unix() } - fh.f.entry.Attributes.Mtime = time.Now().Unix() - fh.f.entry.Attributes.FileMode = uint32(os.FileMode(fh.f.entry.Attributes.FileMode) &^ fh.f.wfs.option.Umask) - fh.f.entry.Attributes.Collection = fh.dirtyPages.collection - fh.f.entry.Attributes.Replication = fh.dirtyPages.replication + entry.Attributes.Mtime = time.Now().Unix() + entry.Attributes.FileMode = uint32(os.FileMode(entry.Attributes.FileMode) &^ fh.f.wfs.option.Umask) + entry.Attributes.Collection = fh.dirtyPages.collection + entry.Attributes.Replication = fh.dirtyPages.replication } request := &filer_pb.CreateEntryRequest{ Directory: fh.f.dir.FullPath(), - Entry: fh.f.entry, + Entry: entry, Signatures: []int32{fh.f.wfs.signature}, } - glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks)) - for i, chunk := range 
fh.f.entry.Chunks { + glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(entry.Chunks)) + for i, chunk := range entry.Chunks { glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) } - manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks) + manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks) chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks) chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks) @@ -278,7 +294,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error { // not good, but should be ok glog.V(0).Infof("MaybeManifestize: %v", manifestErr) } - fh.f.entry.Chunks = append(chunks, manifestChunks...) + entry.Chunks = append(chunks, manifestChunks...) fh.f.wfs.mapPbIdFromLocalToFiler(request.Entry) defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry) diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go index fdec8253c..6b1012090 100644 --- a/weed/filesys/fscache.go +++ b/weed/filesys/fscache.go @@ -124,8 +124,9 @@ func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode { } if f, ok := src.node.(*File); ok { f.Name = target.name - if f.entry != nil { - f.entry.Name = f.Name + entry := f.getEntry() + if entry != nil { + entry.Name = f.Name } } parent.disconnectChild(target) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index c6d9080a1..42816d23d 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -41,7 +41,6 @@ type Option struct { CacheDir string CacheSizeMB int64 DataCenter string - EntryCacheTtl time.Duration Umask os.FileMode MountUid uint32 @@ -104,24 +103,16 @@ func NewSeaweedFileSystem(option *Option) *WFS { } wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), util.FullPath(option.FilerMountRootPath), option.UidGidMapper, func(filePath util.FullPath) { - fsNode := wfs.fsNodeCache.GetFsNode(filePath) - if fsNode != nil { - if file, ok := fsNode.(*File); ok { - if err := wfs.Server.InvalidateNodeData(file); err != nil { - glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err) - } - file.clearEntry() - } + + fsNode := NodeWithId(filePath.AsInode()) + if err := wfs.Server.InvalidateNodeData(fsNode); err != nil { + glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err) } + dir, name := filePath.DirAndName() - parent := wfs.root - if dir != "/" { - parent = wfs.fsNodeCache.GetFsNode(util.FullPath(dir)) - } - if parent != nil { - if err := wfs.Server.InvalidateEntry(parent, name); err != nil { - glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err) - } + parent := NodeWithId(util.FullPath(dir).AsInode()) + if err := wfs.Server.InvalidateEntry(parent, name); err != nil { + glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err) } }) startTime := time.Now() @@ -130,8 +121,7 @@ func NewSeaweedFileSystem(option *Option) *WFS { wfs.metaCache.Shutdown() }) - entry, _ := filer_pb.GetEntry(wfs, util.FullPath(wfs.option.FilerMountRootPath)) - wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, entry: entry} + wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs} wfs.fsNodeCache = newFsCache(wfs.root) if wfs.option.ConcurrentWriters > 0 { @@ -150,25 +140,28 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand fullpath := file.fullpath() glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid) - 
wfs.handlesLock.Lock() - defer wfs.handlesLock.Unlock() + inodeId := file.Id() - inodeId := file.fullpath().AsInode() - if file.isOpen > 0 { - existingHandle, found := wfs.handles[inodeId] - if found && existingHandle != nil { - file.isOpen++ - return existingHandle - } + wfs.handlesLock.Lock() + existingHandle, found := wfs.handles[inodeId] + wfs.handlesLock.Unlock() + if found && existingHandle != nil { + existingHandle.f.isOpen++ + glog.V(4).Infof("Acquired Handle %s open %d", fullpath, existingHandle.f.isOpen) + return existingHandle } + entry, _ := file.maybeLoadEntry(context.Background()) + file.entry = entry fileHandle = newFileHandle(file, uid, gid) - file.maybeLoadEntry(context.Background()) file.isOpen++ + wfs.handlesLock.Lock() wfs.handles[inodeId] = fileHandle + wfs.handlesLock.Unlock() fileHandle.handle = inodeId + glog.V(4).Infof("Acquired new Handle %s open %d", fullpath, file.isOpen) return } @@ -176,9 +169,9 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) { wfs.handlesLock.Lock() defer wfs.handlesLock.Unlock() - glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + glog.V(4).Infof("ReleaseHandle %s id %d current handles length %d", fullpath, handleId, len(wfs.handles)) - delete(wfs.handles, fullpath.AsInode()) + delete(wfs.handles, uint64(handleId)) return } @@ -268,3 +261,11 @@ func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType { return filer.LookupFn(wfs) } + +type NodeWithId uint64 +func (n NodeWithId) Id() uint64 { + return uint64(n) +} +func (n NodeWithId) Attr(ctx context.Context, attr *fuse.Attr) error { + return nil +} diff --git a/weed/glog/glog.go b/weed/glog/glog.go index adb6ab5aa..352a7e185 100644 --- a/weed/glog/glog.go +++ b/weed/glog/glog.go @@ -398,7 +398,7 @@ type flushSyncWriter interface { func init() { flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", true, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.verbosity, "v", "log levels [0|1|2|3|4], default to 0") flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") diff --git a/weed/iamapi/iamapi_handlers.go b/weed/iamapi/iamapi_handlers.go new file mode 100644 index 000000000..2e5f709f3 --- /dev/null +++ b/weed/iamapi/iamapi_handlers.go @@ -0,0 +1,105 @@ +package iamapi + +import ( + "bytes" + "encoding/xml" + "fmt" + "strconv" + + "net/http" + "net/url" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + + "github.com/aws/aws-sdk-go/service/iam" +) + +type mimeType string + +const ( + mimeNone mimeType = "" + mimeXML mimeType = "application/xml" +) + +func setCommonHeaders(w http.ResponseWriter) { + w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("Accept-Ranges", "bytes") +} + +// Encodes the response headers into XML format. 
+func encodeResponse(response interface{}) []byte { + var bytesBuffer bytes.Buffer + bytesBuffer.WriteString(xml.Header) + e := xml.NewEncoder(&bytesBuffer) + e.Encode(response) + return bytesBuffer.Bytes() +} + +// If none of the http routes match respond with MethodNotAllowed +func notFoundHandler(w http.ResponseWriter, r *http.Request) { + glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI) + writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL) +} + +func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) { + apiError := s3err.GetAPIError(errorCode) + errorResponse := getRESTErrorResponse(apiError, reqURL.Path) + encodedErrorResponse := encodeResponse(errorResponse) + writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML) +} + +func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) { + errCode := err.Error() + errorResp := ErrorResponse{} + errorResp.Error.Type = "Sender" + errorResp.Error.Code = &errCode + if msg != nil { + errMsg := msg.Error() + errorResp.Error.Message = &errMsg + } + glog.Errorf("Response %+v", err) + switch errCode { + case iam.ErrCodeNoSuchEntityException: + msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value) + errorResp.Error.Message = &msg + writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML) + case iam.ErrCodeServiceFailureException: + writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML) + default: + writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML) + } +} + +func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse { + return s3err.RESTErrorResponse{ + Code: err.Code, + Message: err.Description, + Resource: resource, + RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), + } +} + +func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { + setCommonHeaders(w) + if response != nil { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + } + if mType != mimeNone { + w.Header().Set("Content-Type", string(mType)) + } + w.WriteHeader(statusCode) + if response != nil { + glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response)) + _, err := w.Write(response) + if err != nil { + glog.V(0).Infof("write err: %v", err) + } + w.(http.Flusher).Flush() + } +} + +func writeSuccessResponseXML(w http.ResponseWriter, response []byte) { + writeResponse(w, http.StatusOK, response, mimeXML) +} diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go new file mode 100644 index 000000000..b00ada234 --- /dev/null +++ b/weed/iamapi/iamapi_management_handlers.go @@ -0,0 +1,449 @@ +package iamapi + +import ( + "crypto/sha1" + "encoding/json" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "math/rand" + "net/http" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/service/iam" +) + +const ( + charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/" + policyDocumentVersion = "2012-10-17" + StatementActionAdmin = "*" + StatementActionWrite = "Put*" + StatementActionRead = "Get*" + StatementActionList = "List*" + StatementActionTagging = "Tagging*" +) + +var ( + seededRand *rand.Rand = 
rand.New( + rand.NewSource(time.Now().UnixNano())) + policyDocuments = map[string]*PolicyDocument{} + policyLock = sync.RWMutex{} +) + +func MapToStatementAction(action string) string { + switch action { + case StatementActionAdmin: + return s3_constants.ACTION_ADMIN + case StatementActionWrite: + return s3_constants.ACTION_WRITE + case StatementActionRead: + return s3_constants.ACTION_READ + case StatementActionList: + return s3_constants.ACTION_LIST + case StatementActionTagging: + return s3_constants.ACTION_TAGGING + default: + return "" + } +} + +func MapToIdentitiesAction(action string) string { + switch action { + case s3_constants.ACTION_ADMIN: + return StatementActionAdmin + case s3_constants.ACTION_WRITE: + return StatementActionWrite + case s3_constants.ACTION_READ: + return StatementActionRead + case s3_constants.ACTION_LIST: + return StatementActionList + case s3_constants.ACTION_TAGGING: + return StatementActionTagging + default: + return "" + } +} + +type Statement struct { + Effect string `json:"Effect"` + Action []string `json:"Action"` + Resource []string `json:"Resource"` +} + +type Policies struct { + Policies map[string]PolicyDocument `json:"policies"` +} + +type PolicyDocument struct { + Version string `json:"Version"` + Statement []*Statement `json:"Statement"` +} + +func (p PolicyDocument) String() string { + b, _ := json.Marshal(p) + return string(b) +} + +func Hash(s *string) string { + h := sha1.New() + h.Write([]byte(*s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func StringWithCharset(length int, charset string) string { + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +func (iama *IamApiServer) ListUsers(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListUsersResponse) { + for _, ident := range s3cfg.Identities { + resp.ListUsersResult.Users = append(resp.ListUsersResult.Users, &iam.User{UserName: &ident.Name}) + } + return resp +} + +func (iama *IamApiServer) ListAccessKeys(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListAccessKeysResponse) { + status := iam.StatusTypeActive + for _, ident := range s3cfg.Identities { + for _, cred := range ident.Credentials { + resp.ListAccessKeysResult.AccessKeyMetadata = append(resp.ListAccessKeysResult.AccessKeyMetadata, + &iam.AccessKeyMetadata{UserName: &ident.Name, AccessKeyId: &cred.AccessKey, Status: &status}, + ) + } + } + return resp +} + +func (iama *IamApiServer) CreateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateUserResponse) { + userName := values.Get("UserName") + resp.CreateUserResult.User.UserName = &userName + s3cfg.Identities = append(s3cfg.Identities, &iam_pb.Identity{Name: userName}) + return resp +} + +func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err error) { + for i, ident := range s3cfg.Identities { + if userName == ident.Name { + s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) 
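+			// note: append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) splices the matched identity out of the slice in place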
+ return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err error) { + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + resp.GetUserResult.User = iam.User{UserName: &ident.Name} + return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) { + if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil { + return PolicyDocument{}, err + } + return policyDocument, err +} + +func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, err error) { + policyName := values.Get("PolicyName") + policyDocumentString := values.Get("PolicyDocument") + policyDocument, err := GetPolicyDocument(&policyDocumentString) + if err != nil { + return CreatePolicyResponse{}, err + } + policyId := Hash(&policyDocumentString) + arn := fmt.Sprintf("arn:aws:iam:::policy/%s", policyName) + resp.CreatePolicyResult.Policy.PolicyName = &policyName + resp.CreatePolicyResult.Policy.Arn = &arn + resp.CreatePolicyResult.Policy.PolicyId = &policyId + policies := Policies{} + policyLock.Lock() + defer policyLock.Unlock() + if err = iama.s3ApiConfig.GetPolicies(&policies); err != nil { + return resp, err + } + policies.Policies[policyName] = policyDocument + if err = iama.s3ApiConfig.PutPolicies(&policies); err != nil { + return resp, err + } + return resp, nil +} + +func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { + userName := values.Get("UserName") + policyName := values.Get("PolicyName") + policyDocumentString := values.Get("PolicyDocument") + policyDocument, err := GetPolicyDocument(&policyDocumentString) + if err != nil { + return PutUserPolicyResponse{}, err + } + policyDocuments[policyName] = &policyDocument + actions := GetActions(&policyDocument) + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + for _, action := range actions { + ident.Actions = append(ident.Actions, action) + } + break + } + } + return resp, nil +} + +func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err error) { + userName := values.Get("UserName") + policyName := values.Get("PolicyName") + for _, ident := range s3cfg.Identities { + if userName != ident.Name { + continue + } + + resp.GetUserPolicyResult.UserName = userName + resp.GetUserPolicyResult.PolicyName = policyName + if len(ident.Actions) == 0 { + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) + } + + policyDocument := PolicyDocument{Version: policyDocumentVersion} + statements := make(map[string][]string) + for _, action := range ident.Actions { + // parse "Read:EXAMPLE-BUCKET" + act := strings.Split(action, ":") + + resource := "*" + if len(act) == 2 { + resource = fmt.Sprintf("arn:aws:s3:::%s/*", act[1]) + } + statements[resource] = append(statements[resource], + fmt.Sprintf("s3:%s", MapToIdentitiesAction(act[0])), + ) + } + for resource, actions := range statements { + isEqAction := false + for i, statement := range policyDocument.Statement { + if reflect.DeepEqual(statement.Action, actions) { + policyDocument.Statement[i].Resource = append( + policyDocument.Statement[i].Resource, resource) + isEqAction = true + break + } + } + if isEqAction { + 
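+				// this resource was merged into an existing statement with an identical action set, so no new statement is created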
continue + } + policyDocumentStatement := Statement{ + Effect: "Allow", + Action: actions, + } + policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource) + policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement) + } + resp.GetUserPolicyResult.PolicyDocument = policyDocument.String() + return resp, nil + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { + userName := values.Get("UserName") + for i, ident := range s3cfg.Identities { + if ident.Name == userName { + s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) + return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func GetActions(policy *PolicyDocument) (actions []string) { + for _, statement := range policy.Statement { + if statement.Effect != "Allow" { + continue + } + for _, resource := range statement.Resource { + // Parse "arn:aws:s3:::my-bucket/shared/*" + res := strings.Split(resource, ":") + if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" { + glog.Infof("not match resource: %s", res) + continue + } + for _, action := range statement.Action { + // Parse "s3:Get*" + act := strings.Split(action, ":") + if len(act) != 2 || act[0] != "s3" { + glog.Infof("not match action: %s", act) + continue + } + statementAction := MapToStatementAction(act[1]) + if res[5] == "*" { + actions = append(actions, statementAction) + continue + } + // Parse my-bucket/shared/* + path := strings.Split(res[5], "/") + if len(path) != 2 || path[1] != "*" { + glog.Infof("not match bucket: %s", path) + continue + } + actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path[0])) + } + } + } + return actions +} + +func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateAccessKeyResponse) { + userName := values.Get("UserName") + status := iam.StatusTypeActive + accessKeyId := StringWithCharset(21, charsetUpper) + secretAccessKey := StringWithCharset(42, charset) + resp.CreateAccessKeyResult.AccessKey.AccessKeyId = &accessKeyId + resp.CreateAccessKeyResult.AccessKey.SecretAccessKey = &secretAccessKey + resp.CreateAccessKeyResult.AccessKey.UserName = &userName + resp.CreateAccessKeyResult.AccessKey.Status = &status + changed := false + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + ident.Credentials = append(ident.Credentials, + &iam_pb.Credential{AccessKey: accessKeyId, SecretKey: secretAccessKey}) + changed = true + break + } + } + if !changed { + s3cfg.Identities = append(s3cfg.Identities, + &iam_pb.Identity{Name: userName, + Credentials: []*iam_pb.Credential{ + { + AccessKey: accessKeyId, + SecretKey: secretAccessKey, + }, + }, + }, + ) + } + return resp +} + +func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp DeleteAccessKeyResponse) { + userName := values.Get("UserName") + accessKeyId := values.Get("AccessKeyId") + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + for i, cred := range ident.Credentials { + if cred.AccessKey == accessKeyId { + ident.Credentials = append(ident.Credentials[:i], ident.Credentials[i+1:]...) 
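+					// the matched access key has been spliced out of ident.Credentials; stop scanning this identity's credentials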
+ break + } + } + break + } + } + return resp +} + +func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL) + return + } + values := r.PostForm + var s3cfgLock sync.RWMutex + s3cfgLock.RLock() + s3cfg := &iam_pb.S3ApiConfiguration{} + if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + s3cfgLock.RUnlock() + + glog.V(4).Infof("DoActions: %+v", values) + var response interface{} + var err error + changed := true + switch r.Form.Get("Action") { + case "ListUsers": + response = iama.ListUsers(s3cfg, values) + changed = false + case "ListAccessKeys": + response = iama.ListAccessKeys(s3cfg, values) + changed = false + case "CreateUser": + response = iama.CreateUser(s3cfg, values) + case "GetUser": + userName := values.Get("UserName") + response, err = iama.GetUser(s3cfg, userName) + if err != nil { + writeIamErrorResponse(w, err, "user", userName, nil) + return + } + changed = false + case "DeleteUser": + userName := values.Get("UserName") + response, err = iama.DeleteUser(s3cfg, userName) + if err != nil { + writeIamErrorResponse(w, err, "user", userName, nil) + return + } + case "CreateAccessKey": + response = iama.CreateAccessKey(s3cfg, values) + case "DeleteAccessKey": + response = iama.DeleteAccessKey(s3cfg, values) + case "CreatePolicy": + response, err = iama.CreatePolicy(s3cfg, values) + if err != nil { + glog.Errorf("CreatePolicy: %+v", err) + writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL) + return + } + case "PutUserPolicy": + response, err = iama.PutUserPolicy(s3cfg, values) + if err != nil { + glog.Errorf("PutUserPolicy: %+v", err) + writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL) + return + } + case "GetUserPolicy": + response, err = iama.GetUserPolicy(s3cfg, values) + if err != nil { + writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil) + return + } + changed = false + case "DeleteUserPolicy": + if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil { + writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil) + } + default: + errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented) + errorResponse := ErrorResponse{} + errorResponse.Error.Code = &errNotImplemented.Code + errorResponse.Error.Message = &errNotImplemented.Description + writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML) + return + } + if changed { + s3cfgLock.Lock() + err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg) + s3cfgLock.Unlock() + if err != nil { + writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err) + return + } + } + writeSuccessResponseXML(w, encodeResponse(response)) +} diff --git a/weed/iamapi/iamapi_response.go b/weed/iamapi/iamapi_response.go new file mode 100644 index 000000000..77328b608 --- /dev/null +++ b/weed/iamapi/iamapi_response.go @@ -0,0 +1,103 @@ +package iamapi + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/service/iam" +) + +type CommonResponse struct { + ResponseMetadata struct { + RequestId string `xml:"RequestId"` + } `xml:"ResponseMetadata"` +} + +type ListUsersResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListUsersResponse"` + ListUsersResult struct { + Users []*iam.User `xml:"Users>member"` + IsTruncated bool `xml:"IsTruncated"` + } `xml:"ListUsersResult"` +} + +type 
ListAccessKeysResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListAccessKeysResponse"` + ListAccessKeysResult struct { + AccessKeyMetadata []*iam.AccessKeyMetadata `xml:"AccessKeyMetadata>member"` + IsTruncated bool `xml:"IsTruncated"` + } `xml:"ListAccessKeysResult"` +} + +type DeleteAccessKeyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteAccessKeyResponse"` +} + +type CreatePolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreatePolicyResponse"` + CreatePolicyResult struct { + Policy iam.Policy `xml:"Policy"` + } `xml:"CreatePolicyResult"` +} + +type CreateUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateUserResponse"` + CreateUserResult struct { + User iam.User `xml:"User"` + } `xml:"CreateUserResult"` +} + +type DeleteUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteUserResponse"` +} + +type GetUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserResponse"` + GetUserResult struct { + User iam.User `xml:"User"` + } `xml:"GetUserResult"` +} + +type CreateAccessKeyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateAccessKeyResponse"` + CreateAccessKeyResult struct { + AccessKey iam.AccessKey `xml:"AccessKey"` + } `xml:"CreateAccessKeyResult"` +} + +type PutUserPolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ PutUserPolicyResponse"` +} + +type GetUserPolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserPolicyResponse"` + GetUserPolicyResult struct { + UserName string `xml:"UserName"` + PolicyName string `xml:"PolicyName"` + PolicyDocument string `xml:"PolicyDocument"` + } `xml:"GetUserPolicyResult"` +} + +type ErrorResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ErrorResponse"` + Error struct { + iam.ErrorDetails + Type string `xml:"Type"` + } `xml:"Error"` +} + +func (r *CommonResponse) SetRequestId() { + r.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano()) +} diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go new file mode 100644 index 000000000..18af1a919 --- /dev/null +++ b/weed/iamapi/iamapi_server.go @@ -0,0 +1,149 @@ +package iamapi + +// https://docs.aws.amazon.com/cli/latest/reference/iam/list-roles.html + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api" + . 
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "net/http" + "strings" +) + +type IamS3ApiConfig interface { + GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) + PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) + GetPolicies(policies *Policies) (err error) + PutPolicies(policies *Policies) (err error) +} + +type IamS3ApiConfigure struct { + option *IamServerOption + masterClient *wdclient.MasterClient +} + +type IamServerOption struct { + Masters string + Filer string + Port int + FilerGrpcAddress string + GrpcDialOption grpc.DialOption +} + +type IamApiServer struct { + s3ApiConfig IamS3ApiConfig + iam *s3api.IdentityAccessManagement +} + +var s3ApiConfigure IamS3ApiConfig + +func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) { + s3ApiConfigure = IamS3ApiConfigure{ + option: option, + masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(option.Masters, ",")), + } + s3Option := s3api.S3ApiServerOption{Filer: option.Filer} + iamApiServer = &IamApiServer{ + s3ApiConfig: s3ApiConfigure, + iam: s3api.NewIdentityAccessManagement(&s3Option), + } + + iamApiServer.registerRouter(router) + + return iamApiServer, nil +} + +func (iama *IamApiServer) registerRouter(router *mux.Router) { + // API Router + apiRouter := router.PathPrefix("/").Subrouter() + // ListBuckets + + // apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST")) + apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN)) + // + // NotFound + apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler) +} + +func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + var buf bytes.Buffer + err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + if buf.Len() > 0 { + if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil { + return err + } + } + return nil +} + +func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + buf := bytes.Buffer{} + if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil { + return fmt.Errorf("S3ConfigurationToText: %s", err) + } + return pb.WithGrpcFilerClient( + iam.option.FilerGrpcAddress, + iam.option.GrpcDialOption, + func(client filer_pb.SeaweedFilerClient) error { + if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes()); err != nil { + return err + } + return nil + }, + ) +} + +func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) { + var buf bytes.Buffer + err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamPoliciesFile, &buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + if buf.Len() == 0 { + policies.Policies = make(map[string]PolicyDocument) + return nil + } + if err := json.Unmarshal(buf.Bytes(), 
policies); err != nil { + return err + } + return nil +} + +func (iam IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) { + var b []byte + if b, err = json.Marshal(policies); err != nil { + return err + } + return pb.WithGrpcFilerClient( + iam.option.FilerGrpcAddress, + iam.option.GrpcDialOption, + func(client filer_pb.SeaweedFilerClient) error { + if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamPoliciesFile, b); err != nil { + return err + } + return nil + }, + ) +} diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go new file mode 100644 index 000000000..09aaf0ac8 --- /dev/null +++ b/weed/iamapi/iamapi_test.go @@ -0,0 +1,181 @@ +package iamapi + +import ( + "encoding/xml" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/gorilla/mux" + "github.com/jinzhu/copier" + "github.com/stretchr/testify/assert" + "net/http" + "net/http/httptest" + "testing" +) + +var GetS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error) +var PutS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error) +var GetPolicies func(policies *Policies) (err error) +var PutPolicies func(policies *Policies) (err error) + +var s3config = iam_pb.S3ApiConfiguration{} +var policiesFile = Policies{Policies: make(map[string]PolicyDocument)} +var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}} + +type iamS3ApiConfigureMock struct{} + +func (iam iamS3ApiConfigureMock) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + _ = copier.Copy(&s3cfg.Identities, &s3config.Identities) + return nil +} + +func (iam iamS3ApiConfigureMock) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + _ = copier.Copy(&s3config.Identities, &s3cfg.Identities) + return nil +} + +func (iam iamS3ApiConfigureMock) GetPolicies(policies *Policies) (err error) { + _ = copier.Copy(&policies, &policiesFile) + return nil +} + +func (iam iamS3ApiConfigureMock) PutPolicies(policies *Policies) (err error) { + _ = copier.Copy(&policiesFile, &policies) + return nil +} + +func TestCreateUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.CreateUserInput{UserName: userName} + req, _ := iam.New(session.New()).CreateUserRequest(params) + _ = req.Build() + out := CreateUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) + //assert.Equal(t, out.XMLName, "lol") +} + +func TestListUsers(t *testing.T) { + params := &iam.ListUsersInput{} + req, _ := iam.New(session.New()).ListUsersRequest(params) + _ = req.Build() + out := ListUsersResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestListAccessKeys(t *testing.T) { + svc := iam.New(session.New()) + params := &iam.ListAccessKeysInput{} + req, _ := svc.ListAccessKeysRequest(params) + _ = req.Build() + out := ListAccessKeysResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestGetUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.GetUserInput{UserName: userName} + req, _ := iam.New(session.New()).GetUserRequest(params) + _ = req.Build() + out := GetUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, 
err) + assert.Equal(t, http.StatusOK, response.Code) +} + +// Todo flat statement +func TestCreatePolicy(t *testing.T) { + params := &iam.CreatePolicyInput{ + PolicyName: aws.String("S3-read-only-example-bucket"), + PolicyDocument: aws.String(` + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Resource": [ + "arn:aws:s3:::EXAMPLE-BUCKET", + "arn:aws:s3:::EXAMPLE-BUCKET/*" + ] + } + ] + }`), + } + req, _ := iam.New(session.New()).CreatePolicyRequest(params) + _ = req.Build() + out := CreatePolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestPutUserPolicy(t *testing.T) { + userName := aws.String("Test") + params := &iam.PutUserPolicyInput{ + UserName: userName, + PolicyName: aws.String("S3-read-only-example-bucket"), + PolicyDocument: aws.String( + `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Resource": [ + "arn:aws:s3:::EXAMPLE-BUCKET", + "arn:aws:s3:::EXAMPLE-BUCKET/*" + ] + } + ] + }`), + } + req, _ := iam.New(session.New()).PutUserPolicyRequest(params) + _ = req.Build() + out := PutUserPolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestGetUserPolicy(t *testing.T) { + userName := aws.String("Test") + params := &iam.GetUserPolicyInput{UserName: userName, PolicyName: aws.String("S3-read-only-example-bucket")} + req, _ := iam.New(session.New()).GetUserPolicyRequest(params) + _ = req.Build() + out := GetUserPolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestDeleteUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.DeleteUserInput{UserName: userName} + req, _ := iam.New(session.New()).DeleteUserRequest(params) + _ = req.Build() + out := DeleteUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) { + rr := httptest.NewRecorder() + apiRouter := mux.NewRouter().SkipClean(true) + apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions) + apiRouter.ServeHTTP(rr, req) + return rr, xml.Unmarshal(rr.Body.Bytes(), &v) +} diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index cc1359961..ffd3e4938 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -86,6 +86,7 @@ func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest continue } + break } return ret, lastError diff --git a/weed/operation/buffer_pool.go b/weed/operation/buffer_pool.go new file mode 100644 index 000000000..9cbe4787f --- /dev/null +++ b/weed/operation/buffer_pool.go @@ -0,0 +1,24 @@ +package operation + +import ( + "github.com/valyala/bytebufferpool" + "sync/atomic" +) + +var bufferCounter int64 + +func GetBuffer() *bytebufferpool.ByteBuffer { + defer func() { + atomic.AddInt64(&bufferCounter, 1) + // println("+", bufferCounter) + }() + return bytebufferpool.Get() +} + +func PutBuffer(buf *bytebufferpool.ByteBuffer) { + defer func() { + atomic.AddInt64(&bufferCounter, -1) + // println("-", bufferCounter) + }() + bytebufferpool.Put(buf) +} diff --git 
a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 70428bb07..944186eeb 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -11,7 +11,6 @@ import ( "net/http" "net/textproto" "path/filepath" - "runtime/debug" "strings" "time" @@ -19,7 +18,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/valyala/bytebufferpool" ) type UploadResult struct { @@ -31,6 +29,7 @@ type UploadResult struct { Mime string `json:"mime,omitempty"` Gzip uint32 `json:"gzip,omitempty"` ContentMd5 string `json:"contentMd5,omitempty"` + RetryCount int `json:"-"` } func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk { @@ -96,6 +95,7 @@ func retriedUploadData(uploadUrl string, filename string, cipher bool, data []by for i := 0; i < 3; i++ { uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) if err == nil { + uploadResult.RetryCount = i return } else { glog.Warningf("uploading to %s: %v", uploadUrl, err) @@ -188,8 +188,8 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i } func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, originalDataSize int, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - buf := bytebufferpool.Get() - defer bytebufferpool.Put(buf) + buf := GetBuffer() + defer PutBuffer(buf) body_writer := multipart.NewWriter(buf) h := make(textproto.MIMEHeader) h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, fileNameEscaper.Replace(filename))) @@ -234,8 +234,12 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error // print("+") resp, post_err := HttpClient.Do(req) if post_err != nil { - glog.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err) - debug.PrintStack() + if strings.Contains(post_err.Error(), "connection reset by peer") || + strings.Contains(post_err.Error(), "use of closed network connection") { + resp, post_err = HttpClient.Do(req) + } + } + if post_err != nil { return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err) } // print("-") diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 607a2f26e..902c39514 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.24.0 +// protoc-gen-go v1.25.0 // protoc v3.12.3 // source: filer.proto diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go index 079fbd671..65bd85c84 100644 --- a/weed/pb/filer_pb/filer_client.go +++ b/weed/pb/filer_pb/filer_client.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "math" "os" "strings" "time" @@ -101,12 +102,16 @@ func SeaweedList(client SeaweedFilerClient, parentDirectoryPath, prefix string, } func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { - + // Redundancy limit to make it correctly judge whether it is the last file. 
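+	// e.g. with limit=100 the request asks for 101 entries: if a 101st entry arrives,
+	// the 100th entry is handed to the callback as "not the last one" rather than being wrongly marked last.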
+ redLimit := limit + if limit != math.MaxInt32 && limit != 0 { + redLimit = limit + 1 + } request := &ListEntriesRequest{ Directory: string(fullDirPath), Prefix: prefix, StartFromFileName: startFrom, - Limit: limit, + Limit: redLimit, InclusiveStartFrom: inclusive, } @@ -119,6 +124,7 @@ func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix } var prevEntry *Entry + count := 0 for { resp, recvErr := stream.Recv() if recvErr != nil { @@ -139,6 +145,10 @@ func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix } } prevEntry = resp.Entry + count++ + if count > int(limit) && limit != 0 { + prevEntry = nil + } } return nil @@ -172,6 +182,26 @@ func Exists(filerClient FilerClient, parentDirectoryPath string, entryName strin return } +func Touch(filerClient FilerClient, parentDirectoryPath string, entryName string, entry *Entry) (err error) { + + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &UpdateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request) + if err := UpdateEntry(client, request); err != nil { + glog.V(0).Infof("touch exists entry %v: %v", request, err) + return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err) + } + + return nil + }) + +} + func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go index 910114313..9efcd9bdc 100644 --- a/weed/pb/grpc_client_server.go +++ b/weed/pb/grpc_client_server.go @@ -3,6 +3,7 @@ package pb import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" "net/http" "strconv" "strings" @@ -108,51 +109,55 @@ func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts } func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) { - colonIndex := strings.LastIndex(server, ":") - if colonIndex < 0 { - return "", fmt.Errorf("server should have hostname:port format: %v", server) - } + return ParseServerAddress(server, 10000) +} - port, parseErr := strconv.ParseUint(server[colonIndex+1:], 10, 64) +func ParseServerAddress(server string, deltaPort int) (newServerAddress string, err error) { + + host, port, parseErr := hostAndPort(server) if parseErr != nil { return "", fmt.Errorf("server port parse error: %v", parseErr) } - grpcPort := int(port) + 10000 + newPort := int(port) + deltaPort - return fmt.Sprintf("%s:%d", server[:colonIndex], grpcPort), nil + return fmt.Sprintf("%s:%d", host, newPort), nil +} + +func hostAndPort(address string) (host string, port uint64, err error) { + colonIndex := strings.LastIndex(address, ":") + if colonIndex < 0 { + return "", 0, fmt.Errorf("server should have hostname:port format: %v", address) + } + port, err = strconv.ParseUint(address[colonIndex+1:], 10, 64) + if err != nil { + return "", 0, fmt.Errorf("server port parse error: %v", err) + } + + return address[:colonIndex], port, err } func ServerToGrpcAddress(server string) (serverGrpcAddress string) { - hostnameAndPort := strings.Split(server, ":") - if len(hostnameAndPort) != 2 { - return fmt.Sprintf("unexpected server address: %s", server) - } - port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + host, port, parseErr := hostAndPort(server) if parseErr != nil { - return 
fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1]) + glog.Fatalf("server address %s parse error: %v", server, parseErr) } grpcPort := int(port) + 10000 - return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort) + return fmt.Sprintf("%s:%d", host, grpcPort) } func GrpcAddressToServerAddress(grpcAddress string) (serverAddress string) { - hostnameAndPort := strings.Split(grpcAddress, ":") - if len(hostnameAndPort) != 2 { - return fmt.Sprintf("unexpected grpcAddress: %s", grpcAddress) - } - - grpcPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + host, grpcPort, parseErr := hostAndPort(grpcAddress) if parseErr != nil { - return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1]) + glog.Fatalf("server grpc address %s parse error: %v", grpcAddress, parseErr) } port := int(grpcPort) - 10000 - return fmt.Sprintf("%s:%d", hostnameAndPort[0], port) + return fmt.Sprintf("%s:%d", host, port) } func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { @@ -197,19 +202,3 @@ func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption }, filerGrpcAddress, grpcDialOption) } - -func ParseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { - hostnameAndPort := strings.Split(filer, ":") - if len(hostnameAndPort) != 2 { - return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort) - } - - filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) - if parseErr != nil { - return "", fmt.Errorf("filer port parse error: %v", parseErr) - } - - filerGrpcPort := int(filerPort) + 10000 - - return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil -} diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go index 93bc854cc..7d0b6281b 100644 --- a/weed/pb/iam_pb/iam.pb.go +++ b/weed/pb/iam_pb/iam.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.24.0 +// protoc-gen-go v1.25.0 // protoc v3.12.3 // source: iam.proto diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 6a1758ccc..cdb49d1e3 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -288,6 +288,7 @@ message LeaseAdminTokenRequest { int64 previous_token = 1; int64 previous_lock_time = 2; string lock_name = 3; + string client_name = 4; } message LeaseAdminTokenResponse { int64 token = 1; diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 9b49d2df5..29d8499f8 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.24.0 +// protoc-gen-go v1.25.0 // protoc v3.12.3 // source: master.proto @@ -2468,6 +2468,7 @@ type LeaseAdminTokenRequest struct { PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` + ClientName string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` } func (x *LeaseAdminTokenRequest) Reset() { @@ -2523,6 +2524,13 @@ func (x *LeaseAdminTokenRequest) GetLockName() string { return "" } +func (x *LeaseAdminTokenRequest) GetClientName() string { + if x != nil { + return x.ClientName + } + return "" +} + type LeaseAdminTokenResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3268,7 +3276,7 @@ var file_master_proto_rawDesc = []byte{ 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x8a, + 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0xab, 0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -3277,103 +3285,105 @@ var file_master_proto_rawDesc = []byte{ 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17, 0x4c, - 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x0a, - 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x52, - 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x69, - 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, - 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, - 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 
0x52, - 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, 0x65, - 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, - 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, 0x0a, - 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1f, - 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, - 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, 0x2e, - 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, - 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, - 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, - 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x22, - 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 
0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, - 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, - 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, - 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, - 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, - 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, - 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, - 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, - 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x61, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17, + 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, + 0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, + 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, + 0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, + 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, + 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 
0x4b, 0x65, 0x65, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, + 
0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, + 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, + 0x61, 0x73, 0x74, 0x65, 
0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go index 90b4b724a..591406347 100644 --- a/weed/pb/messaging_pb/messaging.pb.go +++ b/weed/pb/messaging_pb/messaging.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.24.0 +// protoc-gen-go v1.25.0 // protoc v3.12.3 // source: messaging.proto diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go index c4f733f5c..cae9e018f 100644 --- a/weed/pb/volume_info.go +++ b/weed/pb/volume_info.go @@ -15,40 +15,49 @@ import ( ) // MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil -func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, error) { +func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeInfo, hasRemoteFile bool, hasVolumeInfoFile bool, err error) { - volumeInfo := &volume_server_pb.VolumeInfo{} + volumeInfo = &volume_server_pb.VolumeInfo{} glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName) if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead { if !exists { - return volumeInfo, false, nil + return } + hasVolumeInfoFile = true if !canRead { glog.Warningf("can not read %s", fileName) - return volumeInfo, false, fmt.Errorf("can not read %s", fileName) + err = fmt.Errorf("can not read %s", fileName) + return } - return volumeInfo, false, nil + return } + hasVolumeInfoFile = true + glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) tierData, readErr := ioutil.ReadFile(fileName) if readErr != nil { glog.Warningf("fail to read %s : %v", fileName, readErr) - return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr) + err = fmt.Errorf("fail to read %s : %v", fileName, readErr) + return + } glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) - if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { + if err = jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { glog.Warningf("unmarshal error: %v", err) - return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err) + err = fmt.Errorf("unmarshal error: %v", err) + return } if len(volumeInfo.GetFiles()) == 0 { - return volumeInfo, false, nil + return } - return volumeInfo, true, nil + hasRemoteFile = true + + return } func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error { diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 7faf5d4f5..f9836c402 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -52,6 +52,11 @@ service VolumeServer { rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) { } + rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) { + } + rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) { + } + rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) { } rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) { @@ -253,6 +258,25 @@ message CopyFileResponse { bytes file_content = 1; } +message ReadNeedleBlobRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; + int64 offset = 3; // actual offset + int32 size = 4; +} +message ReadNeedleBlobResponse { + bytes needle_blob = 1; +} + +message WriteNeedleBlobRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; + int32 size = 
3; + bytes needle_blob = 4; +} +message WriteNeedleBlobResponse { +} + message VolumeTailSenderRequest { uint32 volume_id = 1; uint64 since_ns = 2; diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index dbcc738e5..c642142ba 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.24.0 +// protoc-gen-go v1.25.0 // protoc v3.12.3 // source: volume_server.proto @@ -1973,6 +1973,233 @@ func (x *CopyFileResponse) GetFileContent() []byte { return nil } +type ReadNeedleBlobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset + Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *ReadNeedleBlobRequest) Reset() { + *x = ReadNeedleBlobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadNeedleBlobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadNeedleBlobRequest) ProtoMessage() {} + +func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead. 
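The `weed/pb/volume_server.proto` hunk above adds a `ReadNeedleBlob` / `WriteNeedleBlob` RPC pair whose messages carry a volume id, a needle id, the actual byte offset in the volume file, the needle size, and the raw needle blob bytes. Below is a minimal, hedged sketch (not part of this change set) of how a caller could drive the pair end to end; it assumes the standard protoc-generated client stubs for this service (`volume_server_pb.NewVolumeServerClient` and the per-RPC methods the regenerated `volume_server.pb.go` would expose), and the address, volume/needle ids, offset and size values are placeholders only.

```go
// Hedged sketch: exercise the new ReadNeedleBlob / WriteNeedleBlob RPCs.
// Assumes the standard protoc-generated VolumeServerClient stubs; all
// concrete values (address, ids, offset, size) are placeholders.
package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)

	// Read one raw needle blob. Per the proto comment, offset is the
	// actual byte offset within the volume data file.
	readResp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: 7,        // placeholder volume id
		NeedleId: 12345,    // placeholder needle id
		Offset:   8 * 1024, // placeholder actual byte offset
		Size:     512,      // placeholder needle size
	})
	if err != nil {
		log.Fatal(err)
	}

	// Write the same blob back, e.g. to another volume server.
	if _, err = client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   7,
		NeedleId:   12345,
		Size:       512,
		NeedleBlob: readResp.GetNeedleBlob(),
	}); err != nil {
		log.Fatal(err)
	}
}
```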
+func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{38} +} + +func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +type ReadNeedleBlobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleBlob []byte `protobuf:"bytes,1,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` +} + +func (x *ReadNeedleBlobResponse) Reset() { + *x = ReadNeedleBlobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadNeedleBlobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadNeedleBlobResponse) ProtoMessage() {} + +func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead. +func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{39} +} + +func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte { + if x != nil { + return x.NeedleBlob + } + return nil +} + +type WriteNeedleBlobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + NeedleBlob []byte `protobuf:"bytes,4,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` +} + +func (x *WriteNeedleBlobRequest) Reset() { + *x = WriteNeedleBlobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteNeedleBlobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteNeedleBlobRequest) ProtoMessage() {} + +func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead. 
+func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{40} +} + +func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetNeedleBlob() []byte { + if x != nil { + return x.NeedleBlob + } + return nil +} + +type WriteNeedleBlobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *WriteNeedleBlobResponse) Reset() { + *x = WriteNeedleBlobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteNeedleBlobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteNeedleBlobResponse) ProtoMessage() {} + +func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead. +func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{41} +} + type VolumeTailSenderRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1986,7 +2213,7 @@ type VolumeTailSenderRequest struct { func (x *VolumeTailSenderRequest) Reset() { *x = VolumeTailSenderRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[38] + mi := &file_volume_server_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1999,7 +2226,7 @@ func (x *VolumeTailSenderRequest) String() string { func (*VolumeTailSenderRequest) ProtoMessage() {} func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[38] + mi := &file_volume_server_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2012,7 +2239,7 @@ func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead. 
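Earlier in this diff, `weed/pb/volume_info.go` reworks `MaybeLoadVolumeInfo` to return named results `(volumeInfo, hasRemoteFile, hasVolumeInfoFile, err)` instead of a single boolean, while keeping the guarantee that `volumeInfo` is never nil. A minimal caller sketch (not part of this change set), with a placeholder `.vif` path, showing how the two booleans and the error can be told apart based on the logic in that hunk:

```go
// Hedged sketch of a caller for the reworked MaybeLoadVolumeInfo.
// The .vif path is a placeholder.
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb"
)

func main() {
	volumeInfo, hasRemoteFile, hasVolumeInfoFile, err := pb.MaybeLoadVolumeInfo("/data/1.vif")
	if err != nil {
		// Per the hunk, err is only set when the .vif file exists but
		// cannot be read or unmarshalled.
		fmt.Println("volume info file exists but could not be loaded:", err)
		return
	}
	if !hasVolumeInfoFile {
		fmt.Println("no volume info file; treat as a purely local volume")
		return
	}
	if hasRemoteFile {
		// volumeInfo is non-nil in all cases, so GetFiles() is safe to call.
		fmt.Println("volume has remote (tiered) copies:", volumeInfo.GetFiles())
	}
}
```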
func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{38} + return file_volume_server_proto_rawDescGZIP(), []int{42} } func (x *VolumeTailSenderRequest) GetVolumeId() uint32 { @@ -2049,7 +2276,7 @@ type VolumeTailSenderResponse struct { func (x *VolumeTailSenderResponse) Reset() { *x = VolumeTailSenderResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[39] + mi := &file_volume_server_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2062,7 +2289,7 @@ func (x *VolumeTailSenderResponse) String() string { func (*VolumeTailSenderResponse) ProtoMessage() {} func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[39] + mi := &file_volume_server_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2075,7 +2302,7 @@ func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead. func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{39} + return file_volume_server_proto_rawDescGZIP(), []int{43} } func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte { @@ -2113,7 +2340,7 @@ type VolumeTailReceiverRequest struct { func (x *VolumeTailReceiverRequest) Reset() { *x = VolumeTailReceiverRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[40] + mi := &file_volume_server_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2126,7 +2353,7 @@ func (x *VolumeTailReceiverRequest) String() string { func (*VolumeTailReceiverRequest) ProtoMessage() {} func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[40] + mi := &file_volume_server_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2139,7 +2366,7 @@ func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{40} + return file_volume_server_proto_rawDescGZIP(), []int{44} } func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 { @@ -2179,7 +2406,7 @@ type VolumeTailReceiverResponse struct { func (x *VolumeTailReceiverResponse) Reset() { *x = VolumeTailReceiverResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[41] + mi := &file_volume_server_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2192,7 +2419,7 @@ func (x *VolumeTailReceiverResponse) String() string { func (*VolumeTailReceiverResponse) ProtoMessage() {} func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[41] + mi := &file_volume_server_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2205,7 +2432,7 @@ func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead. func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{41} + return file_volume_server_proto_rawDescGZIP(), []int{45} } type VolumeEcShardsGenerateRequest struct { @@ -2220,7 +2447,7 @@ type VolumeEcShardsGenerateRequest struct { func (x *VolumeEcShardsGenerateRequest) Reset() { *x = VolumeEcShardsGenerateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[42] + mi := &file_volume_server_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2233,7 +2460,7 @@ func (x *VolumeEcShardsGenerateRequest) String() string { func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[42] + mi := &file_volume_server_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2246,7 +2473,7 @@ func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{42} + return file_volume_server_proto_rawDescGZIP(), []int{46} } func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { @@ -2272,7 +2499,7 @@ type VolumeEcShardsGenerateResponse struct { func (x *VolumeEcShardsGenerateResponse) Reset() { *x = VolumeEcShardsGenerateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[43] + mi := &file_volume_server_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2285,7 +2512,7 @@ func (x *VolumeEcShardsGenerateResponse) String() string { func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[43] + mi := &file_volume_server_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2298,7 +2525,7 @@ func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{43} + return file_volume_server_proto_rawDescGZIP(), []int{47} } type VolumeEcShardsRebuildRequest struct { @@ -2313,7 +2540,7 @@ type VolumeEcShardsRebuildRequest struct { func (x *VolumeEcShardsRebuildRequest) Reset() { *x = VolumeEcShardsRebuildRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[44] + mi := &file_volume_server_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2326,7 +2553,7 @@ func (x *VolumeEcShardsRebuildRequest) String() string { func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[44] + mi := &file_volume_server_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2339,7 +2566,7 @@ func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{44} + return file_volume_server_proto_rawDescGZIP(), []int{48} } func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { @@ -2367,7 +2594,7 @@ type VolumeEcShardsRebuildResponse struct { func (x *VolumeEcShardsRebuildResponse) Reset() { *x = VolumeEcShardsRebuildResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[45] + mi := &file_volume_server_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2380,7 +2607,7 @@ func (x *VolumeEcShardsRebuildResponse) String() string { func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[45] + mi := &file_volume_server_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2393,7 +2620,7 @@ func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{45} + return file_volume_server_proto_rawDescGZIP(), []int{49} } func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { @@ -2420,7 +2647,7 @@ type VolumeEcShardsCopyRequest struct { func (x *VolumeEcShardsCopyRequest) Reset() { *x = VolumeEcShardsCopyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[46] + mi := &file_volume_server_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2433,7 +2660,7 @@ func (x *VolumeEcShardsCopyRequest) String() string { func (*VolumeEcShardsCopyRequest) ProtoMessage() {} func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[46] + mi := &file_volume_server_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2446,7 +2673,7 @@ func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{46} + return file_volume_server_proto_rawDescGZIP(), []int{50} } func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { @@ -2507,7 +2734,7 @@ type VolumeEcShardsCopyResponse struct { func (x *VolumeEcShardsCopyResponse) Reset() { *x = VolumeEcShardsCopyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[47] + mi := &file_volume_server_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2520,7 +2747,7 @@ func (x *VolumeEcShardsCopyResponse) String() string { func (*VolumeEcShardsCopyResponse) ProtoMessage() {} func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[47] + mi := &file_volume_server_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2533,7 +2760,7 @@ func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{47} + return file_volume_server_proto_rawDescGZIP(), []int{51} } type VolumeEcShardsDeleteRequest struct { @@ -2549,7 +2776,7 @@ type VolumeEcShardsDeleteRequest struct { func (x *VolumeEcShardsDeleteRequest) Reset() { *x = VolumeEcShardsDeleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[48] + mi := &file_volume_server_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2562,7 +2789,7 @@ func (x *VolumeEcShardsDeleteRequest) String() string { func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[48] + mi := &file_volume_server_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2575,7 +2802,7 @@ func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{48} + return file_volume_server_proto_rawDescGZIP(), []int{52} } func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { @@ -2608,7 +2835,7 @@ type VolumeEcShardsDeleteResponse struct { func (x *VolumeEcShardsDeleteResponse) Reset() { *x = VolumeEcShardsDeleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[49] + mi := &file_volume_server_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2621,7 +2848,7 @@ func (x *VolumeEcShardsDeleteResponse) String() string { func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[49] + mi := &file_volume_server_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2634,7 +2861,7 @@ func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{49} + return file_volume_server_proto_rawDescGZIP(), []int{53} } type VolumeEcShardsMountRequest struct { @@ -2650,7 +2877,7 @@ type VolumeEcShardsMountRequest struct { func (x *VolumeEcShardsMountRequest) Reset() { *x = VolumeEcShardsMountRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[50] + mi := &file_volume_server_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2663,7 +2890,7 @@ func (x *VolumeEcShardsMountRequest) String() string { func (*VolumeEcShardsMountRequest) ProtoMessage() {} func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[50] + mi := &file_volume_server_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2676,7 +2903,7 @@ func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{50} + return file_volume_server_proto_rawDescGZIP(), []int{54} } func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 { @@ -2709,7 +2936,7 @@ type VolumeEcShardsMountResponse struct { func (x *VolumeEcShardsMountResponse) Reset() { *x = VolumeEcShardsMountResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[51] + mi := &file_volume_server_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2722,7 +2949,7 @@ func (x *VolumeEcShardsMountResponse) String() string { func (*VolumeEcShardsMountResponse) ProtoMessage() {} func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[51] + mi := &file_volume_server_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2735,7 +2962,7 @@ func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{51} + return file_volume_server_proto_rawDescGZIP(), []int{55} } type VolumeEcShardsUnmountRequest struct { @@ -2750,7 +2977,7 @@ type VolumeEcShardsUnmountRequest struct { func (x *VolumeEcShardsUnmountRequest) Reset() { *x = VolumeEcShardsUnmountRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[52] + mi := &file_volume_server_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2763,7 +2990,7 @@ func (x *VolumeEcShardsUnmountRequest) String() string { func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[52] + mi := &file_volume_server_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2776,7 +3003,7 @@ func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{52} + return file_volume_server_proto_rawDescGZIP(), []int{56} } func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { @@ -2802,7 +3029,7 @@ type VolumeEcShardsUnmountResponse struct { func (x *VolumeEcShardsUnmountResponse) Reset() { *x = VolumeEcShardsUnmountResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[53] + mi := &file_volume_server_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2815,7 +3042,7 @@ func (x *VolumeEcShardsUnmountResponse) String() string { func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[53] + mi := &file_volume_server_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2828,7 +3055,7 @@ func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{53} + return file_volume_server_proto_rawDescGZIP(), []int{57} } type VolumeEcShardReadRequest struct { @@ -2846,7 +3073,7 @@ type VolumeEcShardReadRequest struct { func (x *VolumeEcShardReadRequest) Reset() { *x = VolumeEcShardReadRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[54] + mi := &file_volume_server_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2859,7 +3086,7 @@ func (x *VolumeEcShardReadRequest) String() string { func (*VolumeEcShardReadRequest) ProtoMessage() {} func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[54] + mi := &file_volume_server_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2872,7 +3099,7 @@ func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{54} + return file_volume_server_proto_rawDescGZIP(), []int{58} } func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 { @@ -2922,7 +3149,7 @@ type VolumeEcShardReadResponse struct { func (x *VolumeEcShardReadResponse) Reset() { *x = VolumeEcShardReadResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[55] + mi := &file_volume_server_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2935,7 +3162,7 @@ func (x *VolumeEcShardReadResponse) String() string { func (*VolumeEcShardReadResponse) ProtoMessage() {} func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[55] + mi := &file_volume_server_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2948,7 +3175,7 @@ func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{55} + return file_volume_server_proto_rawDescGZIP(), []int{59} } func (x *VolumeEcShardReadResponse) GetData() []byte { @@ -2979,7 +3206,7 @@ type VolumeEcBlobDeleteRequest struct { func (x *VolumeEcBlobDeleteRequest) Reset() { *x = VolumeEcBlobDeleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[56] + mi := &file_volume_server_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2992,7 +3219,7 @@ func (x *VolumeEcBlobDeleteRequest) String() string { func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[56] + mi := &file_volume_server_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3005,7 +3232,7 @@ func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{56} + return file_volume_server_proto_rawDescGZIP(), []int{60} } func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { @@ -3045,7 +3272,7 @@ type VolumeEcBlobDeleteResponse struct { func (x *VolumeEcBlobDeleteResponse) Reset() { *x = VolumeEcBlobDeleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[57] + mi := &file_volume_server_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3058,7 +3285,7 @@ func (x *VolumeEcBlobDeleteResponse) String() string { func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[57] + mi := &file_volume_server_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3071,7 +3298,7 @@ func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead. func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{57} + return file_volume_server_proto_rawDescGZIP(), []int{61} } type VolumeEcShardsToVolumeRequest struct { @@ -3086,7 +3313,7 @@ type VolumeEcShardsToVolumeRequest struct { func (x *VolumeEcShardsToVolumeRequest) Reset() { *x = VolumeEcShardsToVolumeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[58] + mi := &file_volume_server_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3099,7 +3326,7 @@ func (x *VolumeEcShardsToVolumeRequest) String() string { func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[58] + mi := &file_volume_server_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3112,7 +3339,7 @@ func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{58} + return file_volume_server_proto_rawDescGZIP(), []int{62} } func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { @@ -3138,7 +3365,7 @@ type VolumeEcShardsToVolumeResponse struct { func (x *VolumeEcShardsToVolumeResponse) Reset() { *x = VolumeEcShardsToVolumeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[59] + mi := &file_volume_server_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3151,7 +3378,7 @@ func (x *VolumeEcShardsToVolumeResponse) String() string { func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[59] + mi := &file_volume_server_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3164,7 +3391,7 @@ func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{59} + return file_volume_server_proto_rawDescGZIP(), []int{63} } type ReadVolumeFileStatusRequest struct { @@ -3178,7 +3405,7 @@ type ReadVolumeFileStatusRequest struct { func (x *ReadVolumeFileStatusRequest) Reset() { *x = ReadVolumeFileStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[60] + mi := &file_volume_server_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3191,7 +3418,7 @@ func (x *ReadVolumeFileStatusRequest) String() string { func (*ReadVolumeFileStatusRequest) ProtoMessage() {} func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[60] + mi := &file_volume_server_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3204,7 +3431,7 @@ func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead. 
func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{60} + return file_volume_server_proto_rawDescGZIP(), []int{64} } func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { @@ -3233,7 +3460,7 @@ type ReadVolumeFileStatusResponse struct { func (x *ReadVolumeFileStatusResponse) Reset() { *x = ReadVolumeFileStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[61] + mi := &file_volume_server_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3246,7 +3473,7 @@ func (x *ReadVolumeFileStatusResponse) String() string { func (*ReadVolumeFileStatusResponse) ProtoMessage() {} func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[61] + mi := &file_volume_server_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3259,7 +3486,7 @@ func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead. func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{61} + return file_volume_server_proto_rawDescGZIP(), []int{65} } func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { @@ -3342,7 +3569,7 @@ type DiskStatus struct { func (x *DiskStatus) Reset() { *x = DiskStatus{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[62] + mi := &file_volume_server_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3355,7 +3582,7 @@ func (x *DiskStatus) String() string { func (*DiskStatus) ProtoMessage() {} func (x *DiskStatus) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[62] + mi := &file_volume_server_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3368,7 +3595,7 @@ func (x *DiskStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead. func (*DiskStatus) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{62} + return file_volume_server_proto_rawDescGZIP(), []int{66} } func (x *DiskStatus) GetDir() string { @@ -3437,7 +3664,7 @@ type MemStatus struct { func (x *MemStatus) Reset() { *x = MemStatus{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[63] + mi := &file_volume_server_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3450,7 +3677,7 @@ func (x *MemStatus) String() string { func (*MemStatus) ProtoMessage() {} func (x *MemStatus) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[63] + mi := &file_volume_server_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3463,7 +3690,7 @@ func (x *MemStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use MemStatus.ProtoReflect.Descriptor instead. 
func (*MemStatus) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{63} + return file_volume_server_proto_rawDescGZIP(), []int{67} } func (x *MemStatus) GetGoroutines() int32 { @@ -3533,7 +3760,7 @@ type RemoteFile struct { func (x *RemoteFile) Reset() { *x = RemoteFile{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[64] + mi := &file_volume_server_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3546,7 +3773,7 @@ func (x *RemoteFile) String() string { func (*RemoteFile) ProtoMessage() {} func (x *RemoteFile) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[64] + mi := &file_volume_server_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3559,7 +3786,7 @@ func (x *RemoteFile) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead. func (*RemoteFile) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{64} + return file_volume_server_proto_rawDescGZIP(), []int{68} } func (x *RemoteFile) GetBackendType() string { @@ -3624,7 +3851,7 @@ type VolumeInfo struct { func (x *VolumeInfo) Reset() { *x = VolumeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[65] + mi := &file_volume_server_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3637,7 +3864,7 @@ func (x *VolumeInfo) String() string { func (*VolumeInfo) ProtoMessage() {} func (x *VolumeInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[65] + mi := &file_volume_server_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3650,7 +3877,7 @@ func (x *VolumeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead. func (*VolumeInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{65} + return file_volume_server_proto_rawDescGZIP(), []int{69} } func (x *VolumeInfo) GetFiles() []*RemoteFile { @@ -3688,7 +3915,7 @@ type VolumeTierMoveDatToRemoteRequest struct { func (x *VolumeTierMoveDatToRemoteRequest) Reset() { *x = VolumeTierMoveDatToRemoteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[66] + mi := &file_volume_server_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3701,7 +3928,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) String() string { func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[66] + mi := &file_volume_server_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3714,7 +3941,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{66} + return file_volume_server_proto_rawDescGZIP(), []int{70} } func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { @@ -3757,7 +3984,7 @@ type VolumeTierMoveDatToRemoteResponse struct { func (x *VolumeTierMoveDatToRemoteResponse) Reset() { *x = VolumeTierMoveDatToRemoteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[67] + mi := &file_volume_server_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3770,7 +3997,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) String() string { func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[67] + mi := &file_volume_server_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3783,7 +4010,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{67} + return file_volume_server_proto_rawDescGZIP(), []int{71} } func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { @@ -3813,7 +4040,7 @@ type VolumeTierMoveDatFromRemoteRequest struct { func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { *x = VolumeTierMoveDatFromRemoteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[68] + mi := &file_volume_server_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3826,7 +4053,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) String() string { func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[68] + mi := &file_volume_server_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3839,7 +4066,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{68} + return file_volume_server_proto_rawDescGZIP(), []int{72} } func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { @@ -3875,7 +4102,7 @@ type VolumeTierMoveDatFromRemoteResponse struct { func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { *x = VolumeTierMoveDatFromRemoteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[69] + mi := &file_volume_server_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3888,7 +4115,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) String() string { func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[69] + mi := &file_volume_server_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3901,7 +4128,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Messag // Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{69} + return file_volume_server_proto_rawDescGZIP(), []int{73} } func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { @@ -3927,7 +4154,7 @@ type VolumeServerStatusRequest struct { func (x *VolumeServerStatusRequest) Reset() { *x = VolumeServerStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[70] + mi := &file_volume_server_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3940,7 +4167,7 @@ func (x *VolumeServerStatusRequest) String() string { func (*VolumeServerStatusRequest) ProtoMessage() {} func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[70] + mi := &file_volume_server_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3953,7 +4180,7 @@ func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{70} + return file_volume_server_proto_rawDescGZIP(), []int{74} } type VolumeServerStatusResponse struct { @@ -3968,7 +4195,7 @@ type VolumeServerStatusResponse struct { func (x *VolumeServerStatusResponse) Reset() { *x = VolumeServerStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[71] + mi := &file_volume_server_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3981,7 +4208,7 @@ func (x *VolumeServerStatusResponse) String() string { func (*VolumeServerStatusResponse) ProtoMessage() {} func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[71] + mi := &file_volume_server_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3994,7 +4221,7 @@ func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{71} + return file_volume_server_proto_rawDescGZIP(), []int{75} } func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { @@ -4020,7 +4247,7 @@ type VolumeServerLeaveRequest struct { func (x *VolumeServerLeaveRequest) Reset() { *x = VolumeServerLeaveRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[72] + mi := &file_volume_server_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4033,7 +4260,7 @@ func (x *VolumeServerLeaveRequest) String() string { func (*VolumeServerLeaveRequest) ProtoMessage() {} func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[72] + mi := &file_volume_server_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4046,7 +4273,7 @@ func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead. func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{72} + return file_volume_server_proto_rawDescGZIP(), []int{76} } type VolumeServerLeaveResponse struct { @@ -4058,7 +4285,7 @@ type VolumeServerLeaveResponse struct { func (x *VolumeServerLeaveResponse) Reset() { *x = VolumeServerLeaveResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[73] + mi := &file_volume_server_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4071,7 +4298,7 @@ func (x *VolumeServerLeaveResponse) String() string { func (*VolumeServerLeaveResponse) ProtoMessage() {} func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[73] + mi := &file_volume_server_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4084,7 +4311,7 @@ func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead. 
func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{73} + return file_volume_server_proto_rawDescGZIP(), []int{77} } // select on volume servers @@ -4103,7 +4330,7 @@ type QueryRequest struct { func (x *QueryRequest) Reset() { *x = QueryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[74] + mi := &file_volume_server_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4116,7 +4343,7 @@ func (x *QueryRequest) String() string { func (*QueryRequest) ProtoMessage() {} func (x *QueryRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[74] + mi := &file_volume_server_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4129,7 +4356,7 @@ func (x *QueryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. func (*QueryRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74} + return file_volume_server_proto_rawDescGZIP(), []int{78} } func (x *QueryRequest) GetSelections() []string { @@ -4178,7 +4405,7 @@ type QueriedStripe struct { func (x *QueriedStripe) Reset() { *x = QueriedStripe{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[75] + mi := &file_volume_server_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4191,7 +4418,7 @@ func (x *QueriedStripe) String() string { func (*QueriedStripe) ProtoMessage() {} func (x *QueriedStripe) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[75] + mi := &file_volume_server_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4204,7 +4431,7 @@ func (x *QueriedStripe) ProtoReflect() protoreflect.Message { // Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. func (*QueriedStripe) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{75} + return file_volume_server_proto_rawDescGZIP(), []int{79} } func (x *QueriedStripe) GetRecords() []byte { @@ -4226,7 +4453,7 @@ type VolumeNeedleStatusRequest struct { func (x *VolumeNeedleStatusRequest) Reset() { *x = VolumeNeedleStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[76] + mi := &file_volume_server_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4239,7 +4466,7 @@ func (x *VolumeNeedleStatusRequest) String() string { func (*VolumeNeedleStatusRequest) ProtoMessage() {} func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[76] + mi := &file_volume_server_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4252,7 +4479,7 @@ func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{76} + return file_volume_server_proto_rawDescGZIP(), []int{80} } func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 { @@ -4285,7 +4512,7 @@ type VolumeNeedleStatusResponse struct { func (x *VolumeNeedleStatusResponse) Reset() { *x = VolumeNeedleStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[77] + mi := &file_volume_server_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4298,7 +4525,7 @@ func (x *VolumeNeedleStatusResponse) String() string { func (*VolumeNeedleStatusResponse) ProtoMessage() {} func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[77] + mi := &file_volume_server_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4311,7 +4538,7 @@ func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead. func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{77} + return file_volume_server_proto_rawDescGZIP(), []int{81} } func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 { @@ -4369,7 +4596,7 @@ type QueryRequest_Filter struct { func (x *QueryRequest_Filter) Reset() { *x = QueryRequest_Filter{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[78] + mi := &file_volume_server_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4382,7 +4609,7 @@ func (x *QueryRequest_Filter) String() string { func (*QueryRequest_Filter) ProtoMessage() {} func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[78] + mi := &file_volume_server_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4395,7 +4622,7 @@ func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. 
func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 0} + return file_volume_server_proto_rawDescGZIP(), []int{78, 0} } func (x *QueryRequest_Filter) GetField() string { @@ -4434,7 +4661,7 @@ type QueryRequest_InputSerialization struct { func (x *QueryRequest_InputSerialization) Reset() { *x = QueryRequest_InputSerialization{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[79] + mi := &file_volume_server_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4447,7 +4674,7 @@ func (x *QueryRequest_InputSerialization) String() string { func (*QueryRequest_InputSerialization) ProtoMessage() {} func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[79] + mi := &file_volume_server_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4460,7 +4687,7 @@ func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 1} + return file_volume_server_proto_rawDescGZIP(), []int{78, 1} } func (x *QueryRequest_InputSerialization) GetCompressionType() string { @@ -4503,7 +4730,7 @@ type QueryRequest_OutputSerialization struct { func (x *QueryRequest_OutputSerialization) Reset() { *x = QueryRequest_OutputSerialization{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[80] + mi := &file_volume_server_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4516,7 +4743,7 @@ func (x *QueryRequest_OutputSerialization) String() string { func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[80] + mi := &file_volume_server_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4529,7 +4756,7 @@ func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 2} + return file_volume_server_proto_rawDescGZIP(), []int{78, 2} } func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -4564,7 +4791,7 @@ type QueryRequest_InputSerialization_CSVInput struct { func (x *QueryRequest_InputSerialization_CSVInput) Reset() { *x = QueryRequest_InputSerialization_CSVInput{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[81] + mi := &file_volume_server_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4577,7 +4804,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) String() string { func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[81] + mi := &file_volume_server_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4590,7 +4817,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.M // Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 1, 0} + return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 0} } func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -4653,7 +4880,7 @@ type QueryRequest_InputSerialization_JSONInput struct { func (x *QueryRequest_InputSerialization_JSONInput) Reset() { *x = QueryRequest_InputSerialization_JSONInput{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[82] + mi := &file_volume_server_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4666,7 +4893,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) String() string { func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[82] + mi := &file_volume_server_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4679,7 +4906,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect. // Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 1, 1} + return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 1} } func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -4698,7 +4925,7 @@ type QueryRequest_InputSerialization_ParquetInput struct { func (x *QueryRequest_InputSerialization_ParquetInput) Reset() { *x = QueryRequest_InputSerialization_ParquetInput{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[83] + mi := &file_volume_server_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4711,7 +4938,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) String() string { func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[83] + mi := &file_volume_server_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4724,7 +4951,7 @@ func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protorefle // Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 1, 2} + return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 2} } type QueryRequest_OutputSerialization_CSVOutput struct { @@ -4742,7 +4969,7 @@ type QueryRequest_OutputSerialization_CSVOutput struct { func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() { *x = QueryRequest_OutputSerialization_CSVOutput{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[84] + mi := &file_volume_server_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4755,7 +4982,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[84] + mi := &file_volume_server_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4768,7 +4995,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect // Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 2, 0} + return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 0} } func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -4817,7 +5044,7 @@ type QueryRequest_OutputSerialization_JSONOutput struct { func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() { *x = QueryRequest_OutputSerialization_JSONOutput{} if protoimpl.UnsafeEnabled { - mi := &file_volume_server_proto_msgTypes[85] + mi := &file_volume_server_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4830,7 +5057,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[85] + mi := &file_volume_server_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4843,7 +5070,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflec // Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead. func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74, 2, 1} + return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 1} } func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -5037,510 +5264,545 @@ var file_volume_server_proto_rawDesc = []byte{ 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, - 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, - 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, - 0x84, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, - 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, - 0x64, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 
0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, - 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x15, 0x52, 0x65, 0x61, + 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x39, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, + 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, + 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, + 0x6c, 0x6f, 0x62, 0x22, 0x87, 0x01, 0x0a, 0x16, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, + 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x19, 0x0a, + 0x17, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, - 0x0a, 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 
0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, - 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, - 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, - 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, - 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x84, + 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x75, + 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, 0x74, + 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, + 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, + 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, + 0x1d, 0x56, 
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, - 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, - 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, - 0x46, 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, - 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, - 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, - 0x69, 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, 0x0a, + 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 
0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x12, + 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, + 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, 0x69, + 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, + 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0x76, 0x0a, 0x1a, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, 0x0a, + 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, + 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, + 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 
0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, 0x64, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x22, 0x8a, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 
0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, 0x72, 0x65, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x55, 0x73, + 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, + 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, + 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0xd8, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, + 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, + 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, - 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, - 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, - 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, - 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 
0x6d, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, - 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x99, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, - 
0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, - 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x8a, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x64, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, - 0x65, 0x18, 0x04, 0x20, 
0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, 0x72, 0x65, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x55, - 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, - 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, - 0x0a, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, - 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, - 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, - 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, - 0x68, 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0xd8, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, - 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, - 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, - 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x32, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, - 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, - 0x6c, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, - 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0xc8, 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, - 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, - 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, - 0x65, 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, - 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, - 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, - 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, - 0x92, 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, - 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x46, 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, - 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, - 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 
0x13, 0x70, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, - 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, - 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, - 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, - 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, - 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x1a, 0x0a, 0x18, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, - 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, - 0x14, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, - 
0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x13, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, - 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65, + 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, + 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92, + 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x64, 0x61, 
0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46, + 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, + 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73, + 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, - 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, - 0x5a, 0x0a, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 
0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, - 0x52, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, - 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 
0x75, 0x74, 0x12, 0x5a, + 0x0a, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, - 0x75, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, - 0x1a, 0xc8, 0x02, 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, - 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, - 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, - 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, - 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, - 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, - 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, - 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, - 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, - 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, - 0x13, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, - 
0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x12, 0x5e, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x1a, 0xe3, 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, - 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, - 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, - 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, - 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, - 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, - 0x22, 0x29, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x55, 0x0a, 0x19, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, - 0x49, 0x64, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, - 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 
0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, - 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, - 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x63, 0x72, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x63, 0x72, - 0x63, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x74, 0x74, 0x6c, 0x32, 0xd8, 0x1f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, - 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, - 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, - 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x61, 0x63, 0x75, - 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x2b, - 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, - 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, - 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, - 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, - 0x75, 0x70, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, + 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61, + 0x72, 0x71, 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, + 0xc8, 0x02, 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, + 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, + 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, + 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53, + 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50, + 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 
0x75, 0x74, 0x70, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x5e, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x1a, 0xe3, 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, + 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, + 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, + 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, + 0x29, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x55, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, + 
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, + 0x64, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, + 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x63, 0x72, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x63, 0x72, 0x63, + 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, + 0x74, 0x6c, 0x32, 0xa9, 0x21, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, - 0x0a, 0x0e, 0x41, 0x6c, 
0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, - 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x2e, 0x76, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, - 0x12, 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, - 0x0a, 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x76, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x28, 0x2e, 0x76, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, + 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 
0x61, 0x63, 0x75, 0x75, + 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, + 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, + 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, + 0x70, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, + 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, + 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, + 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, + 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 
0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, - 0x70, 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x77, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, - 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, + 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, + 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, + 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x71, 
0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, + 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, + 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x77, + 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, 0x79, - 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, - 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, 0x79, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, + 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, + 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, @@ -5686,7 +5948,7 @@ func file_volume_server_proto_rawDescGZIP() []byte { return file_volume_server_proto_rawDescData } -var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 86) +var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 90) var file_volume_server_proto_goTypes = []interface{}{ (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse @@ -5726,68 
+5988,72 @@ var file_volume_server_proto_goTypes = []interface{}{ (*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse (*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest (*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse - (*VolumeTailSenderRequest)(nil), // 38: volume_server_pb.VolumeTailSenderRequest - (*VolumeTailSenderResponse)(nil), // 39: volume_server_pb.VolumeTailSenderResponse - (*VolumeTailReceiverRequest)(nil), // 40: volume_server_pb.VolumeTailReceiverRequest - (*VolumeTailReceiverResponse)(nil), // 41: volume_server_pb.VolumeTailReceiverResponse - (*VolumeEcShardsGenerateRequest)(nil), // 42: volume_server_pb.VolumeEcShardsGenerateRequest - (*VolumeEcShardsGenerateResponse)(nil), // 43: volume_server_pb.VolumeEcShardsGenerateResponse - (*VolumeEcShardsRebuildRequest)(nil), // 44: volume_server_pb.VolumeEcShardsRebuildRequest - (*VolumeEcShardsRebuildResponse)(nil), // 45: volume_server_pb.VolumeEcShardsRebuildResponse - (*VolumeEcShardsCopyRequest)(nil), // 46: volume_server_pb.VolumeEcShardsCopyRequest - (*VolumeEcShardsCopyResponse)(nil), // 47: volume_server_pb.VolumeEcShardsCopyResponse - (*VolumeEcShardsDeleteRequest)(nil), // 48: volume_server_pb.VolumeEcShardsDeleteRequest - (*VolumeEcShardsDeleteResponse)(nil), // 49: volume_server_pb.VolumeEcShardsDeleteResponse - (*VolumeEcShardsMountRequest)(nil), // 50: volume_server_pb.VolumeEcShardsMountRequest - (*VolumeEcShardsMountResponse)(nil), // 51: volume_server_pb.VolumeEcShardsMountResponse - (*VolumeEcShardsUnmountRequest)(nil), // 52: volume_server_pb.VolumeEcShardsUnmountRequest - (*VolumeEcShardsUnmountResponse)(nil), // 53: volume_server_pb.VolumeEcShardsUnmountResponse - (*VolumeEcShardReadRequest)(nil), // 54: volume_server_pb.VolumeEcShardReadRequest - (*VolumeEcShardReadResponse)(nil), // 55: volume_server_pb.VolumeEcShardReadResponse - (*VolumeEcBlobDeleteRequest)(nil), // 56: volume_server_pb.VolumeEcBlobDeleteRequest - (*VolumeEcBlobDeleteResponse)(nil), // 57: volume_server_pb.VolumeEcBlobDeleteResponse - (*VolumeEcShardsToVolumeRequest)(nil), // 58: volume_server_pb.VolumeEcShardsToVolumeRequest - (*VolumeEcShardsToVolumeResponse)(nil), // 59: volume_server_pb.VolumeEcShardsToVolumeResponse - (*ReadVolumeFileStatusRequest)(nil), // 60: volume_server_pb.ReadVolumeFileStatusRequest - (*ReadVolumeFileStatusResponse)(nil), // 61: volume_server_pb.ReadVolumeFileStatusResponse - (*DiskStatus)(nil), // 62: volume_server_pb.DiskStatus - (*MemStatus)(nil), // 63: volume_server_pb.MemStatus - (*RemoteFile)(nil), // 64: volume_server_pb.RemoteFile - (*VolumeInfo)(nil), // 65: volume_server_pb.VolumeInfo - (*VolumeTierMoveDatToRemoteRequest)(nil), // 66: volume_server_pb.VolumeTierMoveDatToRemoteRequest - (*VolumeTierMoveDatToRemoteResponse)(nil), // 67: volume_server_pb.VolumeTierMoveDatToRemoteResponse - (*VolumeTierMoveDatFromRemoteRequest)(nil), // 68: volume_server_pb.VolumeTierMoveDatFromRemoteRequest - (*VolumeTierMoveDatFromRemoteResponse)(nil), // 69: volume_server_pb.VolumeTierMoveDatFromRemoteResponse - (*VolumeServerStatusRequest)(nil), // 70: volume_server_pb.VolumeServerStatusRequest - (*VolumeServerStatusResponse)(nil), // 71: volume_server_pb.VolumeServerStatusResponse - (*VolumeServerLeaveRequest)(nil), // 72: volume_server_pb.VolumeServerLeaveRequest - (*VolumeServerLeaveResponse)(nil), // 73: volume_server_pb.VolumeServerLeaveResponse - (*QueryRequest)(nil), // 74: volume_server_pb.QueryRequest - (*QueriedStripe)(nil), // 75: 
volume_server_pb.QueriedStripe - (*VolumeNeedleStatusRequest)(nil), // 76: volume_server_pb.VolumeNeedleStatusRequest - (*VolumeNeedleStatusResponse)(nil), // 77: volume_server_pb.VolumeNeedleStatusResponse - (*QueryRequest_Filter)(nil), // 78: volume_server_pb.QueryRequest.Filter - (*QueryRequest_InputSerialization)(nil), // 79: volume_server_pb.QueryRequest.InputSerialization - (*QueryRequest_OutputSerialization)(nil), // 80: volume_server_pb.QueryRequest.OutputSerialization - (*QueryRequest_InputSerialization_CSVInput)(nil), // 81: volume_server_pb.QueryRequest.InputSerialization.CSVInput - (*QueryRequest_InputSerialization_JSONInput)(nil), // 82: volume_server_pb.QueryRequest.InputSerialization.JSONInput - (*QueryRequest_InputSerialization_ParquetInput)(nil), // 83: volume_server_pb.QueryRequest.InputSerialization.ParquetInput - (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 84: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 85: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + (*ReadNeedleBlobRequest)(nil), // 38: volume_server_pb.ReadNeedleBlobRequest + (*ReadNeedleBlobResponse)(nil), // 39: volume_server_pb.ReadNeedleBlobResponse + (*WriteNeedleBlobRequest)(nil), // 40: volume_server_pb.WriteNeedleBlobRequest + (*WriteNeedleBlobResponse)(nil), // 41: volume_server_pb.WriteNeedleBlobResponse + (*VolumeTailSenderRequest)(nil), // 42: volume_server_pb.VolumeTailSenderRequest + (*VolumeTailSenderResponse)(nil), // 43: volume_server_pb.VolumeTailSenderResponse + (*VolumeTailReceiverRequest)(nil), // 44: volume_server_pb.VolumeTailReceiverRequest + (*VolumeTailReceiverResponse)(nil), // 45: volume_server_pb.VolumeTailReceiverResponse + (*VolumeEcShardsGenerateRequest)(nil), // 46: volume_server_pb.VolumeEcShardsGenerateRequest + (*VolumeEcShardsGenerateResponse)(nil), // 47: volume_server_pb.VolumeEcShardsGenerateResponse + (*VolumeEcShardsRebuildRequest)(nil), // 48: volume_server_pb.VolumeEcShardsRebuildRequest + (*VolumeEcShardsRebuildResponse)(nil), // 49: volume_server_pb.VolumeEcShardsRebuildResponse + (*VolumeEcShardsCopyRequest)(nil), // 50: volume_server_pb.VolumeEcShardsCopyRequest + (*VolumeEcShardsCopyResponse)(nil), // 51: volume_server_pb.VolumeEcShardsCopyResponse + (*VolumeEcShardsDeleteRequest)(nil), // 52: volume_server_pb.VolumeEcShardsDeleteRequest + (*VolumeEcShardsDeleteResponse)(nil), // 53: volume_server_pb.VolumeEcShardsDeleteResponse + (*VolumeEcShardsMountRequest)(nil), // 54: volume_server_pb.VolumeEcShardsMountRequest + (*VolumeEcShardsMountResponse)(nil), // 55: volume_server_pb.VolumeEcShardsMountResponse + (*VolumeEcShardsUnmountRequest)(nil), // 56: volume_server_pb.VolumeEcShardsUnmountRequest + (*VolumeEcShardsUnmountResponse)(nil), // 57: volume_server_pb.VolumeEcShardsUnmountResponse + (*VolumeEcShardReadRequest)(nil), // 58: volume_server_pb.VolumeEcShardReadRequest + (*VolumeEcShardReadResponse)(nil), // 59: volume_server_pb.VolumeEcShardReadResponse + (*VolumeEcBlobDeleteRequest)(nil), // 60: volume_server_pb.VolumeEcBlobDeleteRequest + (*VolumeEcBlobDeleteResponse)(nil), // 61: volume_server_pb.VolumeEcBlobDeleteResponse + (*VolumeEcShardsToVolumeRequest)(nil), // 62: volume_server_pb.VolumeEcShardsToVolumeRequest + (*VolumeEcShardsToVolumeResponse)(nil), // 63: volume_server_pb.VolumeEcShardsToVolumeResponse + (*ReadVolumeFileStatusRequest)(nil), // 64: volume_server_pb.ReadVolumeFileStatusRequest + (*ReadVolumeFileStatusResponse)(nil), // 65: 
volume_server_pb.ReadVolumeFileStatusResponse + (*DiskStatus)(nil), // 66: volume_server_pb.DiskStatus + (*MemStatus)(nil), // 67: volume_server_pb.MemStatus + (*RemoteFile)(nil), // 68: volume_server_pb.RemoteFile + (*VolumeInfo)(nil), // 69: volume_server_pb.VolumeInfo + (*VolumeTierMoveDatToRemoteRequest)(nil), // 70: volume_server_pb.VolumeTierMoveDatToRemoteRequest + (*VolumeTierMoveDatToRemoteResponse)(nil), // 71: volume_server_pb.VolumeTierMoveDatToRemoteResponse + (*VolumeTierMoveDatFromRemoteRequest)(nil), // 72: volume_server_pb.VolumeTierMoveDatFromRemoteRequest + (*VolumeTierMoveDatFromRemoteResponse)(nil), // 73: volume_server_pb.VolumeTierMoveDatFromRemoteResponse + (*VolumeServerStatusRequest)(nil), // 74: volume_server_pb.VolumeServerStatusRequest + (*VolumeServerStatusResponse)(nil), // 75: volume_server_pb.VolumeServerStatusResponse + (*VolumeServerLeaveRequest)(nil), // 76: volume_server_pb.VolumeServerLeaveRequest + (*VolumeServerLeaveResponse)(nil), // 77: volume_server_pb.VolumeServerLeaveResponse + (*QueryRequest)(nil), // 78: volume_server_pb.QueryRequest + (*QueriedStripe)(nil), // 79: volume_server_pb.QueriedStripe + (*VolumeNeedleStatusRequest)(nil), // 80: volume_server_pb.VolumeNeedleStatusRequest + (*VolumeNeedleStatusResponse)(nil), // 81: volume_server_pb.VolumeNeedleStatusResponse + (*QueryRequest_Filter)(nil), // 82: volume_server_pb.QueryRequest.Filter + (*QueryRequest_InputSerialization)(nil), // 83: volume_server_pb.QueryRequest.InputSerialization + (*QueryRequest_OutputSerialization)(nil), // 84: volume_server_pb.QueryRequest.OutputSerialization + (*QueryRequest_InputSerialization_CSVInput)(nil), // 85: volume_server_pb.QueryRequest.InputSerialization.CSVInput + (*QueryRequest_InputSerialization_JSONInput)(nil), // 86: volume_server_pb.QueryRequest.InputSerialization.JSONInput + (*QueryRequest_InputSerialization_ParquetInput)(nil), // 87: volume_server_pb.QueryRequest.InputSerialization.ParquetInput + (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 88: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 89: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput } var file_volume_server_proto_depIdxs = []int32{ 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult - 64, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile - 62, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus - 63, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus - 78, // 4: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter - 79, // 5: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization - 80, // 6: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization - 81, // 7: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput - 82, // 8: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput - 83, // 9: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput - 84, // 10: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> 
volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - 85, // 11: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + 68, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 66, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus + 67, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus + 82, // 4: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter + 83, // 5: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization + 84, // 6: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization + 85, // 7: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput + 86, // 8: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput + 87, // 9: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput + 88, // 10: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + 89, // 11: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput 0, // 12: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest 4, // 13: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest 6, // 14: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest @@ -5805,63 +6071,67 @@ var file_volume_server_proto_depIdxs = []int32{ 30, // 26: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest 32, // 27: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest 34, // 28: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest - 60, // 29: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest + 64, // 29: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest 36, // 30: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest - 38, // 31: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest - 40, // 32: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest - 42, // 33: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest - 44, // 34: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest - 46, // 35: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest - 48, // 36: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest - 50, // 37: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest - 52, // 38: 
volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest - 54, // 39: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest - 56, // 40: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest - 58, // 41: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest - 66, // 42: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest - 68, // 43: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest - 70, // 44: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest - 72, // 45: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest - 74, // 46: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest - 76, // 47: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest - 1, // 48: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse - 5, // 49: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse - 7, // 50: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse - 9, // 51: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse - 11, // 52: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse - 13, // 53: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse - 15, // 54: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse - 17, // 55: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse - 19, // 56: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse - 21, // 57: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse - 23, // 58: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse - 25, // 59: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse - 27, // 60: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse - 29, // 61: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse - 31, // 62: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse - 33, // 63: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse - 35, // 64: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse - 61, // 65: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse - 37, // 66: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse - 39, // 67: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse - 41, // 68: 
volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse - 43, // 69: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse - 45, // 70: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse - 47, // 71: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse - 49, // 72: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse - 51, // 73: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse - 53, // 74: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse - 55, // 75: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse - 57, // 76: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse - 59, // 77: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse - 67, // 78: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse - 69, // 79: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse - 71, // 80: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse - 73, // 81: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse - 75, // 82: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe - 77, // 83: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse - 48, // [48:84] is the sub-list for method output_type - 12, // [12:48] is the sub-list for method input_type + 38, // 31: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest + 40, // 32: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest + 42, // 33: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest + 44, // 34: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest + 46, // 35: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest + 48, // 36: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest + 50, // 37: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest + 52, // 38: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest + 54, // 39: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest + 56, // 40: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest + 58, // 41: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest + 60, // 42: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest + 62, // 43: 
volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest + 70, // 44: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest + 72, // 45: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 74, // 46: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest + 76, // 47: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest + 78, // 48: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 80, // 49: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest + 1, // 50: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 5, // 51: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 7, // 52: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 9, // 53: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 11, // 54: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 13, // 55: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse + 15, // 56: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 17, // 57: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 19, // 58: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 21, // 59: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 23, // 60: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 25, // 61: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 27, // 62: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 29, // 63: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse + 31, // 64: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 33, // 65: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse + 35, // 66: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 65, // 67: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 37, // 68: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 39, // 69: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse + 41, // 70: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse + 43, // 71: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 45, // 72: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 47, // 73: 
volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 49, // 74: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 51, // 75: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse + 53, // 76: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse + 55, // 77: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse + 57, // 78: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse + 59, // 79: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse + 61, // 80: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse + 63, // 81: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse + 71, // 82: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse + 73, // 83: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse + 75, // 84: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse + 77, // 85: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse + 79, // 86: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe + 81, // 87: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse + 50, // [50:88] is the sub-list for method output_type + 12, // [12:50] is the sub-list for method input_type 12, // [12:12] is the sub-list for extension type_name 12, // [12:12] is the sub-list for extension extendee 0, // [0:12] is the sub-list for field type_name @@ -6330,7 +6600,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeTailSenderRequest); i { + switch v := v.(*ReadNeedleBlobRequest); i { case 0: return &v.state case 1: @@ -6342,7 +6612,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeTailSenderResponse); i { + switch v := v.(*ReadNeedleBlobResponse); i { case 0: return &v.state case 1: @@ -6354,7 +6624,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeTailReceiverRequest); i { + switch v := v.(*WriteNeedleBlobRequest); i { case 0: return &v.state case 1: @@ -6366,7 +6636,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeTailReceiverResponse); i { + switch v := v.(*WriteNeedleBlobResponse); i { case 0: return &v.state case 1: @@ -6378,7 +6648,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsGenerateRequest); i { + switch v := v.(*VolumeTailSenderRequest); i { case 0: return &v.state case 1: @@ -6390,7 +6660,7 @@ 
func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsGenerateResponse); i { + switch v := v.(*VolumeTailSenderResponse); i { case 0: return &v.state case 1: @@ -6402,7 +6672,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsRebuildRequest); i { + switch v := v.(*VolumeTailReceiverRequest); i { case 0: return &v.state case 1: @@ -6414,7 +6684,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsRebuildResponse); i { + switch v := v.(*VolumeTailReceiverResponse); i { case 0: return &v.state case 1: @@ -6426,7 +6696,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsCopyRequest); i { + switch v := v.(*VolumeEcShardsGenerateRequest); i { case 0: return &v.state case 1: @@ -6438,7 +6708,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsCopyResponse); i { + switch v := v.(*VolumeEcShardsGenerateResponse); i { case 0: return &v.state case 1: @@ -6450,7 +6720,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsDeleteRequest); i { + switch v := v.(*VolumeEcShardsRebuildRequest); i { case 0: return &v.state case 1: @@ -6462,7 +6732,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsDeleteResponse); i { + switch v := v.(*VolumeEcShardsRebuildResponse); i { case 0: return &v.state case 1: @@ -6474,7 +6744,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsMountRequest); i { + switch v := v.(*VolumeEcShardsCopyRequest); i { case 0: return &v.state case 1: @@ -6486,7 +6756,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsMountResponse); i { + switch v := v.(*VolumeEcShardsCopyResponse); i { case 0: return &v.state case 1: @@ -6498,7 +6768,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsUnmountRequest); i { + switch v := v.(*VolumeEcShardsDeleteRequest); i { case 0: return &v.state case 1: @@ -6510,7 +6780,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsUnmountResponse); i { + switch v := v.(*VolumeEcShardsDeleteResponse); i { case 0: return &v.state case 1: @@ -6522,7 +6792,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardReadRequest); i { + switch v := v.(*VolumeEcShardsMountRequest); i { case 0: return &v.state case 1: @@ -6534,7 +6804,7 @@ func file_volume_server_proto_init() { } } 
file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardReadResponse); i { + switch v := v.(*VolumeEcShardsMountResponse); i { case 0: return &v.state case 1: @@ -6546,7 +6816,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcBlobDeleteRequest); i { + switch v := v.(*VolumeEcShardsUnmountRequest); i { case 0: return &v.state case 1: @@ -6558,7 +6828,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcBlobDeleteResponse); i { + switch v := v.(*VolumeEcShardsUnmountResponse); i { case 0: return &v.state case 1: @@ -6570,7 +6840,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsToVolumeRequest); i { + switch v := v.(*VolumeEcShardReadRequest); i { case 0: return &v.state case 1: @@ -6582,7 +6852,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeEcShardsToVolumeResponse); i { + switch v := v.(*VolumeEcShardReadResponse); i { case 0: return &v.state case 1: @@ -6594,7 +6864,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadVolumeFileStatusRequest); i { + switch v := v.(*VolumeEcBlobDeleteRequest); i { case 0: return &v.state case 1: @@ -6606,7 +6876,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadVolumeFileStatusResponse); i { + switch v := v.(*VolumeEcBlobDeleteResponse); i { case 0: return &v.state case 1: @@ -6618,7 +6888,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DiskStatus); i { + switch v := v.(*VolumeEcShardsToVolumeRequest); i { case 0: return &v.state case 1: @@ -6630,7 +6900,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MemStatus); i { + switch v := v.(*VolumeEcShardsToVolumeResponse); i { case 0: return &v.state case 1: @@ -6642,7 +6912,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoteFile); i { + switch v := v.(*ReadVolumeFileStatusRequest); i { case 0: return &v.state case 1: @@ -6654,7 +6924,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeInfo); i { + switch v := v.(*ReadVolumeFileStatusResponse); i { case 0: return &v.state case 1: @@ -6666,7 +6936,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeTierMoveDatToRemoteRequest); i { + switch v := v.(*DiskStatus); i { case 0: return &v.state case 1: @@ -6678,7 +6948,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*VolumeTierMoveDatToRemoteResponse); i { + switch v := v.(*MemStatus); i { case 0: return &v.state case 1: @@ -6690,7 +6960,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i { + switch v := v.(*RemoteFile); i { case 0: return &v.state case 1: @@ -6702,7 +6972,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i { + switch v := v.(*VolumeInfo); i { case 0: return &v.state case 1: @@ -6714,7 +6984,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeServerStatusRequest); i { + switch v := v.(*VolumeTierMoveDatToRemoteRequest); i { case 0: return &v.state case 1: @@ -6726,7 +6996,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeServerStatusResponse); i { + switch v := v.(*VolumeTierMoveDatToRemoteResponse); i { case 0: return &v.state case 1: @@ -6738,7 +7008,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeServerLeaveRequest); i { + switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i { case 0: return &v.state case 1: @@ -6750,7 +7020,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeServerLeaveResponse); i { + switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i { case 0: return &v.state case 1: @@ -6762,7 +7032,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest); i { + switch v := v.(*VolumeServerStatusRequest); i { case 0: return &v.state case 1: @@ -6774,7 +7044,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueriedStripe); i { + switch v := v.(*VolumeServerStatusResponse); i { case 0: return &v.state case 1: @@ -6786,7 +7056,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeNeedleStatusRequest); i { + switch v := v.(*VolumeServerLeaveRequest); i { case 0: return &v.state case 1: @@ -6798,7 +7068,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VolumeNeedleStatusResponse); i { + switch v := v.(*VolumeServerLeaveResponse); i { case 0: return &v.state case 1: @@ -6810,7 +7080,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest_Filter); i { + switch v := v.(*QueryRequest); i { case 0: return &v.state case 1: @@ -6822,7 +7092,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest_InputSerialization); i { + switch v := v.(*QueriedStripe); i { case 0: return &v.state case 1: @@ -6834,7 +7104,7 @@ func 
file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest_OutputSerialization); i { + switch v := v.(*VolumeNeedleStatusRequest); i { case 0: return &v.state case 1: @@ -6846,7 +7116,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest_InputSerialization_CSVInput); i { + switch v := v.(*VolumeNeedleStatusResponse); i { case 0: return &v.state case 1: @@ -6858,7 +7128,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest_InputSerialization_JSONInput); i { + switch v := v.(*QueryRequest_Filter); i { case 0: return &v.state case 1: @@ -6870,7 +7140,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i { + switch v := v.(*QueryRequest_InputSerialization); i { case 0: return &v.state case 1: @@ -6882,7 +7152,7 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i { + switch v := v.(*QueryRequest_OutputSerialization); i { case 0: return &v.state case 1: @@ -6894,6 +7164,54 @@ func file_volume_server_proto_init() { } } file_volume_server_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_CSVInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_JSONInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i { case 0: return &v.state @@ -6912,7 +7230,7 @@ func file_volume_server_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_volume_server_proto_rawDesc, NumEnums: 0, - NumMessages: 86, + NumMessages: 90, NumExtensions: 0, NumServices: 1, }, @@ -6959,6 +7277,8 @@ type VolumeServerClient interface { VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) + 
ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) + WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) // erasure coding @@ -7206,6 +7526,24 @@ func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { return m, nil } +func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) { + out := new(ReadNeedleBlobResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleBlob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) { + out := new(WriteNeedleBlobResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) if err != nil { @@ -7497,6 +7835,8 @@ type VolumeServerServer interface { VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error + ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) + WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) // erasure coding @@ -7580,6 +7920,12 @@ func (*UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *R func (*UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error { return status.Errorf(codes.Unimplemented, "method CopyFile not implemented") } +func (*UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented") +} +func (*UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented") +} func (*UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented") } @@ -7984,6 +8330,42 @@ func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { return x.ServerStream.SendMsg(m) } +func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadNeedleBlobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).ReadNeedleBlob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleBlob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).ReadNeedleBlob(ctx, req.(*ReadNeedleBlobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteNeedleBlobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).WriteNeedleBlob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/WriteNeedleBlob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).WriteNeedleBlob(ctx, req.(*WriteNeedleBlobRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTailSenderRequest) if err := stream.RecvMsg(m); err != nil { @@ -8377,6 +8759,14 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "ReadVolumeFileStatus", Handler: _VolumeServer_ReadVolumeFileStatus_Handler, }, + { + MethodName: "ReadNeedleBlob", + Handler: _VolumeServer_ReadNeedleBlob_Handler, + }, + { + MethodName: "WriteNeedleBlob", + Handler: _VolumeServer_WriteNeedleBlob_Handler, + }, { MethodName: "VolumeTailReceiver", Handler: _VolumeServer_VolumeTailReceiver_Handler, diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go index f642bb801..519a9a201 100644 --- a/weed/replication/repl_util/replication_util.go +++ b/weed/replication/repl_util/replication_util.go @@ -20,7 +20,7 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer var shouldRetry bool for _, fileUrl := range fileUrls { - shouldRetry, err = util.FastReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { + shouldRetry, err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { writeErr = writeFunc(data) }) if err != nil { diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 865f1b25c..d13a1049b 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -42,7 +42,7 @@ func (g *AzureSink) IsIncremental() bool { } func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error { - g.isIncremental = configuration.GetBool(prefix+"is_incremental") + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( configuration.GetString(prefix+"account_name"), configuration.GetString(prefix+"account_key"), diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 8738231d5..90a0bb2e8 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -14,10 +14,10 @@ import ( ) type B2Sink struct { - client *b2.Client - bucket string - 
dir string - filerSource *source.FilerSource + client *b2.Client + bucket string + dir string + filerSource *source.FilerSource isIncremental bool } @@ -38,7 +38,7 @@ func (g *B2Sink) IsIncremental() bool { } func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error { - g.isIncremental = configuration.GetBool(prefix+"is_incremental") + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( configuration.GetString(prefix+"b2_account_id"), configuration.GetString(prefix+"b2_master_application_key"), diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 4165e87be..d7c5fccc3 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -30,7 +30,7 @@ type FilerSink struct { grpcDialOption grpc.DialOption address string writeChunkByFiler bool - isIncremental bool + isIncremental bool } func init() { @@ -50,7 +50,7 @@ func (fs *FilerSink) IsIncremental() bool { } func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { - fs.isIncremental = configuration.GetBool(prefix+"is_incremental") + fs.isIncremental = configuration.GetBool(prefix + "is_incremental") return fs.DoInitialize( "", configuration.GetString(prefix+"grpcAddress"), diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index 02f482862..5cf5b7317 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -18,10 +18,10 @@ import ( ) type GcsSink struct { - client *storage.Client - bucket string - dir string - filerSource *source.FilerSource + client *storage.Client + bucket string + dir string + filerSource *source.FilerSource isIncremental bool } @@ -42,7 +42,7 @@ func (g *GcsSink) IsIncremental() bool { } func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error { - g.isIncremental = configuration.GetBool(prefix+"is_incremental") + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( configuration.GetString(prefix+"google_application_credentials"), configuration.GetString(prefix+"bucket"), diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index bf1ad9b76..3dde52616 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -24,7 +24,7 @@ func (s3sink *S3Sink) deleteObject(key string) error { result, err := s3sink.conn.DeleteObject(input) if err == nil { - glog.V(0).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err) } @@ -43,7 +43,7 @@ func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) ( result, err := s3sink.conn.CreateMultipartUpload(input) if err == nil { - glog.V(0).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err) return "", err @@ -94,7 +94,7 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId result, err := s3sink.conn.CompleteMultipartUpload(input) if err == nil { - glog.V(1).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] completeMultipartUpload %s: %v", 
s3sink.bucket, key, result) } else { glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) return fmt.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) @@ -123,7 +123,7 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer. result, err := s3sink.conn.UploadPart(input) if err == nil { - glog.V(0).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result) + glog.V(2).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result) } else { glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err) } diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go index 5ef7439c8..0df26e6fc 100644 --- a/weed/s3api/auth_signature_v4.go +++ b/weed/s3api/auth_signature_v4.go @@ -24,6 +24,7 @@ import ( "crypto/subtle" "encoding/hex" "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "io/ioutil" "net/http" "net/url" "regexp" @@ -132,6 +133,17 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r // Query string. queryStr := req.URL.Query().Encode() + // Get hashed Payload + if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil { + buf, _ := ioutil.ReadAll(r.Body) + r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) + b, _ := ioutil.ReadAll(bytes.NewBuffer(buf)) + if len(b) != 0 { + bodyHash := sha256.Sum256(b) + hashedPayload = hex.EncodeToString(bodyHash[:]) + } + } + // Get canonical request. canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) @@ -139,7 +151,10 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region) + signingKey := getSigningKey(cred.SecretKey, + signV4Values.Credential.scope.date, + signV4Values.Credential.scope.region, + signV4Values.Credential.scope.service) // Calculate signature. newSignature := getSignature(signingKey, stringToSign) @@ -310,7 +325,7 @@ func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http. } // Get signing key. - signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region) + signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service) // Get signature. newSignature := getSignature(signingKey, formValues.Get("Policy")) @@ -427,7 +442,10 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) // Get hmac presigned signing key. - presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region) + presignedSigningKey := getSigningKey(cred.SecretKey, + pSignValues.Credential.scope.date, + pSignValues.Credential.scope.region, + pSignValues.Credential.scope.service) // Get new signature. newSignature := getSignature(presignedSigningKey, presignedStringToSign) @@ -655,11 +673,11 @@ func sumHMAC(key []byte, data []byte) []byte { } // getSigningKey hmac seed to calculate final signature. 
-func getSigningKey(secretKey string, t time.Time, region string) []byte { +func getSigningKey(secretKey string, t time.Time, region string, service string) []byte { date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) regionBytes := sumHMAC(date, []byte(region)) - service := sumHMAC(regionBytes, []byte("s3")) - signingKey := sumHMAC(service, []byte("aws4_request")) + serviceBytes := sumHMAC(regionBytes, []byte(service)) + signingKey := sumHMAC(serviceBytes, []byte("aws4_request")) return signingKey } diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go index 4c8255768..b47cd5f2d 100644 --- a/weed/s3api/auto_signature_v4_test.go +++ b/weed/s3api/auto_signature_v4_test.go @@ -370,7 +370,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i queryStr := strings.Replace(query.Encode(), "+", "%20", -1) canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) stringToSign := getStringToSign(canonicalRequest, date, scope) - signingKey := getSigningKey(secretAccessKey, date, region) + signingKey := getSigningKey(secretAccessKey, date, region, "s3") signature := getSignature(signingKey, stringToSign) req.URL.RawQuery = query.Encode() diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index 734c9faee..b163ec2f6 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -45,7 +45,7 @@ func getChunkSignature(secretKey string, seedSignature string, region string, da hashedChunk // Get hmac signing key. - signingKey := getSigningKey(secretKey, date, region) + signingKey := getSigningKey(secretKey, date, region, "s3") // Calculate signature. newSignature := getSignature(signingKey, stringToSign) @@ -117,7 +117,7 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region) + signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, "s3") // Calculate signature. 
newSignature := getSignature(signingKey, stringToSign) diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 3626ece98..1803332a3 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -31,6 +31,10 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl return nil }, startFrom, inclusive, limit) + if len(entries) == 0 { + isLast = true + } + return } @@ -75,6 +79,12 @@ func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isD } +func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) { + + return filer_pb.Touch(s3a, parentDirectoryPath, entryName, entry) + +} + func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) { fullPath := util.NewFullPath(parentDirectoryPath, entryName) return filer_pb.GetEntry(s3a, fullPath) diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 338f82668..48e8cb047 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -51,7 +51,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques var buckets []*s3.Bucket for _, entry := range entries { if entry.IsDirectory { - if identity != nil && !identity.canDo(s3_constants.ACTION_ADMIN, entry.Name) { + if identity != nil && !identity.canDo(s3_constants.ACTION_LIST, entry.Name) { continue } buckets = append(buckets, &s3.Bucket{ diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go index ca578e7e5..84a85fd78 100644 --- a/weed/s3api/s3api_object_copy_handlers.go +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + weed_server "github.com/chrislusf/seaweedfs/weed/server" "net/http" "net/url" "strconv" @@ -25,6 +26,26 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request } srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + + if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && isReplace(r) { + fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)) + dir, name := fullPath.DirAndName() + entry, err := s3a.getEntry(dir, name) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + } + entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r)) + err = s3a.touch(dir, name, entry) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + } + writeSuccessResponseXML(w, encodeResponse(CopyObjectResult{ + ETag: fmt.Sprintf("%x", entry.Attributes.Md5), + LastModified: time.Now().UTC(), + })) + return + } + // If source object is empty or bucket is empty, reply back invalid copy source. 
if srcObject == "" || srcBucket == "" { writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) @@ -32,7 +53,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request } if srcBucket == dstBucket && srcObject == dstObject { - writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + writeErrorResponse(w, s3err.ErrInvalidCopyDest, r.URL) return } @@ -147,3 +168,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req writeSuccessResponseXML(w, encodeResponse(response)) } + +func isReplace(r *http.Request) bool { + return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" +} diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 19d85c495..f1a539ac5 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -5,9 +5,11 @@ import ( "encoding/json" "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" "io" "io/ioutil" "net/http" + "net/url" "sort" "strings" @@ -69,7 +71,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) return } } else { - uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) @@ -84,6 +86,14 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) writeSuccessResponseEmpty(w) } +func urlPathEscape(object string) string { + var escapedParts []string + for _, part := range strings.Split(object, "/") { + escapedParts = append(escapedParts, url.PathEscape(part)) + } + return strings.Join(escapedParts, "/") +} + func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) { bucket, object := getBucketAndObject(r) @@ -94,7 +104,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) } destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) s3a.proxyToFiler(w, r, destUrl, passThroughResponse) @@ -105,7 +115,7 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request bucket, object := getBucketAndObject(r) destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) s3a.proxyToFiler(w, r, destUrl, passThroughResponse) @@ -116,7 +126,7 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque bucket, object := getBucketAndObject(r) destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) { for k, v := range proxyResponse.Header { @@ -196,6 +206,8 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h if err == nil { directoriesWithDeletion[parentDirectoryPath]++ deletedObjects = append(deletedObjects, object) + } else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) { + deletedObjects = append(deletedObjects, object) } else { delete(directoriesWithDeletion, parentDirectoryPath) deleteErrors = append(deleteErrors, DeleteError{ @@ -299,7 +311,7 @@ func (s3a 
*S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des } defer util.CloseResponse(resp) - if (resp.ContentLength == -1 || resp.StatusCode == 404) && !strings.HasSuffix(destUrl, "/") { + if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 { if r.Method != "DELETE" { writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) return @@ -314,7 +326,11 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) { for k, v := range proxyResponse.Header { w.Header()[k] = v } - w.WriteHeader(proxyResponse.StatusCode) + if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 { + w.WriteHeader(http.StatusPartialContent) + } else { + w.WriteHeader(proxyResponse.StatusCode) + } io.Copy(w, proxyResponse.Body) } diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go index 044e732db..035302ae6 100644 --- a/weed/s3api/s3api_object_handlers_postpolicy.go +++ b/weed/s3api/s3api_object_handlers_postpolicy.go @@ -110,7 +110,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R } } - uploadUrl := fmt.Sprintf("http://%s%s/%s/%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody) diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index 2d36c6ec9..739cdd8f9 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -206,7 +206,6 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d isTruncated = isTruncated || subIsTruncated maxKeys -= subCounter nextMarker = subDir + "/" + subNextMarker - counter += subCounter // finished processing this sub directory marker = subDir } diff --git a/weed/security/tls.go b/weed/security/tls.go index b4bf84e2d..7d3ffcdca 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -1,10 +1,16 @@ package security import ( + "context" "crypto/tls" "crypto/x509" "github.com/chrislusf/seaweedfs/weed/util" + grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "io/ioutil" + "strings" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -12,21 +18,29 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" ) -func LoadServerTLS(config *util.ViperProxy, component string) grpc.ServerOption { +type Authenticator struct { + AllowedWildcardDomain string + AllowedCommonNames map[string]bool +} + +func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption, grpc.ServerOption) { if config == nil { - return nil + return nil, nil } // load cert/key, ca cert cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) if err != nil { - glog.V(1).Infof("load cert/key error: %v", err) - return nil + glog.V(1).Infof("load cert: %s / key: %s error: %v", + config.GetString(component+".cert"), + config.GetString(component+".key"), + err) + return nil, nil } caCert, err := ioutil.ReadFile(config.GetString("grpc.ca")) if err != nil { - glog.V(1).Infof("read ca cert file error: %v", err) - return nil + glog.V(1).Infof("read ca cert file %s error: %v", config.GetString("grpc.ca"), err) + return nil, nil } caCertPool := x509.NewCertPool() 
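+	// Client certificates are verified against this CA pool (ClientAuth is tls.RequireAndVerifyClientCert); the common-name checks farther down in this function run on top of that TLS-level verification.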
caCertPool.AppendCertsFromPEM(caCert) @@ -36,7 +50,20 @@ func LoadServerTLS(config *util.ViperProxy, component string) grpc.ServerOption ClientAuth: tls.RequireAndVerifyClientCert, }) - return grpc.Creds(ta) + allowedCommonNames := config.GetString(component + ".allowed_commonNames") + allowedWildcardDomain := config.GetString("grpc.allowed_wildcard_domain") + if allowedCommonNames != "" || allowedWildcardDomain != "" { + allowedCommonNamesMap := make(map[string]bool) + for _, s := range strings.Split(allowedCommonNames, ",") { + allowedCommonNamesMap[s] = true + } + auther := Authenticator{ + AllowedCommonNames: allowedCommonNamesMap, + AllowedWildcardDomain: allowedWildcardDomain, + } + return grpc.Creds(ta), grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(auther.Authenticate)) + } + return grpc.Creds(ta), nil } func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { @@ -70,3 +97,28 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { }) return grpc.WithTransportCredentials(ta) } + +func (a Authenticator) Authenticate(ctx context.Context) (newCtx context.Context, err error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Error(codes.Unauthenticated, "no peer found") + } + + tlsAuth, ok := p.AuthInfo.(credentials.TLSInfo) + if !ok { + return ctx, status.Error(codes.Unauthenticated, "unexpected peer transport credentials") + } + if len(tlsAuth.State.VerifiedChains) == 0 || len(tlsAuth.State.VerifiedChains[0]) == 0 { + return ctx, status.Error(codes.Unauthenticated, "could not verify peer certificate") + } + + commonName := tlsAuth.State.VerifiedChains[0][0].Subject.CommonName + if a.AllowedWildcardDomain != "" && strings.HasSuffix(commonName, a.AllowedWildcardDomain) { + return ctx, nil + } + if _, ok := a.AllowedCommonNames[commonName]; ok { + return ctx, nil + } + + return ctx, status.Errorf(codes.Unauthenticated, "invalid subject common name: %s", commonName) +} diff --git a/weed/sequence/snowflake_sequencer.go b/weed/sequence/snowflake_sequencer.go new file mode 100644 index 000000000..300449fa0 --- /dev/null +++ b/weed/sequence/snowflake_sequencer.go @@ -0,0 +1,46 @@ +package sequence + +import ( + "fmt" + "hash/fnv" + + "github.com/bwmarrin/snowflake" + "github.com/chrislusf/seaweedfs/weed/glog" +) + +// a simple snowflake Sequencer +type SnowflakeSequencer struct { + node *snowflake.Node +} + +func NewSnowflakeSequencer(nodeid string) (*SnowflakeSequencer, error) { + nodeid_hash := hash(nodeid) & 0x3ff + glog.V(0).Infof("use snowflake seq id generator, nodeid:%s hex_of_nodeid: %x", nodeid, nodeid_hash) + node, err := snowflake.NewNode(int64(nodeid_hash)) + if err != nil { + fmt.Println(err) + return nil, err + } + + sequencer := &SnowflakeSequencer{node: node} + return sequencer, nil +} + +func hash(s string) uint32 { + h := fnv.New32a() + h.Write([]byte(s)) + return h.Sum32() +} + +func (m *SnowflakeSequencer) NextFileId(count uint64) uint64 { + return uint64(m.node.Generate().Int64()) +} + +// ignore setmax as we are snowflake +func (m *SnowflakeSequencer) SetMax(seenValue uint64) { +} + +// return a new id as no Peek is stored +func (m *SnowflakeSequencer) Peek() uint64 { + return uint64(m.node.Generate().Int64()) +} diff --git a/weed/server/common.go b/weed/server/common.go index 9001a3b33..5c5f1b8eb 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -234,12 +234,12 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file } } -func processRangeRequest(r 
*http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64, httpStatusCode int) error) { +func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { rangeReq := r.Header.Get("Range") if rangeReq == "" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := writeFn(w, 0, totalSize, 0); err != nil { + if err := writeFn(w, 0, totalSize); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -279,7 +279,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) w.Header().Set("Content-Range", ra.contentRange(totalSize)) - err = writeFn(w, ra.start, ra.length, http.StatusPartialContent) + err = writeFn(w, ra.start, ra.length) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -307,7 +307,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 pw.CloseWithError(e) return } - if e = writeFn(part, ra.start, ra.length, 0); e != nil { + if e = writeFn(part, ra.start, ra.length); e != nil { pw.CloseWithError(e) return } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index a4bb721ef..3821de6a9 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -63,7 +63,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file var listErr error for limit > 0 { var hasEntries bool - lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", func(entry *filer.Entry) bool { + lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", "", func(entry *filer.Entry) bool { hasEntries = true if err = stream.Send(&filer_pb.ListEntriesResponse{ Entry: &filer_pb.Entry{ diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index 5b68b64de..eadb970d5 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -33,8 +33,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err) } - var events MoveEvents - moveErr := fs.moveEntry(ctx, oldParent, oldEntry, newParent, req.NewName, &events) + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, newParent, req.NewName) if moveErr != nil { fs.filer.RollbackTransaction(ctx) return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, moveErr) @@ -48,11 +47,11 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom return &filer_pb.AtomicRenameEntryResponse{}, nil } -func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error { - if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error { + if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, 
func() error { if entry.IsDirectory() { - if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil { + if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName); err != nil { return err } } @@ -64,7 +63,7 @@ func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, e return nil } -func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error { currentDirPath := oldParent.Child(entry.Name()) newDirPath := newParent.Child(newName) @@ -75,7 +74,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util. includeLastFile := false for { - entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "") + entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "", "") if err != nil { return err } @@ -85,7 +84,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util. for _, item := range entries { lastFileName = item.Name() // println("processing", lastFileName) - err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events) + err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name()) if err != nil { return err } @@ -97,8 +96,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util. return nil } -func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, events *MoveEvents, - moveFolderSubEntries func() error) error { +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error) error { oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) @@ -122,8 +120,6 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat return createErr } - events.newEntries = append(events.newEntries, newEntry) - if moveFolderSubEntries != nil { if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil { return moveChildrenErr @@ -136,13 +132,6 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat return deleteErr } - events.oldEntries = append(events.oldEntries, entry) - return nil } - -type MoveEvents struct { - oldEntries []*filer.Entry - newEntries []*filer.Entry -} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 22474a5e2..2734223ea 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -45,22 +45,23 @@ import ( ) type FilerOption struct { - Masters []string - Collection string - DefaultReplication string - DisableDirListing bool - MaxMB int - DirListingLimit int - DataCenter string - Rack string - DefaultLevelDbDir string - DisableHttp bool - Host string - Port uint32 - recursiveDelete bool - Cipher bool - SaveToFilerLimit int - Filers []string + Masters []string + Collection string + DefaultReplication string + DisableDirListing bool + MaxMB int + DirListingLimit int + DataCenter string + Rack string + DefaultLevelDbDir string + DisableHttp bool + Host string + Port uint32 + recursiveDelete bool + 
Cipher bool + SaveToFilerLimit int64 + Filers []string + ConcurrentUploadLimit int64 } type FilerServer struct { @@ -79,14 +80,18 @@ type FilerServer struct { brokers map[string]map[string]bool brokersLock sync.Mutex + + inFlightDataSize int64 + inFlightDataLimitCond *sync.Cond } func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) { fs = &FilerServer{ - option: option, - grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), - brokers: make(map[string]map[string]bool), + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), + brokers: make(map[string]map[string]bool), + inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)), } fs.listenersCond = sync.NewCond(&fs.listenersLock) @@ -153,7 +158,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) func (fs *FilerServer) checkWithMaster() { for _, master := range fs.option.Masters { - _, err := pb.ParseFilerGrpcAddress(master) + _, err := pb.ParseServerToGrpcAddress(master) if err != nil { glog.Fatalf("invalid master address %s: %v", master, err) } diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index 3bc0c5d0d..ed6bbb6f6 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -4,6 +4,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" "net/http" "strings" + "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/stats" @@ -47,18 +48,34 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { fs.DeleteHandler(w, r) } stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) - case "PUT": - stats.FilerRequestCounter.WithLabelValues("put").Inc() - if _, ok := r.URL.Query()["tagging"]; ok { - fs.PutTaggingHandler(w, r) - } else { - fs.PostHandler(w, r) + case "POST", "PUT": + + // wait until in flight data is less than the limit + contentLength := getContentLength(r) + fs.inFlightDataLimitCond.L.Lock() + for atomic.LoadInt64(&fs.inFlightDataSize) > fs.option.ConcurrentUploadLimit { + fs.inFlightDataLimitCond.Wait() + } + atomic.AddInt64(&fs.inFlightDataSize, contentLength) + fs.inFlightDataLimitCond.L.Unlock() + defer func() { + atomic.AddInt64(&fs.inFlightDataSize, -contentLength) + fs.inFlightDataLimitCond.Signal() + }() + + if r.Method == "PUT" { + stats.FilerRequestCounter.WithLabelValues("put").Inc() + if _, ok := r.URL.Query()["tagging"]; ok { + fs.PutTaggingHandler(w, r) + } else { + fs.PostHandler(w, r, contentLength) + } + stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) + } else { // method == "POST" + stats.FilerRequestCounter.WithLabelValues("post").Inc() + fs.PostHandler(w, r, contentLength) + stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) } - stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) - case "POST": - stats.FilerRequestCounter.WithLabelValues("post").Inc() - fs.PostHandler(w, r) - stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) case "OPTIONS": stats.FilerRequestCounter.WithLabelValues("options").Inc() OptionsHandler(w, r, false) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index e210f4bdf..6bc09e953 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -79,7 +79,7 @@ func (fs 
*FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat)) if r.Header.Get("If-Modified-Since") != "" { if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil { - if t.After(entry.Attr.Mtime) { + if !t.Before(entry.Attr.Mtime) { w.WriteHeader(http.StatusNotModified) return } @@ -131,6 +131,9 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true) + }) return } @@ -150,16 +153,15 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, } } - processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error { - if httpStatusCode != 0 { - w.WriteHeader(httpStatusCode) - } + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { if offset+size <= int64(len(entry.Content)) { _, err := writer.Write(entry.Content[offset : offset+size]) - glog.Errorf("failed to write entry content: %v", err) + if err != nil { + glog.Errorf("failed to write entry content: %v", err) + } return err } - return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size) + return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false) }) } diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 9cf79ab41..307c411b6 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -35,8 +35,9 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName := r.FormValue("lastFileName") namePattern := r.FormValue("namePattern") + namePatternExclude := r.FormValue("namePatternExclude") - entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern) + entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 0ce3e4a58..95eba9d3d 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -52,7 +52,7 @@ func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, u return } -func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { +func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, contentLength int64) { ctx := context.Background() @@ -66,7 +66,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { query.Get("rack"), ) - fs.autoChunk(ctx, w, r, so) + fs.autoChunk(ctx, w, r, contentLength, so) util.CloseRequest(r) } diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index d3ce7e605..c4f10d94e 100644 --- 
a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -2,11 +2,8 @@ package weed_server import ( "context" - "crypto/md5" "fmt" - "hash" "io" - "io/ioutil" "net/http" "os" "path" @@ -19,13 +16,12 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) { +func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, contentLength int64, so *operation.StorageOption) { // autoChunking can be set at the command-line level or as a query param. Query param overrides command-line query := r.URL.Query() @@ -38,10 +34,10 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * chunkSize := 1024 * 1024 * maxMB - stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() + stats.FilerRequestCounter.WithLabelValues("chunk").Inc() start := time.Now() defer func() { - stats.FilerRequestHistogram.WithLabelValues("postAutoChunk").Observe(time.Since(start).Seconds()) + stats.FilerRequestHistogram.WithLabelValues("chunk").Observe(time.Since(start).Seconds()) }() var reply *FilerPostResult @@ -51,14 +47,16 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") { reply, err = fs.mkdir(ctx, w, r) } else { - reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, so) + reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, contentLength, so) } } else { - reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, so) + reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, contentLength, so) } if err != nil { if strings.HasPrefix(err.Error(), "read input:") { writeJsonError(w, r, 499, err) + } else if strings.HasSuffix(err.Error(), "is a file") { + writeJsonError(w, r, http.StatusConflict, err) } else { writeJsonError(w, r, http.StatusInternalServerError, err) } @@ -70,7 +68,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * } } -func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { +func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { @@ -91,7 +89,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite contentType = "" } - fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, so) + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, contentLength, so) if err != nil { return nil, nil, err } @@ -102,7 +100,7 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite return } -func (fs 
*FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { +func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { fileName := path.Base(r.URL.Path) contentType := r.Header.Get("Content-Type") @@ -110,7 +108,7 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter contentType = "" } - fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so) + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, contentLength, so) if err != nil { return nil, nil, err } @@ -144,6 +142,14 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa if fileName != "" { path += fileName } + } else { + if fileName != "" { + if possibleDirEntry, findDirErr := fs.filer.FindEntry(ctx, util.FullPath(path)); findDirErr == nil { + if possibleDirEntry.IsDirectory() { + path += "/" + fileName + } + } + } } var entry *filer.Entry @@ -212,7 +218,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa entry.Extended = make(map[string][]byte) } - fs.saveAmzMetaData(r, entry) + SaveAmzMetaData(r, entry.Extended, false) for k, v := range r.Header { if len(v) > 0 && strings.HasPrefix(k, needle.PairNamePrefix) { @@ -229,89 +235,6 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa return filerResult, replyerr } -func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) { - var fileChunks []*filer_pb.FileChunk - - md5Hash := md5.New() - var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash)) - - chunkOffset := int64(0) - var smallContent []byte - - for { - limitedReader := io.LimitReader(partReader, int64(chunkSize)) - - data, err := ioutil.ReadAll(limitedReader) - if err != nil { - return nil, nil, 0, err, nil - } - if chunkOffset == 0 && !isAppend(r) { - if len(data) < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 { - smallContent = data - chunkOffset += int64(len(data)) - break - } - } - dataReader := util.NewBytesReader(data) - - // retry to assign a different file id - var fileId, urlLocation string - var auth security.EncodedJwt - var assignErr, uploadErr error - var uploadResult *operation.UploadResult - for i := 0; i < 3; i++ { - // assign one file id for one chunk - fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so) - if assignErr != nil { - return nil, nil, 0, assignErr, nil - } - - // upload the chunk to the volume server - uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth) - if uploadErr != nil { - time.Sleep(251 * time.Millisecond) - continue - } - break - } - if uploadErr != nil { - return nil, nil, 0, uploadErr, nil - } - - // if last chunk exhausted the reader exactly at the border - if uploadResult.Size == 0 { - break - } - - // Save to chunk manifest structure - fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, 
chunkOffset)) - - glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size)) - - // reset variables for the next chunk - chunkOffset = chunkOffset + int64(uploadResult.Size) - - // if last chunk was not at full chunk size, but already exhausted the reader - if int64(uploadResult.Size) < int64(chunkSize) { - break - } - } - - return fileChunks, md5Hash, chunkOffset, nil, smallContent -} - -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) { - - stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() - start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) - }() - - uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth) - return uploadResult, err, data -} - func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType { return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) { @@ -380,17 +303,24 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http return filerResult, replyerr } -func (fs *FilerServer) saveAmzMetaData(r *http.Request, entry *filer.Entry) { +func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool) (metadata map[string][]byte) { + + metadata = make(map[string][]byte) + if !isReplace { + for k, v := range existing { + metadata[k] = v + } + } if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" { - entry.Extended[xhttp.AmzStorageClass] = []byte(sc) + metadata[xhttp.AmzStorageClass] = []byte(sc) } if tags := r.Header.Get(xhttp.AmzObjectTagging); tags != "" { for _, v := range strings.Split(tags, "&") { tag := strings.Split(v, "=") if len(tag) == 2 { - entry.Extended[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1]) + metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1]) } } } @@ -398,8 +328,11 @@ func (fs *FilerServer) saveAmzMetaData(r *http.Request, entry *filer.Entry) { for header, values := range r.Header { if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) { for _, value := range values { - entry.Extended[header] = []byte(value) + metadata[header] = []byte(value) } } } + + return + } diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go new file mode 100644 index 000000000..3ab45453e --- /dev/null +++ b/weed/server/filer_server_handlers_write_upload.go @@ -0,0 +1,105 @@ +package weed_server + +import ( + "crypto/md5" + "hash" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) { + var fileChunks []*filer_pb.FileChunk + + 
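+	// The request body is tee'd through an MD5 hash as it is read, so the digest accumulates while the data is split into chunkSize pieces and uploaded; no full copy of the upload is buffered in memory.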
md5Hash := md5.New() + var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash)) + + chunkOffset := int64(0) + var smallContent []byte + + for { + limitedReader := io.LimitReader(partReader, int64(chunkSize)) + + data, err := ioutil.ReadAll(limitedReader) + if err != nil { + return nil, nil, 0, err, nil + } + if chunkOffset == 0 && !isAppend(r) { + if len(data) < int(fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 { + smallContent = data + chunkOffset += int64(len(data)) + break + } + } + dataReader := util.NewBytesReader(data) + + // retry to assign a different file id + var fileId, urlLocation string + var auth security.EncodedJwt + var assignErr, uploadErr error + var uploadResult *operation.UploadResult + for i := 0; i < 3; i++ { + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so) + if assignErr != nil { + return nil, nil, 0, assignErr, nil + } + + // upload the chunk to the volume server + uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth) + if uploadErr != nil { + time.Sleep(251 * time.Millisecond) + continue + } + break + } + if uploadErr != nil { + return nil, nil, 0, uploadErr, nil + } + + // if last chunk exhausted the reader exactly at the border + if uploadResult.Size == 0 { + break + } + + // Save to chunk manifest structure + fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset)) + + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size)) + + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadResult.Size) + + // if last chunk was not at full chunk size, but already exhausted the reader + if int64(uploadResult.Size) < int64(chunkSize) { + break + } + } + + return fileChunks, md5Hash, chunkOffset, nil, smallContent +} + +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) { + + stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds()) + }() + + uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth) + if uploadResult != nil && uploadResult.RetryCount > 0 { + stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount)) + } + return uploadResult, err, data +} diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index f21cce7d1..5016117a8 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -1,4 +1,4 @@ -package master_ui +package filer_ui import ( "strings" diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index 3f0647119..648b97f22 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -1,4 +1,4 @@ -package master_ui +package filer_ui import ( "github.com/dustin/go-humanize" diff --git a/weed/server/gateway_server.go b/weed/server/gateway_server.go new file mode 100644 index 000000000..608217ed7 --- /dev/null +++ b/weed/server/gateway_server.go @@ -0,0 +1,106 @@ +package weed_server + 
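+// The gateway server below exposes /blobs/, /files/ and /topics/ over plain HTTP; only the blob handler does real work here (DELETE looks the file id up via a master and proxies the delete, POST hands the multipart body to submitForClientHandler), while /files/ and /topics/ are stubs.
+// A hypothetical session against a running gateway (host and port assumed, not part of this code):
+//   curl -F "file=@/tmp/a.txt" http://localhost:6789/blobs/
+//   curl -X DELETE http://localhost:6789/blobs/3,01637037d6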
+import ( + "github.com/chrislusf/seaweedfs/weed/operation" + "google.golang.org/grpc" + "math/rand" + "net/http" + + "github.com/chrislusf/seaweedfs/weed/util" + + _ "github.com/chrislusf/seaweedfs/weed/filer/cassandra" + _ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7" + _ "github.com/chrislusf/seaweedfs/weed/filer/etcd" + _ "github.com/chrislusf/seaweedfs/weed/filer/hbase" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3" + _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis2" + "github.com/chrislusf/seaweedfs/weed/glog" + _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" + _ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub" + _ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub" + _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" + _ "github.com/chrislusf/seaweedfs/weed/notification/log" + "github.com/chrislusf/seaweedfs/weed/security" +) + +type GatewayOption struct { + Masters []string + Filers []string + MaxMB int + IsSecure bool +} + +type GatewayServer struct { + option *GatewayOption + secret security.SigningKey + grpcDialOption grpc.DialOption +} + +func NewGatewayServer(defaultMux *http.ServeMux, option *GatewayOption) (fs *GatewayServer, err error) { + + fs = &GatewayServer{ + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), + } + + if len(option.Masters) == 0 { + glog.Fatal("master list is required!") + } + + defaultMux.HandleFunc("/blobs/", fs.blobsHandler) + defaultMux.HandleFunc("/files/", fs.filesHandler) + defaultMux.HandleFunc("/topics/", fs.topicsHandler) + + return fs, nil +} + +func (fs *GatewayServer) getMaster() string { + randMaster := rand.Intn(len(fs.option.Masters)) + return fs.option.Masters[randMaster] +} + +func (fs *GatewayServer) blobsHandler(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "DELETE": + chunkId := r.URL.Path[len("/blobs/"):] + fullUrl, err := operation.LookupFileId(fs.getMaster, chunkId) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + var jwtAuthorization security.EncodedJwt + if fs.option.IsSecure { + jwtAuthorization = operation.LookupJwt(fs.getMaster(), chunkId) + } + body, statusCode, err := util.DeleteProxied(fullUrl, string(jwtAuthorization)) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + w.WriteHeader(statusCode) + w.Write(body) + case "POST": + submitForClientHandler(w, r, fs.getMaster, fs.grpcDialOption) + } +} + +func (fs *GatewayServer) filesHandler(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "DELETE": + case "POST": + } +} + +func (fs *GatewayServer) topicsHandler(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "POST": + } +} diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 0f0b7f101..3e6d9bb9e 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -80,10 +80,14 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ 
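+	// A data node may not have a data center assigned yet, so the code below resolves it defensively before building the VolumeLocation message.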
dn.AdjustMaxVolumeCounts(heartbeat.MaxVolumeCounts) glog.V(4).Infof("master received heartbeat %s", heartbeat.String()) + var dataCenter string + if dc := dn.GetDataCenter(); dc != nil { + dataCenter = string(dc.Id()) + } message := &master_pb.VolumeLocation{ Url: dn.Url(), PublicUrl: dn.PublicUrl, - DataCenter: string(dn.GetDataCenter().Id()), + DataCenter: dataCenter, } if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 { // process delta volume ids if exists for fast volume id updates diff --git a/weed/server/master_grpc_server_admin.go b/weed/server/master_grpc_server_admin.go index 7e7dcb36b..93c9e4e4e 100644 --- a/weed/server/master_grpc_server_admin.go +++ b/weed/server/master_grpc_server_admin.go @@ -3,6 +3,7 @@ package weed_server import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" "math/rand" "sync" "time" @@ -60,6 +61,7 @@ const ( type AdminLock struct { accessSecret int64 accessLockTime time.Time + lastClient string } type AdminLocks struct { @@ -73,14 +75,15 @@ func NewAdminLocks() *AdminLocks { } } -func (locks *AdminLocks) isLocked(lockName string) bool { +func (locks *AdminLocks) isLocked(lockName string) (clientName string, isLocked bool) { locks.RLock() defer locks.RUnlock() adminLock, found := locks.locks[lockName] if !found { - return false + return "", false } - return adminLock.accessLockTime.Add(LockDuration).After(time.Now()) + glog.V(4).Infof("isLocked %v", adminLock.lastClient) + return adminLock.lastClient, adminLock.accessLockTime.Add(LockDuration).After(time.Now()) } func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64) bool { @@ -93,12 +96,13 @@ func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64 return adminLock.accessLockTime.Equal(ts) && adminLock.accessSecret == token } -func (locks *AdminLocks) generateToken(lockName string) (ts time.Time, token int64) { +func (locks *AdminLocks) generateToken(lockName string, clientName string) (ts time.Time, token int64) { locks.Lock() defer locks.Unlock() lock := &AdminLock{ accessSecret: rand.Int63(), accessLockTime: time.Now(), + lastClient: clientName, } locks.locks[lockName] = lock return lock.accessLockTime, lock.accessSecret @@ -113,18 +117,19 @@ func (locks *AdminLocks) deleteLock(lockName string) { func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.LeaseAdminTokenRequest) (*master_pb.LeaseAdminTokenResponse, error) { resp := &master_pb.LeaseAdminTokenResponse{} - if ms.adminLocks.isLocked(req.LockName) { + if lastClient, isLocked := ms.adminLocks.isLocked(req.LockName); isLocked { + glog.V(4).Infof("LeaseAdminToken %v", lastClient) if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) { // for renew - ts, token := ms.adminLocks.generateToken(req.LockName) + ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName) resp.Token, resp.LockTsNs = token, ts.UnixNano() return resp, nil } // refuse since still locked - return resp, fmt.Errorf("already locked") + return resp, fmt.Errorf("already locked by " + lastClient) } // for fresh lease request - ts, token := ms.adminLocks.generateToken(req.LockName) + ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName) resp.Token, resp.LockTsNs = token, ts.UnixNano() return resp, nil } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 9404081b4..e2b2df18d 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ 
-277,6 +277,13 @@ func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer glog.Error(err) seq = nil } + case "snowflake": + var err error + seq, err = sequence.NewSnowflakeSequencer(fmt.Sprintf("%s:%d", option.Host, option.Port)) + if err != nil { + glog.Error(err) + seq = nil + } default: seq = sequence.NewMemorySequencer() } diff --git a/weed/server/master_server_handlers_ui.go b/weed/server/master_server_handlers_ui.go index 9cd58158b..3822c6113 100644 --- a/weed/server/master_server_handlers_ui.go +++ b/weed/server/master_server_handlers_ui.go @@ -14,17 +14,19 @@ func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) infos := make(map[string]interface{}) infos["Up Time"] = time.Now().Sub(startTime).String() args := struct { - Version string - Topology interface{} - RaftServer raft.Server - Stats map[string]interface{} - Counters *stats.ServerStats + Version string + Topology interface{} + RaftServer raft.Server + Stats map[string]interface{} + Counters *stats.ServerStats + VolumeSizeLimitMB uint }{ util.Version(), ms.Topo.ToMap(), ms.Topo.RaftServer, infos, serverStats, + ms.option.VolumeSizeLimitMB, } ui.StatusTpl.Execute(w, args) } diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go index 60873f6aa..31b6353e9 100644 --- a/weed/server/master_ui/templates.go +++ b/weed/server/master_ui/templates.go @@ -22,8 +22,12 @@ var StatusTpl = template.Must(template.New("status").Parse(`

        [master_ui status-template hunks; the HTML markup was lost in this extract. In the "Cluster status" table, a new "Volume Size Limit" row rendering {{ .VolumeSizeLimitMB }}MB is added above the existing "Free {{ .Topology.Free }}" and leader ({{ .Leader }}) rows,
         and the "Other Masters" cell that lists peers via {{ range $k, $p := .Peers }} ... {{ $p.Name }} is adjusted.]
  • {{ end }} diff --git a/weed/server/volume_grpc_read_write.go b/weed/server/volume_grpc_read_write.go new file mode 100644 index 000000000..988e9e145 --- /dev/null +++ b/weed/server/volume_grpc_read_write.go @@ -0,0 +1,38 @@ +package weed_server + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func (vs *VolumeServer) ReadNeedleBlob(ctx context.Context, req *volume_server_pb.ReadNeedleBlobRequest) (resp *volume_server_pb.ReadNeedleBlobResponse, err error) { + resp = &volume_server_pb.ReadNeedleBlobResponse{} + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp.NeedleBlob, err = v.ReadNeedleBlob(req.Offset, types.Size(req.Size)) + if err != nil { + return nil, fmt.Errorf("read needle blob offset %d size %d: %v", req.Offset, req.Size, err) + } + + return resp, nil +} + +func (vs *VolumeServer) WriteNeedleBlob(ctx context.Context, req *volume_server_pb.WriteNeedleBlobRequest) (resp *volume_server_pb.WriteNeedleBlobResponse, err error) { + resp = &volume_server_pb.WriteNeedleBlobResponse{} + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + if err = v.WriteNeedleBlob(types.NeedleId(req.NeedleId), req.NeedleBlob, types.Size(req.Size)); err != nil { + return nil, fmt.Errorf("write blob needle %d size %d: %v", req.NeedleId, req.Size, err) + } + + return resp, nil +} diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index e496b1ce2..e11d607a4 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/chrislusf/seaweedfs/weed/storage/types" "net/http" + "sync" "google.golang.org/grpc" @@ -34,6 +35,10 @@ type VolumeServer struct { fileSizeLimitBytes int64 isHeartbeating bool stopChan chan bool + + inFlightDataSize int64 + inFlightDataLimitCond *sync.Cond + concurrentUploadLimit int64 } func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, @@ -48,6 +53,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, readRedirect bool, compactionMBPerSecond int, fileSizeLimitMB int, + concurrentUploadLimit int64, ) *VolumeServer { v := util.GetViper() @@ -72,6 +78,8 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024, isHeartbeating: true, stopChan: make(chan bool), + inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)), + concurrentUploadLimit: concurrentUploadLimit, } vs.SeedMasterNodes = masterNodes diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go index 7852c950a..4527add44 100644 --- a/weed/server/volume_server_handlers.go +++ b/weed/server/volume_server_handlers.go @@ -2,7 +2,9 @@ package weed_server import ( "net/http" + "strconv" "strings" + "sync/atomic" "github.com/chrislusf/seaweedfs/weed/util" @@ -40,8 +42,24 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque stats.DeleteRequest() vs.guard.WhiteList(vs.DeleteHandler)(w, r) case "PUT", "POST": + + // wait until in flight data is less than the limit + contentLength := getContentLength(r) + vs.inFlightDataLimitCond.L.Lock() + for atomic.LoadInt64(&vs.inFlightDataSize) > vs.concurrentUploadLimit { + vs.inFlightDataLimitCond.Wait() + } + 
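+		// Below the limit now: reserve this request's bytes, then release them (waking one queued upload) when the handler returns.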
atomic.AddInt64(&vs.inFlightDataSize, contentLength) + vs.inFlightDataLimitCond.L.Unlock() + defer func() { + atomic.AddInt64(&vs.inFlightDataSize, -contentLength) + vs.inFlightDataLimitCond.Signal() + }() + + // processs uploads stats.WriteRequest() vs.guard.WhiteList(vs.PostHandler)(w, r) + case "OPTIONS": stats.ReadRequest() w.Header().Add("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS") @@ -49,6 +67,18 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque } } +func getContentLength(r *http.Request) int64 { + contentLength := r.Header.Get("Content-Length") + if contentLength != "" { + length, err := strconv.ParseInt(contentLength, 10, 64) + if err != nil { + return 0 + } + return length + } + return 0 +} + func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION) if r.Header.Get("Origin") != "" { diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 2db46ac9b..3e977cfd4 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -27,7 +27,7 @@ var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`) func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) { - // println(r.Method + " " + r.URL.Path) + glog.V(9).Info(r.Method + " " + r.URL.Path + " " + r.Header.Get("Range")) stats.VolumeServerRequestCounter.WithLabelValues("get").Inc() start := time.Now() @@ -261,13 +261,10 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re return nil } - processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error { + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { if _, e = rs.Seek(offset, 0); e != nil { return e } - if httpStatusCode != 0 { - w.WriteHeader(httpStatusCode) - } _, e = io.CopyN(writer, rs, size) return e }) diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 602b147e1..3d752eda6 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -13,7 +13,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" - "github.com/chrislusf/seaweedfs/weed/util" ) func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { @@ -68,7 +67,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { ret.Name = string(reqNeedle.Name) } ret.Size = uint32(originalSize) - ret.ETag = fmt.Sprintf("%x", util.Base64Md5ToBytes(contentMd5)) + ret.ETag = reqNeedle.Etag() ret.Mime = string(reqNeedle.Mime) setEtag(w, ret.ETag) w.Header().Set("Content-MD5", contentMd5) diff --git a/weed/server/volume_server_tcp_handlers_write.go b/weed/server/volume_server_tcp_handlers_write.go new file mode 100644 index 000000000..a009611da --- /dev/null +++ b/weed/server/volume_server_tcp_handlers_write.go @@ -0,0 +1,137 @@ +package weed_server + +import ( + "bufio" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "net" + "strings" +) + +func (vs *VolumeServer) HandleTcpConnection(c net.Conn) { + defer c.Close() + + glog.V(0).Infof("Serving writes from %s", 
c.RemoteAddr().String()) + + bufReader := bufio.NewReaderSize(c, 1024*1024) + bufWriter := bufio.NewWriterSize(c, 1024*1024) + + for { + cmd, err := bufReader.ReadString('\n') + if err != nil { + if err != io.EOF { + glog.Errorf("read command from %s: %v", c.RemoteAddr().String(), err) + } + return + } + cmd = cmd[:len(cmd)-1] + switch cmd[0] { + case '+': + fileId := cmd[1:] + err = vs.handleTcpPut(fileId, bufReader) + if err == nil { + bufWriter.Write([]byte("+OK\n")) + } else { + bufWriter.Write([]byte("-ERR " + string(err.Error()) + "\n")) + } + case '-': + fileId := cmd[1:] + err = vs.handleTcpDelete(fileId) + if err == nil { + bufWriter.Write([]byte("+OK\n")) + } else { + bufWriter.Write([]byte("-ERR " + string(err.Error()) + "\n")) + } + case '?': + fileId := cmd[1:] + err = vs.handleTcpGet(fileId, bufWriter) + case '!': + bufWriter.Flush() + } + + } + +} + +func (vs *VolumeServer) handleTcpGet(fileId string, writer *bufio.Writer) (err error) { + + volumeId, n, err2 := vs.parseFileId(fileId) + if err2 != nil { + return err2 + } + + volume := vs.store.GetVolume(volumeId) + if volume == nil { + return fmt.Errorf("volume %d not found", volumeId) + } + + err = volume.StreamRead(n, writer) + if err != nil { + return err + } + + return nil +} + +func (vs *VolumeServer) handleTcpPut(fileId string, bufReader *bufio.Reader) (err error) { + + volumeId, n, err2 := vs.parseFileId(fileId) + if err2 != nil { + return err2 + } + + volume := vs.store.GetVolume(volumeId) + if volume == nil { + return fmt.Errorf("volume %d not found", volumeId) + } + + sizeBuf := make([]byte, 4) + if _, err = bufReader.Read(sizeBuf); err != nil { + return err + } + dataSize := util.BytesToUint32(sizeBuf) + + err = volume.StreamWrite(n, bufReader, dataSize) + if err != nil { + return err + } + + return nil +} + +func (vs *VolumeServer) handleTcpDelete(fileId string) (err error) { + + volumeId, n, err2 := vs.parseFileId(fileId) + if err2 != nil { + return err2 + } + + _, err = vs.store.DeleteVolumeNeedle(volumeId, n) + if err != nil { + return err + } + + return nil +} + +func (vs *VolumeServer) parseFileId(fileId string) (needle.VolumeId, *needle.Needle, error) { + + commaIndex := strings.LastIndex(fileId, ",") + if commaIndex <= 0 { + return 0, nil, fmt.Errorf("unknown fileId %s", fileId) + } + + vid, fid := fileId[0:commaIndex], fileId[commaIndex+1:] + + volumeId, ve := needle.NewVolumeId(vid) + if ve != nil { + return 0, nil, fmt.Errorf("unknown volume id in fileId %s", fileId) + } + + n := new(needle.Needle) + n.ParsePath(fid) + return volumeId, n, nil +} diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index 6a8bb6f55..ee4c2e31d 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -1,4 +1,4 @@ -package master_ui +package volume_server_ui import ( "fmt" diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index edacf22c6..634cb11e2 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -274,7 +274,7 @@ func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection stri quietSeconds := int64(quietPeriod / time.Second) nowUnixSeconds := time.Now().Unix() - fmt.Printf("ec encode volumes quiet for: %d seconds\n", quietSeconds) + fmt.Printf("collect volumes quiet for: %d seconds\n", quietSeconds) vidMap := make(map[uint32]bool) eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { diff --git 
a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 3c5e13663..df43d93dc 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -52,7 +52,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write return err } - return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64) + return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false) }) diff --git a/weed/shell/command_fs_lock_unlock.go b/weed/shell/command_fs_lock_unlock.go index 8a6e8f71b..33458bb6f 100644 --- a/weed/shell/command_fs_lock_unlock.go +++ b/weed/shell/command_fs_lock_unlock.go @@ -1,6 +1,7 @@ package shell import ( + "github.com/chrislusf/seaweedfs/weed/util" "io" ) @@ -26,7 +27,7 @@ func (c *commandLock) Help() string { func (c *commandLock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - commandEnv.locker.RequestLock() + commandEnv.locker.RequestLock(util.DetectedHostAddress()) return nil } diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index 69ae9454c..46dc07e9a 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "os" + "strings" "github.com/golang/protobuf/proto" @@ -72,6 +73,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. return err } + fullEntry.Entry.Name = strings.ReplaceAll(fullEntry.Entry.Name, "/", "x") if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: fullEntry.Dir, Entry: fullEntry.Entry, diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index ed19e3d01..37d94fe42 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -5,6 +5,8 @@ import ( "fmt" "io" "os" + "path/filepath" + "strings" "sync" "sync/atomic" "time" @@ -46,6 +48,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed files") outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file") + isObfuscate := fsMetaSaveCommand.Bool("obfuscate", false, "obfuscate the file names") // chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file") if err = fsMetaSaveCommand.Parse(args); err != nil { return nil @@ -69,6 +72,11 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. } defer dst.Close() + var cipherKey util.CipherKey + if *isObfuscate { + cipherKey = util.GenCipherKey() + } + err = doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan interface{}) { sizeBuf := make([]byte, 4) for item := range outputChan { @@ -78,6 +86,13 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
dst.Write(b) } }, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) { + if !entry.Entry.IsDirectory { + ext := filepath.Ext(entry.Entry.Name) + if encrypted, encErr := util.Encrypt([]byte(entry.Entry.Name), cipherKey); encErr == nil { + entry.Entry.Name = util.Base64Encode(encrypted)[:len(entry.Entry.Name)] + ext + entry.Entry.Name = strings.ReplaceAll(entry.Entry.Name, "/", "x") + } + } bytes, err := proto.Marshal(entry) if err != nil { fmt.Fprintf(writer, "marshall error: %v\n", err) diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 0a7eed02d..2448c8f61 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -36,6 +36,10 @@ func (c *commandFsMv) Help() string { func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + if len(args) != 2 { + return fmt.Errorf("need to have 2 arguments") + } + sourcePath, err := commandEnv.parseUrl(args[0]) if err != nil { return err diff --git a/weed/shell/command_s3_clean_uploads.go b/weed/shell/command_s3_clean_uploads.go new file mode 100644 index 000000000..1ba31292c --- /dev/null +++ b/weed/shell/command_s3_clean_uploads.go @@ -0,0 +1,92 @@ +package shell + +import ( + "flag" + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "math" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandS3CleanUploads{}) +} + +type commandS3CleanUploads struct { +} + +func (c *commandS3CleanUploads) Name() string { + return "s3.clean.uploads" +} + +func (c *commandS3CleanUploads) Help() string { + return `clean up stale multipart uploads + + Example: + s3.clean.uploads -timeAgo 1.5h + +` +} + +func (c *commandS3CleanUploads) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + uploadedTimeAgo := bucketCommand.Duration("timeAgo", 24*time.Hour, "created time before now. \"1.5h\" or \"2h45m\". 
Valid time units are \"m\", \"h\"") + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + var filerBucketsPath string + filerBucketsPath, err = readFilerBucketsPath(commandEnv) + if err != nil { + return fmt.Errorf("read buckets: %v", err) + } + + var buckets []string + err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error { + buckets = append(buckets, entry.Name) + return nil + }, "", false, math.MaxUint32) + if err != nil { + return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err) + } + + for _, bucket := range buckets { + c.cleanupUploads(commandEnv, writer, filerBucketsPath, bucket, *uploadedTimeAgo) + } + + return err + +} + +func (c *commandS3CleanUploads) cleanupUploads(commandEnv *CommandEnv, writer io.Writer, filerBucketsPath string, bucket string, timeAgo time.Duration) error { + uploadsDir := filerBucketsPath + "/" + bucket + "/.uploads" + var staleUploads []string + now := time.Now() + err := filer_pb.List(commandEnv, uploadsDir, "", func(entry *filer_pb.Entry, isLast bool) error { + ctime := time.Unix(entry.Attributes.Crtime, 0) + if ctime.Add(timeAgo).Before(now) { + staleUploads = append(staleUploads, entry.Name) + } + return nil + }, "", false, math.MaxUint32) + if err != nil { + return fmt.Errorf("list uploads under %v: %v", uploadsDir, err) + } + + for _, staleUpload := range staleUploads { + deleteUrl := fmt.Sprintf("http://%s:%d%s/%s?recursive=true&ignoreRecursiveError=true", commandEnv.option.FilerHost, commandEnv.option.FilerPort, uploadsDir, staleUpload) + fmt.Fprintf(writer, "purge %s\n", deleteUrl) + + err = util.Delete(deleteUrl, "") + if err != nil { + return fmt.Errorf("purge %s/%s: %v", uploadsDir, staleUpload, err) + } + } + + return nil + +} diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go index e0c41f310..ad7da0e44 100644 --- a/weed/shell/command_volume_balance.go +++ b/weed/shell/command_volume_balance.go @@ -393,9 +393,8 @@ func adjustAfterMove(v *master_pb.VolumeInformationMessage, volumeReplicas map[u if replica.location.dataNode.Id == fullNode.info.Id && replica.location.rack == fullNode.rack && replica.location.dc == fullNode.dc { - replica.location.dc = emptyNode.dc - replica.location.rack = emptyNode.rack - replica.location.dataNode = emptyNode.info + loc := newLocation(emptyNode.dc, emptyNode.rack, emptyNode.info) + replica.location = &loc return } } diff --git a/weed/shell/command_volume_balance_test.go b/weed/shell/command_volume_balance_test.go index 696bc7fac..b77811f51 100644 --- a/weed/shell/command_volume_balance_test.go +++ b/weed/shell/command_volume_balance_test.go @@ -169,3 +169,15 @@ func TestIsGoodMove(t *testing.T) { } } + +func TestBalance(t *testing.T) { + topologyInfo := parseOutput(topoData) + volumeServers := collectVolumeServersByDc(topologyInfo, "") + volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo) + diskTypes := collectVolumeDiskTypes(topologyInfo) + + if err := balanceVolumeServers(nil, diskTypes, volumeReplicas, volumeServers, 30*1024*1024*1024, "ALL_COLLECTIONS", false); err != nil { + t.Errorf("balance: %v", err) + } + +} diff --git a/weed/shell/command_volume_check_disk.go b/weed/shell/command_volume_check_disk.go new file mode 100644 index 000000000..0f156ac2f --- /dev/null +++ b/weed/shell/command_volume_check_disk.go @@ -0,0 +1,258 @@ +package shell + +import ( + "bytes" + "context" + "flag" + "fmt" + "github.com/chrislusf/seaweedfs/weed/operation" + 
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "io" + "math" + "sort" +) + +func init() { + Commands = append(Commands, &commandVolumeCheckDisk{}) +} + +type commandVolumeCheckDisk struct { + env *CommandEnv +} + +func (c *commandVolumeCheckDisk) Name() string { + return "volume.check.disk" +} + +func (c *commandVolumeCheckDisk) Help() string { + return `check all replicated volumes to find and fix inconsistencies + + How it works: + + find all volumes that are replicated + for each volume id, if there are more than 2 replicas, find one pair with the largest 2 in file count. + for the pair volume A and B + append entries in A and not in B to B + append entries in B and not in A to A + +` +} + +func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even file counts are the same") + verbose := fsckCommand.Bool("v", false, "verbose mode") + applyChanges := fsckCommand.Bool("force", false, "apply the fix") + nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair when missing keys is not more than this limit") + if err = fsckCommand.Parse(args); err != nil { + return nil + } + + c.env = commandEnv + + // collect topology information + topologyInfo, _, err := collectTopologyInfo(commandEnv) + if err != nil { + return err + } + volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo) + + // pick 1 pairs of volume replica + fileCount := func(replica *VolumeReplica) uint64 { + return replica.info.FileCount - replica.info.DeleteCount + } + aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb() + defer aDB.Close() + defer bDB.Close() + + for _, replicas := range volumeReplicas { + sort.Slice(replicas, func(i, j int) bool { + return fileCount(replicas[i]) > fileCount(replicas[j]) + }) + for len(replicas) >= 2 { + a, b := replicas[0], replicas[1] + if !*slowMode { + if fileCount(a) == fileCount(b) { + replicas = replicas[1:] + continue + } + } + if a.info.ReadOnly || b.info.ReadOnly { + fmt.Fprintf(writer, "skipping readonly volume %d on %s and %s\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id) + replicas = replicas[1:] + continue + } + + // reset index db + aDB.Close() + bDB.Close() + aDB, bDB = needle_map.NewMemDb(), needle_map.NewMemDb() + + // read index db + if err := c.readIndexDatabase(aDB, a.info.Collection, a.info.Id, a.location.dataNode.Id, *verbose, writer); err != nil { + return err + } + if err := c.readIndexDatabase(bDB, b.info.Collection, b.info.Id, b.location.dataNode.Id, *verbose, writer); err != nil { + return err + } + + // find and make up the differnces + if err := c.doVolumeCheckDisk(aDB, bDB, a, b, *verbose, writer, *applyChanges, *nonRepairThreshold); err != nil { + return err + } + if err := c.doVolumeCheckDisk(bDB, aDB, b, a, *verbose, writer, *applyChanges, *nonRepairThreshold); err != nil { + return err + } + replicas = replicas[1:] + } + } + + return nil +} + +func (c *commandVolumeCheckDisk) doVolumeCheckDisk(subtrahend, minuend *needle_map.MemDb, source, target *VolumeReplica, verbose bool, writer io.Writer, applyChanges bool, nonRepairThreshold float64) error { + + // find missing keys + // hash join, can be more efficient + var missingNeedles []needle_map.NeedleValue + var counter int + 
subtrahend.AscendingVisit(func(value needle_map.NeedleValue) error { + counter++ + if _, found := minuend.Get(value.Key); !found { + missingNeedles = append(missingNeedles, value) + } + return nil + }) + + fmt.Fprintf(writer, "volume %d %s has %d entries, %s missed %d entries\n", source.info.Id, source.location.dataNode.Id, counter, target.location.dataNode.Id, len(missingNeedles)) + + if counter == 0 || len(missingNeedles) == 0 { + return nil + } + + missingNeedlesFraction := float64(len(missingNeedles)) / float64(counter) + if missingNeedlesFraction > nonRepairThreshold { + return fmt.Errorf( + "failed to start repair volume %d, percentage of missing keys is greater than the threshold: %.2f > %.2f", + source.info.Id, missingNeedlesFraction, nonRepairThreshold) + } + + for _, needleValue := range missingNeedles { + + needleBlob, err := c.readSourceNeedleBlob(source.location.dataNode.Id, source.info.Id, needleValue) + if err != nil { + return err + } + + if !applyChanges { + continue + } + + if verbose { + fmt.Fprintf(writer, "read %d,%x %s => %s \n", source.info.Id, needleValue.Key, source.location.dataNode.Id, target.location.dataNode.Id) + } + + if err := c.writeNeedleBlobToTarget(target.location.dataNode.Id, source.info.Id, needleValue, needleBlob); err != nil { + return err + } + + } + + return nil +} + +func (c *commandVolumeCheckDisk) readSourceNeedleBlob(sourceVolumeServer string, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) { + + err = operation.WithVolumeServerClient(sourceVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{ + VolumeId: volumeId, + NeedleId: uint64(needleValue.Key), + Offset: needleValue.Offset.ToActualOffset(), + Size: int32(needleValue.Size), + }) + if err != nil { + return err + } + needleBlob = resp.NeedleBlob + return nil + }) + return +} + +func (c *commandVolumeCheckDisk) writeNeedleBlobToTarget(targetVolumeServer string, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error { + + return operation.WithVolumeServerClient(targetVolumeServer, c.env.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, err := client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{ + VolumeId: volumeId, + NeedleId: uint64(needleValue.Key), + Size: int32(needleValue.Size), + NeedleBlob: needleBlob, + }) + return err + }) + +} + +func (c *commandVolumeCheckDisk) readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer string, verbose bool, writer io.Writer) error { + + var buf bytes.Buffer + if err := c.copyVolumeIndexFile(collection, volumeId, volumeServer, &buf, verbose, writer); err != nil { + return err + } + + if verbose { + fmt.Fprintf(writer, "load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer) + } + + return db.LoadFromReaderAt(bytes.NewReader(buf.Bytes())) + +} + +func (c *commandVolumeCheckDisk) copyVolumeIndexFile(collection string, volumeId uint32, volumeServer string, buf *bytes.Buffer, verbose bool, writer io.Writer) error { + + return operation.WithVolumeServerClient(volumeServer, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + + ext := ".idx" + + copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{ + 
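// copy the volume's .idx index file in full; a StopOffset of math.MaxInt64 means read to the end of the file + 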
VolumeId: volumeId, + Ext: ".idx", + CompactionRevision: math.MaxUint32, + StopOffset: math.MaxInt64, + Collection: collection, + IsEcVolume: false, + IgnoreSourceFileNotFound: false, + }) + if err != nil { + return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err) + } + + err = writeToBuffer(copyFileClient, buf) + if err != nil { + return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, volumeServer, err) + } + + return nil + + }) +} + +func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error { + for { + resp, receiveErr := client.Recv() + if receiveErr == io.EOF { + break + } + if receiveErr != nil { + return fmt.Errorf("receiving: %v", receiveErr) + } + buf.Write(resp.FileContent) + } + return nil +} diff --git a/weed/shell/command_volume_list_test.go b/weed/shell/command_volume_list_test.go new file mode 100644 index 000000000..72c76f242 --- /dev/null +++ b/weed/shell/command_volume_list_test.go @@ -0,0 +1,893 @@ +package shell + +import ( + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "strconv" + "strings" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +func TestParsing(t *testing.T) { + topo := parseOutput(topoData) + + assert.Equal(t, 5, len(topo.DataCenterInfos)) + +} + +func parseOutput(output string) *master_pb.TopologyInfo { + lines := strings.Split(output, "\n") + var topo *master_pb.TopologyInfo + var dc *master_pb.DataCenterInfo + var rack *master_pb.RackInfo + var dn *master_pb.DataNodeInfo + var disk *master_pb.DiskInfo + for _, line := range lines { + line = strings.TrimSpace(line) + parts := strings.Split(line, " ") + switch parts[0] { + case "Topology": + if topo == nil { + topo = &master_pb.TopologyInfo{} + } + case "DataCenter": + if dc == nil { + dc = &master_pb.DataCenterInfo{ + Id: parts[1], + } + topo.DataCenterInfos = append(topo.DataCenterInfos, dc) + } else { + dc = nil + } + case "Rack": + if rack == nil { + rack = &master_pb.RackInfo{ + Id: parts[1], + } + dc.RackInfos = append(dc.RackInfos, rack) + } else { + rack = nil + } + case "DataNode": + if dn == nil { + dn = &master_pb.DataNodeInfo{ + Id: parts[1], + DiskInfos: make(map[string]*master_pb.DiskInfo), + } + rack.DataNodeInfos = append(rack.DataNodeInfos, dn) + } else { + dn = nil + } + case "Disk": + if disk == nil { + diskType := parts[1][:strings.Index(parts[1], "(")] + maxVolumeCountStr := parts[1][strings.Index(parts[1], "/")+1:] + maxVolumeCount, _ := strconv.Atoi(maxVolumeCountStr) + disk = &master_pb.DiskInfo{ + Type: diskType, + MaxVolumeCount: uint64(maxVolumeCount), + } + dn.DiskInfos[types.ToDiskType(diskType).String()] = disk + } else { + disk = nil + } + case "volume": + volumeLine := line[len("volume "):] + volume := &master_pb.VolumeInformationMessage{} + proto.UnmarshalText(volumeLine, volume) + disk.VolumeInfos = append(disk.VolumeInfos, volume) + } + } + + return topo +} + +const topoData = ` +Topology volumeSizeLimit:1024 MB hdd(volume:760/7280 active:760 free:6520 remote:0) + DataCenter dc1 hdd(volume:0/0 active:0 free:0 remote:0) + Rack DefaultRack hdd(volume:0/0 active:0 free:0 remote:0) + Rack DefaultRack total size:0 file_count:0 + DataCenter dc1 total size:0 file_count:0 + DataCenter dc2 hdd(volume:86/430 active:86 free:344 remote:0) + Rack rack1 hdd(volume:50/240 active:50 free:190 remote:0) + DataNode 192.168.1.4:8080 hdd(volume:50/240 active:50 free:190 remote:0) + Disk hdd(volume:50/240 
active:50 free:190 remote:0) + volume id:15 size:1115965064 collection:"collection0" file_count:83 replica_placement:100 version:3 modified_at_second:1609923671 + volume id:21 size:1097631536 collection:"collection0" file_count:82 delete_count:7 deleted_byte_count:68975485 replica_placement:100 version:3 modified_at_second:1609929578 + volume id:22 size:1086828272 collection:"collection0" file_count:75 replica_placement:100 version:3 modified_at_second:1609930001 + volume id:23 size:1076380216 collection:"collection0" file_count:68 replica_placement:100 version:3 modified_at_second:1609930434 + volume id:24 size:1074139776 collection:"collection0" file_count:90 replica_placement:100 version:3 modified_at_second:1609930909 + volume id:25 size:690757512 collection:"collection0" file_count:38 replica_placement:100 version:3 modified_at_second:1611144216 + volume id:27 size:298886792 file_count:1608 replica_placement:100 version:3 modified_at_second:1615632482 + volume id:28 size:308919192 file_count:1591 delete_count:1 deleted_byte_count:125280 replica_placement:100 version:3 modified_at_second:1615631762 + volume id:29 size:281582680 file_count:1537 replica_placement:100 version:3 modified_at_second:1615629422 + volume id:30 size:289466144 file_count:1566 delete_count:1 deleted_byte_count:124972 replica_placement:100 version:3 modified_at_second:1615632422 + volume id:31 size:273363256 file_count:1498 replica_placement:100 version:3 modified_at_second:1615631642 + volume id:33 size:1130226400 collection:"collection1" file_count:7322 delete_count:172 deleted_byte_count:45199399 replica_placement:100 version:3 modified_at_second:1615618789 + volume id:38 size:1075545744 collection:"collection1" file_count:13324 delete_count:100 deleted_byte_count:25223906 replica_placement:100 version:3 modified_at_second:1615569830 + volume id:51 size:1076796120 collection:"collection1" file_count:10550 delete_count:39 deleted_byte_count:12723654 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615547786 + volume id:52 size:1083529728 collection:"collection1" file_count:10128 delete_count:32 deleted_byte_count:10608391 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615599195 + volume id:54 size:1045022344 collection:"collection1" file_count:9408 delete_count:30 deleted_byte_count:15132106 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812 + volume id:63 size:956941112 collection:"collection1" file_count:8271 delete_count:32 deleted_byte_count:15876189 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632036 + volume id:69 size:869213648 collection:"collection1" file_count:7293 delete_count:102 deleted_byte_count:30643207 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630534 + volume id:74 size:957046128 collection:"collection1" file_count:6982 delete_count:258 deleted_byte_count:73054259 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631460 + volume id:80 size:827912928 collection:"collection1" file_count:6914 delete_count:17 deleted_byte_count:5689635 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631157 + volume id:84 size:873121856 collection:"collection1" file_count:8200 delete_count:13 deleted_byte_count:3131676 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631161 + volume id:85 size:1023869320 collection:"collection1" file_count:7788 delete_count:234 deleted_byte_count:78037967 
replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631723 + volume id:97 size:1053112992 collection:"collection1" file_count:6789 delete_count:50 deleted_byte_count:38894001 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631193 + volume id:98 size:1077836440 collection:"collection1" file_count:7605 delete_count:202 deleted_byte_count:73180379 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615523691 + volume id:105 size:1073996824 collection:"collection1" file_count:6872 delete_count:20 deleted_byte_count:14482293 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615499757 + volume id:106 size:1075458664 collection:"collection1" file_count:7182 delete_count:307 deleted_byte_count:69349053 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615598137 + volume id:112 size:1076392512 collection:"collection1" file_count:8291 delete_count:156 deleted_byte_count:74120183 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569823 + volume id:116 size:1074489504 collection:"collection1" file_count:9981 delete_count:174 deleted_byte_count:53998777 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615611565 + volume id:119 size:1075940104 collection:"collection1" file_count:9003 delete_count:12 deleted_byte_count:9128155 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573878 + volume id:128 size:1074874632 collection:"collection1" file_count:9821 delete_count:148 deleted_byte_count:43633334 replica_placement:100 version:3 modified_at_second:1615602670 + volume id:133 size:1075952760 collection:"collection1" file_count:9538 delete_count:74 deleted_byte_count:19558008 replica_placement:100 version:3 modified_at_second:1615584779 + volume id:136 size:1074433552 collection:"collection1" file_count:9593 delete_count:72 deleted_byte_count:26912512 replica_placement:100 version:3 modified_at_second:1615376036 + volume id:138 size:1074465744 collection:"collection1" file_count:10120 delete_count:55 deleted_byte_count:15875438 replica_placement:100 version:3 modified_at_second:1615572231 + volume id:140 size:1076203744 collection:"collection1" file_count:11219 delete_count:57 deleted_byte_count:19864498 replica_placement:100 version:3 modified_at_second:1615571947 + volume id:144 size:1074549720 collection:"collection1" file_count:8780 delete_count:50 deleted_byte_count:52475146 replica_placement:100 version:3 modified_at_second:1615573451 + volume id:161 size:1077397192 collection:"collection1" file_count:9988 delete_count:28 deleted_byte_count:12509164 replica_placement:100 version:3 modified_at_second:1615631452 + volume id:173 size:1074154704 collection:"collection1" file_count:30884 delete_count:34 deleted_byte_count:2578509 replica_placement:100 version:3 modified_at_second:1615591904 + volume id:174 size:1073824232 collection:"collection1" file_count:30689 delete_count:36 deleted_byte_count:2160116 replica_placement:100 version:3 modified_at_second:1615598914 + volume id:197 size:1075423240 collection:"collection1" file_count:16473 delete_count:15 deleted_byte_count:12552442 replica_placement:100 version:3 modified_at_second:1615485254 + volume id:219 size:1092298904 collection:"collection1" file_count:3193 delete_count:17 deleted_byte_count:2047576 replica_placement:100 version:3 modified_at_second:1615579316 + volume id:263 size:1077167352 collection:"collection2" file_count:20227 delete_count:4 deleted_byte_count:97887 
replica_placement:100 version:3 modified_at_second:1614871567 + volume id:272 size:1076146040 collection:"collection2" file_count:21034 delete_count:2 deleted_byte_count:216564 replica_placement:100 version:3 modified_at_second:1614884139 + volume id:291 size:1076256760 collection:"collection2" file_count:28301 delete_count:5 deleted_byte_count:116027 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:299 size:1075147824 collection:"collection2" file_count:22927 delete_count:4 deleted_byte_count:345569 replica_placement:100 version:3 modified_at_second:1614918454 + volume id:301 size:1074655600 collection:"collection2" file_count:22543 delete_count:6 deleted_byte_count:136968 replica_placement:100 version:3 modified_at_second:1614918378 + volume id:302 size:1077559792 collection:"collection2" file_count:23124 delete_count:7 deleted_byte_count:293111 replica_placement:100 version:3 modified_at_second:1614925500 + volume id:339 size:1078402392 collection:"collection2" file_count:22309 replica_placement:100 version:3 modified_at_second:1614969996 + volume id:345 size:1074560760 collection:"collection2" file_count:22117 delete_count:2 deleted_byte_count:373286 replica_placement:100 version:3 modified_at_second:1614977458 + volume id:355 size:1075239792 collection:"collection2" file_count:22244 delete_count:1 deleted_byte_count:23282 replica_placement:100 version:3 modified_at_second:1614992157 + volume id:373 size:1080928000 collection:"collection2" file_count:22617 delete_count:4 deleted_byte_count:91849 replica_placement:100 version:3 modified_at_second:1615016877 + Disk hdd total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253 + DataNode 192.168.1.4:8080 total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253 + Rack rack1 total size:48630015544 file_count:537880 deleted_file:2580 deleted_bytes:929560253 + Rack rack2 hdd(volume:36/190 active:36 free:154 remote:0) + DataNode 192.168.1.2:8080 hdd(volume:36/190 active:36 free:154 remote:0) + Disk hdd(volume:36/190 active:36 free:154 remote:0) + volume id:2 size:289228560 file_count:1640 delete_count:4 deleted_byte_count:480564 replica_placement:100 version:3 compact_revision:6 modified_at_second:1615630622 + volume id:3 size:308743136 file_count:1638 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632242 + volume id:4 size:285986968 file_count:1641 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632302 + volume id:6 size:302411024 file_count:1604 delete_count:2 deleted_byte_count:274587 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631402 + volume id:7 size:1924728 collection:"collection4" file_count:15 replica_placement:100 version:3 modified_at_second:1609331040 + volume id:9 size:77337416 collection:"collection3" file_count:58 replica_placement:100 version:3 ttl:772 modified_at_second:1615513762 + volume id:10 size:1212784656 collection:"collection0" file_count:58 replica_placement:100 version:3 modified_at_second:1609814550 + volume id:12 size:1110923848 collection:"collection0" file_count:45 replica_placement:100 version:3 modified_at_second:1609819732 + volume id:13 size:1184910656 collection:"collection0" file_count:47 replica_placement:100 version:3 modified_at_second:1609827837 + volume id:14 size:1107475720 collection:"collection0" file_count:80 delete_count:3 deleted_byte_count:6870 replica_placement:100 version:3 modified_at_second:1612956980 + volume id:16 size:1113666104 
collection:"collection0" file_count:73 delete_count:5 deleted_byte_count:6318 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:17 size:1095115800 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:7099 replica_placement:100 version:3 modified_at_second:1612957000 + volume id:21 size:1097631664 collection:"collection0" file_count:82 delete_count:11 deleted_byte_count:68985100 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:56 size:1001897616 collection:"collection1" file_count:8762 delete_count:37 deleted_byte_count:65375405 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632014 + volume id:81 size:880693104 collection:"collection1" file_count:7481 delete_count:236 deleted_byte_count:80386421 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631396 + volume id:104 size:1076383624 collection:"collection1" file_count:7663 delete_count:184 deleted_byte_count:100728071 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615602658 + volume id:107 size:1073811840 collection:"collection1" file_count:7436 delete_count:168 deleted_byte_count:57747484 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615293569 + volume id:113 size:1076709184 collection:"collection1" file_count:9355 delete_count:177 deleted_byte_count:59796765 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569822 + volume id:139 size:1074163936 collection:"collection1" file_count:9315 delete_count:42 deleted_byte_count:10630966 replica_placement:100 version:3 modified_at_second:1615571946 + volume id:151 size:1098659752 collection:"collection1" file_count:10808 delete_count:24 deleted_byte_count:7088102 replica_placement:100 version:3 modified_at_second:1615586389 + volume id:155 size:1075140688 collection:"collection1" file_count:10882 delete_count:32 deleted_byte_count:9076141 replica_placement:100 version:3 modified_at_second:1615606614 + volume id:167 size:1073958176 collection:"collection1" file_count:25229 delete_count:48 deleted_byte_count:25871565 replica_placement:100 version:3 modified_at_second:1615602669 + volume id:177 size:1074120216 collection:"collection1" file_count:22293 delete_count:16 deleted_byte_count:3803952 replica_placement:100 version:3 modified_at_second:1615516892 + volume id:179 size:1074313920 collection:"collection1" file_count:21829 delete_count:24 deleted_byte_count:45552859 replica_placement:100 version:3 modified_at_second:1615580308 + volume id:182 size:1076131280 collection:"collection1" file_count:31987 delete_count:21 deleted_byte_count:1452346 replica_placement:100 version:3 modified_at_second:1615568922 + volume id:215 size:1068268216 collection:"collection1" file_count:2813 delete_count:10 deleted_byte_count:5676795 replica_placement:100 version:3 modified_at_second:1615586386 + volume id:217 size:1075381872 collection:"collection1" file_count:3331 delete_count:14 deleted_byte_count:2009141 replica_placement:100 version:3 modified_at_second:1615401638 + volume id:283 size:1080178944 collection:"collection2" file_count:19462 delete_count:7 deleted_byte_count:660407 replica_placement:100 version:3 modified_at_second:1614896626 + volume id:303 size:1075944504 collection:"collection2" file_count:22541 delete_count:2 deleted_byte_count:13617 replica_placement:100 version:3 modified_at_second:1614925431 + volume id:309 size:1075178624 collection:"collection2" file_count:22692 delete_count:3 
deleted_byte_count:171124 replica_placement:100 version:3 modified_at_second:1614931409 + volume id:323 size:1074608200 collection:"collection2" file_count:21605 delete_count:4 deleted_byte_count:172090 replica_placement:100 version:3 modified_at_second:1614950526 + volume id:344 size:1075035448 collection:"collection2" file_count:21765 delete_count:1 deleted_byte_count:24623 replica_placement:100 version:3 modified_at_second:1614977465 + volume id:347 size:1075145496 collection:"collection2" file_count:22178 delete_count:1 deleted_byte_count:79392 replica_placement:100 version:3 modified_at_second:1614984727 + volume id:357 size:1074276208 collection:"collection2" file_count:23137 delete_count:4 deleted_byte_count:188487 replica_placement:100 version:3 modified_at_second:1614998792 + volume id:380 size:1010760456 collection:"collection2" file_count:14921 delete_count:6 deleted_byte_count:65678 replica_placement:100 version:3 modified_at_second:1615632322 + volume id:381 size:939292792 collection:"collection2" file_count:14619 delete_count:2 deleted_byte_count:5119 replica_placement:100 version:3 modified_at_second:1615632324 + Disk hdd total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088 + DataNode 192.168.1.2:8080 total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088 + Rack rack2 total size:33468194376 file_count:369168 deleted_file:1091 deleted_bytes:546337088 + DataCenter dc2 total size:82098209920 file_count:907048 deleted_file:3671 deleted_bytes:1475897341 + DataCenter dc3 hdd(volume:108/1850 active:108 free:1742 remote:0) + Rack rack3 hdd(volume:108/1850 active:108 free:1742 remote:0) + DataNode 192.168.1.6:8080 hdd(volume:108/1850 active:108 free:1742 remote:0) + Disk hdd(volume:108/1850 active:108 free:1742 remote:0) + volume id:1 size:284685936 file_count:1557 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632062 + volume id:32 size:281390512 file_count:1496 delete_count:6 deleted_byte_count:546403 replica_placement:100 version:3 modified_at_second:1615632362 + volume id:47 size:444599784 collection:"collection1" file_count:709 delete_count:19 deleted_byte_count:11913451 replica_placement:100 version:3 modified_at_second:1615632397 + volume id:49 size:1078775288 collection:"collection1" file_count:9636 delete_count:22 deleted_byte_count:5625976 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630446 + volume id:68 size:898630584 collection:"collection1" file_count:6934 delete_count:95 deleted_byte_count:27460707 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284 + volume id:88 size:1073767976 collection:"collection1" file_count:14995 delete_count:206 deleted_byte_count:81222360 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615629897 + volume id:202 size:1077533160 collection:"collection1" file_count:2847 delete_count:67 deleted_byte_count:65172985 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615588497 + volume id:203 size:1027316272 collection:"collection1" file_count:3040 delete_count:11 deleted_byte_count:3993230 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631728 + volume id:205 size:1078485304 collection:"collection1" file_count:2869 delete_count:43 deleted_byte_count:18290259 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615579314 + volume id:206 size:1082045848 collection:"collection1" file_count:2979 delete_count:225 
deleted_byte_count:88220074 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615564274 + volume id:209 size:1074083592 collection:"collection1" file_count:3238 delete_count:4 deleted_byte_count:1494244 replica_placement:100 version:3 modified_at_second:1615419954 + volume id:211 size:1080610712 collection:"collection1" file_count:3247 delete_count:7 deleted_byte_count:1891456 replica_placement:100 version:3 modified_at_second:1615269124 + volume id:212 size:1078293360 collection:"collection1" file_count:3106 delete_count:6 deleted_byte_count:2085755 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:213 size:1093587976 collection:"collection1" file_count:3681 delete_count:12 deleted_byte_count:3138791 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:214 size:1074486992 collection:"collection1" file_count:3217 delete_count:10 deleted_byte_count:6392871 replica_placement:100 version:3 modified_at_second:1615586383 + volume id:216 size:1080073496 collection:"collection1" file_count:3316 delete_count:4 deleted_byte_count:179819 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:222 size:1106623104 collection:"collection1" file_count:3273 delete_count:11 deleted_byte_count:2114627 replica_placement:100 version:3 modified_at_second:1615586243 + volume id:223 size:1075233064 collection:"collection1" file_count:2966 delete_count:9 deleted_byte_count:744001 replica_placement:100 version:3 modified_at_second:1615586244 + volume id:227 size:1106699896 collection:"collection1" file_count:2827 delete_count:20 deleted_byte_count:5496790 replica_placement:100 version:3 modified_at_second:1615609989 + volume id:229 size:1109855312 collection:"collection1" file_count:2857 delete_count:22 deleted_byte_count:2839883 replica_placement:100 version:3 modified_at_second:1615609988 + volume id:230 size:1080722984 collection:"collection1" file_count:2898 delete_count:15 deleted_byte_count:3929261 replica_placement:100 version:3 modified_at_second:1615610537 + volume id:231 size:1112917696 collection:"collection1" file_count:3151 delete_count:20 deleted_byte_count:2989828 replica_placement:100 version:3 modified_at_second:1615611350 + volume id:233 size:1080526464 collection:"collection1" file_count:3136 delete_count:61 deleted_byte_count:17991717 replica_placement:100 version:3 modified_at_second:1615611352 + volume id:234 size:1073835280 collection:"collection1" file_count:2965 delete_count:41 deleted_byte_count:4960354 replica_placement:100 version:3 modified_at_second:1615611351 + volume id:235 size:1075586104 collection:"collection1" file_count:2767 delete_count:33 deleted_byte_count:3216540 replica_placement:100 version:3 modified_at_second:1615611354 + volume id:237 size:375722792 collection:"collection1" file_count:736 delete_count:16 deleted_byte_count:4464870 replica_placement:100 version:3 modified_at_second:1615631727 + volume id:239 size:426569024 collection:"collection1" file_count:693 delete_count:19 deleted_byte_count:13020783 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630838 + volume id:241 size:380217424 collection:"collection1" file_count:633 delete_count:6 deleted_byte_count:1715768 replica_placement:100 version:3 modified_at_second:1615632006 + volume id:244 size:1080295352 collection:"collection2" file_count:10812 delete_count:1 deleted_byte_count:795 replica_placement:100 version:3 modified_at_second:1614852171 + volume id:245 size:1074597056 collection:"collection2" 
file_count:10371 delete_count:3 deleted_byte_count:209701 replica_placement:100 version:3 modified_at_second:1614852093 + volume id:246 size:1075998648 collection:"collection2" file_count:10365 delete_count:1 deleted_byte_count:13112 replica_placement:100 version:3 modified_at_second:1614852105 + volume id:248 size:1084301184 collection:"collection2" file_count:11217 delete_count:4 deleted_byte_count:746488 replica_placement:100 version:3 modified_at_second:1614856285 + volume id:249 size:1074819136 collection:"collection2" file_count:10763 delete_count:2 deleted_byte_count:271699 replica_placement:100 version:3 modified_at_second:1614856230 + volume id:251 size:1075684488 collection:"collection2" file_count:10847 replica_placement:100 version:3 modified_at_second:1614856270 + volume id:252 size:1075065208 collection:"collection2" file_count:14622 delete_count:2 deleted_byte_count:5228 replica_placement:100 version:3 modified_at_second:1614861196 + volume id:253 size:1087328816 collection:"collection2" file_count:14920 delete_count:3 deleted_byte_count:522994 replica_placement:100 version:3 modified_at_second:1614861255 + volume id:255 size:1079581640 collection:"collection2" file_count:14877 delete_count:3 deleted_byte_count:101223 replica_placement:100 version:3 modified_at_second:1614861233 + volume id:256 size:1074283592 collection:"collection2" file_count:14157 delete_count:1 deleted_byte_count:18156 replica_placement:100 version:3 modified_at_second:1614861100 + volume id:258 size:1075527216 collection:"collection2" file_count:18421 delete_count:4 deleted_byte_count:267833 replica_placement:100 version:3 modified_at_second:1614866420 + volume id:259 size:1075507776 collection:"collection2" file_count:18079 delete_count:2 deleted_byte_count:71992 replica_placement:100 version:3 modified_at_second:1614866381 + volume id:264 size:1081624192 collection:"collection2" file_count:21151 replica_placement:100 version:3 modified_at_second:1614871629 + volume id:265 size:1076401104 collection:"collection2" file_count:19932 delete_count:2 deleted_byte_count:160823 replica_placement:100 version:3 modified_at_second:1615629130 + volume id:266 size:1075617464 collection:"collection2" file_count:20075 delete_count:1 deleted_byte_count:1039 replica_placement:100 version:3 modified_at_second:1614871526 + volume id:267 size:1075699544 collection:"collection2" file_count:21039 delete_count:3 deleted_byte_count:59956 replica_placement:100 version:3 modified_at_second:1614877294 + volume id:268 size:1074490592 collection:"collection2" file_count:21698 delete_count:1 deleted_byte_count:33968 replica_placement:100 version:3 modified_at_second:1614877434 + volume id:269 size:1077552872 collection:"collection2" file_count:21875 delete_count:4 deleted_byte_count:347272 replica_placement:100 version:3 modified_at_second:1614877481 + volume id:270 size:1076876568 collection:"collection2" file_count:22057 delete_count:1 deleted_byte_count:43916 replica_placement:100 version:3 modified_at_second:1614877469 + volume id:275 size:1078349024 collection:"collection2" file_count:20808 delete_count:1 deleted_byte_count:1118 replica_placement:100 version:3 modified_at_second:1614884147 + volume id:277 size:1074956288 collection:"collection2" file_count:19260 delete_count:2 deleted_byte_count:172356 replica_placement:100 version:3 modified_at_second:1614889988 + volume id:278 size:1078798640 collection:"collection2" file_count:20597 delete_count:5 deleted_byte_count:400060 replica_placement:100 version:3 
modified_at_second:1614890292 + volume id:279 size:1077325040 collection:"collection2" file_count:19671 delete_count:6 deleted_byte_count:379116 replica_placement:100 version:3 modified_at_second:1614890229 + volume id:280 size:1077432216 collection:"collection2" file_count:20286 delete_count:1 deleted_byte_count:879 replica_placement:100 version:3 modified_at_second:1614890262 + volume id:281 size:1077581096 collection:"collection2" file_count:20206 delete_count:3 deleted_byte_count:143964 replica_placement:100 version:3 modified_at_second:1614890237 + volume id:284 size:1074533384 collection:"collection2" file_count:22196 delete_count:4 deleted_byte_count:154683 replica_placement:100 version:3 modified_at_second:1614897231 + volume id:285 size:1082128688 collection:"collection2" file_count:21804 delete_count:1 deleted_byte_count:1064 replica_placement:100 version:3 modified_at_second:1614897165 + volume id:289 size:1075284256 collection:"collection2" file_count:29342 delete_count:5 deleted_byte_count:100454 replica_placement:100 version:3 modified_at_second:1614904977 + volume id:290 size:1074723792 collection:"collection2" file_count:28340 delete_count:4 deleted_byte_count:199064 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:291 size:1076256768 collection:"collection2" file_count:28301 delete_count:5 deleted_byte_count:116027 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:293 size:1075409792 collection:"collection2" file_count:26063 delete_count:4 deleted_byte_count:183834 replica_placement:100 version:3 modified_at_second:1614912235 + volume id:294 size:1075444048 collection:"collection2" file_count:26076 delete_count:4 deleted_byte_count:194914 replica_placement:100 version:3 modified_at_second:1614912220 + volume id:296 size:1077824032 collection:"collection2" file_count:26741 delete_count:4 deleted_byte_count:199906 replica_placement:100 version:3 modified_at_second:1614912301 + volume id:297 size:1080229136 collection:"collection2" file_count:23409 delete_count:5 deleted_byte_count:46268 replica_placement:100 version:3 modified_at_second:1614918481 + volume id:298 size:1075410136 collection:"collection2" file_count:23222 delete_count:2 deleted_byte_count:46110 replica_placement:100 version:3 modified_at_second:1614918474 + volume id:299 size:1075147936 collection:"collection2" file_count:22927 delete_count:4 deleted_byte_count:345569 replica_placement:100 version:3 modified_at_second:1614918455 + volume id:300 size:1076212392 collection:"collection2" file_count:22892 delete_count:2 deleted_byte_count:61320 replica_placement:100 version:3 modified_at_second:1614918464 + volume id:301 size:1074655600 collection:"collection2" file_count:22543 delete_count:6 deleted_byte_count:136968 replica_placement:100 version:3 modified_at_second:1614918378 + volume id:303 size:1075944480 collection:"collection2" file_count:22541 delete_count:2 deleted_byte_count:13617 replica_placement:100 version:3 modified_at_second:1614925431 + volume id:306 size:1074764016 collection:"collection2" file_count:22939 replica_placement:100 version:3 modified_at_second:1614925462 + volume id:307 size:1076568000 collection:"collection2" file_count:23377 delete_count:2 deleted_byte_count:25453 replica_placement:100 version:3 modified_at_second:1614931448 + volume id:308 size:1074022392 collection:"collection2" file_count:23086 delete_count:2 deleted_byte_count:2127 replica_placement:100 version:3 modified_at_second:1614931401 + volume id:309 size:1075178664 
collection:"collection2" file_count:22692 delete_count:3 deleted_byte_count:171124 replica_placement:100 version:3 modified_at_second:1614931409 + volume id:310 size:1074761528 collection:"collection2" file_count:21441 delete_count:3 deleted_byte_count:13934 replica_placement:100 version:3 modified_at_second:1614931077 + volume id:314 size:1074670840 collection:"collection2" file_count:20964 delete_count:4 deleted_byte_count:304291 replica_placement:100 version:3 modified_at_second:1614937441 + volume id:315 size:1084153544 collection:"collection2" file_count:23638 delete_count:2 deleted_byte_count:53956 replica_placement:100 version:3 modified_at_second:1614937885 + volume id:317 size:1076215096 collection:"collection2" file_count:23572 delete_count:2 deleted_byte_count:1441356 replica_placement:100 version:3 modified_at_second:1614943965 + volume id:318 size:1075965168 collection:"collection2" file_count:22459 delete_count:2 deleted_byte_count:37778 replica_placement:100 version:3 modified_at_second:1614943862 + volume id:319 size:1073952880 collection:"collection2" file_count:22286 delete_count:2 deleted_byte_count:43421 replica_placement:100 version:3 modified_at_second:1614943810 + volume id:320 size:1082437792 collection:"collection2" file_count:21544 delete_count:3 deleted_byte_count:16712 replica_placement:100 version:3 modified_at_second:1614943599 + volume id:321 size:1081477904 collection:"collection2" file_count:23531 delete_count:5 deleted_byte_count:262564 replica_placement:100 version:3 modified_at_second:1614943982 + volume id:324 size:1075606680 collection:"collection2" file_count:20799 delete_count:1 deleted_byte_count:251210 replica_placement:100 version:3 modified_at_second:1614950310 + volume id:325 size:1080701144 collection:"collection2" file_count:21735 replica_placement:100 version:3 modified_at_second:1614950525 + volume id:330 size:1080825832 collection:"collection2" file_count:22464 delete_count:2 deleted_byte_count:15771 replica_placement:100 version:3 modified_at_second:1614956477 + volume id:332 size:1075569928 collection:"collection2" file_count:22097 delete_count:3 deleted_byte_count:98273 replica_placement:100 version:3 modified_at_second:1614962869 + volume id:334 size:1075607880 collection:"collection2" file_count:22546 delete_count:6 deleted_byte_count:101538 replica_placement:100 version:3 modified_at_second:1614962978 + volume id:336 size:1087853056 collection:"collection2" file_count:22801 delete_count:2 deleted_byte_count:26394 replica_placement:100 version:3 modified_at_second:1614963005 + volume id:337 size:1075646784 collection:"collection2" file_count:21934 delete_count:1 deleted_byte_count:3397 replica_placement:100 version:3 modified_at_second:1614969937 + volume id:338 size:1076118304 collection:"collection2" file_count:21680 replica_placement:100 version:3 modified_at_second:1614969850 + volume id:340 size:1079462184 collection:"collection2" file_count:22319 delete_count:4 deleted_byte_count:93620 replica_placement:100 version:3 modified_at_second:1614969978 + volume id:341 size:1074448400 collection:"collection2" file_count:21590 delete_count:5 deleted_byte_count:160085 replica_placement:100 version:3 modified_at_second:1614969858 + volume id:342 size:1080186424 collection:"collection2" file_count:22405 delete_count:4 deleted_byte_count:64819 replica_placement:100 version:3 modified_at_second:1614977521 + volume id:344 size:1075035416 collection:"collection2" file_count:21765 delete_count:1 deleted_byte_count:24623 replica_placement:100 
version:3 modified_at_second:1614977465 + volume id:345 size:1074560760 collection:"collection2" file_count:22117 delete_count:2 deleted_byte_count:373286 replica_placement:100 version:3 modified_at_second:1614977457 + volume id:346 size:1076464112 collection:"collection2" file_count:22320 delete_count:4 deleted_byte_count:798258 replica_placement:100 version:3 modified_at_second:1615631322 + volume id:348 size:1080623640 collection:"collection2" file_count:21667 delete_count:1 deleted_byte_count:2443 replica_placement:100 version:3 modified_at_second:1614984606 + volume id:350 size:1074756688 collection:"collection2" file_count:21990 delete_count:3 deleted_byte_count:233881 replica_placement:100 version:3 modified_at_second:1614984682 + volume id:351 size:1078795112 collection:"collection2" file_count:23660 delete_count:3 deleted_byte_count:102141 replica_placement:100 version:3 modified_at_second:1614984816 + volume id:352 size:1077145936 collection:"collection2" file_count:22066 delete_count:1 deleted_byte_count:1018 replica_placement:100 version:3 modified_at_second:1614992130 + volume id:353 size:1074897496 collection:"collection2" file_count:21266 delete_count:2 deleted_byte_count:3105374 replica_placement:100 version:3 modified_at_second:1614991951 + volume id:355 size:1075239728 collection:"collection2" file_count:22244 delete_count:1 deleted_byte_count:23282 replica_placement:100 version:3 modified_at_second:1614992157 + volume id:356 size:1083305048 collection:"collection2" file_count:21552 delete_count:4 deleted_byte_count:14472 replica_placement:100 version:3 modified_at_second:1614992028 + volume id:358 size:1085152368 collection:"collection2" file_count:23756 delete_count:3 deleted_byte_count:44531 replica_placement:100 version:3 modified_at_second:1614998824 + volume id:360 size:1075532456 collection:"collection2" file_count:22574 delete_count:3 deleted_byte_count:1774776 replica_placement:100 version:3 modified_at_second:1614998770 + volume id:361 size:1075362744 collection:"collection2" file_count:22272 delete_count:1 deleted_byte_count:3497 replica_placement:100 version:3 modified_at_second:1614998668 + volume id:375 size:1076140568 collection:"collection2" file_count:21880 delete_count:2 deleted_byte_count:51103 replica_placement:100 version:3 modified_at_second:1615016787 + volume id:376 size:1074845944 collection:"collection2" file_count:22908 delete_count:4 deleted_byte_count:432305 replica_placement:100 version:3 modified_at_second:1615016916 + volume id:377 size:957284144 collection:"collection2" file_count:14923 delete_count:1 deleted_byte_count:1797 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:378 size:959273936 collection:"collection2" file_count:15027 delete_count:4 deleted_byte_count:231414 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:381 size:939261032 collection:"collection2" file_count:14615 delete_count:5 deleted_byte_count:1192272 replica_placement:100 version:3 modified_at_second:1615632324 + Disk hdd total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + DataNode 192.168.1.6:8080 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + Rack rack3 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + DataCenter dc3 total size:111617646696 file_count:1762773 deleted_file:1221 deleted_bytes:398484585 + DataCenter dc4 hdd(volume:267/2000 active:267 free:1733 remote:0) + Rack DefaultRack hdd(volume:267/2000 
active:267 free:1733 remote:0) + DataNode 192.168.1.1:8080 hdd(volume:267/2000 active:267 free:1733 remote:0) + Disk hdd(volume:267/2000 active:267 free:1733 remote:0) + volume id:1 size:284693256 file_count:1558 delete_count:2 deleted_byte_count:4818 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632062 + volume id:2 size:289228560 file_count:1640 delete_count:4 deleted_byte_count:464508 replica_placement:100 version:3 compact_revision:6 modified_at_second:1615630622 + volume id:3 size:308741952 file_count:1637 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632242 + volume id:4 size:285986968 file_count:1640 delete_count:1 deleted_byte_count:145095 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632302 + volume id:5 size:293806008 file_count:1669 delete_count:2 deleted_byte_count:274334 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631342 + volume id:6 size:302411024 file_count:1604 delete_count:2 deleted_byte_count:274587 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631402 + volume id:7 size:1924728 collection:"collection4" file_count:15 replica_placement:100 version:3 modified_at_second:1609331040 + volume id:9 size:77337416 collection:"collection3" file_count:58 replica_placement:100 version:3 ttl:772 modified_at_second:1615513762 + volume id:10 size:1212784656 collection:"collection0" file_count:58 replica_placement:100 version:3 modified_at_second:1609814543 + volume id:11 size:1109224552 collection:"collection0" file_count:44 replica_placement:100 version:3 modified_at_second:1609815123 + volume id:12 size:1110923848 collection:"collection0" file_count:45 replica_placement:100 version:3 modified_at_second:1609819726 + volume id:13 size:1184910656 collection:"collection0" file_count:47 replica_placement:100 version:3 modified_at_second:1609827832 + volume id:14 size:1107475720 collection:"collection0" file_count:80 delete_count:3 deleted_byte_count:6870 replica_placement:100 version:3 modified_at_second:1612956983 + volume id:15 size:1115965160 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:4956 replica_placement:100 version:3 modified_at_second:1612957001 + volume id:16 size:1113666048 collection:"collection0" file_count:73 delete_count:5 deleted_byte_count:6318 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:17 size:1095115800 collection:"collection0" file_count:83 delete_count:3 deleted_byte_count:7099 replica_placement:100 version:3 modified_at_second:1612957000 + volume id:18 size:1096678688 collection:"collection0" file_count:88 delete_count:4 deleted_byte_count:8633 replica_placement:100 version:3 modified_at_second:1612957000 + volume id:19 size:1096923792 collection:"collection0" file_count:100 delete_count:10 deleted_byte_count:75779917 replica_placement:100 version:3 compact_revision:4 modified_at_second:1612957011 + volume id:20 size:1074760432 collection:"collection0" file_count:82 delete_count:5 deleted_byte_count:12156 replica_placement:100 version:3 compact_revision:2 modified_at_second:1612957011 + volume id:22 size:1086828368 collection:"collection0" file_count:75 delete_count:3 deleted_byte_count:5551 replica_placement:100 version:3 modified_at_second:1612957007 + volume id:23 size:1076380280 collection:"collection0" file_count:68 delete_count:2 deleted_byte_count:2910 replica_placement:100 version:3 modified_at_second:1612957011 + volume id:24 size:1074139808 
collection:"collection0" file_count:90 delete_count:1 deleted_byte_count:1977 replica_placement:100 version:3 modified_at_second:1612957011 + volume id:25 size:690757544 collection:"collection0" file_count:38 delete_count:1 deleted_byte_count:1944 replica_placement:100 version:3 modified_at_second:1612956995 + volume id:26 size:532657632 collection:"collection0" file_count:100 delete_count:4 deleted_byte_count:9081 replica_placement:100 version:3 modified_at_second:1614170023 + volume id:34 size:1077111136 collection:"collection1" file_count:9781 delete_count:110 deleted_byte_count:20894827 replica_placement:100 version:3 modified_at_second:1615619366 + volume id:35 size:1075241656 collection:"collection1" file_count:10523 delete_count:96 deleted_byte_count:46618989 replica_placement:100 version:3 modified_at_second:1615618790 + volume id:36 size:1075118360 collection:"collection1" file_count:10342 delete_count:116 deleted_byte_count:25493106 replica_placement:100 version:3 modified_at_second:1615606148 + volume id:37 size:1075895584 collection:"collection1" file_count:12013 delete_count:98 deleted_byte_count:50747932 replica_placement:100 version:3 modified_at_second:1615594777 + volume id:39 size:1076606536 collection:"collection1" file_count:12612 delete_count:78 deleted_byte_count:17462730 replica_placement:100 version:3 modified_at_second:1615611959 + volume id:40 size:1075358552 collection:"collection1" file_count:12597 delete_count:62 deleted_byte_count:11657901 replica_placement:100 version:3 modified_at_second:1615612994 + volume id:41 size:1076283528 collection:"collection1" file_count:12088 delete_count:84 deleted_byte_count:19319268 replica_placement:100 version:3 modified_at_second:1615596736 + volume id:42 size:1093948352 collection:"collection1" file_count:7889 delete_count:47 deleted_byte_count:5697275 replica_placement:100 version:3 modified_at_second:1615548908 + volume id:43 size:1116445864 collection:"collection1" file_count:7358 delete_count:54 deleted_byte_count:9534379 replica_placement:100 version:3 modified_at_second:1615566170 + volume id:44 size:1077582560 collection:"collection1" file_count:7295 delete_count:50 deleted_byte_count:12618414 replica_placement:100 version:3 modified_at_second:1615566170 + volume id:45 size:1075254640 collection:"collection1" file_count:10772 delete_count:76 deleted_byte_count:22426345 replica_placement:100 version:3 modified_at_second:1615573499 + volume id:46 size:1075286056 collection:"collection1" file_count:9947 delete_count:309 deleted_byte_count:105601163 replica_placement:100 version:3 modified_at_second:1615569826 + volume id:48 size:1076778720 collection:"collection1" file_count:9850 delete_count:77 deleted_byte_count:16641907 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630690 + volume id:50 size:1076688224 collection:"collection1" file_count:7921 delete_count:26 deleted_byte_count:5162032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615610879 + volume id:52 size:1083529704 collection:"collection1" file_count:10128 delete_count:32 deleted_byte_count:10608391 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615599195 + volume id:53 size:1063089216 collection:"collection1" file_count:9832 delete_count:31 deleted_byte_count:9273066 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156 + volume id:55 size:1012890016 collection:"collection1" file_count:8651 delete_count:27 deleted_byte_count:9418841 replica_placement:100 
version:3 compact_revision:2 modified_at_second:1615631452 + volume id:57 size:839849792 collection:"collection1" file_count:7514 delete_count:24 deleted_byte_count:6228543 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631774 + volume id:58 size:908064200 collection:"collection1" file_count:8128 delete_count:21 deleted_byte_count:6113731 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632342 + volume id:59 size:988302272 collection:"collection1" file_count:8098 delete_count:20 deleted_byte_count:3947615 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632238 + volume id:60 size:1010702480 collection:"collection1" file_count:8969 delete_count:79 deleted_byte_count:24782814 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632439 + volume id:61 size:975604488 collection:"collection1" file_count:8683 delete_count:20 deleted_byte_count:10276072 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631176 + volume id:62 size:873845936 collection:"collection1" file_count:7897 delete_count:23 deleted_byte_count:10920170 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631133 + volume id:64 size:965638488 collection:"collection1" file_count:8218 delete_count:27 deleted_byte_count:6922489 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631031 + volume id:65 size:823283552 collection:"collection1" file_count:7834 delete_count:29 deleted_byte_count:5950610 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632306 + volume id:66 size:821343440 collection:"collection1" file_count:7383 delete_count:29 deleted_byte_count:12010343 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631968 + volume id:67 size:878713872 collection:"collection1" file_count:7299 delete_count:117 deleted_byte_count:24857326 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156 + volume id:68 size:898630584 collection:"collection1" file_count:6934 delete_count:95 deleted_byte_count:27460707 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284 + volume id:70 size:886695472 collection:"collection1" file_count:7769 delete_count:164 deleted_byte_count:45162513 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632398 + volume id:71 size:907608392 collection:"collection1" file_count:7658 delete_count:122 deleted_byte_count:27622941 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307 + volume id:72 size:903990720 collection:"collection1" file_count:6996 delete_count:240 deleted_byte_count:74147727 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630982 + volume id:73 size:929047664 collection:"collection1" file_count:7038 delete_count:227 deleted_byte_count:65336664 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630707 + volume id:74 size:957046128 collection:"collection1" file_count:6981 delete_count:259 deleted_byte_count:73080838 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631460 + volume id:75 size:908044992 collection:"collection1" file_count:6911 delete_count:268 deleted_byte_count:73934373 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632430 + volume id:76 size:985296744 collection:"collection1" file_count:6566 delete_count:61 deleted_byte_count:44464430 replica_placement:100 version:3 
compact_revision:2 modified_at_second:1615632284 + volume id:77 size:929398296 collection:"collection1" file_count:7427 delete_count:238 deleted_byte_count:59581579 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632013 + volume id:78 size:1075671512 collection:"collection1" file_count:7540 delete_count:258 deleted_byte_count:71726846 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615582829 + volume id:79 size:948225472 collection:"collection1" file_count:6997 delete_count:227 deleted_byte_count:60625763 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631326 + volume id:82 size:1041661800 collection:"collection1" file_count:7043 delete_count:207 deleted_byte_count:52275724 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632430 + volume id:83 size:936195856 collection:"collection1" file_count:7593 delete_count:13 deleted_byte_count:4633917 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632029 + volume id:85 size:1023867520 collection:"collection1" file_count:7787 delete_count:240 deleted_byte_count:82091742 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631723 + volume id:86 size:1009437488 collection:"collection1" file_count:8474 delete_count:236 deleted_byte_count:64543674 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812 + volume id:87 size:922276640 collection:"collection1" file_count:12902 delete_count:13 deleted_byte_count:3412959 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632438 + volume id:89 size:1044401976 collection:"collection1" file_count:14943 delete_count:243 deleted_byte_count:58543159 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632208 + volume id:90 size:891145784 collection:"collection1" file_count:14608 delete_count:10 deleted_byte_count:2564369 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615629390 + volume id:91 size:936572832 collection:"collection1" file_count:14686 delete_count:11 deleted_byte_count:4717727 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631851 + volume id:92 size:992440712 collection:"collection1" file_count:7061 delete_count:195 deleted_byte_count:60649573 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630566 + volume id:93 size:1079603768 collection:"collection1" file_count:7878 delete_count:270 deleted_byte_count:74150048 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615556015 + volume id:94 size:1030685824 collection:"collection1" file_count:7660 delete_count:207 deleted_byte_count:70150733 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631616 + volume id:95 size:990879168 collection:"collection1" file_count:6620 delete_count:206 deleted_byte_count:60363604 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631866 + volume id:96 size:989296136 collection:"collection1" file_count:7544 delete_count:229 deleted_byte_count:59931853 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630778 + volume id:97 size:1053112992 collection:"collection1" file_count:6789 delete_count:50 deleted_byte_count:38894001 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631194 + volume id:99 size:1071718504 collection:"collection1" file_count:7470 delete_count:8 deleted_byte_count:9624950 replica_placement:100 version:3 
compact_revision:2 modified_at_second:1615631175 + volume id:100 size:1083617440 collection:"collection1" file_count:7018 delete_count:187 deleted_byte_count:61304236 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615505917 + volume id:101 size:1077109520 collection:"collection1" file_count:7706 delete_count:226 deleted_byte_count:77864841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630994 + volume id:102 size:1074359920 collection:"collection1" file_count:7338 delete_count:7 deleted_byte_count:6499151 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615626683 + volume id:103 size:1075863904 collection:"collection1" file_count:7184 delete_count:186 deleted_byte_count:58872238 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615628417 + volume id:104 size:1076383768 collection:"collection1" file_count:7663 delete_count:184 deleted_byte_count:100578087 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615602661 + volume id:105 size:1073996824 collection:"collection1" file_count:6873 delete_count:19 deleted_byte_count:14271533 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615499756 + volume id:108 size:1074648024 collection:"collection1" file_count:7472 delete_count:194 deleted_byte_count:70864699 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593232 + volume id:109 size:1075254560 collection:"collection1" file_count:7556 delete_count:263 deleted_byte_count:55155265 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615502487 + volume id:110 size:1076575744 collection:"collection1" file_count:6996 delete_count:163 deleted_byte_count:52954032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615590786 + volume id:111 size:1073826232 collection:"collection1" file_count:7355 delete_count:155 deleted_byte_count:50083578 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593233 + volume id:114 size:1074762784 collection:"collection1" file_count:8802 delete_count:156 deleted_byte_count:38470055 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615591826 + volume id:115 size:1076192240 collection:"collection1" file_count:7690 delete_count:154 deleted_byte_count:32267193 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615285295 + volume id:116 size:1074489504 collection:"collection1" file_count:9981 delete_count:174 deleted_byte_count:53998777 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615611567 + volume id:117 size:1073917192 collection:"collection1" file_count:9520 delete_count:114 deleted_byte_count:21835126 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573714 + volume id:118 size:1074064400 collection:"collection1" file_count:8738 delete_count:15 deleted_byte_count:3460697 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615516265 + volume id:119 size:1075940104 collection:"collection1" file_count:9003 delete_count:12 deleted_byte_count:9128155 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573880 + volume id:120 size:1076115928 collection:"collection1" file_count:9639 delete_count:118 deleted_byte_count:33357871 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615482567 + volume id:121 size:1078803248 collection:"collection1" file_count:10113 delete_count:441 deleted_byte_count:94128627 
replica_placement:100 version:3 modified_at_second:1615506629 + volume id:122 size:1076235312 collection:"collection1" file_count:9106 delete_count:252 deleted_byte_count:93041272 replica_placement:100 version:3 modified_at_second:1615585913 + volume id:123 size:1080491112 collection:"collection1" file_count:10623 delete_count:302 deleted_byte_count:83956998 replica_placement:100 version:3 modified_at_second:1615585916 + volume id:124 size:1074519360 collection:"collection1" file_count:9457 delete_count:286 deleted_byte_count:74752459 replica_placement:100 version:3 modified_at_second:1615585916 + volume id:125 size:1088687040 collection:"collection1" file_count:9518 delete_count:281 deleted_byte_count:76037905 replica_placement:100 version:3 modified_at_second:1615585913 + volume id:126 size:1073867464 collection:"collection1" file_count:9320 delete_count:278 deleted_byte_count:94547424 replica_placement:100 version:3 modified_at_second:1615585912 + volume id:127 size:1074907336 collection:"collection1" file_count:9900 delete_count:133 deleted_byte_count:48570820 replica_placement:100 version:3 modified_at_second:1615612991 + volume id:129 size:1074704272 collection:"collection1" file_count:10012 delete_count:150 deleted_byte_count:64491721 replica_placement:100 version:3 modified_at_second:1615627566 + volume id:130 size:1075000632 collection:"collection1" file_count:10633 delete_count:161 deleted_byte_count:34768201 replica_placement:100 version:3 modified_at_second:1615582330 + volume id:131 size:1075279584 collection:"collection1" file_count:10075 delete_count:135 deleted_byte_count:29795712 replica_placement:100 version:3 modified_at_second:1615523898 + volume id:132 size:1088539496 collection:"collection1" file_count:11051 delete_count:71 deleted_byte_count:17178322 replica_placement:100 version:3 modified_at_second:1615619584 + volume id:133 size:1075952760 collection:"collection1" file_count:9538 delete_count:74 deleted_byte_count:19558008 replica_placement:100 version:3 modified_at_second:1615584780 + volume id:134 size:1074367304 collection:"collection1" file_count:10662 delete_count:69 deleted_byte_count:25530139 replica_placement:100 version:3 modified_at_second:1615555876 + volume id:135 size:1073906720 collection:"collection1" file_count:10446 delete_count:71 deleted_byte_count:28599975 replica_placement:100 version:3 modified_at_second:1615569816 + volume id:137 size:1074309264 collection:"collection1" file_count:9633 delete_count:50 deleted_byte_count:27487972 replica_placement:100 version:3 modified_at_second:1615572231 + volume id:139 size:1074163936 collection:"collection1" file_count:9314 delete_count:43 deleted_byte_count:10631353 replica_placement:100 version:3 modified_at_second:1615571946 + volume id:141 size:1074619488 collection:"collection1" file_count:9840 delete_count:45 deleted_byte_count:40890181 replica_placement:100 version:3 modified_at_second:1615630994 + volume id:142 size:1075732992 collection:"collection1" file_count:9009 delete_count:48 deleted_byte_count:9912854 replica_placement:100 version:3 modified_at_second:1615598914 + volume id:143 size:1075011280 collection:"collection1" file_count:9608 delete_count:51 deleted_byte_count:37282460 replica_placement:100 version:3 modified_at_second:1615488586 + volume id:145 size:1074394928 collection:"collection1" file_count:9255 delete_count:34 deleted_byte_count:38011392 replica_placement:100 version:3 modified_at_second:1615591825 + volume id:146 size:1076337520 collection:"collection1" file_count:10492 
delete_count:50 deleted_byte_count:17071505 replica_placement:100 version:3 modified_at_second:1615632005 + volume id:147 size:1077130544 collection:"collection1" file_count:10451 delete_count:27 deleted_byte_count:8290907 replica_placement:100 version:3 modified_at_second:1615604117 + volume id:148 size:1076066568 collection:"collection1" file_count:9547 delete_count:33 deleted_byte_count:7034089 replica_placement:100 version:3 modified_at_second:1615586393 + volume id:149 size:1074989016 collection:"collection1" file_count:8352 delete_count:35 deleted_byte_count:7179742 replica_placement:100 version:3 modified_at_second:1615494496 + volume id:150 size:1076290408 collection:"collection1" file_count:9328 delete_count:33 deleted_byte_count:43417791 replica_placement:100 version:3 modified_at_second:1615611569 + volume id:151 size:1098659752 collection:"collection1" file_count:10805 delete_count:27 deleted_byte_count:7209106 replica_placement:100 version:3 modified_at_second:1615586390 + volume id:152 size:1075941376 collection:"collection1" file_count:9951 delete_count:36 deleted_byte_count:25348335 replica_placement:100 version:3 modified_at_second:1615606614 + volume id:153 size:1078539784 collection:"collection1" file_count:10924 delete_count:34 deleted_byte_count:12603081 replica_placement:100 version:3 modified_at_second:1615606614 + volume id:154 size:1081244752 collection:"collection1" file_count:11002 delete_count:31 deleted_byte_count:8467560 replica_placement:100 version:3 modified_at_second:1615478471 + volume id:156 size:1074975832 collection:"collection1" file_count:9535 delete_count:40 deleted_byte_count:11426621 replica_placement:100 version:3 modified_at_second:1615628342 + volume id:157 size:1076758536 collection:"collection1" file_count:10012 delete_count:19 deleted_byte_count:11688737 replica_placement:100 version:3 modified_at_second:1615597782 + volume id:158 size:1087251976 collection:"collection1" file_count:9972 delete_count:20 deleted_byte_count:10328429 replica_placement:100 version:3 modified_at_second:1615588879 + volume id:159 size:1074132336 collection:"collection1" file_count:9382 delete_count:27 deleted_byte_count:11474574 replica_placement:100 version:3 modified_at_second:1615593593 + volume id:160 size:1075680976 collection:"collection1" file_count:9772 delete_count:22 deleted_byte_count:4981968 replica_placement:100 version:3 modified_at_second:1615597782 + volume id:161 size:1077397136 collection:"collection1" file_count:9988 delete_count:28 deleted_byte_count:12509164 replica_placement:100 version:3 modified_at_second:1615631452 + volume id:162 size:1074286880 collection:"collection1" file_count:11220 delete_count:17 deleted_byte_count:1815547 replica_placement:100 version:3 modified_at_second:1615478127 + volume id:163 size:1074457224 collection:"collection1" file_count:12524 delete_count:27 deleted_byte_count:6359619 replica_placement:100 version:3 modified_at_second:1615579313 + volume id:164 size:1074261256 collection:"collection1" file_count:11922 delete_count:25 deleted_byte_count:2923288 replica_placement:100 version:3 modified_at_second:1615620085 + volume id:165 size:1073891080 collection:"collection1" file_count:9152 delete_count:12 deleted_byte_count:19164659 replica_placement:100 version:3 modified_at_second:1615471910 + volume id:166 size:1075637536 collection:"collection1" file_count:14211 delete_count:24 deleted_byte_count:20415490 replica_placement:100 version:3 modified_at_second:1615491021 + volume id:167 size:1073958280 
collection:"collection1" file_count:25231 delete_count:48 deleted_byte_count:26022344 replica_placement:100 version:3 modified_at_second:1615620014 + volume id:168 size:1074718864 collection:"collection1" file_count:25702 delete_count:40 deleted_byte_count:4024775 replica_placement:100 version:3 modified_at_second:1615585664 + volume id:169 size:1073863032 collection:"collection1" file_count:25248 delete_count:43 deleted_byte_count:3013817 replica_placement:100 version:3 modified_at_second:1615569832 + volume id:170 size:1075747088 collection:"collection1" file_count:24596 delete_count:41 deleted_byte_count:3494711 replica_placement:100 version:3 modified_at_second:1615579207 + volume id:171 size:1081881400 collection:"collection1" file_count:24215 delete_count:36 deleted_byte_count:3191335 replica_placement:100 version:3 modified_at_second:1615596486 + volume id:172 size:1074787304 collection:"collection1" file_count:31236 delete_count:50 deleted_byte_count:3316482 replica_placement:100 version:3 modified_at_second:1615612385 + volume id:174 size:1073824160 collection:"collection1" file_count:30689 delete_count:36 deleted_byte_count:2160116 replica_placement:100 version:3 modified_at_second:1615598914 + volume id:175 size:1077742472 collection:"collection1" file_count:32353 delete_count:33 deleted_byte_count:1861403 replica_placement:100 version:3 modified_at_second:1615559516 + volume id:176 size:1073854800 collection:"collection1" file_count:30582 delete_count:34 deleted_byte_count:7701976 replica_placement:100 version:3 modified_at_second:1615626169 + volume id:178 size:1087560112 collection:"collection1" file_count:23482 delete_count:22 deleted_byte_count:18810492 replica_placement:100 version:3 modified_at_second:1615541369 + volume id:179 size:1074313920 collection:"collection1" file_count:21829 delete_count:24 deleted_byte_count:45574435 replica_placement:100 version:3 modified_at_second:1615580308 + volume id:180 size:1078438448 collection:"collection1" file_count:23614 delete_count:12 deleted_byte_count:4496474 replica_placement:100 version:3 modified_at_second:1614773243 + volume id:181 size:1074571672 collection:"collection1" file_count:22898 delete_count:19 deleted_byte_count:6628413 replica_placement:100 version:3 modified_at_second:1614745117 + volume id:183 size:1076361616 collection:"collection1" file_count:31293 delete_count:16 deleted_byte_count:468841 replica_placement:100 version:3 modified_at_second:1615572985 + volume id:184 size:1074594216 collection:"collection1" file_count:31368 delete_count:22 deleted_byte_count:857453 replica_placement:100 version:3 modified_at_second:1615586578 + volume id:185 size:1074099592 collection:"collection1" file_count:30612 delete_count:17 deleted_byte_count:2610847 replica_placement:100 version:3 modified_at_second:1615506835 + volume id:186 size:1074220664 collection:"collection1" file_count:31450 delete_count:15 deleted_byte_count:391855 replica_placement:100 version:3 modified_at_second:1615614934 + volume id:187 size:1074396112 collection:"collection1" file_count:31853 delete_count:17 deleted_byte_count:454283 replica_placement:100 version:3 modified_at_second:1615590491 + volume id:188 size:1074732632 collection:"collection1" file_count:31867 delete_count:19 deleted_byte_count:393743 replica_placement:100 version:3 modified_at_second:1615487645 + volume id:189 size:1074847824 collection:"collection1" file_count:31450 delete_count:16 deleted_byte_count:1040552 replica_placement:100 version:3 modified_at_second:1615335661 + volume 
id:190 size:1074008968 collection:"collection1" file_count:31987 delete_count:11 deleted_byte_count:685125 replica_placement:100 version:3 modified_at_second:1615447162 + volume id:191 size:1075492960 collection:"collection1" file_count:31301 delete_count:19 deleted_byte_count:708401 replica_placement:100 version:3 modified_at_second:1615357457 + volume id:192 size:1075857384 collection:"collection1" file_count:31490 delete_count:25 deleted_byte_count:720617 replica_placement:100 version:3 modified_at_second:1615621632 + volume id:193 size:1076616760 collection:"collection1" file_count:31907 delete_count:16 deleted_byte_count:464900 replica_placement:100 version:3 modified_at_second:1615507877 + volume id:194 size:1073985792 collection:"collection1" file_count:31434 delete_count:18 deleted_byte_count:391432 replica_placement:100 version:3 modified_at_second:1615559502 + volume id:195 size:1074158304 collection:"collection1" file_count:31453 delete_count:15 deleted_byte_count:718266 replica_placement:100 version:3 modified_at_second:1615559331 + volume id:196 size:1074594640 collection:"collection1" file_count:31665 delete_count:18 deleted_byte_count:3468922 replica_placement:100 version:3 modified_at_second:1615501690 + volume id:198 size:1075104624 collection:"collection1" file_count:16577 delete_count:18 deleted_byte_count:6583181 replica_placement:100 version:3 modified_at_second:1615623371 + volume id:199 size:1078117688 collection:"collection1" file_count:16497 delete_count:14 deleted_byte_count:1514286 replica_placement:100 version:3 modified_at_second:1615585987 + volume id:200 size:1075630464 collection:"collection1" file_count:16380 delete_count:18 deleted_byte_count:1103109 replica_placement:100 version:3 modified_at_second:1615485252 + volume id:201 size:1091460440 collection:"collection1" file_count:16684 delete_count:26 deleted_byte_count:5590335 replica_placement:100 version:3 modified_at_second:1615585987 + volume id:204 size:1079766904 collection:"collection1" file_count:3233 delete_count:255 deleted_byte_count:104707641 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615565702 + volume id:207 size:1081939960 collection:"collection1" file_count:3010 delete_count:4 deleted_byte_count:692350 replica_placement:100 version:3 modified_at_second:1615269061 + volume id:208 size:1077863624 collection:"collection1" file_count:3147 delete_count:6 deleted_byte_count:858726 replica_placement:100 version:3 modified_at_second:1615495515 + volume id:209 size:1074083592 collection:"collection1" file_count:3238 delete_count:4 deleted_byte_count:1494244 replica_placement:100 version:3 modified_at_second:1615419954 + volume id:210 size:1094311304 collection:"collection1" file_count:3468 delete_count:4 deleted_byte_count:466433 replica_placement:100 version:3 modified_at_second:1615495515 + volume id:211 size:1080610712 collection:"collection1" file_count:3247 delete_count:7 deleted_byte_count:1891456 replica_placement:100 version:3 modified_at_second:1615269124 + volume id:216 size:1080073496 collection:"collection1" file_count:3316 delete_count:4 deleted_byte_count:179819 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:218 size:1081263944 collection:"collection1" file_count:3433 delete_count:14 deleted_byte_count:3454237 replica_placement:100 version:3 modified_at_second:1615603637 + volume id:220 size:1081928312 collection:"collection1" file_count:3166 delete_count:13 deleted_byte_count:4127709 replica_placement:100 version:3 
modified_at_second:1615579317 + volume id:221 size:1106545536 collection:"collection1" file_count:3153 delete_count:11 deleted_byte_count:1496835 replica_placement:100 version:3 modified_at_second:1615269138 + volume id:224 size:1093691520 collection:"collection1" file_count:3463 delete_count:10 deleted_byte_count:1128328 replica_placement:100 version:3 modified_at_second:1615601870 + volume id:225 size:1080698928 collection:"collection1" file_count:3115 delete_count:7 deleted_byte_count:18170416 replica_placement:100 version:3 modified_at_second:1615434685 + volume id:226 size:1103504792 collection:"collection1" file_count:2965 delete_count:10 deleted_byte_count:2639254 replica_placement:100 version:3 modified_at_second:1615601870 + volume id:227 size:1106699864 collection:"collection1" file_count:2827 delete_count:19 deleted_byte_count:5393310 replica_placement:100 version:3 modified_at_second:1615609989 + volume id:228 size:1109784072 collection:"collection1" file_count:2504 delete_count:24 deleted_byte_count:5458950 replica_placement:100 version:3 modified_at_second:1615610489 + volume id:229 size:1109855256 collection:"collection1" file_count:2857 delete_count:22 deleted_byte_count:2839883 replica_placement:100 version:3 modified_at_second:1615609989 + volume id:231 size:1112917664 collection:"collection1" file_count:3151 delete_count:19 deleted_byte_count:2852517 replica_placement:100 version:3 modified_at_second:1615611350 + volume id:232 size:1073901520 collection:"collection1" file_count:3004 delete_count:54 deleted_byte_count:10273081 replica_placement:100 version:3 modified_at_second:1615611352 + volume id:233 size:1080526464 collection:"collection1" file_count:3136 delete_count:61 deleted_byte_count:17991717 replica_placement:100 version:3 modified_at_second:1615611354 + volume id:236 size:1089476200 collection:"collection1" file_count:3231 delete_count:53 deleted_byte_count:11625921 replica_placement:100 version:3 modified_at_second:1615611351 + volume id:238 size:354320000 collection:"collection1" file_count:701 delete_count:17 deleted_byte_count:5940420 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632030 + volume id:240 size:424791528 collection:"collection1" file_count:734 delete_count:12 deleted_byte_count:7353071 replica_placement:100 version:3 modified_at_second:1615631669 + volume id:242 size:1075383304 collection:"collection2" file_count:10470 replica_placement:100 version:3 modified_at_second:1614852115 + volume id:243 size:1088174560 collection:"collection2" file_count:11109 delete_count:1 deleted_byte_count:938 replica_placement:100 version:3 modified_at_second:1614852202 + volume id:245 size:1074597056 collection:"collection2" file_count:10371 delete_count:3 deleted_byte_count:209701 replica_placement:100 version:3 modified_at_second:1614852093 + volume id:247 size:1075859784 collection:"collection2" file_count:10443 delete_count:2 deleted_byte_count:564486 replica_placement:100 version:3 modified_at_second:1614856152 + volume id:249 size:1074819168 collection:"collection2" file_count:10763 delete_count:2 deleted_byte_count:271699 replica_placement:100 version:3 modified_at_second:1614856231 + volume id:250 size:1080572256 collection:"collection2" file_count:10220 replica_placement:100 version:3 modified_at_second:1614856129 + volume id:251 size:1075684408 collection:"collection2" file_count:10847 replica_placement:100 version:3 modified_at_second:1614856270 + volume id:254 size:1074830800 collection:"collection2" file_count:14140 
delete_count:2 deleted_byte_count:105892 replica_placement:100 version:3 modified_at_second:1614861115 + volume id:257 size:1082621664 collection:"collection2" file_count:18172 delete_count:2 deleted_byte_count:25125 replica_placement:100 version:3 modified_at_second:1614866395 + volume id:260 size:1075105664 collection:"collection2" file_count:17316 delete_count:4 deleted_byte_count:2015310 replica_placement:100 version:3 modified_at_second:1614866226 + volume id:261 size:1076628592 collection:"collection2" file_count:18355 delete_count:1 deleted_byte_count:1155 replica_placement:100 version:3 modified_at_second:1614866420 + volume id:262 size:1078492464 collection:"collection2" file_count:20390 delete_count:3 deleted_byte_count:287601 replica_placement:100 version:3 modified_at_second:1614871601 + volume id:263 size:1077167440 collection:"collection2" file_count:20227 delete_count:4 deleted_byte_count:97887 replica_placement:100 version:3 modified_at_second:1614871567 + volume id:268 size:1074490592 collection:"collection2" file_count:21698 delete_count:1 deleted_byte_count:33968 replica_placement:100 version:3 modified_at_second:1614877435 + volume id:269 size:1077552720 collection:"collection2" file_count:21875 delete_count:4 deleted_byte_count:347272 replica_placement:100 version:3 modified_at_second:1614877481 + volume id:271 size:1076992648 collection:"collection2" file_count:22640 delete_count:1 deleted_byte_count:30645 replica_placement:100 version:3 modified_at_second:1614877504 + volume id:273 size:1074873432 collection:"collection2" file_count:20511 delete_count:3 deleted_byte_count:46076 replica_placement:100 version:3 modified_at_second:1614884046 + volume id:274 size:1075994128 collection:"collection2" file_count:20997 replica_placement:100 version:3 modified_at_second:1614884113 + volume id:276 size:1076899888 collection:"collection2" file_count:20190 delete_count:1 deleted_byte_count:8798 replica_placement:100 version:3 modified_at_second:1614884003 + volume id:277 size:1074956160 collection:"collection2" file_count:19260 delete_count:2 deleted_byte_count:172356 replica_placement:100 version:3 modified_at_second:1614889988 + volume id:279 size:1077325096 collection:"collection2" file_count:19671 delete_count:6 deleted_byte_count:379116 replica_placement:100 version:3 modified_at_second:1614890230 + volume id:282 size:1075232240 collection:"collection2" file_count:22659 delete_count:4 deleted_byte_count:67915 replica_placement:100 version:3 modified_at_second:1614897304 + volume id:284 size:1074533384 collection:"collection2" file_count:22196 delete_count:4 deleted_byte_count:154683 replica_placement:100 version:3 modified_at_second:1614897231 + volume id:285 size:1082128576 collection:"collection2" file_count:21804 delete_count:1 deleted_byte_count:1064 replica_placement:100 version:3 modified_at_second:1614897165 + volume id:286 size:1077464824 collection:"collection2" file_count:23905 delete_count:6 deleted_byte_count:630577 replica_placement:100 version:3 modified_at_second:1614897401 + volume id:287 size:1074590528 collection:"collection2" file_count:28163 delete_count:5 deleted_byte_count:35727 replica_placement:100 version:3 modified_at_second:1614904874 + volume id:288 size:1075406800 collection:"collection2" file_count:27243 delete_count:2 deleted_byte_count:51519 replica_placement:100 version:3 modified_at_second:1614904738 + volume id:292 size:1092010744 collection:"collection2" file_count:26781 delete_count:5 deleted_byte_count:508910 replica_placement:100 
version:3 modified_at_second:1614912327 + volume id:293 size:1075409776 collection:"collection2" file_count:26063 delete_count:4 deleted_byte_count:183834 replica_placement:100 version:3 modified_at_second:1614912235 + volume id:294 size:1075443992 collection:"collection2" file_count:26076 delete_count:4 deleted_byte_count:194914 replica_placement:100 version:3 modified_at_second:1614912220 + volume id:295 size:1074702376 collection:"collection2" file_count:24488 delete_count:3 deleted_byte_count:48555 replica_placement:100 version:3 modified_at_second:1614911929 + volume id:300 size:1076212424 collection:"collection2" file_count:22892 delete_count:2 deleted_byte_count:61320 replica_placement:100 version:3 modified_at_second:1614918464 + volume id:304 size:1081038888 collection:"collection2" file_count:24505 delete_count:2 deleted_byte_count:124447 replica_placement:100 version:3 modified_at_second:1614925567 + volume id:305 size:1074185552 collection:"collection2" file_count:22074 delete_count:5 deleted_byte_count:20221 replica_placement:100 version:3 modified_at_second:1614925312 + volume id:310 size:1074761520 collection:"collection2" file_count:21441 delete_count:3 deleted_byte_count:13934 replica_placement:100 version:3 modified_at_second:1614931077 + volume id:311 size:1088248208 collection:"collection2" file_count:23553 delete_count:6 deleted_byte_count:191716 replica_placement:100 version:3 modified_at_second:1614931460 + volume id:312 size:1075037808 collection:"collection2" file_count:22524 replica_placement:100 version:3 modified_at_second:1614937832 + volume id:313 size:1074876016 collection:"collection2" file_count:22404 delete_count:4 deleted_byte_count:51728 replica_placement:100 version:3 modified_at_second:1614937755 + volume id:314 size:1074670840 collection:"collection2" file_count:20964 delete_count:4 deleted_byte_count:304291 replica_placement:100 version:3 modified_at_second:1614937441 + volume id:315 size:1084153456 collection:"collection2" file_count:23638 delete_count:2 deleted_byte_count:53956 replica_placement:100 version:3 modified_at_second:1614937884 + volume id:316 size:1077720784 collection:"collection2" file_count:22605 delete_count:1 deleted_byte_count:8503 replica_placement:100 version:3 modified_at_second:1614937838 + volume id:317 size:1076215040 collection:"collection2" file_count:23572 delete_count:2 deleted_byte_count:1441356 replica_placement:100 version:3 modified_at_second:1614943965 + volume id:319 size:1073952744 collection:"collection2" file_count:22286 delete_count:2 deleted_byte_count:43421 replica_placement:100 version:3 modified_at_second:1614943810 + volume id:320 size:1082437736 collection:"collection2" file_count:21544 delete_count:3 deleted_byte_count:16712 replica_placement:100 version:3 modified_at_second:1614943591 + volume id:321 size:1081477960 collection:"collection2" file_count:23531 delete_count:5 deleted_byte_count:262564 replica_placement:100 version:3 modified_at_second:1614943982 + volume id:322 size:1078471600 collection:"collection2" file_count:21905 delete_count:3 deleted_byte_count:145002 replica_placement:100 version:3 modified_at_second:1614950574 + volume id:324 size:1075606712 collection:"collection2" file_count:20799 delete_count:1 deleted_byte_count:251210 replica_placement:100 version:3 modified_at_second:1614950310 + volume id:326 size:1076059936 collection:"collection2" file_count:22564 delete_count:2 deleted_byte_count:192886 replica_placement:100 version:3 modified_at_second:1614950619 + volume id:327 
size:1076121224 collection:"collection2" file_count:22007 delete_count:3 deleted_byte_count:60358 replica_placement:100 version:3 modified_at_second:1614956487 + volume id:328 size:1074767928 collection:"collection2" file_count:21720 delete_count:3 deleted_byte_count:56429 replica_placement:100 version:3 modified_at_second:1614956362 + volume id:329 size:1076691776 collection:"collection2" file_count:22411 delete_count:5 deleted_byte_count:214092 replica_placement:100 version:3 modified_at_second:1614956485 + volume id:331 size:1074957192 collection:"collection2" file_count:21230 delete_count:4 deleted_byte_count:62145 replica_placement:100 version:3 modified_at_second:1614956259 + volume id:333 size:1074270192 collection:"collection2" file_count:21271 delete_count:2 deleted_byte_count:168122 replica_placement:100 version:3 modified_at_second:1614962697 + volume id:335 size:1076235176 collection:"collection2" file_count:22391 delete_count:3 deleted_byte_count:8838 replica_placement:100 version:3 modified_at_second:1614962970 + volume id:336 size:1087853032 collection:"collection2" file_count:22801 delete_count:2 deleted_byte_count:26394 replica_placement:100 version:3 modified_at_second:1614963003 + volume id:338 size:1076118360 collection:"collection2" file_count:21680 replica_placement:100 version:3 modified_at_second:1614969850 + volume id:342 size:1080186296 collection:"collection2" file_count:22405 delete_count:4 deleted_byte_count:64819 replica_placement:100 version:3 modified_at_second:1614977518 + volume id:343 size:1075345184 collection:"collection2" file_count:21095 delete_count:2 deleted_byte_count:20581 replica_placement:100 version:3 modified_at_second:1614977148 + volume id:349 size:1075957824 collection:"collection2" file_count:22395 delete_count:2 deleted_byte_count:61565 replica_placement:100 version:3 modified_at_second:1614984748 + volume id:350 size:1074756688 collection:"collection2" file_count:21990 delete_count:3 deleted_byte_count:233881 replica_placement:100 version:3 modified_at_second:1614984682 + volume id:354 size:1085213992 collection:"collection2" file_count:23150 delete_count:4 deleted_byte_count:82391 replica_placement:100 version:3 modified_at_second:1614992207 + volume id:356 size:1083304992 collection:"collection2" file_count:21552 delete_count:4 deleted_byte_count:14472 replica_placement:100 version:3 modified_at_second:1614992027 + volume id:358 size:1085152312 collection:"collection2" file_count:23756 delete_count:3 deleted_byte_count:44531 replica_placement:100 version:3 modified_at_second:1614998824 + volume id:359 size:1074211240 collection:"collection2" file_count:22437 delete_count:2 deleted_byte_count:187953 replica_placement:100 version:3 modified_at_second:1614998711 + volume id:362 size:1074074120 collection:"collection2" file_count:20595 delete_count:1 deleted_byte_count:112145 replica_placement:100 version:3 modified_at_second:1615004407 + volume id:363 size:1078859496 collection:"collection2" file_count:23177 delete_count:4 deleted_byte_count:9601 replica_placement:100 version:3 modified_at_second:1615004822 + volume id:364 size:1081280816 collection:"collection2" file_count:22686 delete_count:1 deleted_byte_count:84375 replica_placement:100 version:3 modified_at_second:1615004813 + volume id:365 size:1075736632 collection:"collection2" file_count:22193 delete_count:5 deleted_byte_count:259033 replica_placement:100 version:3 modified_at_second:1615004776 + volume id:366 size:1075267272 collection:"collection2" file_count:21856 
delete_count:5 deleted_byte_count:138363 replica_placement:100 version:3 modified_at_second:1615004703 + volume id:367 size:1076403648 collection:"collection2" file_count:22995 delete_count:2 deleted_byte_count:36955 replica_placement:100 version:3 modified_at_second:1615010985 + volume id:368 size:1074822016 collection:"collection2" file_count:22252 delete_count:4 deleted_byte_count:3291946 replica_placement:100 version:3 modified_at_second:1615010878 + volume id:369 size:1091472040 collection:"collection2" file_count:23709 delete_count:4 deleted_byte_count:400876 replica_placement:100 version:3 modified_at_second:1615011019 + volume id:370 size:1076040480 collection:"collection2" file_count:22092 delete_count:2 deleted_byte_count:115388 replica_placement:100 version:3 modified_at_second:1615010877 + volume id:371 size:1078806160 collection:"collection2" file_count:22685 delete_count:2 deleted_byte_count:68905 replica_placement:100 version:3 modified_at_second:1615010994 + volume id:372 size:1076193312 collection:"collection2" file_count:22774 delete_count:1 deleted_byte_count:3495 replica_placement:100 version:3 modified_at_second:1615016911 + volume id:374 size:1085011080 collection:"collection2" file_count:23054 delete_count:2 deleted_byte_count:89034 replica_placement:100 version:3 modified_at_second:1615016917 + volume id:375 size:1076140688 collection:"collection2" file_count:21880 delete_count:2 deleted_byte_count:51103 replica_placement:100 version:3 modified_at_second:1615016787 + volume id:378 size:959273824 collection:"collection2" file_count:15031 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:379 size:1014108592 collection:"collection2" file_count:15360 delete_count:8 deleted_byte_count:2524591 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:380 size:1010760464 collection:"collection2" file_count:14920 delete_count:7 deleted_byte_count:134859 replica_placement:100 version:3 modified_at_second:1615632322 + Disk hdd total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457 + DataNode 192.168.1.1:8080 total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457 + Rack DefaultRack total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457 + DataCenter dc4 total size:274627838960 file_count:3607097 deleted_file:13594 deleted_bytes:4185524457 + DataCenter dc5 hdd(volume:299/3000 active:299 free:2701 remote:0) + Rack DefaultRack hdd(volume:299/3000 active:299 free:2701 remote:0) + DataNode 192.168.1.5:8080 hdd(volume:299/3000 active:299 free:2701 remote:0) + Disk hdd(volume:299/3000 active:299 free:2701 remote:0) + volume id:5 size:293806008 file_count:1669 delete_count:2 deleted_byte_count:274334 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631342 + volume id:11 size:1109224552 collection:"collection0" file_count:44 replica_placement:100 version:3 modified_at_second:1615629606 + volume id:18 size:1096678688 collection:"collection0" file_count:88 delete_count:4 deleted_byte_count:8633 replica_placement:100 version:3 modified_at_second:1615631673 + volume id:19 size:1096923792 collection:"collection0" file_count:100 delete_count:10 deleted_byte_count:75779917 replica_placement:100 version:3 compact_revision:4 modified_at_second:1615630117 + volume id:20 size:1074760432 collection:"collection0" file_count:82 delete_count:5 deleted_byte_count:12156 replica_placement:100 version:3 compact_revision:2 
modified_at_second:1615629340 + volume id:26 size:532657632 collection:"collection0" file_count:100 delete_count:4 deleted_byte_count:9081 replica_placement:100 version:3 modified_at_second:1614170024 + volume id:27 size:298886792 file_count:1608 replica_placement:100 version:3 modified_at_second:1615632482 + volume id:28 size:308919192 file_count:1591 delete_count:1 deleted_byte_count:125280 replica_placement:100 version:3 modified_at_second:1615631762 + volume id:29 size:281582688 file_count:1537 replica_placement:100 version:3 modified_at_second:1615629422 + volume id:30 size:289466144 file_count:1566 delete_count:1 deleted_byte_count:124972 replica_placement:100 version:3 modified_at_second:1615632422 + volume id:31 size:273363256 file_count:1498 replica_placement:100 version:3 modified_at_second:1615631642 + volume id:32 size:281343360 file_count:1497 replica_placement:100 version:3 modified_at_second:1615632362 + volume id:33 size:1130226344 collection:"collection1" file_count:7322 delete_count:172 deleted_byte_count:45199399 replica_placement:100 version:3 modified_at_second:1615618789 + volume id:34 size:1077111136 collection:"collection1" file_count:9781 delete_count:110 deleted_byte_count:20894827 replica_placement:100 version:3 modified_at_second:1615619366 + volume id:35 size:1075241744 collection:"collection1" file_count:10523 delete_count:97 deleted_byte_count:46586217 replica_placement:100 version:3 modified_at_second:1615618790 + volume id:36 size:1075118336 collection:"collection1" file_count:10341 delete_count:118 deleted_byte_count:24753278 replica_placement:100 version:3 modified_at_second:1615606148 + volume id:37 size:1075895576 collection:"collection1" file_count:12013 delete_count:98 deleted_byte_count:50747932 replica_placement:100 version:3 modified_at_second:1615594776 + volume id:38 size:1075545744 collection:"collection1" file_count:13324 delete_count:100 deleted_byte_count:25223906 replica_placement:100 version:3 modified_at_second:1615569830 + volume id:39 size:1076606536 collection:"collection1" file_count:12612 delete_count:78 deleted_byte_count:17462730 replica_placement:100 version:3 modified_at_second:1615611959 + volume id:40 size:1075358552 collection:"collection1" file_count:12597 delete_count:62 deleted_byte_count:11657901 replica_placement:100 version:3 modified_at_second:1615612994 + volume id:41 size:1076283592 collection:"collection1" file_count:12088 delete_count:84 deleted_byte_count:19311237 replica_placement:100 version:3 modified_at_second:1615596736 + volume id:42 size:1093948352 collection:"collection1" file_count:7889 delete_count:47 deleted_byte_count:5697275 replica_placement:100 version:3 modified_at_second:1615548906 + volume id:43 size:1116445864 collection:"collection1" file_count:7355 delete_count:57 deleted_byte_count:9727158 replica_placement:100 version:3 modified_at_second:1615566167 + volume id:44 size:1077582560 collection:"collection1" file_count:7295 delete_count:50 deleted_byte_count:12618414 replica_placement:100 version:3 modified_at_second:1615566170 + volume id:45 size:1075254640 collection:"collection1" file_count:10772 delete_count:76 deleted_byte_count:22426345 replica_placement:100 version:3 modified_at_second:1615573498 + volume id:46 size:1075286056 collection:"collection1" file_count:9947 delete_count:309 deleted_byte_count:105601163 replica_placement:100 version:3 modified_at_second:1615569825 + volume id:47 size:444599784 collection:"collection1" file_count:709 delete_count:19 deleted_byte_count:11913451 
replica_placement:100 version:3 modified_at_second:1615632397 + volume id:48 size:1076778664 collection:"collection1" file_count:9850 delete_count:77 deleted_byte_count:16641907 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630690 + volume id:49 size:1078775288 collection:"collection1" file_count:9631 delete_count:27 deleted_byte_count:5985628 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615575823 + volume id:50 size:1076688288 collection:"collection1" file_count:7921 delete_count:26 deleted_byte_count:5162032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615610876 + volume id:51 size:1076796120 collection:"collection1" file_count:10550 delete_count:39 deleted_byte_count:12723654 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615547786 + volume id:53 size:1063089216 collection:"collection1" file_count:9832 delete_count:31 deleted_byte_count:9273066 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156 + volume id:54 size:1045022288 collection:"collection1" file_count:9409 delete_count:29 deleted_byte_count:15102818 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630813 + volume id:55 size:1012890016 collection:"collection1" file_count:8651 delete_count:27 deleted_byte_count:9418841 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631453 + volume id:56 size:1002412240 collection:"collection1" file_count:8762 delete_count:40 deleted_byte_count:65885831 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632014 + volume id:57 size:839849792 collection:"collection1" file_count:7514 delete_count:24 deleted_byte_count:6228543 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631775 + volume id:58 size:908064192 collection:"collection1" file_count:8128 delete_count:21 deleted_byte_count:6113731 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632343 + volume id:59 size:988302272 collection:"collection1" file_count:8098 delete_count:20 deleted_byte_count:3947615 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632238 + volume id:60 size:1010702480 collection:"collection1" file_count:8969 delete_count:79 deleted_byte_count:24782814 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632439 + volume id:61 size:975604544 collection:"collection1" file_count:8683 delete_count:20 deleted_byte_count:10276072 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631176 + volume id:62 size:873845904 collection:"collection1" file_count:7897 delete_count:23 deleted_byte_count:10920170 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631133 + volume id:63 size:956941176 collection:"collection1" file_count:8271 delete_count:32 deleted_byte_count:15876189 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632036 + volume id:64 size:965638424 collection:"collection1" file_count:8218 delete_count:27 deleted_byte_count:6922489 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631032 + volume id:65 size:823283608 collection:"collection1" file_count:7834 delete_count:29 deleted_byte_count:5950610 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307 + volume id:66 size:821343440 collection:"collection1" file_count:7383 delete_count:29 deleted_byte_count:12010343 replica_placement:100 version:3 
compact_revision:2 modified_at_second:1615631968 + volume id:67 size:878713880 collection:"collection1" file_count:7299 delete_count:117 deleted_byte_count:24857326 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632156 + volume id:69 size:863913896 collection:"collection1" file_count:7291 delete_count:100 deleted_byte_count:25335024 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630534 + volume id:70 size:886695472 collection:"collection1" file_count:7769 delete_count:164 deleted_byte_count:45162513 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632398 + volume id:71 size:907608392 collection:"collection1" file_count:7658 delete_count:122 deleted_byte_count:27622941 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632307 + volume id:72 size:903990720 collection:"collection1" file_count:6996 delete_count:240 deleted_byte_count:74147727 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630985 + volume id:73 size:929047544 collection:"collection1" file_count:7038 delete_count:227 deleted_byte_count:65336664 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630707 + volume id:75 size:908045000 collection:"collection1" file_count:6911 delete_count:268 deleted_byte_count:73934373 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632430 + volume id:76 size:985296744 collection:"collection1" file_count:6566 delete_count:61 deleted_byte_count:44464430 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632284 + volume id:77 size:929398296 collection:"collection1" file_count:7427 delete_count:238 deleted_byte_count:59581579 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632014 + volume id:78 size:1075671512 collection:"collection1" file_count:7540 delete_count:258 deleted_byte_count:71726846 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615582829 + volume id:79 size:948225472 collection:"collection1" file_count:6997 delete_count:227 deleted_byte_count:60625763 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631326 + volume id:80 size:827912928 collection:"collection1" file_count:6916 delete_count:15 deleted_byte_count:5611604 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631159 + volume id:81 size:880693168 collection:"collection1" file_count:7481 delete_count:238 deleted_byte_count:80880878 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631395 + volume id:82 size:1041660512 collection:"collection1" file_count:7043 delete_count:207 deleted_byte_count:52275724 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632430 + volume id:83 size:936194288 collection:"collection1" file_count:7593 delete_count:13 deleted_byte_count:4633917 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615632029 + volume id:84 size:871262320 collection:"collection1" file_count:8190 delete_count:14 deleted_byte_count:3150948 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631161 + volume id:86 size:1009434632 collection:"collection1" file_count:8474 delete_count:236 deleted_byte_count:64543674 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630812 + volume id:87 size:922274624 collection:"collection1" file_count:12902 delete_count:13 deleted_byte_count:3412959 replica_placement:100 version:3 
compact_revision:3 modified_at_second:1615632438 + volume id:88 size:1073767976 collection:"collection1" file_count:14994 delete_count:207 deleted_byte_count:82380696 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615541383 + volume id:89 size:1044421824 collection:"collection1" file_count:14943 delete_count:243 deleted_byte_count:58543159 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615632208 + volume id:90 size:891163760 collection:"collection1" file_count:14608 delete_count:10 deleted_byte_count:2564369 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615629392 + volume id:91 size:936573952 collection:"collection1" file_count:14686 delete_count:11 deleted_byte_count:4717727 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631851 + volume id:92 size:992439144 collection:"collection1" file_count:7061 delete_count:195 deleted_byte_count:60649573 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630566 + volume id:93 size:1079602592 collection:"collection1" file_count:7878 delete_count:270 deleted_byte_count:74150048 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615556013 + volume id:94 size:1030684704 collection:"collection1" file_count:7660 delete_count:207 deleted_byte_count:70150733 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631616 + volume id:95 size:990877824 collection:"collection1" file_count:6620 delete_count:206 deleted_byte_count:60363604 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615631867 + volume id:96 size:989294848 collection:"collection1" file_count:7544 delete_count:229 deleted_byte_count:59931853 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630778 + volume id:98 size:1077836472 collection:"collection1" file_count:7605 delete_count:202 deleted_byte_count:73180379 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615523691 + volume id:99 size:1071718496 collection:"collection1" file_count:7470 delete_count:8 deleted_byte_count:9624950 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615631175 + volume id:100 size:1083617472 collection:"collection1" file_count:7018 delete_count:187 deleted_byte_count:61304236 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615505914 + volume id:101 size:1077109408 collection:"collection1" file_count:7706 delete_count:226 deleted_byte_count:77864780 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615630994 + volume id:102 size:1074359920 collection:"collection1" file_count:7338 delete_count:7 deleted_byte_count:6499151 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615626682 + volume id:103 size:1075863904 collection:"collection1" file_count:7184 delete_count:186 deleted_byte_count:58872238 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615628417 + volume id:106 size:1075458680 collection:"collection1" file_count:7182 delete_count:307 deleted_byte_count:69349053 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615598137 + volume id:107 size:1073811776 collection:"collection1" file_count:7436 delete_count:168 deleted_byte_count:57747428 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615293569 + volume id:108 size:1074648024 collection:"collection1" file_count:7472 delete_count:194 deleted_byte_count:70864699 replica_placement:100 
version:3 compact_revision:1 modified_at_second:1615593231 + volume id:109 size:1075254560 collection:"collection1" file_count:7556 delete_count:263 deleted_byte_count:55155265 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615502487 + volume id:110 size:1076575744 collection:"collection1" file_count:6996 delete_count:163 deleted_byte_count:52954032 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615590786 + volume id:111 size:1073826176 collection:"collection1" file_count:7355 delete_count:155 deleted_byte_count:50083578 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615593232 + volume id:112 size:1076392512 collection:"collection1" file_count:8291 delete_count:156 deleted_byte_count:74120183 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569823 + volume id:113 size:1076709184 collection:"collection1" file_count:9355 delete_count:177 deleted_byte_count:59796765 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615569822 + volume id:114 size:1074762792 collection:"collection1" file_count:8802 delete_count:156 deleted_byte_count:38470055 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615591826 + volume id:115 size:1076192296 collection:"collection1" file_count:7690 delete_count:154 deleted_byte_count:32267193 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615285296 + volume id:117 size:1073917192 collection:"collection1" file_count:9520 delete_count:114 deleted_byte_count:21835126 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615573712 + volume id:118 size:1074064344 collection:"collection1" file_count:8738 delete_count:15 deleted_byte_count:3460697 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615516264 + volume id:120 size:1076115928 collection:"collection1" file_count:9639 delete_count:118 deleted_byte_count:33357871 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615482567 + volume id:121 size:1078803320 collection:"collection1" file_count:10113 delete_count:441 deleted_byte_count:94128627 replica_placement:100 version:3 modified_at_second:1615506626 + volume id:122 size:1076235312 collection:"collection1" file_count:9106 delete_count:252 deleted_byte_count:93041272 replica_placement:100 version:3 modified_at_second:1615585912 + volume id:123 size:1080491112 collection:"collection1" file_count:10623 delete_count:302 deleted_byte_count:83956998 replica_placement:100 version:3 modified_at_second:1615585916 + volume id:124 size:1074519360 collection:"collection1" file_count:9457 delete_count:286 deleted_byte_count:74752459 replica_placement:100 version:3 modified_at_second:1615585913 + volume id:125 size:1088687040 collection:"collection1" file_count:9518 delete_count:281 deleted_byte_count:76037905 replica_placement:100 version:3 modified_at_second:1615585913 + volume id:126 size:1073867408 collection:"collection1" file_count:9320 delete_count:278 deleted_byte_count:94547424 replica_placement:100 version:3 modified_at_second:1615585911 + volume id:127 size:1074907336 collection:"collection1" file_count:9900 delete_count:133 deleted_byte_count:48570820 replica_placement:100 version:3 modified_at_second:1615612990 + volume id:128 size:1074874632 collection:"collection1" file_count:9821 delete_count:148 deleted_byte_count:43633334 replica_placement:100 version:3 modified_at_second:1615602670 + volume id:129 size:1074704328 collection:"collection1" 
file_count:10012 delete_count:150 deleted_byte_count:64491721 replica_placement:100 version:3 modified_at_second:1615627566 + volume id:130 size:1075000632 collection:"collection1" file_count:10633 delete_count:161 deleted_byte_count:34768201 replica_placement:100 version:3 modified_at_second:1615582327 + volume id:131 size:1075279584 collection:"collection1" file_count:10075 delete_count:135 deleted_byte_count:29795712 replica_placement:100 version:3 modified_at_second:1615523898 + volume id:132 size:1088539552 collection:"collection1" file_count:11051 delete_count:71 deleted_byte_count:17178322 replica_placement:100 version:3 modified_at_second:1615619581 + volume id:134 size:1074367304 collection:"collection1" file_count:10662 delete_count:69 deleted_byte_count:25530139 replica_placement:100 version:3 modified_at_second:1615555873 + volume id:135 size:1073906776 collection:"collection1" file_count:10446 delete_count:71 deleted_byte_count:28599975 replica_placement:100 version:3 modified_at_second:1615569816 + volume id:136 size:1074433552 collection:"collection1" file_count:9593 delete_count:72 deleted_byte_count:26912512 replica_placement:100 version:3 modified_at_second:1615376036 + volume id:137 size:1074309264 collection:"collection1" file_count:9633 delete_count:50 deleted_byte_count:27487972 replica_placement:100 version:3 modified_at_second:1615572231 + volume id:138 size:1074465744 collection:"collection1" file_count:10120 delete_count:55 deleted_byte_count:15875438 replica_placement:100 version:3 modified_at_second:1615572231 + volume id:140 size:1076203744 collection:"collection1" file_count:11219 delete_count:57 deleted_byte_count:19864498 replica_placement:100 version:3 modified_at_second:1615571947 + volume id:141 size:1074619488 collection:"collection1" file_count:9840 delete_count:45 deleted_byte_count:40890181 replica_placement:100 version:3 modified_at_second:1615630994 + volume id:142 size:1075733064 collection:"collection1" file_count:9009 delete_count:48 deleted_byte_count:9912854 replica_placement:100 version:3 modified_at_second:1615598913 + volume id:143 size:1075011280 collection:"collection1" file_count:9608 delete_count:51 deleted_byte_count:37282460 replica_placement:100 version:3 modified_at_second:1615488584 + volume id:144 size:1074549720 collection:"collection1" file_count:8780 delete_count:50 deleted_byte_count:52475146 replica_placement:100 version:3 modified_at_second:1615573451 + volume id:145 size:1074394928 collection:"collection1" file_count:9255 delete_count:34 deleted_byte_count:38011392 replica_placement:100 version:3 modified_at_second:1615591825 + volume id:146 size:1076337576 collection:"collection1" file_count:10492 delete_count:50 deleted_byte_count:17071505 replica_placement:100 version:3 modified_at_second:1615632005 + volume id:147 size:1077130576 collection:"collection1" file_count:10451 delete_count:27 deleted_byte_count:8290907 replica_placement:100 version:3 modified_at_second:1615604115 + volume id:148 size:1076066568 collection:"collection1" file_count:9547 delete_count:33 deleted_byte_count:7034089 replica_placement:100 version:3 modified_at_second:1615586390 + volume id:149 size:1074989016 collection:"collection1" file_count:8352 delete_count:35 deleted_byte_count:7179742 replica_placement:100 version:3 modified_at_second:1615494494 + volume id:150 size:1076290328 collection:"collection1" file_count:9328 delete_count:33 deleted_byte_count:43417791 replica_placement:100 version:3 modified_at_second:1615611567 + volume id:152 
size:1075941400 collection:"collection1" file_count:9951 delete_count:36 deleted_byte_count:25348335 replica_placement:100 version:3 modified_at_second:1615606614 + volume id:153 size:1078539784 collection:"collection1" file_count:10924 delete_count:34 deleted_byte_count:12603081 replica_placement:100 version:3 modified_at_second:1615606614 + volume id:154 size:1081244696 collection:"collection1" file_count:11002 delete_count:31 deleted_byte_count:8467560 replica_placement:100 version:3 modified_at_second:1615478469 + volume id:155 size:1075140688 collection:"collection1" file_count:10882 delete_count:32 deleted_byte_count:10076804 replica_placement:100 version:3 modified_at_second:1615606614 + volume id:156 size:1074975832 collection:"collection1" file_count:9535 delete_count:40 deleted_byte_count:11426621 replica_placement:100 version:3 modified_at_second:1615628341 + volume id:157 size:1076758536 collection:"collection1" file_count:10012 delete_count:19 deleted_byte_count:11688737 replica_placement:100 version:3 modified_at_second:1615597782 + volume id:158 size:1087251976 collection:"collection1" file_count:9972 delete_count:20 deleted_byte_count:10328429 replica_placement:100 version:3 modified_at_second:1615588879 + volume id:159 size:1074132368 collection:"collection1" file_count:9382 delete_count:27 deleted_byte_count:11474574 replica_placement:100 version:3 modified_at_second:1615593593 + volume id:160 size:1075680952 collection:"collection1" file_count:9772 delete_count:22 deleted_byte_count:4981968 replica_placement:100 version:3 modified_at_second:1615597780 + volume id:162 size:1074286880 collection:"collection1" file_count:11220 delete_count:17 deleted_byte_count:1815547 replica_placement:100 version:3 modified_at_second:1615478126 + volume id:163 size:1074457192 collection:"collection1" file_count:12524 delete_count:27 deleted_byte_count:6359619 replica_placement:100 version:3 modified_at_second:1615579313 + volume id:164 size:1074261248 collection:"collection1" file_count:11922 delete_count:25 deleted_byte_count:2923288 replica_placement:100 version:3 modified_at_second:1615620084 + volume id:165 size:1073891016 collection:"collection1" file_count:9152 delete_count:12 deleted_byte_count:19164659 replica_placement:100 version:3 modified_at_second:1615471907 + volume id:166 size:1075637536 collection:"collection1" file_count:14211 delete_count:24 deleted_byte_count:20415490 replica_placement:100 version:3 modified_at_second:1615491019 + volume id:168 size:1074718808 collection:"collection1" file_count:25702 delete_count:40 deleted_byte_count:4024775 replica_placement:100 version:3 modified_at_second:1615585664 + volume id:169 size:1073863128 collection:"collection1" file_count:25248 delete_count:43 deleted_byte_count:3013817 replica_placement:100 version:3 modified_at_second:1615569832 + volume id:170 size:1075747096 collection:"collection1" file_count:24596 delete_count:41 deleted_byte_count:3494711 replica_placement:100 version:3 modified_at_second:1615579204 + volume id:171 size:1081881312 collection:"collection1" file_count:24215 delete_count:36 deleted_byte_count:3191335 replica_placement:100 version:3 modified_at_second:1615596485 + volume id:172 size:1074787312 collection:"collection1" file_count:31236 delete_count:50 deleted_byte_count:3316482 replica_placement:100 version:3 modified_at_second:1615612385 + volume id:173 size:1074154648 collection:"collection1" file_count:30884 delete_count:34 deleted_byte_count:2430948 replica_placement:100 version:3 
modified_at_second:1615591904 + volume id:175 size:1077742504 collection:"collection1" file_count:32353 delete_count:33 deleted_byte_count:1861403 replica_placement:100 version:3 modified_at_second:1615559515 + volume id:176 size:1073854800 collection:"collection1" file_count:30582 delete_count:34 deleted_byte_count:7701976 replica_placement:100 version:3 modified_at_second:1615626169 + volume id:177 size:1074120120 collection:"collection1" file_count:22293 delete_count:16 deleted_byte_count:3719562 replica_placement:100 version:3 modified_at_second:1615516891 + volume id:178 size:1087560112 collection:"collection1" file_count:23482 delete_count:22 deleted_byte_count:18810492 replica_placement:100 version:3 modified_at_second:1615541369 + volume id:180 size:1078438536 collection:"collection1" file_count:23614 delete_count:12 deleted_byte_count:4496474 replica_placement:100 version:3 modified_at_second:1614773242 + volume id:181 size:1074571768 collection:"collection1" file_count:22898 delete_count:19 deleted_byte_count:6628413 replica_placement:100 version:3 modified_at_second:1614745116 + volume id:182 size:1076131280 collection:"collection1" file_count:31987 delete_count:21 deleted_byte_count:1416142 replica_placement:100 version:3 modified_at_second:1615568922 + volume id:183 size:1076361448 collection:"collection1" file_count:31293 delete_count:16 deleted_byte_count:468841 replica_placement:100 version:3 modified_at_second:1615572982 + volume id:184 size:1074594160 collection:"collection1" file_count:31368 delete_count:22 deleted_byte_count:857453 replica_placement:100 version:3 modified_at_second:1615586578 + volume id:185 size:1074099624 collection:"collection1" file_count:30612 delete_count:17 deleted_byte_count:2610847 replica_placement:100 version:3 modified_at_second:1615506832 + volume id:186 size:1074220864 collection:"collection1" file_count:31450 delete_count:15 deleted_byte_count:391855 replica_placement:100 version:3 modified_at_second:1615614933 + volume id:187 size:1074395944 collection:"collection1" file_count:31853 delete_count:17 deleted_byte_count:454283 replica_placement:100 version:3 modified_at_second:1615590490 + volume id:188 size:1074732792 collection:"collection1" file_count:31867 delete_count:19 deleted_byte_count:393743 replica_placement:100 version:3 modified_at_second:1615487645 + volume id:189 size:1074847896 collection:"collection1" file_count:31450 delete_count:16 deleted_byte_count:1040552 replica_placement:100 version:3 modified_at_second:1615335661 + volume id:190 size:1074008912 collection:"collection1" file_count:31987 delete_count:11 deleted_byte_count:685125 replica_placement:100 version:3 modified_at_second:1615447161 + volume id:191 size:1075493024 collection:"collection1" file_count:31301 delete_count:19 deleted_byte_count:708401 replica_placement:100 version:3 modified_at_second:1615357456 + volume id:192 size:1075857400 collection:"collection1" file_count:31490 delete_count:25 deleted_byte_count:720617 replica_placement:100 version:3 modified_at_second:1615621632 + volume id:193 size:1076616768 collection:"collection1" file_count:31907 delete_count:16 deleted_byte_count:464900 replica_placement:100 version:3 modified_at_second:1615507875 + volume id:194 size:1073985624 collection:"collection1" file_count:31434 delete_count:18 deleted_byte_count:391432 replica_placement:100 version:3 modified_at_second:1615559499 + volume id:195 size:1074158312 collection:"collection1" file_count:31453 delete_count:15 deleted_byte_count:718266 
replica_placement:100 version:3 modified_at_second:1615559331 + volume id:196 size:1074594784 collection:"collection1" file_count:31665 delete_count:18 deleted_byte_count:3468922 replica_placement:100 version:3 modified_at_second:1615501688 + volume id:197 size:1075423296 collection:"collection1" file_count:16473 delete_count:15 deleted_byte_count:12552442 replica_placement:100 version:3 modified_at_second:1615485253 + volume id:198 size:1075104712 collection:"collection1" file_count:16577 delete_count:18 deleted_byte_count:6583181 replica_placement:100 version:3 modified_at_second:1615623369 + volume id:199 size:1078117688 collection:"collection1" file_count:16497 delete_count:14 deleted_byte_count:1514286 replica_placement:100 version:3 modified_at_second:1615585984 + volume id:200 size:1075630536 collection:"collection1" file_count:16380 delete_count:18 deleted_byte_count:1103109 replica_placement:100 version:3 modified_at_second:1615485252 + volume id:201 size:1091460440 collection:"collection1" file_count:16684 delete_count:26 deleted_byte_count:5590335 replica_placement:100 version:3 modified_at_second:1615585987 + volume id:202 size:1077533160 collection:"collection1" file_count:2847 delete_count:67 deleted_byte_count:65172985 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615588497 + volume id:203 size:1027316272 collection:"collection1" file_count:3040 delete_count:11 deleted_byte_count:3993230 replica_placement:100 version:3 compact_revision:3 modified_at_second:1615631728 + volume id:204 size:1079766872 collection:"collection1" file_count:3233 delete_count:255 deleted_byte_count:104707641 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615565701 + volume id:205 size:1078485304 collection:"collection1" file_count:2869 delete_count:43 deleted_byte_count:18290259 replica_placement:100 version:3 compact_revision:2 modified_at_second:1615579314 + volume id:206 size:1082045848 collection:"collection1" file_count:2979 delete_count:225 deleted_byte_count:88220074 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630989 + volume id:207 size:1081939960 collection:"collection1" file_count:3010 delete_count:4 deleted_byte_count:692350 replica_placement:100 version:3 modified_at_second:1615269061 + volume id:208 size:1077863624 collection:"collection1" file_count:3147 delete_count:6 deleted_byte_count:858726 replica_placement:100 version:3 modified_at_second:1615495515 + volume id:210 size:1094311304 collection:"collection1" file_count:3468 delete_count:4 deleted_byte_count:466433 replica_placement:100 version:3 modified_at_second:1615495515 + volume id:212 size:1078293448 collection:"collection1" file_count:3106 delete_count:6 deleted_byte_count:2085755 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:213 size:1093588072 collection:"collection1" file_count:3681 delete_count:12 deleted_byte_count:3138791 replica_placement:100 version:3 modified_at_second:1615586387 + volume id:214 size:1074486992 collection:"collection1" file_count:3217 delete_count:10 deleted_byte_count:6392871 replica_placement:100 version:3 modified_at_second:1615586383 + volume id:215 size:1074798704 collection:"collection1" file_count:2819 delete_count:31 deleted_byte_count:10873569 replica_placement:100 version:3 modified_at_second:1615586386 + volume id:217 size:1075381872 collection:"collection1" file_count:3331 delete_count:14 deleted_byte_count:2009141 replica_placement:100 version:3 modified_at_second:1615401638 + 
volume id:218 size:1081263944 collection:"collection1" file_count:3433 delete_count:14 deleted_byte_count:3454237 replica_placement:100 version:3 modified_at_second:1615603637 + volume id:219 size:1092298816 collection:"collection1" file_count:3193 delete_count:17 deleted_byte_count:2047576 replica_placement:100 version:3 modified_at_second:1615579316 + volume id:220 size:1081928312 collection:"collection1" file_count:3166 delete_count:13 deleted_byte_count:4127709 replica_placement:100 version:3 modified_at_second:1615579317 + volume id:221 size:1106545456 collection:"collection1" file_count:3153 delete_count:11 deleted_byte_count:1496835 replica_placement:100 version:3 modified_at_second:1615269138 + volume id:222 size:1106623104 collection:"collection1" file_count:3273 delete_count:11 deleted_byte_count:2114627 replica_placement:100 version:3 modified_at_second:1615586243 + volume id:223 size:1075233064 collection:"collection1" file_count:2966 delete_count:9 deleted_byte_count:744001 replica_placement:100 version:3 modified_at_second:1615586244 + volume id:224 size:1093691520 collection:"collection1" file_count:3463 delete_count:10 deleted_byte_count:1128328 replica_placement:100 version:3 modified_at_second:1615601870 + volume id:225 size:1080698928 collection:"collection1" file_count:3115 delete_count:7 deleted_byte_count:18170416 replica_placement:100 version:3 modified_at_second:1615434684 + volume id:226 size:1103504768 collection:"collection1" file_count:2965 delete_count:10 deleted_byte_count:2639254 replica_placement:100 version:3 modified_at_second:1615601867 + volume id:228 size:1109784072 collection:"collection1" file_count:2504 delete_count:24 deleted_byte_count:5458950 replica_placement:100 version:3 modified_at_second:1615610489 + volume id:230 size:1080722984 collection:"collection1" file_count:2898 delete_count:15 deleted_byte_count:3929261 replica_placement:100 version:3 modified_at_second:1615610537 + volume id:232 size:1073901520 collection:"collection1" file_count:3004 delete_count:54 deleted_byte_count:10273081 replica_placement:100 version:3 modified_at_second:1615611351 + volume id:234 size:1073835280 collection:"collection1" file_count:2965 delete_count:41 deleted_byte_count:4960354 replica_placement:100 version:3 modified_at_second:1615611351 + volume id:235 size:1075586104 collection:"collection1" file_count:2767 delete_count:33 deleted_byte_count:3216540 replica_placement:100 version:3 modified_at_second:1615611354 + volume id:236 size:1089476136 collection:"collection1" file_count:3231 delete_count:53 deleted_byte_count:11625921 replica_placement:100 version:3 modified_at_second:1615611351 + volume id:237 size:375722792 collection:"collection1" file_count:736 delete_count:16 deleted_byte_count:4464870 replica_placement:100 version:3 modified_at_second:1615631727 + volume id:238 size:354320000 collection:"collection1" file_count:701 delete_count:17 deleted_byte_count:5940420 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615632030 + volume id:239 size:426569024 collection:"collection1" file_count:693 delete_count:19 deleted_byte_count:13020783 replica_placement:100 version:3 compact_revision:1 modified_at_second:1615630841 + volume id:240 size:424791528 collection:"collection1" file_count:733 delete_count:13 deleted_byte_count:7515220 replica_placement:100 version:3 modified_at_second:1615631670 + volume id:241 size:380217424 collection:"collection1" file_count:633 delete_count:6 deleted_byte_count:1715768 replica_placement:100 
version:3 modified_at_second:1615632006 + volume id:242 size:1075383392 collection:"collection2" file_count:10470 replica_placement:100 version:3 modified_at_second:1614852116 + volume id:243 size:1088174704 collection:"collection2" file_count:11109 delete_count:1 deleted_byte_count:938 replica_placement:100 version:3 modified_at_second:1614852203 + volume id:244 size:1080295352 collection:"collection2" file_count:10812 delete_count:1 deleted_byte_count:795 replica_placement:100 version:3 modified_at_second:1615628825 + volume id:246 size:1075998672 collection:"collection2" file_count:10365 delete_count:1 deleted_byte_count:13112 replica_placement:100 version:3 modified_at_second:1614852106 + volume id:247 size:1075859808 collection:"collection2" file_count:10443 delete_count:2 deleted_byte_count:564486 replica_placement:100 version:3 modified_at_second:1614856152 + volume id:248 size:1084301208 collection:"collection2" file_count:11217 delete_count:4 deleted_byte_count:746488 replica_placement:100 version:3 modified_at_second:1614856285 + volume id:250 size:1080572168 collection:"collection2" file_count:10220 replica_placement:100 version:3 modified_at_second:1614856129 + volume id:252 size:1075065264 collection:"collection2" file_count:14622 delete_count:2 deleted_byte_count:5228 replica_placement:100 version:3 modified_at_second:1614861200 + volume id:253 size:1087328880 collection:"collection2" file_count:14920 delete_count:3 deleted_byte_count:522994 replica_placement:100 version:3 modified_at_second:1614861258 + volume id:254 size:1074830736 collection:"collection2" file_count:14140 delete_count:2 deleted_byte_count:105892 replica_placement:100 version:3 modified_at_second:1614861115 + volume id:255 size:1079581640 collection:"collection2" file_count:14877 delete_count:3 deleted_byte_count:101223 replica_placement:100 version:3 modified_at_second:1614861233 + volume id:256 size:1074283592 collection:"collection2" file_count:14157 delete_count:1 deleted_byte_count:18156 replica_placement:100 version:3 modified_at_second:1614861100 + volume id:257 size:1082621720 collection:"collection2" file_count:18172 delete_count:2 deleted_byte_count:25125 replica_placement:100 version:3 modified_at_second:1614866402 + volume id:258 size:1075527216 collection:"collection2" file_count:18421 delete_count:4 deleted_byte_count:267833 replica_placement:100 version:3 modified_at_second:1614866420 + volume id:259 size:1075507848 collection:"collection2" file_count:18079 delete_count:2 deleted_byte_count:71992 replica_placement:100 version:3 modified_at_second:1614866381 + volume id:260 size:1075105664 collection:"collection2" file_count:17316 delete_count:4 deleted_byte_count:2015310 replica_placement:100 version:3 modified_at_second:1614866226 + volume id:261 size:1076628592 collection:"collection2" file_count:18355 delete_count:1 deleted_byte_count:1155 replica_placement:100 version:3 modified_at_second:1614866420 + volume id:262 size:1078492584 collection:"collection2" file_count:20390 delete_count:3 deleted_byte_count:287601 replica_placement:100 version:3 modified_at_second:1614871601 + volume id:264 size:1081624192 collection:"collection2" file_count:21151 replica_placement:100 version:3 modified_at_second:1614871629 + volume id:265 size:1076401104 collection:"collection2" file_count:19932 delete_count:2 deleted_byte_count:160823 replica_placement:100 version:3 modified_at_second:1614871543 + volume id:266 size:1075617552 collection:"collection2" file_count:20075 delete_count:1 
deleted_byte_count:1039 replica_placement:100 version:3 modified_at_second:1614871526 + volume id:267 size:1075699376 collection:"collection2" file_count:21039 delete_count:3 deleted_byte_count:59956 replica_placement:100 version:3 modified_at_second:1614877294 + volume id:270 size:1076876424 collection:"collection2" file_count:22057 delete_count:1 deleted_byte_count:43916 replica_placement:100 version:3 modified_at_second:1614877469 + volume id:271 size:1076992704 collection:"collection2" file_count:22640 delete_count:1 deleted_byte_count:30645 replica_placement:100 version:3 modified_at_second:1614877504 + volume id:272 size:1076145912 collection:"collection2" file_count:21034 delete_count:2 deleted_byte_count:216564 replica_placement:100 version:3 modified_at_second:1614884139 + volume id:273 size:1074873432 collection:"collection2" file_count:20511 delete_count:3 deleted_byte_count:46076 replica_placement:100 version:3 modified_at_second:1614884046 + volume id:274 size:1075994184 collection:"collection2" file_count:20997 replica_placement:100 version:3 modified_at_second:1614884113 + volume id:275 size:1078349024 collection:"collection2" file_count:20808 delete_count:1 deleted_byte_count:1118 replica_placement:100 version:3 modified_at_second:1614884147 + volume id:276 size:1076899880 collection:"collection2" file_count:20190 delete_count:1 deleted_byte_count:8798 replica_placement:100 version:3 modified_at_second:1614884003 + volume id:278 size:1078798632 collection:"collection2" file_count:20597 delete_count:5 deleted_byte_count:400060 replica_placement:100 version:3 modified_at_second:1614890292 + volume id:280 size:1077432160 collection:"collection2" file_count:20286 delete_count:1 deleted_byte_count:879 replica_placement:100 version:3 modified_at_second:1614890262 + volume id:281 size:1077581064 collection:"collection2" file_count:20206 delete_count:3 deleted_byte_count:143964 replica_placement:100 version:3 modified_at_second:1614890237 + volume id:282 size:1075232184 collection:"collection2" file_count:22659 delete_count:4 deleted_byte_count:67915 replica_placement:100 version:3 modified_at_second:1614897304 + volume id:283 size:1080178880 collection:"collection2" file_count:19462 delete_count:7 deleted_byte_count:660407 replica_placement:100 version:3 modified_at_second:1614896623 + volume id:286 size:1077464816 collection:"collection2" file_count:23905 delete_count:6 deleted_byte_count:630577 replica_placement:100 version:3 modified_at_second:1614897401 + volume id:287 size:1074590536 collection:"collection2" file_count:28163 delete_count:5 deleted_byte_count:35727 replica_placement:100 version:3 modified_at_second:1614904875 + volume id:288 size:1075406920 collection:"collection2" file_count:27243 delete_count:2 deleted_byte_count:51519 replica_placement:100 version:3 modified_at_second:1614904738 + volume id:289 size:1075284312 collection:"collection2" file_count:29342 delete_count:5 deleted_byte_count:100454 replica_placement:100 version:3 modified_at_second:1614904977 + volume id:290 size:1074723800 collection:"collection2" file_count:28340 delete_count:4 deleted_byte_count:199064 replica_placement:100 version:3 modified_at_second:1614904924 + volume id:292 size:1092010672 collection:"collection2" file_count:26781 delete_count:5 deleted_byte_count:508910 replica_placement:100 version:3 modified_at_second:1614912325 + volume id:295 size:1074702320 collection:"collection2" file_count:24488 delete_count:3 deleted_byte_count:48555 replica_placement:100 version:3 
modified_at_second:1614911929 + volume id:296 size:1077824056 collection:"collection2" file_count:26741 delete_count:4 deleted_byte_count:199906 replica_placement:100 version:3 modified_at_second:1614912301 + volume id:297 size:1080229176 collection:"collection2" file_count:23409 delete_count:5 deleted_byte_count:46268 replica_placement:100 version:3 modified_at_second:1614918481 + volume id:298 size:1075410024 collection:"collection2" file_count:23222 delete_count:2 deleted_byte_count:46110 replica_placement:100 version:3 modified_at_second:1614918474 + volume id:302 size:1077559640 collection:"collection2" file_count:23124 delete_count:7 deleted_byte_count:293111 replica_placement:100 version:3 modified_at_second:1614925500 + volume id:304 size:1081038944 collection:"collection2" file_count:24505 delete_count:2 deleted_byte_count:124447 replica_placement:100 version:3 modified_at_second:1614925569 + volume id:305 size:1074185376 collection:"collection2" file_count:22074 delete_count:5 deleted_byte_count:20221 replica_placement:100 version:3 modified_at_second:1614925312 + volume id:306 size:1074763952 collection:"collection2" file_count:22939 replica_placement:100 version:3 modified_at_second:1614925462 + volume id:307 size:1076567912 collection:"collection2" file_count:23377 delete_count:2 deleted_byte_count:25453 replica_placement:100 version:3 modified_at_second:1614931448 + volume id:308 size:1074022336 collection:"collection2" file_count:23086 delete_count:2 deleted_byte_count:2127 replica_placement:100 version:3 modified_at_second:1614931401 + volume id:311 size:1088248344 collection:"collection2" file_count:23553 delete_count:6 deleted_byte_count:191716 replica_placement:100 version:3 modified_at_second:1614931463 + volume id:312 size:1075037528 collection:"collection2" file_count:22524 replica_placement:100 version:3 modified_at_second:1614937831 + volume id:313 size:1074875960 collection:"collection2" file_count:22404 delete_count:4 deleted_byte_count:51728 replica_placement:100 version:3 modified_at_second:1614937755 + volume id:316 size:1077720776 collection:"collection2" file_count:22605 delete_count:1 deleted_byte_count:8503 replica_placement:100 version:3 modified_at_second:1614937838 + volume id:318 size:1075965168 collection:"collection2" file_count:22459 delete_count:2 deleted_byte_count:37778 replica_placement:100 version:3 modified_at_second:1614943862 + volume id:322 size:1078471536 collection:"collection2" file_count:21905 delete_count:3 deleted_byte_count:145002 replica_placement:100 version:3 modified_at_second:1614950572 + volume id:323 size:1074608056 collection:"collection2" file_count:21605 delete_count:4 deleted_byte_count:172090 replica_placement:100 version:3 modified_at_second:1614950526 + volume id:325 size:1080701232 collection:"collection2" file_count:21735 replica_placement:100 version:3 modified_at_second:1614950525 + volume id:326 size:1076059920 collection:"collection2" file_count:22564 delete_count:2 deleted_byte_count:192886 replica_placement:100 version:3 modified_at_second:1614950619 + volume id:327 size:1076121304 collection:"collection2" file_count:22007 delete_count:3 deleted_byte_count:60358 replica_placement:100 version:3 modified_at_second:1614956487 + volume id:328 size:1074767816 collection:"collection2" file_count:21720 delete_count:3 deleted_byte_count:56429 replica_placement:100 version:3 modified_at_second:1614956362 + volume id:329 size:1076691960 collection:"collection2" file_count:22411 delete_count:5 deleted_byte_count:214092 
replica_placement:100 version:3 modified_at_second:1614956485 + volume id:330 size:1080825760 collection:"collection2" file_count:22464 delete_count:2 deleted_byte_count:15771 replica_placement:100 version:3 modified_at_second:1614956476 + volume id:331 size:1074957256 collection:"collection2" file_count:21230 delete_count:4 deleted_byte_count:62145 replica_placement:100 version:3 modified_at_second:1614956259 + volume id:332 size:1075569928 collection:"collection2" file_count:22097 delete_count:3 deleted_byte_count:98273 replica_placement:100 version:3 modified_at_second:1614962869 + volume id:333 size:1074270160 collection:"collection2" file_count:21271 delete_count:2 deleted_byte_count:168122 replica_placement:100 version:3 modified_at_second:1614962697 + volume id:334 size:1075607880 collection:"collection2" file_count:22546 delete_count:6 deleted_byte_count:101538 replica_placement:100 version:3 modified_at_second:1614962978 + volume id:335 size:1076235136 collection:"collection2" file_count:22391 delete_count:3 deleted_byte_count:8838 replica_placement:100 version:3 modified_at_second:1614962970 + volume id:337 size:1075646896 collection:"collection2" file_count:21934 delete_count:1 deleted_byte_count:3397 replica_placement:100 version:3 modified_at_second:1614969937 + volume id:339 size:1078402392 collection:"collection2" file_count:22309 replica_placement:100 version:3 modified_at_second:1614969995 + volume id:340 size:1079462152 collection:"collection2" file_count:22319 delete_count:4 deleted_byte_count:93620 replica_placement:100 version:3 modified_at_second:1614969977 + volume id:341 size:1074448360 collection:"collection2" file_count:21590 delete_count:5 deleted_byte_count:160085 replica_placement:100 version:3 modified_at_second:1614969858 + volume id:343 size:1075345072 collection:"collection2" file_count:21095 delete_count:2 deleted_byte_count:20581 replica_placement:100 version:3 modified_at_second:1614977148 + volume id:346 size:1076464112 collection:"collection2" file_count:22320 delete_count:4 deleted_byte_count:798258 replica_placement:100 version:3 modified_at_second:1614977511 + volume id:347 size:1075145248 collection:"collection2" file_count:22178 delete_count:1 deleted_byte_count:79392 replica_placement:100 version:3 modified_at_second:1614984727 + volume id:348 size:1080623544 collection:"collection2" file_count:21667 delete_count:1 deleted_byte_count:2443 replica_placement:100 version:3 modified_at_second:1614984604 + volume id:349 size:1075957672 collection:"collection2" file_count:22395 delete_count:2 deleted_byte_count:61565 replica_placement:100 version:3 modified_at_second:1614984748 + volume id:351 size:1078795120 collection:"collection2" file_count:23660 delete_count:3 deleted_byte_count:102141 replica_placement:100 version:3 modified_at_second:1614984816 + volume id:352 size:1077145936 collection:"collection2" file_count:22066 delete_count:1 deleted_byte_count:1018 replica_placement:100 version:3 modified_at_second:1614992130 + volume id:353 size:1074897496 collection:"collection2" file_count:21266 delete_count:2 deleted_byte_count:3105374 replica_placement:100 version:3 modified_at_second:1614991951 + volume id:354 size:1085214104 collection:"collection2" file_count:23150 delete_count:4 deleted_byte_count:82391 replica_placement:100 version:3 modified_at_second:1614992208 + volume id:357 size:1074276152 collection:"collection2" file_count:23137 delete_count:4 deleted_byte_count:188487 replica_placement:100 version:3 modified_at_second:1614998792 + volume 
id:359 size:1074211296 collection:"collection2" file_count:22437 delete_count:2 deleted_byte_count:187953 replica_placement:100 version:3 modified_at_second:1614998711 + volume id:360 size:1075532512 collection:"collection2" file_count:22574 delete_count:3 deleted_byte_count:1774776 replica_placement:100 version:3 modified_at_second:1614998770 + volume id:361 size:1075362744 collection:"collection2" file_count:22272 delete_count:1 deleted_byte_count:3497 replica_placement:100 version:3 modified_at_second:1614998668 + volume id:362 size:1074074176 collection:"collection2" file_count:20595 delete_count:1 deleted_byte_count:112145 replica_placement:100 version:3 modified_at_second:1615004407 + volume id:363 size:1078859640 collection:"collection2" file_count:23177 delete_count:4 deleted_byte_count:9601 replica_placement:100 version:3 modified_at_second:1615004823 + volume id:364 size:1081280880 collection:"collection2" file_count:22686 delete_count:1 deleted_byte_count:84375 replica_placement:100 version:3 modified_at_second:1615004813 + volume id:365 size:1075736632 collection:"collection2" file_count:22193 delete_count:5 deleted_byte_count:259033 replica_placement:100 version:3 modified_at_second:1615004776 + volume id:366 size:1075267272 collection:"collection2" file_count:21856 delete_count:5 deleted_byte_count:138363 replica_placement:100 version:3 modified_at_second:1615004703 + volume id:367 size:1076403648 collection:"collection2" file_count:22995 delete_count:2 deleted_byte_count:36955 replica_placement:100 version:3 modified_at_second:1615010985 + volume id:368 size:1074821960 collection:"collection2" file_count:22252 delete_count:4 deleted_byte_count:3291946 replica_placement:100 version:3 modified_at_second:1615010877 + volume id:369 size:1091472040 collection:"collection2" file_count:23709 delete_count:4 deleted_byte_count:400876 replica_placement:100 version:3 modified_at_second:1615011021 + volume id:370 size:1076040544 collection:"collection2" file_count:22092 delete_count:2 deleted_byte_count:115388 replica_placement:100 version:3 modified_at_second:1615010877 + volume id:371 size:1078806216 collection:"collection2" file_count:22685 delete_count:2 deleted_byte_count:68905 replica_placement:100 version:3 modified_at_second:1615010995 + volume id:372 size:1076193344 collection:"collection2" file_count:22774 delete_count:1 deleted_byte_count:3495 replica_placement:100 version:3 modified_at_second:1615016911 + volume id:373 size:1080928088 collection:"collection2" file_count:22617 delete_count:4 deleted_byte_count:91849 replica_placement:100 version:3 modified_at_second:1615016878 + volume id:374 size:1085011176 collection:"collection2" file_count:23054 delete_count:2 deleted_byte_count:89034 replica_placement:100 version:3 modified_at_second:1615016917 + volume id:376 size:1074845832 collection:"collection2" file_count:22908 delete_count:4 deleted_byte_count:432305 replica_placement:100 version:3 modified_at_second:1615016916 + volume id:377 size:957434264 collection:"collection2" file_count:14929 delete_count:1 deleted_byte_count:43099 replica_placement:100 version:3 modified_at_second:1615632323 + volume id:379 size:1014108528 collection:"collection2" file_count:15362 delete_count:6 deleted_byte_count:2481613 replica_placement:100 version:3 modified_at_second:1615632323 + Disk hdd total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660 + DataNode 192.168.1.5:8080 total size:306912958016 file_count:4201794 deleted_file:15268 
deleted_bytes:4779359660 + Rack DefaultRack total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660 + DataCenter dc5 total size:306912958016 file_count:4201794 deleted_file:15268 deleted_bytes:4779359660 +total size:775256653592 file_count:10478712 deleted_file:33754 deleted_bytes:10839266043 +` diff --git a/weed/shell/command_volume_server_evacuate.go b/weed/shell/command_volume_server_evacuate.go index 80c5b1d6b..f21d0334c 100644 --- a/weed/shell/command_volume_server_evacuate.go +++ b/weed/shell/command_volume_server_evacuate.go @@ -176,6 +176,11 @@ func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEc func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, vol *master_pb.VolumeInformationMessage, thisNode *Node, otherNodes []*Node, applyChange bool) (hasMoved bool, err error) { fn := capacityByFreeVolumeCount(types.ToDiskType(vol.DiskType)) + for _, n := range otherNodes { + n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool { + return v.DiskType == vol.DiskType + }) + } sort.Slice(otherNodes, func(i, j int) bool { return otherNodes[i].localVolumeRatio(fn) > otherNodes[j].localVolumeRatio(fn) }) diff --git a/weed/shell/command_volume_server_evacuate_test.go b/weed/shell/command_volume_server_evacuate_test.go new file mode 100644 index 000000000..5753af78b --- /dev/null +++ b/weed/shell/command_volume_server_evacuate_test.go @@ -0,0 +1,17 @@ +package shell + +import ( + "os" + "testing" +) + +func TestVolumeServerEvacuate(t *testing.T) { + topologyInfo := parseOutput(topoData) + + volumeServer := "192.168.1.4:8080" + + if err := evacuateNormalVolumes(nil, topologyInfo, volumeServer, true, false, os.Stdout); err != nil { + t.Errorf("evacuate: %v", err) + } + +} diff --git a/weed/shell/command_volume_tier_move.go b/weed/shell/command_volume_tier_move.go index f7fa94031..d6a49d6e1 100644 --- a/weed/shell/command_volume_tier_move.go +++ b/weed/shell/command_volume_tier_move.go @@ -133,7 +133,7 @@ func doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, collection strin // remove the remaining replicas for _, loc := range locations { - if loc.Url != sourceVolumeServer { + if loc.Url != dst.dataNode.Id { if err = deleteVolume(commandEnv.option.GrpcDialOption, vid, loc.Url); err != nil { fmt.Fprintf(writer, "failed to delete volume %d on %s\n", vid, loc.Url) } diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go index 00adb1abc..1dd611ca5 100644 --- a/weed/shell/shell_liner.go +++ b/weed/shell/shell_liner.go @@ -2,6 +2,7 @@ package shell import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/util/grace" "io" "os" "path" @@ -19,8 +20,15 @@ var ( func RunShell(options ShellOptions) { + sort.Slice(Commands, func(i, j int) bool { + return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0 + }) + line = liner.NewLiner() defer line.Close() + grace.OnInterrupt(func() { + line.Close() + }) line.SetCtrlCAborts(true) @@ -96,9 +104,6 @@ func printGenericHelp() { ` fmt.Print(msg) - sort.Slice(Commands, func(i, j int) bool { - return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0 - }) for _, c := range Commands { helpTexts := strings.SplitN(c.Help(), "\n", 2) fmt.Printf(" %-30s\t# %s \n", c.Name(), helpTexts[0]) @@ -114,10 +119,6 @@ func printHelp(cmds []string) { } else { cmd := strings.ToLower(args[0]) - sort.Slice(Commands, func(i, j int) bool { - return strings.Compare(Commands[i].Name(), Commands[j].Name()) < 0 - }) - for _, c := range Commands { if 
c.Name() == cmd { fmt.Printf(" %s\t# %s\n", c.Name(), c.Help()) diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go index b8b883be6..2dc61d02e 100644 --- a/weed/storage/backend/backend.go +++ b/weed/storage/backend/backend.go @@ -58,6 +58,9 @@ func LoadConfiguration(config *util.ViperProxy) { if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." + backendStorageId + ".enabled") { continue } + if _, found := BackendStorages[backendTypeName+"."+backendStorageId]; found { + continue + } backendStorage, buildErr := backendStorageFactory.BuildStorage(config, StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId) if buildErr != nil { @@ -81,6 +84,9 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { glog.Warningf("storage type %s not found", storageBackend.Type) continue } + if _, found := BackendStorages[storageBackend.Type+"."+storageBackend.Id]; found { + continue + } backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id) if buildErr != nil { glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go index 498963c31..3b42429cf 100644 --- a/weed/storage/backend/disk_file.go +++ b/weed/storage/backend/disk_file.go @@ -52,7 +52,7 @@ func (df *DiskFile) WriteAt(p []byte, off int64) (n int, err error) { return } -func (df *DiskFile) Append(p []byte) (n int, err error) { +func (df *DiskFile) Write(p []byte) (n int, err error) { return df.WriteAt(p, df.fileSize) } diff --git a/weed/storage/backend/s3_backend/s3_sessions.go b/weed/storage/backend/s3_backend/s3_sessions.go index e2fdf1eb6..b8378c379 100644 --- a/weed/storage/backend/s3_backend/s3_sessions.go +++ b/weed/storage/backend/s3_backend/s3_sessions.go @@ -34,8 +34,9 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string) } config := &aws.Config{ - Region: aws.String(region), - Endpoint: aws.String(endpoint), + Region: aws.String(region), + Endpoint: aws.String(endpoint), + S3ForcePathStyle: aws.Bool(true), } if awsAccessKeyId != "" && awsSecretAccessKey != "" { config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index 6de87c793..ed4e00312 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -131,7 +131,7 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne l.SetVolume(vid, v) size, _, _ := v.FileStat() - glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", + glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s", l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) return true } diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index 85d6a5fc8..171db92a4 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -63,7 +63,7 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection // read volume info ev.Version = needle.Version3 - if volumeInfo, found, _ := pb.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found { + if volumeInfo, _, found, _ := pb.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found { ev.Version = needle.Version(volumeInfo.Version) } else { 
pb.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) diff --git a/weed/storage/needle/crc.go b/weed/storage/needle/crc.go index 6fd910bb7..4476631c2 100644 --- a/weed/storage/needle/crc.go +++ b/weed/storage/needle/crc.go @@ -2,6 +2,7 @@ package needle import ( "fmt" + "io" "github.com/klauspost/crc32" @@ -29,3 +30,25 @@ func (n *Needle) Etag() string { util.Uint32toBytes(bits, uint32(n.Checksum)) return fmt.Sprintf("%x", bits) } + +func NewCRCwriter(w io.Writer) *CRCwriter { + + return &CRCwriter{ + crc: CRC(0), + w: w, + } + +} + +type CRCwriter struct { + crc CRC + w io.Writer +} + +func (c *CRCwriter) Write(p []byte) (n int, err error) { + n, err = c.w.Write(p) // with each write ... + c.crc = c.crc.Update(p) + return +} + +func (c *CRCwriter) Sum() uint32 { return c.crc.Value() } // final hash diff --git a/weed/storage/needle/needle_read_write.go b/weed/storage/needle/needle_read_write.go index 0f72bc0bb..16c2fd06b 100644 --- a/weed/storage/needle/needle_read_write.go +++ b/weed/storage/needle/needle_read_write.go @@ -3,13 +3,12 @@ package needle import ( "errors" "fmt" - "io" - "math" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/backend" . "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" + "io" + "math" ) const ( @@ -156,6 +155,35 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u return offset, size, actualSize, err } +func WriteNeedleBlob(w backend.BackendStorageFile, dataSlice []byte, size Size, appendAtNs uint64, version Version) (offset uint64, err error) { + + if end, _, e := w.GetStat(); e == nil { + defer func(w backend.BackendStorageFile, off int64) { + if err != nil { + if te := w.Truncate(end); te != nil { + glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) + } + } + }(w, end) + offset = uint64(end) + } else { + err = fmt.Errorf("Cannot Read Current Volume Position: %v", e) + return + } + + if version == Version3 { + tsOffset := NeedleHeaderSize + size + NeedleChecksumSize + util.Uint64toBytes(dataSlice[tsOffset:tsOffset+TimestampSize], appendAtNs) + } + + if err == nil { + _, err = w.WriteAt(dataSlice, int64(offset)) + } + + return + +} + func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size Size, version Version) (dataSlice []byte, err error) { dataSize := GetActualSize(size, version) @@ -168,7 +196,7 @@ func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size Size, versi } if err != nil { fileSize, _, _ := r.GetStat() - println("n",n, "dataSize", dataSize, "offset", offset, "fileSize", fileSize) + println("n", n, "dataSize", dataSize, "offset", offset, "fileSize", fileSize) } return dataSlice, err diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go index b25b5e89a..ba1fd3d1e 100644 --- a/weed/storage/needle_map/memdb.go +++ b/weed/storage/needle_map/memdb.go @@ -2,6 +2,7 @@ package needle_map import ( "fmt" + "io" "os" "github.com/syndtr/goleveldb/leveldb" @@ -104,7 +105,13 @@ func (cm *MemDb) LoadFromIdx(idxName string) (ret error) { } defer idxFile.Close() - return idx.WalkIndexFile(idxFile, func(key NeedleId, offset Offset, size Size) error { + return cm.LoadFromReaderAt(idxFile) + +} + +func (cm *MemDb) LoadFromReaderAt(readerAt io.ReaderAt) (ret error) { + + return idx.WalkIndexFile(readerAt, func(key NeedleId, offset Offset, size Size) error { if offset.IsZero() || size.IsDeleted() { return 
cm.Delete(key) } diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index 9716e9729..31c86d124 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -152,8 +152,10 @@ func (m *LevelDbNeedleMap) Close() { glog.Warningf("close index file %s failed: %v", indexFileName, err) } - if err := m.db.Close(); err != nil { - glog.Warningf("close levelDB failed: %v", err) + if m.db != nil { + if err := m.db.Close(); err != nil { + glog.Warningf("close levelDB failed: %v", err) + } } } diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go index 3449ff9dc..662b90531 100644 --- a/weed/storage/needle_map_sorted_file.go +++ b/weed/storage/needle_map_sorted_file.go @@ -94,8 +94,12 @@ func (m *SortedFileNeedleMap) Delete(key NeedleId, offset Offset) error { } func (m *SortedFileNeedleMap) Close() { - m.indexFile.Close() - m.dbFile.Close() + if m.indexFile != nil { + m.indexFile.Close() + } + if m.dbFile != nil { + m.dbFile.Close() + } } func (m *SortedFileNeedleMap) Destroy() error { diff --git a/weed/storage/store.go b/weed/storage/store.go index 47829666a..6be15a4c9 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -106,6 +106,9 @@ func (s *Store) FindFreeLocation(diskType DiskType) (ret *DiskLocation) { if diskType != location.DiskType { continue } + if location.isDiskSpaceLow { + continue + } currentFreeCount := location.MaxVolumeCount - location.VolumesLen() currentFreeCount *= erasure_coding.DataShardsCount currentFreeCount -= location.EcVolumesLen() @@ -217,23 +220,36 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { location.volumesLock.RLock() for _, v := range location.volumes { curMaxFileKey, volumeMessage := v.ToVolumeInformationMessage() + if volumeMessage == nil { + continue + } if maxFileKey < curMaxFileKey { maxFileKey = curMaxFileKey } + deleteVolume := false if !v.expired(volumeMessage.Size, s.GetVolumeSizeLimit()) { volumeMessages = append(volumeMessages, volumeMessage) } else { if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) { deleteVids = append(deleteVids, v.Id) + deleteVolume = true } else { glog.V(0).Infof("volume %d is expired", v.Id) } if v.lastIoError != nil { deleteVids = append(deleteVids, v.Id) + deleteVolume = true glog.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError) } } - collectionVolumeSize[v.Collection] += volumeMessage.Size + + if _, exist := collectionVolumeSize[v.Collection]; !exist { + collectionVolumeSize[v.Collection] = 0 + } + if !deleteVolume { + collectionVolumeSize[v.Collection] += volumeMessage.Size + } + if _, exist := collectionVolumeReadOnlyCount[v.Collection]; !exist { collectionVolumeReadOnlyCount[v.Collection] = map[string]uint8{ "IsReadOnly": 0, @@ -242,7 +258,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { "isDiskSpaceLow": 0, } } - if v.IsReadOnly() { + if !deleteVolume && v.IsReadOnly() { collectionVolumeReadOnlyCount[v.Collection]["IsReadOnly"] += 1 if v.noWriteOrDelete { collectionVolumeReadOnlyCount[v.Collection]["noWriteOrDelete"] += 1 @@ -267,7 +283,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { glog.V(0).Infof("volume %d is deleted", vid) } } else { - glog.V(0).Infof("delete volume %d: %v", vid, err) + glog.Warningf("delete volume %d: %v", vid, err) } } location.volumesLock.Unlock() @@ -446,7 +462,7 @@ func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error { // load, modify, save baseFileName := strings.TrimSuffix(fileInfo.Name(), 
filepath.Ext(fileInfo.Name())) vifFile := filepath.Join(location.Directory, baseFileName+".vif") - volumeInfo, _, err := pb.MaybeLoadVolumeInfo(vifFile) + volumeInfo, _, _, err := pb.MaybeLoadVolumeInfo(vifFile) if err != nil { return fmt.Errorf("volume %d fail to load vif", i) } diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index a9b6a8ff3..9702fdd50 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -200,7 +200,6 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur return } glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err) - forgetShardId(ecVolume, shardId) } // try reading by recovering from other shards @@ -303,7 +302,7 @@ func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId type break } if receiveErr != nil { - return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err) + return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, receiveErr) } if resp.IsDeleted { is_deleted = true diff --git a/weed/storage/super_block/replica_placement.go b/weed/storage/super_block/replica_placement.go index 65ec53819..a263e6669 100644 --- a/weed/storage/super_block/replica_placement.go +++ b/weed/storage/super_block/replica_placement.go @@ -36,6 +36,9 @@ func NewReplicaPlacementFromByte(b byte) (*ReplicaPlacement, error) { } func (rp *ReplicaPlacement) Byte() byte { + if rp == nil { + return 0 + } ret := rp.DiffDataCenterCount*100 + rp.DiffRackCount*10 + rp.SameRackCount return byte(ret) } diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 366449c53..e0638d8a8 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -234,10 +234,16 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool { return false } -func (v *Volume) CollectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64) { +func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) { v.dataFileAccessLock.RLock() defer v.dataFileAccessLock.RUnlock() - glog.V(3).Infof("CollectStatus volume %d", v.Id) + glog.V(3).Infof("collectStatus volume %d", v.Id) + + if v.nm == nil { + return + } + + ok = true maxFileKey = v.nm.MaxFileKey() datFileSize, modTime, _ = v.DataBackend.GetStat() @@ -251,7 +257,11 @@ func (v *Volume) CollectStatus() (maxFileKey types.NeedleId, datFileSize int64, func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.VolumeInformationMessage) { - maxFileKey, volumeSize, modTime, fileCount, deletedCount, deletedSize := v.CollectStatus() + maxFileKey, volumeSize, modTime, fileCount, deletedCount, deletedSize, ok := v.collectStatus() + + if !ok { + return 0, nil + } volumeInfo := &master_pb.VolumeInformationMessage{ Id: uint32(v.Id), diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index bff1055bb..0cf603ad8 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -39,12 +39,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind } }() - hasVolumeInfoFile := v.maybeLoadVolumeInfo() && v.volumeInfo.Version != 0 + hasVolumeInfoFile := v.maybeLoadVolumeInfo() if v.HasRemoteFile() { v.noWriteCanDelete = true v.noWriteOrDelete = false - glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo.Files) + glog.V(0).Infof("loading volume 
%d from remote %v", v.Id, v.volumeInfo) v.LoadRemoteFile() alreadyHasSuperBlock = true } else if exists, canRead, canWrite, modifiedTime, fileSize := util.CheckFile(v.FileName(".dat")); exists { @@ -83,6 +83,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind if alreadyHasSuperBlock { err = v.readSuperBlock() + glog.V(0).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version) + if v.HasRemoteFile() { + // maybe temporary network problem + glog.Errorf("readSuperBlock remote volume %d: %v", v.Id, err) + err = nil + } } else { if !v.SuperBlock.Initialized() { return fmt.Errorf("volume %s not initialized", v.FileName(".dat")) diff --git a/weed/storage/volume_read.go b/weed/storage/volume_read.go new file mode 100644 index 000000000..f689eeec0 --- /dev/null +++ b/weed/storage/volume_read.go @@ -0,0 +1,131 @@ +package storage + +import ( + "fmt" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + . "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +// read fills in Needle content by looking up n.Id from NeedleMapper +func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, error) { + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() + + nv, ok := v.nm.Get(n.Id) + if !ok || nv.Offset.IsZero() { + return -1, ErrorNotFound + } + readSize := nv.Size + if readSize.IsDeleted() { + if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize { + glog.V(3).Infof("reading deleted %s", n.String()) + readSize = -readSize + } else { + return -1, ErrorDeleted + } + } + if readSize == 0 { + return 0, nil + } + err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version()) + if err == needle.ErrorSizeMismatch && OffsetSize == 4 { + err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version()) + } + v.checkReadWriteError(err) + if err != nil { + return 0, err + } + bytesRead := len(n.Data) + if !n.HasTtl() { + return bytesRead, nil + } + ttlMinutes := n.Ttl.Minutes() + if ttlMinutes == 0 { + return bytesRead, nil + } + if !n.HasLastModifiedDate() { + return bytesRead, nil + } + if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) { + return bytesRead, nil + } + return -1, ErrorNotFound +} + +// read fills in Needle content by looking up n.Id from NeedleMapper +func (v *Volume) ReadNeedleBlob(offset int64, size Size) ([]byte, error) { + v.dataFileAccessLock.RLock() + defer v.dataFileAccessLock.RUnlock() + + return needle.ReadNeedleBlob(v.DataBackend, offset, size, v.Version()) +} + +type VolumeFileScanner interface { + VisitSuperBlock(super_block.SuperBlock) error + ReadNeedleBody() bool + VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error +} + +func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, + needleMapKind NeedleMapKind, + volumeFileScanner VolumeFileScanner) (err error) { + var v *Volume + if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil { + return fmt.Errorf("failed to load volume %d: %v", id, err) + } + if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { + return fmt.Errorf("failed to process volume %d super block: %v", id, err) + } + defer v.Close() + + version := v.Version() + + 
offset := int64(v.SuperBlock.BlockSize()) + + return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner) +} + +func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) { + n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset) + if e != nil { + if e == io.EOF { + return nil + } + return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e) + } + for n != nil { + var needleBody []byte + if volumeFileScanner.ReadNeedleBody() { + // println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest) + if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil { + glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err) + // err = fmt.Errorf("cannot read needle body: %v", err) + // return + } + } + err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody) + if err == io.EOF { + return nil + } + if err != nil { + glog.V(0).Infof("visit needle error: %v", err) + return fmt.Errorf("visit needle error: %v", err) + } + offset += NeedleHeaderSize + rest + glog.V(4).Infof("==> new entry offset %d", offset) + if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err) + } + glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest) + } + return nil +} diff --git a/weed/storage/volume_stream_write.go b/weed/storage/volume_stream_write.go new file mode 100644 index 000000000..d229bdf20 --- /dev/null +++ b/weed/storage/volume_stream_write.go @@ -0,0 +1,104 @@ +package storage + +import ( + "bufio" + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + . 
"github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func (v *Volume) StreamWrite(n *needle.Needle, data io.Reader, dataSize uint32) (err error) { + + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() + + df, ok := v.DataBackend.(*backend.DiskFile) + if !ok { + return fmt.Errorf("unexpected volume backend") + } + offset, _, _ := v.DataBackend.GetStat() + + header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation + CookieToBytes(header[0:CookieSize], n.Cookie) + NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id) + n.Size = 4 + Size(dataSize) + 1 + SizeToBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size) + + n.DataSize = dataSize + + // needle header + df.Write(header[0:NeedleHeaderSize]) + + // data size and data + util.Uint32toBytes(header[0:4], n.DataSize) + df.Write(header[0:4]) + // write and calculate CRC + crcWriter := needle.NewCRCwriter(df) + io.Copy(crcWriter, io.LimitReader(data, int64(dataSize))) + + // flags + util.Uint8toBytes(header[0:1], n.Flags) + df.Write(header[0:1]) + + // data checksum + util.Uint32toBytes(header[0:needle.NeedleChecksumSize], crcWriter.Sum()) + // write timestamp, padding + n.AppendAtNs = uint64(time.Now().UnixNano()) + util.Uint64toBytes(header[needle.NeedleChecksumSize:needle.NeedleChecksumSize+TimestampSize], n.AppendAtNs) + padding := needle.PaddingLength(n.Size, needle.Version3) + df.Write(header[0 : needle.NeedleChecksumSize+TimestampSize+padding]) + + // add to needle map + if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil { + glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err) + } + return +} + +func (v *Volume) StreamRead(n *needle.Needle, writer io.Writer) (err error) { + + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() + + nv, ok := v.nm.Get(n.Id) + if !ok || nv.Offset.IsZero() { + return ErrorNotFound + } + + sr := &StreamReader{ + readerAt: v.DataBackend, + offset: nv.Offset.ToActualOffset(), + } + bufReader := bufio.NewReader(sr) + bufReader.Discard(NeedleHeaderSize) + sizeBuf := make([]byte, 4) + bufReader.Read(sizeBuf) + if _, err = writer.Write(sizeBuf); err != nil { + return err + } + dataSize := util.BytesToUint32(sizeBuf) + + _, err = io.Copy(writer, io.LimitReader(bufReader, int64(dataSize))) + + return +} + +type StreamReader struct { + offset int64 + readerAt io.ReaderAt +} + +func (sr *StreamReader) Read(p []byte) (n int, err error) { + n, err = sr.readerAt.ReadAt(p, sr.offset) + if err != nil { + return + } + sr.offset += int64(n) + return +} diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go index 77efd8a14..23160906b 100644 --- a/weed/storage/volume_tier.go +++ b/weed/storage/volume_tier.go @@ -6,6 +6,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/backend" _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo { @@ -14,13 +15,23 @@ func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo { func (v *Volume) maybeLoadVolumeInfo() (found bool) { - v.volumeInfo, v.hasRemoteFile, _ = pb.MaybeLoadVolumeInfo(v.FileName(".vif")) + var err error + v.volumeInfo, v.hasRemoteFile, found, err = pb.MaybeLoadVolumeInfo(v.FileName(".vif")) + + if v.volumeInfo.Version == 0 { + v.volumeInfo.Version = uint32(needle.CurrentVersion) + } if v.hasRemoteFile 
{ glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id, v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key) } + if err != nil { + glog.Warningf("load volume %d.vif file: %v", v.Id, err) + return + } + return } diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 0ee1e61c6..be84f8a13 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -286,7 +286,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI if err != nil { return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, err) } - dstDatBackend.Append(needleBytes) + dstDatBackend.Write(needleBytes) util.Uint32toBytes(idxEntryBytes[8:12], uint32(offset/NeedlePaddingSize)) } else { //deleted needle //fakeDelNeedle 's default Data field is nil diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_write.go similarity index 62% rename from weed/storage/volume_read_write.go rename to weed/storage/volume_write.go index 07376bc88..a286c5dd5 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_write.go @@ -4,14 +4,12 @@ import ( "bytes" "errors" "fmt" - "io" "os" "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/chrislusf/seaweedfs/weed/storage/super_block" . "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -104,47 +102,8 @@ func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchan err = fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize()) return } - if v.isFileUnchanged(n) { - size = Size(n.DataSize) - isUnchanged = true - return - } - // check whether existing needle cookie matches - nv, ok := v.nm.Get(n.Id) - if ok { - existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToActualOffset()) - if existingNeedleReadErr != nil { - err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr) - return - } - if existingNeedle.Cookie != n.Cookie { - glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie) - err = fmt.Errorf("mismatching cookie %x", n.Cookie) - return - } - } - - // append to dat file - n.AppendAtNs = uint64(time.Now().UnixNano()) - offset, size, _, err = n.Append(v.DataBackend, v.Version()) - v.checkReadWriteError(err) - if err != nil { - return - } - - v.lastAppendAtNs = n.AppendAtNs - - // add to needle map - if !ok || uint64(nv.Offset.ToActualOffset()) < offset { - if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil { - glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err) - } - } - if v.lastModifiedTsSeconds < n.LastModified { - v.lastModifiedTsSeconds = n.LastModified - } - return + return v.doWriteRequest(n) } func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) { @@ -185,7 +144,8 @@ func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isU return } if existingNeedle.Cookie != n.Cookie { - glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie) + glog.V(0).Infof("write cookie mismatch: existing %s, new %s", + needle.NewFileIdFromNeedle(v.Id, existingNeedle), needle.NewFileIdFromNeedle(v.Id, n)) err = fmt.Errorf("mismatching 
cookie %x", n.Cookie) return } @@ -223,24 +183,7 @@ func (v *Volume) syncDelete(n *needle.Needle) (Size, error) { return 0, err } - nv, ok := v.nm.Get(n.Id) - // fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size) - if ok && nv.Size.IsValid() { - size := nv.Size - n.Data = nil - n.AppendAtNs = uint64(time.Now().UnixNano()) - offset, _, _, err := n.Append(v.DataBackend, v.Version()) - v.checkReadWriteError(err) - if err != nil { - return size, err - } - v.lastAppendAtNs = n.AppendAtNs - if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil { - return size, err - } - return size, err - } - return 0, nil + return v.doDeleteRequest(n) } func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) { @@ -282,52 +225,6 @@ func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) { return 0, nil } -// read fills in Needle content by looking up n.Id from NeedleMapper -func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, error) { - v.dataFileAccessLock.RLock() - defer v.dataFileAccessLock.RUnlock() - - nv, ok := v.nm.Get(n.Id) - if !ok || nv.Offset.IsZero() { - return -1, ErrorNotFound - } - readSize := nv.Size - if readSize.IsDeleted() { - if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize { - glog.V(3).Infof("reading deleted %s", n.String()) - readSize = -readSize - } else { - return -1, ErrorDeleted - } - } - if readSize == 0 { - return 0, nil - } - err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version()) - if err == needle.ErrorSizeMismatch && OffsetSize == 4 { - err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version()) - } - v.checkReadWriteError(err) - if err != nil { - return 0, err - } - bytesRead := len(n.Data) - if !n.HasTtl() { - return bytesRead, nil - } - ttlMinutes := n.Ttl.Minutes() - if ttlMinutes == 0 { - return bytesRead, nil - } - if !n.HasLastModifiedDate() { - return bytesRead, nil - } - if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) { - return bytesRead, nil - } - return -1, ErrorNotFound -} - func (v *Volume) startWorker() { go func() { chanClosed := false @@ -403,66 +300,28 @@ func (v *Volume) startWorker() { }() } -type VolumeFileScanner interface { - VisitSuperBlock(super_block.SuperBlock) error - ReadNeedleBody() bool - VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error -} - -func ScanVolumeFile(dirname string, collection string, id needle.VolumeId, - needleMapKind NeedleMapKind, - volumeFileScanner VolumeFileScanner) (err error) { - var v *Volume - if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil { - return fmt.Errorf("failed to load volume %d: %v", id, err) - } - if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil { - return fmt.Errorf("failed to process volume %d super block: %v", id, err) - } - defer v.Close() - - version := v.Version() - - offset := int64(v.SuperBlock.BlockSize()) - - return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner) -} - -func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) { - n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset) - if e != nil { - if e == io.EOF { - return nil - } - return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e) - } - for n != 
nil { - var needleBody []byte - if volumeFileScanner.ReadNeedleBody() { - // println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest) - if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil { - glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err) - // err = fmt.Errorf("cannot read needle body: %v", err) - // return - } - } - err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody) - if err == io.EOF { - return nil - } - if err != nil { - glog.V(0).Infof("visit needle error: %v", err) - return fmt.Errorf("visit needle error: %v", err) - } - offset += NeedleHeaderSize + rest - glog.V(4).Infof("==> new entry offset %d", offset) - if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { - if err == io.EOF { - return nil - } - return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err) - } - glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest) - } - return nil +func (v *Volume) WriteNeedleBlob(needleId NeedleId, needleBlob []byte, size Size) error { + + v.dataFileAccessLock.Lock() + defer v.dataFileAccessLock.Unlock() + + if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(len(needleBlob)) { + return fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize()) + } + + appendAtNs := uint64(time.Now().UnixNano()) + offset, err := needle.WriteNeedleBlob(v.DataBackend, needleBlob, size, appendAtNs, v.Version()) + + v.checkReadWriteError(err) + if err != nil { + return err + } + v.lastAppendAtNs = appendAtNs + + // add to needle map + if err = v.nm.Put(needleId, ToOffset(int64(offset)), size); err != nil { + glog.V(4).Infof("failed to put in needle map %d: %v", needleId, err) + } + + return err } diff --git a/weed/topology/data_node_ec.go b/weed/topology/data_node_ec.go index df1b6d658..330b16b24 100644 --- a/weed/topology/data_node_ec.go +++ b/weed/topology/data_node_ec.go @@ -25,7 +25,7 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) existingEcShards := dn.GetEcShards() - // found out the newShards and deletedShards + // find out the newShards and deletedShards var newShardCount, deletedShardCount int for _, ecShards := range existingEcShards { @@ -56,20 +56,19 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) disk.UpAdjustDiskUsageDelta(deltaDiskUsages) } + for _, ecShards := range actualShards { + if dn.hasEcShards(ecShards.VolumeId) { + continue + } + + newShards = append(newShards, ecShards) disk := dn.getOrCreateDisk(ecShards.DiskType) deltaDiskUsages := newDiskUsages() deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(ecShards.DiskType)) - - if !dn.hasEcShards(ecShards.VolumeId) { - newShards = append(newShards, ecShards) - newShardCount += ecShards.ShardIdCount() - } - - deltaDiskUsage.ecShardCount = int64(newShardCount) + deltaDiskUsage.ecShardCount = int64(ecShards.ShardIdCount()) disk.UpAdjustDiskUsageDelta(deltaDiskUsages) - } if len(newShards) > 0 || len(deletedShards) > 0 { diff --git a/weed/util/chunk_cache/chunk_cache_in_memory.go b/weed/util/chunk_cache/chunk_cache_in_memory.go index 1eb00e1fa..5f26b8c78 100644 --- a/weed/util/chunk_cache/chunk_cache_in_memory.go +++ b/weed/util/chunk_cache/chunk_cache_in_memory.go @@ -32,5 +32,7 @@ func (c *ChunkCacheInMemory) GetChunk(fileId string) 
[]byte { } func (c *ChunkCacheInMemory) SetChunk(fileId string, data []byte) { - c.cache.Set(fileId, data, time.Hour) + localCopy := make([]byte, len(data)) + copy(localCopy, data) + c.cache.Set(fileId, localCopy, time.Hour) } diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go index d724e925e..6f87a9a06 100644 --- a/weed/util/chunk_cache/chunk_cache_on_disk.go +++ b/weed/util/chunk_cache/chunk_cache_on_disk.go @@ -88,15 +88,17 @@ func (v *ChunkCacheVolume) Shutdown() { } } -func (v *ChunkCacheVolume) destroy() { +func (v *ChunkCacheVolume) doReset() { v.Shutdown() - os.Remove(v.fileName + ".dat") - os.Remove(v.fileName + ".idx") + os.Truncate(v.fileName + ".dat", 0) + os.Truncate(v.fileName + ".idx", 0) + glog.V(4).Infof("cache removeAll %s ...", v.fileName + ".ldb") os.RemoveAll(v.fileName + ".ldb") + glog.V(4).Infof("cache removed %s", v.fileName + ".ldb") } func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) { - v.destroy() + v.doReset() return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit) } diff --git a/weed/util/constants.go b/weed/util/constants.go index 55f5b52d6..c595f0c53 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,7 +5,7 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 28) + VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 41) COMMIT = "" ) diff --git a/weed/util/fasthttp_util.go b/weed/util/fasthttp_util.go deleted file mode 100644 index 82575af98..000000000 --- a/weed/util/fasthttp_util.go +++ /dev/null @@ -1,115 +0,0 @@ -package util - -import ( - "bytes" - "fmt" - "github.com/valyala/fasthttp" - "sync" - "time" -) - -var ( - fastClient = &fasthttp.Client{ - NoDefaultUserAgentHeader: true, // Don't send: User-Agent: fasthttp - MaxConnsPerHost: 1024, - ReadBufferSize: 4096, // Make sure to set this big enough that your whole request can be read at once. - WriteBufferSize: 64 * 1024, // Same but for your response. - ReadTimeout: time.Second, - WriteTimeout: time.Second, - MaxIdleConnDuration: time.Minute, - DisableHeaderNamesNormalizing: true, // If you set the case on your headers correctly you can enable this. - } - - // Put everything in pools to prevent garbage. 
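// --- illustrative note, not part of this diff ---
// The ChunkCacheInMemory.SetChunk change above copies the data before caching
// because callers commonly reuse their read buffer; storing the caller's slice
// directly would let a later read mutate the cached bytes. Hypothetical example:
//
//	buf := []byte("first")
//	cache.SetChunk("1,0123456789", buf) // the cache keeps its own copy
//	copy(buf, "XXXXX")                  // caller reuses the buffer
//	_ = cache.GetChunk("1,0123456789")  // still returns "first"
// --- end note ---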
- bytesPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0) - return &b - }, - } - - responsePool = sync.Pool{ - New: func() interface{} { - return make(chan *fasthttp.Response) - }, - } -) - -func FastGet(url string) ([]byte, bool, error) { - - req := fasthttp.AcquireRequest() - res := fasthttp.AcquireResponse() - defer fasthttp.ReleaseRequest(req) - defer fasthttp.ReleaseResponse(res) - - req.SetRequestURIBytes([]byte(url)) - req.Header.Add("Accept-Encoding", "gzip") - - err := fastClient.Do(req, res) - if err != nil { - return nil, true, err - } - - var data []byte - contentEncoding := res.Header.Peek("Content-Encoding") - if bytes.Compare(contentEncoding, []byte("gzip")) == 0 { - data, err = res.BodyGunzip() - } else { - data = res.Body() - } - - out := make([]byte, len(data)) - copy(out, data) - - if res.StatusCode() >= 400 { - retryable := res.StatusCode() >= 500 - return nil, retryable, fmt.Errorf("%s: %d", url, res.StatusCode()) - } - if err != nil { - return nil, false, err - } - return out, false, nil -} - -func FastReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) (retryable bool, err error) { - - if cipherKey != nil { - return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, isFullChunk, offset, size, fn) - } - - req := fasthttp.AcquireRequest() - res := fasthttp.AcquireResponse() - defer fasthttp.ReleaseRequest(req) - defer fasthttp.ReleaseResponse(res) - - req.SetRequestURIBytes([]byte(fileUrl)) - - if isFullChunk { - req.Header.Add("Accept-Encoding", "gzip") - } else { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) - } - - if err = fastClient.Do(req, res); err != nil { - return true, err - } - - if res.StatusCode() >= 400 { - retryable = res.StatusCode() >= 500 - return retryable, fmt.Errorf("%s: %d", fileUrl, res.StatusCode()) - } - - contentEncoding := res.Header.Peek("Content-Encoding") - if bytes.Compare(contentEncoding, []byte("gzip")) == 0 { - bodyData, err := res.BodyGunzip() - if err != nil { - return false, err - } - fn(bodyData) - } else { - fn(res.Body()) - } - - return false, nil - -} diff --git a/weed/util/fla9/fla9.go b/weed/util/fla9/fla9.go index 1538daa55..eb5700e8c 100644 --- a/weed/util/fla9/fla9.go +++ b/weed/util/fla9/fla9.go @@ -886,7 +886,7 @@ func (f *FlagSet) parseOne() (bool, error) { // The return value will be ErrHelp if -help or -h were set but not defined. 
func (f *FlagSet) Parse(arguments []string) error { if _, ok := f.formal[DefaultConfigFlagName]; !ok { - f.String(DefaultConfigFlagName, "", "file with command line options with each line in optionName=optionValue format") + f.String(DefaultConfigFlagName, "", "a file of command line options, each line in optionName=optionValue format") } f.parsed = true diff --git a/weed/util/http_util.go b/weed/util/http_util.go index eff282bab..1630760b1 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -124,6 +124,27 @@ func Delete(url string, jwt string) error { return errors.New(string(body)) } +func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err error) { + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return + } + if jwt != "" { + req.Header.Set("Authorization", "BEARER "+string(jwt)) + } + resp, err := client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return + } + httpStatus = resp.StatusCode + return +} + func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error { r, err := client.PostForm(url, values) if err != nil { @@ -313,7 +334,7 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is } func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) (bool, error) { - encryptedData, retryable, err := FastGet(fileUrl) + encryptedData, retryable, err := Get(fileUrl) if err != nil { return retryable, fmt.Errorf("fetch %s: %v", fileUrl, err) } diff --git a/weed/util/limiter.go b/weed/util/limiter.go index 91499632c..2debaaa85 100644 --- a/weed/util/limiter.go +++ b/weed/util/limiter.go @@ -1,5 +1,12 @@ package util +import ( + "math/rand" + "reflect" + "sync" + "sync/atomic" +) + // initial version comes from https://github.com/korovkin/limiter/blob/master/limiter.go // LimitedConcurrentExecutor object @@ -38,3 +45,70 @@ func (c *LimitedConcurrentExecutor) Execute(job func()) { job() }() } + +// a different implementation, but somehow more "conservative" +type OperationRequest func() + +type LimitedOutOfOrderProcessor struct { + processorSlots uint32 + processors []chan OperationRequest + processorLimit int32 + processorLimitCond *sync.Cond + currentProcessor int32 +} + +func NewLimitedOutOfOrderProcessor(limit int32) (c *LimitedOutOfOrderProcessor) { + + processorSlots := uint32(32) + c = &LimitedOutOfOrderProcessor{ + processorSlots: processorSlots, + processors: make([]chan OperationRequest, processorSlots), + processorLimit: limit, + processorLimitCond: sync.NewCond(new(sync.Mutex)), + } + + for i := 0; i < int(processorSlots); i++ { + c.processors[i] = make(chan OperationRequest) + } + + cases := make([]reflect.SelectCase, processorSlots) + for i, ch := range c.processors { + cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)} + } + + go func() { + for { + _, value, ok := reflect.Select(cases) + if !ok { + continue + } + + request := value.Interface().(OperationRequest) + + if c.processorLimit > 0 { + c.processorLimitCond.L.Lock() + for atomic.LoadInt32(&c.currentProcessor) > c.processorLimit { + c.processorLimitCond.Wait() + } + atomic.AddInt32(&c.currentProcessor, 1) + c.processorLimitCond.L.Unlock() + } + + go func() { + if c.processorLimit > 0 { + defer atomic.AddInt32(&c.currentProcessor, -1) + defer c.processorLimitCond.Signal() + } + request() + }() + + } + 
}() + + return c +} + +func (c *LimitedOutOfOrderProcessor) Execute(request OperationRequest) { + index := rand.Uint32() % c.processorSlots + c.processors[index] <- request +} diff --git a/weed/wdclient/exclusive_locks/exclusive_locker.go b/weed/wdclient/exclusive_locks/exclusive_locker.go index d477a6b2d..5b5fa2704 100644 --- a/weed/wdclient/exclusive_locks/exclusive_locker.go +++ b/weed/wdclient/exclusive_locks/exclusive_locker.go @@ -41,7 +41,7 @@ func (l *ExclusiveLocker) GetToken() (token int64, lockTsNs int64) { return atomic.LoadInt64(&l.token), atomic.LoadInt64(&l.lockTsNs) } -func (l *ExclusiveLocker) RequestLock() { +func (l *ExclusiveLocker) RequestLock(clientName string) { if l.isLocking { return } @@ -56,6 +56,7 @@ func (l *ExclusiveLocker) RequestLock() { PreviousToken: atomic.LoadInt64(&l.token), PreviousLockTime: atomic.LoadInt64(&l.lockTsNs), LockName: AdminLockName, + ClientName: clientName, }) if err == nil { atomic.StoreInt64(&l.token, resp.Token) @@ -63,7 +64,7 @@ func (l *ExclusiveLocker) RequestLock() { } return err }); err != nil { - // println("leasing problem", err.Error()) + println("lock:", err.Error()) time.Sleep(InitLockInteval) } else { break @@ -83,6 +84,7 @@ func (l *ExclusiveLocker) RequestLock() { PreviousToken: atomic.LoadInt64(&l.token), PreviousLockTime: atomic.LoadInt64(&l.lockTsNs), LockName: AdminLockName, + ClientName: clientName, }) if err == nil { atomic.StoreInt64(&l.token, resp.Token) diff --git a/weed/wdclient/net2/base_connection_pool.go b/weed/wdclient/net2/base_connection_pool.go new file mode 100644 index 000000000..5cc037d0f --- /dev/null +++ b/weed/wdclient/net2/base_connection_pool.go @@ -0,0 +1,159 @@ +package net2 + +import ( + "net" + "strings" + "time" + + rp "github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool" +) + +const defaultDialTimeout = 1 * time.Second + +func defaultDialFunc(network string, address string) (net.Conn, error) { + return net.DialTimeout(network, address, defaultDialTimeout) +} + +func parseResourceLocation(resourceLocation string) ( + network string, + address string) { + + idx := strings.Index(resourceLocation, " ") + if idx >= 0 { + return resourceLocation[:idx], resourceLocation[idx+1:] + } + + return "", resourceLocation +} + +// A thin wrapper around the underlying resource pool. 
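// --- illustrative sketch, not part of this diff ---
// A hypothetical caller of the LimitedOutOfOrderProcessor added in
// weed/util/limiter.go above: jobs are scattered over the internal channels and
// executed concurrently, with the in-flight count held near the configured
// limit; completion order is not preserved. Imports of "sync" and the weed/util
// package are omitted for brevity.
//
//	processor := util.NewLimitedOutOfOrderProcessor(4)
//	var wg sync.WaitGroup
//	for i := 0; i < 100; i++ {
//		wg.Add(1)
//		processor.Execute(func() {
//			defer wg.Done()
//			// some independent unit of work
//		})
//	}
//	wg.Wait()
// --- end sketch ---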
+type connectionPoolImpl struct { + options ConnectionOptions + + pool rp.ResourcePool +} + +// This returns a connection pool where all connections are connected +// to the same (network, address) +func newBaseConnectionPool( + options ConnectionOptions, + createPool func(rp.Options) rp.ResourcePool) ConnectionPool { + + dial := options.Dial + if dial == nil { + dial = defaultDialFunc + } + + openFunc := func(loc string) (interface{}, error) { + network, address := parseResourceLocation(loc) + return dial(network, address) + } + + closeFunc := func(handle interface{}) error { + return handle.(net.Conn).Close() + } + + poolOptions := rp.Options{ + MaxActiveHandles: options.MaxActiveConnections, + MaxIdleHandles: options.MaxIdleConnections, + MaxIdleTime: options.MaxIdleTime, + OpenMaxConcurrency: options.DialMaxConcurrency, + Open: openFunc, + Close: closeFunc, + NowFunc: options.NowFunc, + } + + return &connectionPoolImpl{ + options: options, + pool: createPool(poolOptions), + } +} + +// This returns a connection pool where all connections are connected +// to the same (network, address) +func NewSimpleConnectionPool(options ConnectionOptions) ConnectionPool { + return newBaseConnectionPool(options, rp.NewSimpleResourcePool) +} + +// This returns a connection pool that manages multiple (network, address) +// entries. The connections to each (network, address) entry acts +// independently. For example ("tcp", "localhost:11211") could act as memcache +// shard 0 and ("tcp", "localhost:11212") could act as memcache shard 1. +func NewMultiConnectionPool(options ConnectionOptions) ConnectionPool { + return newBaseConnectionPool( + options, + func(poolOptions rp.Options) rp.ResourcePool { + return rp.NewMultiResourcePool(poolOptions, nil) + }) +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) NumActive() int32 { + return p.pool.NumActive() +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) ActiveHighWaterMark() int32 { + return p.pool.ActiveHighWaterMark() +} + +// This returns the number of alive idle connections. This method is not part +// of ConnectionPool's API. It is used only for testing. +func (p *connectionPoolImpl) NumIdle() int { + return p.pool.NumIdle() +} + +// BaseConnectionPool can only register a single (network, address) entry. +// Register should be call before any Get calls. +func (p *connectionPoolImpl) Register(network string, address string) error { + return p.pool.Register(network + " " + address) +} + +// BaseConnectionPool has nothing to do on Unregister. +func (p *connectionPoolImpl) Unregister(network string, address string) error { + return nil +} + +func (p *connectionPoolImpl) ListRegistered() []NetworkAddress { + result := make([]NetworkAddress, 0, 1) + for _, location := range p.pool.ListRegistered() { + network, address := parseResourceLocation(location) + + result = append( + result, + NetworkAddress{ + Network: network, + Address: address, + }) + } + return result +} + +// This gets an active connection from the connection pool. Note that network +// and address arguments are ignored (The connections with point to the +// network/address provided by the first Register call). +func (p *connectionPoolImpl) Get( + network string, + address string) (ManagedConn, error) { + + handle, err := p.pool.Get(network + " " + address) + if err != nil { + return nil, err + } + return NewManagedConn(network, address, handle, p, p.options), nil +} + +// See ConnectionPool for documentation. 
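// --- illustrative sketch, not part of this diff: a hypothetical caller of the
// simple connection pool above, written as if inside this net2 package; the
// address and payload are made up ---
func examplePoolUsage() error {
	pool := NewSimpleConnectionPool(ConnectionOptions{
		MaxActiveConnections: 16,
		MaxIdleConnections:   4,
	})
	if err := pool.Register("tcp", "localhost:8080"); err != nil {
		return err
	}
	conn, err := pool.Get("tcp", "localhost:8080")
	if err != nil {
		return err
	}
	if _, werr := conn.Write([]byte("ping")); werr != nil {
		_ = conn.DiscardConnection() // drop broken connections instead of pooling them
		return werr
	}
	return conn.ReleaseConnection() // hand healthy connections back for reuse
}
// --- end sketch ---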
+func (p *connectionPoolImpl) Release(conn ManagedConn) error { + return conn.ReleaseConnection() +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) Discard(conn ManagedConn) error { + return conn.DiscardConnection() +} + +// See ConnectionPool for documentation. +func (p *connectionPoolImpl) EnterLameDuckMode() { + p.pool.EnterLameDuckMode() +} diff --git a/weed/wdclient/net2/connection_pool.go b/weed/wdclient/net2/connection_pool.go new file mode 100644 index 000000000..5b8d4d232 --- /dev/null +++ b/weed/wdclient/net2/connection_pool.go @@ -0,0 +1,97 @@ +package net2 + +import ( + "net" + "time" +) + +type ConnectionOptions struct { + // The maximum number of connections that can be active per host at any + // given time (A non-positive value indicates the number of connections + // is unbounded). + MaxActiveConnections int32 + + // The maximum number of idle connections per host that are kept alive by + // the connection pool. + MaxIdleConnections uint32 + + // The maximum amount of time an idle connection can alive (if specified). + MaxIdleTime *time.Duration + + // This limits the number of concurrent Dial calls (there's no limit when + // DialMaxConcurrency is non-positive). + DialMaxConcurrency int + + // Dial specifies the dial function for creating network connections. + // If Dial is nil, net.DialTimeout is used, with timeout set to 1 second. + Dial func(network string, address string) (net.Conn, error) + + // This specifies the now time function. When the function is non-nil, the + // connection pool will use the specified function instead of time.Now to + // generate the current time. + NowFunc func() time.Time + + // This specifies the timeout for any Read() operation. + // Note that setting this to 0 (i.e. not setting it) will make + // read operations block indefinitely. + ReadTimeout time.Duration + + // This specifies the timeout for any Write() operation. + // Note that setting this to 0 (i.e. not setting it) will make + // write operations block indefinitely. + WriteTimeout time.Duration +} + +func (o ConnectionOptions) getCurrentTime() time.Time { + if o.NowFunc == nil { + return time.Now() + } else { + return o.NowFunc() + } +} + +// A generic interface for managed connection pool. All connection pool +// implementations must be threadsafe. +type ConnectionPool interface { + // This returns the number of active connections that are on loan. + NumActive() int32 + + // This returns the highest number of active connections for the entire + // lifetime of the pool. + ActiveHighWaterMark() int32 + + // This returns the number of idle connections that are in the pool. + NumIdle() int + + // This associates (network, address) to the connection pool; afterwhich, + // the user can get connections to (network, address). + Register(network string, address string) error + + // This dissociate (network, address) from the connection pool; + // afterwhich, the user can no longer get connections to + // (network, address). + Unregister(network string, address string) error + + // This returns the list of registered (network, address) entries. + ListRegistered() []NetworkAddress + + // This gets an active connection from the connection pool. The connection + // will remain active until one of the following is called: + // 1. conn.ReleaseConnection() + // 2. conn.DiscardConnection() + // 3. pool.Release(conn) + // 4. 
pool.Discard(conn) + Get(network string, address string) (ManagedConn, error) + + // This releases an active connection back to the connection pool. + Release(conn ManagedConn) error + + // This discards an active connection from the connection pool. + Discard(conn ManagedConn) error + + // Enter the connection pool into lame duck mode. The connection pool + // will no longer return connections, and all idle connections are closed + // immediately (including active connections that are released back to the + // pool afterward). + EnterLameDuckMode() +} diff --git a/weed/wdclient/net2/doc.go b/weed/wdclient/net2/doc.go new file mode 100644 index 000000000..fd1c6323d --- /dev/null +++ b/weed/wdclient/net2/doc.go @@ -0,0 +1,6 @@ +// net2 is a collection of functions meant to supplement the capabilities +// provided by the standard "net" package. +package net2 + +// copied from https://github.com/dropbox/godropbox/tree/master/net2 +// removed other dependencies diff --git a/weed/wdclient/net2/ip.go b/weed/wdclient/net2/ip.go new file mode 100644 index 000000000..ff5e3b24e --- /dev/null +++ b/weed/wdclient/net2/ip.go @@ -0,0 +1,177 @@ +package net2 + +import ( + "fmt" + "log" + "net" + "os" + "strings" + "sync" +) + +var myHostname string +var myHostnameOnce sync.Once + +// Like os.Hostname but caches first successful result, making it cheap to call it +// over and over. +// It will also crash whole process if fetching Hostname fails! +func MyHostname() string { + myHostnameOnce.Do(func() { + var err error + myHostname, err = os.Hostname() + if err != nil { + log.Fatal(err) + } + }) + return myHostname +} + +var myIp4 *net.IPAddr +var myIp4Once sync.Once + +// Resolves `MyHostname()` to an Ip4 address. Caches first successful result, making it +// cheap to call it over and over. +// It will also crash whole process if resolving the IP fails! +func MyIp4() *net.IPAddr { + myIp4Once.Do(func() { + var err error + myIp4, err = net.ResolveIPAddr("ip4", MyHostname()) + if err != nil { + log.Fatal(err) + } + }) + return myIp4 +} + +var myIp6 *net.IPAddr +var myIp6Once sync.Once + +// Resolves `MyHostname()` to an Ip6 address. Caches first successful result, making it +// cheap to call it over and over. +// It will also crash whole process if resolving the IP fails! +func MyIp6() *net.IPAddr { + myIp6Once.Do(func() { + var err error + myIp6, err = net.ResolveIPAddr("ip6", MyHostname()) + if err != nil { + log.Fatal(err) + } + }) + return myIp6 +} + +// This returns the list of local ip addresses which other hosts can connect +// to (NOTE: Loopback ip is ignored). +// Also resolves Hostname to an address and adds it to the list too, so +// IPs from /etc/hosts can work too. +func GetLocalIPs() ([]*net.IP, error) { + hostname, err := os.Hostname() + if err != nil { + return nil, fmt.Errorf("Failed to lookup hostname: %v", err) + } + // Resolves IP Address from Hostname, this way overrides in /etc/hosts + // can work too for IP resolution. + ipInfo, err := net.ResolveIPAddr("ip4", hostname) + if err != nil { + return nil, fmt.Errorf("Failed to resolve ip: %v", err) + } + ips := []*net.IP{&ipInfo.IP} + + // TODO(zviad): Is rest of the code really necessary? 
+ addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, fmt.Errorf("Failed to get interface addresses: %v", err) + } + for _, addr := range addrs { + ipnet, ok := addr.(*net.IPNet) + if !ok { + continue + } + + if ipnet.IP.IsLoopback() { + continue + } + + ips = append(ips, &ipnet.IP) + } + return ips, nil +} + +var localhostIPNets []*net.IPNet + +func init() { + for _, mask := range []string{"127.0.0.1/8", "::1/128"} { + _, ipnet, err := net.ParseCIDR(mask) + if err != nil { + panic(err) + } + localhostIPNets = append(localhostIPNets, ipnet) + } +} + +func IsLocalhostIp(ipStr string) bool { + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, ipnet := range localhostIPNets { + if ipnet.Contains(ip) { + return true + } + } + return false +} + +// Given a host string, return true if the host is an ip (v4/v6) localhost. +func IsLocalhost(host string) bool { + return IsLocalhostIp(host) || + host == "localhost" || + host == "ip6-localhost" || + host == "ipv6-localhost" +} + +// Resolves hostnames in addresses to actual IP4 addresses. Skips all invalid addresses +// and all addresses that can't be resolved. +// `addrs` are assumed to be of form: [":", ...] +// Returns an error in addition to resolved addresses if not all resolutions succeed. +func ResolveIP4s(addrs []string) ([]string, error) { + resolvedAddrs := make([]string, 0, len(addrs)) + var lastErr error + + for _, server := range addrs { + hostPort := strings.Split(server, ":") + if len(hostPort) != 2 { + lastErr = fmt.Errorf("Skipping invalid address: %s", server) + continue + } + + ip, err := net.ResolveIPAddr("ip4", hostPort[0]) + if err != nil { + lastErr = err + continue + } + resolvedAddrs = append(resolvedAddrs, ip.IP.String()+":"+hostPort[1]) + } + return resolvedAddrs, lastErr +} + +func LookupValidAddrs() (map[string]bool, error) { + hostName, err := os.Hostname() + if err != nil { + return nil, err + } + addrs, err := net.LookupHost(hostName) + if err != nil { + return nil, err + } + validAddrs := make(map[string]bool) + validAddrs[hostName] = true + for _, addr := range addrs { + validAddrs[addr] = true + } + // Special case localhost/127.0.0.1 so that this works on devVMs. It should + // have no affect in production. + validAddrs["127.0.0.1"] = true + validAddrs["localhost"] = true + return validAddrs, nil +} diff --git a/weed/wdclient/net2/managed_connection.go b/weed/wdclient/net2/managed_connection.go new file mode 100644 index 000000000..a886210d1 --- /dev/null +++ b/weed/wdclient/net2/managed_connection.go @@ -0,0 +1,185 @@ +package net2 + +import ( + "fmt" + "net" + "time" + + "errors" + "github.com/chrislusf/seaweedfs/weed/wdclient/resource_pool" +) + +// Dial's arguments. +type NetworkAddress struct { + Network string + Address string +} + +// A connection managed by a connection pool. NOTE: SetDeadline, +// SetReadDeadline and SetWriteDeadline are disabled for managed connections. +// (The deadlines are set by the connection pool). +type ManagedConn interface { + net.Conn + + // This returns the original (network, address) entry used for creating + // the connection. + Key() NetworkAddress + + // This returns the underlying net.Conn implementation. + RawConn() net.Conn + + // This returns the connection pool which owns this connection. + Owner() ConnectionPool + + // This indictes a user is done with the connection and releases the + // connection back to the connection pool. 
+ ReleaseConnection() error + + // This indicates the connection is an invalid state, and that the + // connection should be discarded from the connection pool. + DiscardConnection() error +} + +// A physical implementation of ManagedConn +type managedConnImpl struct { + addr NetworkAddress + handle resource_pool.ManagedHandle + pool ConnectionPool + options ConnectionOptions +} + +// This creates a managed connection wrapper. +func NewManagedConn( + network string, + address string, + handle resource_pool.ManagedHandle, + pool ConnectionPool, + options ConnectionOptions) ManagedConn { + + addr := NetworkAddress{ + Network: network, + Address: address, + } + + return &managedConnImpl{ + addr: addr, + handle: handle, + pool: pool, + options: options, + } +} + +func (c *managedConnImpl) rawConn() (net.Conn, error) { + h, err := c.handle.Handle() + return h.(net.Conn), err +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) RawConn() net.Conn { + h, _ := c.handle.Handle() + return h.(net.Conn) +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) Key() NetworkAddress { + return c.addr +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) Owner() ConnectionPool { + return c.pool +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) ReleaseConnection() error { + return c.handle.Release() +} + +// See ManagedConn for documentation. +func (c *managedConnImpl) DiscardConnection() error { + return c.handle.Discard() +} + +// See net.Conn for documentation +func (c *managedConnImpl) Read(b []byte) (n int, err error) { + conn, err := c.rawConn() + if err != nil { + return 0, err + } + + if c.options.ReadTimeout > 0 { + deadline := c.options.getCurrentTime().Add(c.options.ReadTimeout) + _ = conn.SetReadDeadline(deadline) + } + n, err = conn.Read(b) + if err != nil { + var localAddr string + if conn.LocalAddr() != nil { + localAddr = conn.LocalAddr().String() + } else { + localAddr = "(nil)" + } + + var remoteAddr string + if conn.RemoteAddr() != nil { + remoteAddr = conn.RemoteAddr().String() + } else { + remoteAddr = "(nil)" + } + err = fmt.Errorf("Read error from host: %s <-> %s: %v", localAddr, remoteAddr, err) + } + return +} + +// See net.Conn for documentation +func (c *managedConnImpl) Write(b []byte) (n int, err error) { + conn, err := c.rawConn() + if err != nil { + return 0, err + } + + if c.options.WriteTimeout > 0 { + deadline := c.options.getCurrentTime().Add(c.options.WriteTimeout) + _ = conn.SetWriteDeadline(deadline) + } + n, err = conn.Write(b) + if err != nil { + err = fmt.Errorf("Write error: %v", err) + } + return +} + +// See net.Conn for documentation +func (c *managedConnImpl) Close() error { + return c.handle.Discard() +} + +// See net.Conn for documentation +func (c *managedConnImpl) LocalAddr() net.Addr { + conn, _ := c.rawConn() + return conn.LocalAddr() +} + +// See net.Conn for documentation +func (c *managedConnImpl) RemoteAddr() net.Addr { + conn, _ := c.rawConn() + return conn.RemoteAddr() +} + +// SetDeadline is disabled for managed connection (The deadline is set by +// us, with respect to the read/write timeouts specified in ConnectionOptions). +func (c *managedConnImpl) SetDeadline(t time.Time) error { + return errors.New("Cannot set deadline for managed connection") +} + +// SetReadDeadline is disabled for managed connection (The deadline is set by +// us with respect to the read timeout specified in ConnectionOptions). 
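// --- illustrative note, not part of this diff ---
// Because SetDeadline/SetReadDeadline/SetWriteDeadline are rejected on managed
// connections, callers cannot set per-call deadlines; instead the pool applies
// ConnectionOptions.ReadTimeout and WriteTimeout before every Read and Write,
// as shown in the methods above. Hypothetical configuration:
//
//	pool := NewSimpleConnectionPool(ConnectionOptions{
//		ReadTimeout:  200 * time.Millisecond, // enforced on each conn.Read
//		WriteTimeout: 200 * time.Millisecond, // enforced on each conn.Write
//	})
// --- end note ---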
+func (c *managedConnImpl) SetReadDeadline(t time.Time) error { + return errors.New("Cannot set read deadline for managed connection") +} + +// SetWriteDeadline is disabled for managed connection (The deadline is set by +// us with respect to the write timeout specified in ConnectionOptions). +func (c *managedConnImpl) SetWriteDeadline(t time.Time) error { + return errors.New("Cannot set write deadline for managed connection") +} diff --git a/weed/wdclient/net2/port.go b/weed/wdclient/net2/port.go new file mode 100644 index 000000000..f83adba28 --- /dev/null +++ b/weed/wdclient/net2/port.go @@ -0,0 +1,19 @@ +package net2 + +import ( + "net" + "strconv" +) + +// Returns the port information. +func GetPort(addr net.Addr) (int, error) { + _, lport, err := net.SplitHostPort(addr.String()) + if err != nil { + return -1, err + } + lportInt, err := strconv.Atoi(lport) + if err != nil { + return -1, err + } + return lportInt, nil +} diff --git a/weed/wdclient/resource_pool/doc.go b/weed/wdclient/resource_pool/doc.go new file mode 100644 index 000000000..b8b3f92fa --- /dev/null +++ b/weed/wdclient/resource_pool/doc.go @@ -0,0 +1,5 @@ +// A generic resource pool for managing resources such as network connections. +package resource_pool + +// copied from https://github.com/dropbox/godropbox/tree/master/resource_pool +// removed other dependencies diff --git a/weed/wdclient/resource_pool/managed_handle.go b/weed/wdclient/resource_pool/managed_handle.go new file mode 100644 index 000000000..e1d82ca7b --- /dev/null +++ b/weed/wdclient/resource_pool/managed_handle.go @@ -0,0 +1,97 @@ +package resource_pool + +import ( + "sync/atomic" + + "errors" +) + +// A resource handle managed by a resource pool. +type ManagedHandle interface { + // This returns the handle's resource location. + ResourceLocation() string + + // This returns the underlying resource handle (or error if the handle + // is no longer active). + Handle() (interface{}, error) + + // This returns the resource pool which owns this handle. + Owner() ResourcePool + + // The releases the underlying resource handle to the caller and marks the + // managed handle as inactive. The caller is responsible for cleaning up + // the released handle. This returns nil if the managed handle no longer + // owns the resource. + ReleaseUnderlyingHandle() interface{} + + // This indictes a user is done with the handle and releases the handle + // back to the resource pool. + Release() error + + // This indicates the handle is an invalid state, and that the + // connection should be discarded from the connection pool. + Discard() error +} + +// A physical implementation of ManagedHandle +type managedHandleImpl struct { + location string + handle interface{} + pool ResourcePool + isActive int32 // atomic bool + options Options +} + +// This creates a managed handle wrapper. +func NewManagedHandle( + resourceLocation string, + handle interface{}, + pool ResourcePool, + options Options) ManagedHandle { + + h := &managedHandleImpl{ + location: resourceLocation, + handle: handle, + pool: pool, + options: options, + } + atomic.StoreInt32(&h.isActive, 1) + + return h +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) ResourceLocation() string { + return c.location +} + +// See ManagedHandle for documentation. 
+func (c *managedHandleImpl) Handle() (interface{}, error) { + if atomic.LoadInt32(&c.isActive) == 0 { + return c.handle, errors.New("Resource handle is no longer valid") + } + return c.handle, nil +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) Owner() ResourcePool { + return c.pool +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) ReleaseUnderlyingHandle() interface{} { + if atomic.CompareAndSwapInt32(&c.isActive, 1, 0) { + return c.handle + } + return nil +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) Release() error { + return c.pool.Release(c) +} + +// See ManagedHandle for documentation. +func (c *managedHandleImpl) Discard() error { + return c.pool.Discard(c) +} diff --git a/weed/wdclient/resource_pool/multi_resource_pool.go b/weed/wdclient/resource_pool/multi_resource_pool.go new file mode 100644 index 000000000..9ac25526d --- /dev/null +++ b/weed/wdclient/resource_pool/multi_resource_pool.go @@ -0,0 +1,200 @@ +package resource_pool + +import ( + "fmt" + "sync" + + "errors" +) + +// A resource pool implementation that manages multiple resource location +// entries. The handles to each resource location entry acts independently. +// For example "tcp localhost:11211" could act as memcache +// shard 0 and "tcp localhost:11212" could act as memcache shard 1. +type multiResourcePool struct { + options Options + + createPool func(Options) ResourcePool + + rwMutex sync.RWMutex + isLameDuck bool // guarded by rwMutex + // NOTE: the locationPools is guarded by rwMutex, but the pool entries + // are not. + locationPools map[string]ResourcePool +} + +// This returns a MultiResourcePool, which manages multiple +// resource location entries. The handles to each resource location +// entry acts independently. +// +// When createPool is nil, NewSimpleResourcePool is used as default. +func NewMultiResourcePool( + options Options, + createPool func(Options) ResourcePool) ResourcePool { + + if createPool == nil { + createPool = NewSimpleResourcePool + } + + return &multiResourcePool{ + options: options, + createPool: createPool, + rwMutex: sync.RWMutex{}, + isLameDuck: false, + locationPools: make(map[string]ResourcePool), + } +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) NumActive() int32 { + total := int32(0) + + p.rwMutex.RLock() + defer p.rwMutex.RUnlock() + + for _, pool := range p.locationPools { + total += pool.NumActive() + } + return total +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) ActiveHighWaterMark() int32 { + high := int32(0) + + p.rwMutex.RLock() + defer p.rwMutex.RUnlock() + + for _, pool := range p.locationPools { + val := pool.ActiveHighWaterMark() + if val > high { + high = val + } + } + return high +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) NumIdle() int { + total := 0 + + p.rwMutex.RLock() + defer p.rwMutex.RUnlock() + + for _, pool := range p.locationPools { + total += pool.NumIdle() + } + return total +} + +// See ResourcePool for documentation. 
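// --- illustrative sketch, not part of this diff: hypothetical sharded usage of
// the multi resource pool above, written as if inside this resource_pool
// package; the locations and the trivial Open/Close hooks are made up ---
func exampleMultiPool() error {
	opts := Options{
		MaxActiveHandles: 8,
		MaxIdleHandles:   2,
		Open: func(location string) (interface{}, error) {
			// open whatever resource `location` refers to, e.g. dial a connection
			return location, nil
		},
		Close: func(handle interface{}) error { return nil },
	}
	pool := NewMultiResourcePool(opts, nil) // nil createPool falls back to a simple pool per location
	for _, shard := range []string{"tcp localhost:11211", "tcp localhost:11212"} {
		if err := pool.Register(shard); err != nil {
			return err
		}
	}
	handle, err := pool.Get("tcp localhost:11211") // routed to that shard's sub-pool
	if err != nil {
		return err
	}
	defer handle.Release()
	_, err = handle.Handle() // the raw resource produced by opts.Open
	return err
}
// --- end sketch ---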
+func (p *multiResourcePool) Register(resourceLocation string) error { + if resourceLocation == "" { + return errors.New("Registering invalid resource location") + } + + p.rwMutex.Lock() + defer p.rwMutex.Unlock() + + if p.isLameDuck { + return fmt.Errorf( + "Cannot register %s to lame duck resource pool", + resourceLocation) + } + + if _, inMap := p.locationPools[resourceLocation]; inMap { + return nil + } + + pool := p.createPool(p.options) + if err := pool.Register(resourceLocation); err != nil { + return err + } + + p.locationPools[resourceLocation] = pool + return nil +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) Unregister(resourceLocation string) error { + p.rwMutex.Lock() + defer p.rwMutex.Unlock() + + if pool, inMap := p.locationPools[resourceLocation]; inMap { + _ = pool.Unregister("") + pool.EnterLameDuckMode() + delete(p.locationPools, resourceLocation) + } + return nil +} + +func (p *multiResourcePool) ListRegistered() []string { + p.rwMutex.RLock() + defer p.rwMutex.RUnlock() + + result := make([]string, 0, len(p.locationPools)) + for key, _ := range p.locationPools { + result = append(result, key) + } + + return result +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) Get( + resourceLocation string) (ManagedHandle, error) { + + pool := p.getPool(resourceLocation) + if pool == nil { + return nil, fmt.Errorf( + "%s is not registered in the resource pool", + resourceLocation) + } + return pool.Get(resourceLocation) +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) Release(handle ManagedHandle) error { + pool := p.getPool(handle.ResourceLocation()) + if pool == nil { + return errors.New( + "Resource pool cannot take control of a handle owned " + + "by another resource pool") + } + + return pool.Release(handle) +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) Discard(handle ManagedHandle) error { + pool := p.getPool(handle.ResourceLocation()) + if pool == nil { + return errors.New( + "Resource pool cannot take control of a handle owned " + + "by another resource pool") + } + + return pool.Discard(handle) +} + +// See ResourcePool for documentation. +func (p *multiResourcePool) EnterLameDuckMode() { + p.rwMutex.Lock() + defer p.rwMutex.Unlock() + + p.isLameDuck = true + + for _, pool := range p.locationPools { + pool.EnterLameDuckMode() + } +} + +func (p *multiResourcePool) getPool(resourceLocation string) ResourcePool { + p.rwMutex.RLock() + defer p.rwMutex.RUnlock() + + if pool, inMap := p.locationPools[resourceLocation]; inMap { + return pool + } + return nil +} diff --git a/weed/wdclient/resource_pool/resource_pool.go b/weed/wdclient/resource_pool/resource_pool.go new file mode 100644 index 000000000..26c433f50 --- /dev/null +++ b/weed/wdclient/resource_pool/resource_pool.go @@ -0,0 +1,96 @@ +package resource_pool + +import ( + "time" +) + +type Options struct { + // The maximum number of active resource handles per resource location. (A + // non-positive value indicates the number of active resource handles is + // unbounded). + MaxActiveHandles int32 + + // The maximum number of idle resource handles per resource location that + // are kept alive by the resource pool. + MaxIdleHandles uint32 + + // The maximum amount of time an idle resource handle can remain alive (if + // specified). + MaxIdleTime *time.Duration + + // This limits the number of concurrent Open calls (there's no limit when + // OpenMaxConcurrency is non-positive). 
+ OpenMaxConcurrency int + + // This function creates a resource handle (e.g., a connection) for a + // resource location. The function must be thread-safe. + Open func(resourceLocation string) ( + handle interface{}, + err error) + + // This function destroys a resource handle and performs the necessary + // cleanup to free up resources. The function must be thread-safe. + Close func(handle interface{}) error + + // This specifies the now time function. When the function is non-nil, the + // resource pool will use the specified function instead of time.Now to + // generate the current time. + NowFunc func() time.Time +} + +func (o Options) getCurrentTime() time.Time { + if o.NowFunc == nil { + return time.Now() + } else { + return o.NowFunc() + } +} + +// A generic interface for managed resource pool. All resource pool +// implementations must be threadsafe. +type ResourcePool interface { + // This returns the number of active resource handles. + NumActive() int32 + + // This returns the highest number of actives handles for the entire + // lifetime of the pool. If the pool contains multiple sub-pools, the + // high water mark is the max of the sub-pools' high water marks. + ActiveHighWaterMark() int32 + + // This returns the number of alive idle handles. NOTE: This is only used + // for testing. + NumIdle() int + + // This associates a resource location to the resource pool; afterwhich, + // the user can get resource handles for the resource location. + Register(resourceLocation string) error + + // This dissociates a resource location from the resource pool; afterwhich, + // the user can no longer get resource handles for the resource location. + // If the given resource location corresponds to a sub-pool, the unregistered + // sub-pool will enter lame duck mode. + Unregister(resourceLocation string) error + + // This returns the list of registered resource location entries. + ListRegistered() []string + + // This gets an active resource handle from the resource pool. The + // handle will remain active until one of the following is called: + // 1. handle.Release() + // 2. handle.Discard() + // 3. pool.Release(handle) + // 4. pool.Discard(handle) + Get(key string) (ManagedHandle, error) + + // This releases an active resource handle back to the resource pool. + Release(handle ManagedHandle) error + + // This discards an active resource from the resource pool. + Discard(handle ManagedHandle) error + + // Enter the resource pool into lame duck mode. The resource pool + // will no longer return resource handles, and all idle resource handles + // are closed immediately (including active resource handles that are + // released back to the pool afterward). + EnterLameDuckMode() +} diff --git a/weed/wdclient/resource_pool/semaphore.go b/weed/wdclient/resource_pool/semaphore.go new file mode 100644 index 000000000..ff35d5bc5 --- /dev/null +++ b/weed/wdclient/resource_pool/semaphore.go @@ -0,0 +1,154 @@ +package resource_pool + +import ( + "fmt" + "sync" + "sync/atomic" + "time" +) + +type Semaphore interface { + // Increment the semaphore counter by one. + Release() + + // Decrement the semaphore counter by one, and block if counter < 0 + Acquire() + + // Decrement the semaphore counter by one, and block if counter < 0 + // Wait for up to the given duration. Returns true if did not timeout + TryAcquire(timeout time.Duration) bool +} + +// A simple counting Semaphore. +type boundedSemaphore struct { + slots chan struct{} +} + +// Create a bounded semaphore. 
The count parameter must be a positive number. +// NOTE: The bounded semaphore will panic if the user tries to Release +// beyond the specified count. +func NewBoundedSemaphore(count uint) Semaphore { + sem := &boundedSemaphore{ + slots: make(chan struct{}, int(count)), + } + for i := 0; i < cap(sem.slots); i++ { + sem.slots <- struct{}{} + } + return sem +} + +// Acquire returns on successful acquisition. +func (sem *boundedSemaphore) Acquire() { + <-sem.slots +} + +// TryAcquire returns true if it acquires a resource slot within the +// timeout, false otherwise. +func (sem *boundedSemaphore) TryAcquire(timeout time.Duration) bool { + if timeout > 0 { + // Wait until we get a slot or timeout expires. + tm := time.NewTimer(timeout) + defer tm.Stop() + select { + case <-sem.slots: + return true + case <-tm.C: + // Timeout expired. In very rare cases this might happen even if + // there is a slot available, e.g. GC pause after we create the timer + // and select randomly picked this one out of the two available channels. + // We should do one final immediate check below. + } + } + + // Return true if we have a slot available immediately and false otherwise. + select { + case <-sem.slots: + return true + default: + return false + } +} + +// Release the acquired semaphore. You must not release more than you +// have acquired. +func (sem *boundedSemaphore) Release() { + select { + case sem.slots <- struct{}{}: + default: + // slots is buffered. If a send blocks, it indicates a programming + // error. + panic(fmt.Errorf("too many releases for boundedSemaphore")) + } +} + +// This returns an unbound counting semaphore with the specified initial count. +// The semaphore counter can be arbitrary large (i.e., Release can be called +// unlimited amount of times). +// +// NOTE: In general, users should use bounded semaphore since it is more +// efficient than unbounded semaphore. +func NewUnboundedSemaphore(initialCount int) Semaphore { + res := &unboundedSemaphore{ + counter: int64(initialCount), + } + res.cond.L = &res.lock + return res +} + +type unboundedSemaphore struct { + lock sync.Mutex + cond sync.Cond + counter int64 +} + +func (s *unboundedSemaphore) Release() { + s.lock.Lock() + s.counter += 1 + if s.counter > 0 { + // Not broadcasting here since it's unlike we can satify all waiting + // goroutines. Instead, we will Signal again if there are left over + // quota after Acquire, in case of lost wakeups. + s.cond.Signal() + } + s.lock.Unlock() +} + +func (s *unboundedSemaphore) Acquire() { + s.lock.Lock() + for s.counter < 1 { + s.cond.Wait() + } + s.counter -= 1 + if s.counter > 0 { + s.cond.Signal() + } + s.lock.Unlock() +} + +func (s *unboundedSemaphore) TryAcquire(timeout time.Duration) bool { + done := make(chan bool, 1) + // Gate used to communicate between the threads and decide what the result + // is. If the main thread decides, we have timed out, otherwise we succeed. 
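// --- illustrative sketch, not part of this diff ---
// A hypothetical use of the bounded semaphore above to cap concurrency, in the
// same spirit as the openTokens throttle in simple_resource_pool.go below:
//
//	sem := NewBoundedSemaphore(3) // at most 3 goroutines inside the guarded section
//	var wg sync.WaitGroup
//	for i := 0; i < 10; i++ {
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			if !sem.TryAcquire(100 * time.Millisecond) {
//				return // give up rather than queue indefinitely
//			}
//			defer sem.Release()
//			// open or use the limited resource here
//		}()
//	}
//	wg.Wait()
// --- end sketch ---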
+func (s *unboundedSemaphore) TryAcquire(timeout time.Duration) bool {
+	done := make(chan bool, 1)
+	// decided acts as a gate between the two goroutines: whichever swaps it
+	// first owns the result. If the main goroutine decides first, we have
+	// timed out; otherwise the acquire succeeded.
+	decided := new(int32)
+	atomic.StoreInt32(decided, 0)
+	go func() {
+		s.Acquire()
+		if atomic.SwapInt32(decided, 1) == 0 {
+			// Acquire won the race
+			done <- true
+		} else {
+			// If we already decided the result, and this thread did not win
+			s.Release()
+		}
+	}()
+	select {
+	case <-done:
+		return true
+	case <-time.After(timeout):
+		if atomic.SwapInt32(decided, 1) == 1 {
+			// The other thread already decided the result
+			return true
+		}
+		return false
+	}
+}
diff --git a/weed/wdclient/resource_pool/simple_resource_pool.go b/weed/wdclient/resource_pool/simple_resource_pool.go
new file mode 100644
index 000000000..b0c539100
--- /dev/null
+++ b/weed/wdclient/resource_pool/simple_resource_pool.go
@@ -0,0 +1,343 @@
+package resource_pool
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type idleHandle struct {
+	handle    interface{}
+	keepUntil *time.Time
+}
+
+type TooManyHandles struct {
+	location string
+}
+
+func (t TooManyHandles) Error() string {
+	return fmt.Sprintf("Too many handles to %s", t.location)
+}
+
+type OpenHandleError struct {
+	location string
+	err      error
+}
+
+func (o OpenHandleError) Error() string {
+	return fmt.Sprintf("Failed to open resource handle: %s (%v)", o.location, o.err)
+}
+
+// A resource pool implementation where all handles are associated with the
+// same resource location.
+type simpleResourcePool struct {
+	options Options
+
+	numActive *int32 // atomic counter
+
+	activeHighWaterMark *int32 // atomic / monotonically increasing value
+
+	openTokens Semaphore
+
+	mutex       sync.Mutex
+	location    string        // guarded by mutex
+	idleHandles []*idleHandle // guarded by mutex
+	isLameDuck  bool          // guarded by mutex
+}
+
+// This returns a SimpleResourcePool, where all handles are associated with a
+// single resource location.
+func NewSimpleResourcePool(options Options) ResourcePool {
+	numActive := new(int32)
+	atomic.StoreInt32(numActive, 0)
+
+	activeHighWaterMark := new(int32)
+	atomic.StoreInt32(activeHighWaterMark, 0)
+
+	var tokens Semaphore
+	if options.OpenMaxConcurrency > 0 {
+		tokens = NewBoundedSemaphore(uint(options.OpenMaxConcurrency))
+	}
+
+	return &simpleResourcePool{
+		location:            "",
+		options:             options,
+		numActive:           numActive,
+		activeHighWaterMark: activeHighWaterMark,
+		openTokens:          tokens,
+		mutex:               sync.Mutex{},
+		idleHandles:         make([]*idleHandle, 0, 0),
+		isLameDuck:          false,
+	}
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) NumActive() int32 {
+	return atomic.LoadInt32(p.numActive)
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) ActiveHighWaterMark() int32 {
+	return atomic.LoadInt32(p.activeHighWaterMark)
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) NumIdle() int {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	return len(p.idleHandles)
+}
+
+// SimpleResourcePool can only register a single (network, address) entry.
+// Register should be called before any Get calls.
+func (p *simpleResourcePool) Register(resourceLocation string) error {
+	if resourceLocation == "" {
+		return errors.New("Invalid resource location")
+	}
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.isLameDuck {
+		return fmt.Errorf(
+			"cannot register %s to lame duck resource pool",
+			resourceLocation)
+	}
+
+	if p.location == "" {
+		p.location = resourceLocation
+		return nil
+	}
+	return errors.New("SimpleResourcePool can only register one location")
+}
+
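+// Editorial sketch of typical pool usage; the TCP-based Open/Close functions
+// and the address below are illustrative, not part of this change.
+//
+//	pool := NewSimpleResourcePool(Options{
+//		MaxActiveHandles: 10,
+//		Open: func(loc string) (interface{}, error) {
+//			conn, err := net.Dial("tcp", loc)
+//			return conn, err
+//		},
+//		Close: func(h interface{}) error {
+//			return h.(net.Conn).Close()
+//		},
+//	})
+//	_ = pool.Register("localhost:9333")
+//	handle, err := pool.Get("localhost:9333")
+//	if err == nil {
+//		defer handle.Release()
+//		// ... use the underlying connection ...
+//	}
+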
+// SimpleResourcePool will enter lame duck mode upon calling Unregister.
+func (p *simpleResourcePool) Unregister(resourceLocation string) error {
+	p.EnterLameDuckMode()
+	return nil
+}
+
+func (p *simpleResourcePool) ListRegistered() []string {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.location != "" {
+		return []string{p.location}
+	}
+	return []string{}
+}
+
+func (p *simpleResourcePool) getLocation() (string, error) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.location == "" {
+		return "", fmt.Errorf(
+			"resource location is not set for SimpleResourcePool")
+	}
+
+	if p.isLameDuck {
+		return "", fmt.Errorf(
+			"lame duck resource pool cannot return handles to %s",
+			p.location)
+	}
+
+	return p.location, nil
+}
+
+// This gets an active resource from the resource pool. Note that the
+// resourceLocation argument is ignored (the handles are associated with the
+// resource location provided by the first Register call).
+func (p *simpleResourcePool) Get(unused string) (ManagedHandle, error) {
+	activeCount := atomic.AddInt32(p.numActive, 1)
+	if p.options.MaxActiveHandles > 0 &&
+		activeCount > p.options.MaxActiveHandles {
+
+		atomic.AddInt32(p.numActive, -1)
+		return nil, TooManyHandles{p.location}
+	}
+
+	highest := atomic.LoadInt32(p.activeHighWaterMark)
+	for activeCount > highest &&
+		!atomic.CompareAndSwapInt32(
+			p.activeHighWaterMark,
+			highest,
+			activeCount) {
+
+		highest = atomic.LoadInt32(p.activeHighWaterMark)
+	}
+
+	if h := p.getIdleHandle(); h != nil {
+		return h, nil
+	}
+
+	location, err := p.getLocation()
+	if err != nil {
+		atomic.AddInt32(p.numActive, -1)
+		return nil, err
+	}
+
+	if p.openTokens != nil {
+		// The current implementation does not wait for tokens to become
+		// available. If that causes availability hits, we could increase the
+		// wait, similar to simple_pool.go.
+		if p.openTokens.TryAcquire(0) {
+			defer p.openTokens.Release()
+		} else {
+			// We could not immediately acquire a token; instead of waiting,
+			// fail fast.
+			atomic.AddInt32(p.numActive, -1)
+			return nil, OpenHandleError{
+				p.location, errors.New("Open Error: reached OpenMaxConcurrency")}
+		}
+	}
+
+	handle, err := p.options.Open(location)
+	if err != nil {
+		atomic.AddInt32(p.numActive, -1)
+		return nil, OpenHandleError{p.location, err}
+	}
+
+	return NewManagedHandle(p.location, handle, p, p.options), nil
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) Release(handle ManagedHandle) error {
+	if pool, ok := handle.Owner().(*simpleResourcePool); !ok || pool != p {
+		return errors.New(
+			"Resource pool cannot take control of a handle owned " +
+				"by another resource pool")
+	}
+
+	h := handle.ReleaseUnderlyingHandle()
+	if h != nil {
+		// We can unref either before or after queuing the idle handle.
+		// The advantage of unref-ing before queuing is that there is
+		// a higher chance of a successful Get when the number of active handles
+		// is close to the limit (but potentially more handle creation).
+		// The advantage of queuing before unref-ing is that there's a
+		// higher chance of reusing a handle (but potentially more Get failures).
+		atomic.AddInt32(p.numActive, -1)
+		p.queueIdleHandles(h)
+	}
+
+	return nil
+}
+
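+// Editorial note (sketch): callers are expected to Release a handle that is
+// still healthy so it can be pooled as idle, and to Discard a handle whose
+// underlying resource is broken (e.g. a connection that hit an I/O error),
+// which closes it instead of queuing it. The useConnection call below is
+// hypothetical.
+//
+//	if useErr := useConnection(handle); useErr != nil {
+//		_ = pool.Discard(handle)
+//	} else {
+//		_ = pool.Release(handle)
+//	}
+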
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) Discard(handle ManagedHandle) error {
+	if pool, ok := handle.Owner().(*simpleResourcePool); !ok || pool != p {
+		return errors.New(
+			"Resource pool cannot take control of a handle owned " +
+				"by another resource pool")
+	}
+
+	h := handle.ReleaseUnderlyingHandle()
+	if h != nil {
+		atomic.AddInt32(p.numActive, -1)
+		if err := p.options.Close(h); err != nil {
+			return fmt.Errorf("failed to close resource handle: %v", err)
+		}
+	}
+	return nil
+}
+
+// See ResourcePool for documentation.
+func (p *simpleResourcePool) EnterLameDuckMode() {
+	p.mutex.Lock()
+
+	toClose := p.idleHandles
+	p.isLameDuck = true
+	p.idleHandles = []*idleHandle{}
+
+	p.mutex.Unlock()
+
+	p.closeHandles(toClose)
+}
+
+// This returns an idle resource, if there is one.
+func (p *simpleResourcePool) getIdleHandle() ManagedHandle {
+	var toClose []*idleHandle
+	defer func() {
+		// NOTE: Must keep the closure around to late bind the toClose slice.
+		p.closeHandles(toClose)
+	}()
+
+	now := p.options.getCurrentTime()
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	var i int
+	for i = 0; i < len(p.idleHandles); i++ {
+		idle := p.idleHandles[i]
+		if idle.keepUntil == nil || now.Before(*idle.keepUntil) {
+			break
+		}
+	}
+	if i > 0 {
+		toClose = p.idleHandles[0:i]
+	}
+
+	if i < len(p.idleHandles) {
+		idle := p.idleHandles[i]
+		p.idleHandles = p.idleHandles[i+1:]
+		return NewManagedHandle(p.location, idle.handle, p, p.options)
+	}
+
+	if len(p.idleHandles) > 0 {
+		p.idleHandles = []*idleHandle{}
+	}
+	return nil
+}
+
+// This adds an idle resource to the pool.
+func (p *simpleResourcePool) queueIdleHandles(handle interface{}) {
+	var toClose []*idleHandle
+	defer func() {
+		// NOTE: Must keep the closure around to late bind the toClose slice.
+		p.closeHandles(toClose)
+	}()
+
+	now := p.options.getCurrentTime()
+	var keepUntil *time.Time
+	if p.options.MaxIdleTime != nil {
+		// NOTE: Assign to temp variable first to work around compiler bug
+		x := now.Add(*p.options.MaxIdleTime)
+		keepUntil = &x
+	}
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.isLameDuck {
+		toClose = []*idleHandle{
+			{handle: handle},
+		}
+		return
+	}
+
+	p.idleHandles = append(
+		p.idleHandles,
+		&idleHandle{
+			handle:    handle,
+			keepUntil: keepUntil,
+		})
+
+	nIdleHandles := uint32(len(p.idleHandles))
+	if nIdleHandles > p.options.MaxIdleHandles {
+		handlesToClose := nIdleHandles - p.options.MaxIdleHandles
+		toClose = p.idleHandles[0:handlesToClose]
+		p.idleHandles = p.idleHandles[handlesToClose:nIdleHandles]
+	}
+}
+
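+// Editorial summary: together, getIdleHandle and queueIdleHandles implement the
+// idle policy. A released handle is kept for up to MaxIdleTime (when set),
+// handles that have expired or exceed MaxIdleHandles are closed oldest-first,
+// and a lame-duck pool closes released handles immediately.
+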
+// Closes resources; at this point it is assumed that these resources
+// are no longer referenced from the main idleHandles slice.
+func (p *simpleResourcePool) closeHandles(handles []*idleHandle) {
+	for _, handle := range handles {
+		_ = p.options.Close(handle.handle)
+	}
+}
diff --git a/weed/wdclient/volume_tcp_client.go b/weed/wdclient/volume_tcp_client.go
new file mode 100644
index 000000000..afebd71eb
--- /dev/null
+++ b/weed/wdclient/volume_tcp_client.go
@@ -0,0 +1,97 @@
+package wdclient
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/chrislusf/seaweedfs/weed/wdclient/net2"
+	"io"
+	"net"
+	"time"
+)
+
+// VolumeTcpClient puts/gets/deletes file chunks directly on volume servers, without replication.
+type VolumeTcpClient struct {
+	cp net2.ConnectionPool
+}
+
+type VolumeTcpConn struct {
+	net.Conn
+	bufWriter *bufio.Writer
+	bufReader *bufio.Reader
+}
+
+func NewVolumeTcpClient() *VolumeTcpClient {
+	maxIdleTime := 10 * time.Second
+	return &VolumeTcpClient{
+		cp: net2.NewMultiConnectionPool(net2.ConnectionOptions{
+			MaxActiveConnections: 16,
+			MaxIdleConnections:   1,
+			MaxIdleTime:          &maxIdleTime,
+			DialMaxConcurrency:   0,
+			Dial: func(network string, address string) (net.Conn, error) {
+				conn, err := net.Dial(network, address)
+				return &VolumeTcpConn{
+					conn,
+					bufio.NewWriter(conn),
+					bufio.NewReader(conn),
+				}, err
+			},
+			NowFunc:      nil,
+			ReadTimeout:  0,
+			WriteTimeout: 0,
+		}),
+	}
+}
+func (c *VolumeTcpClient) PutFileChunk(volumeServerAddress string, fileId string, fileSize uint32, fileReader io.Reader) (err error) {
+
+	tcpAddress, parseErr := pb.ParseServerAddress(volumeServerAddress, 20000)
+	if parseErr != nil {
+		return parseErr
+	}
+
+	c.cp.Register("tcp", tcpAddress)
+	tcpConn, getErr := c.cp.Get("tcp", tcpAddress)
+	if getErr != nil {
+		return fmt.Errorf("get connection to %s: %v", tcpAddress, getErr)
+	}
+	conn := tcpConn.RawConn().(*VolumeTcpConn)
+	defer func() {
+		if err != nil {
+			tcpConn.DiscardConnection()
+		} else {
+			tcpConn.ReleaseConnection()
+		}
+	}()
+
+	// Wire format, as implemented below: "+<fileId>\n", then the 4-byte chunk
+	// size encoded by util.Uint32toBytes, the chunk bytes, and a trailing "!\n".
+	// The volume server is expected to reply with a line starting with "+OK".
+	buf := []byte("+" + fileId + "\n")
+	_, err = conn.bufWriter.Write(buf)
+	if err != nil {
+		return
+	}
+	// Reuse the first 4 bytes of buf as the size header.
+	util.Uint32toBytes(buf[0:4], fileSize)
+	_, err = conn.bufWriter.Write(buf[0:4])
+	if err != nil {
+		return
+	}
+	_, err = io.Copy(conn.bufWriter, fileReader)
+	if err != nil {
+		return
+	}
+	if _, err = conn.bufWriter.Write([]byte("!\n")); err != nil {
+		return
+	}
+	if err = conn.bufWriter.Flush(); err != nil {
+		return
+	}
+
+	ret, _, err := conn.bufReader.ReadLine()
+	if err != nil {
+		glog.V(0).Infof("upload by tcp: %v", err)
+		return
+	}
+	if !bytes.HasPrefix(ret, []byte("+OK")) {
+		glog.V(0).Infof("upload by tcp: %v", string(ret))
+	}
+
+	return nil
+}
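+
+// Editorial sketch of how a caller might use this client; the volume server
+// address, file id, and chunk variable below are illustrative only.
+//
+//	client := NewVolumeTcpClient()
+//	reader := bytes.NewReader(chunk) // chunk is a hypothetical []byte
+//	if err := client.PutFileChunk("127.0.0.1:8080", "3,01637037d6", uint32(len(chunk)), reader); err != nil {
+//		glog.V(0).Infof("put chunk: %v", err)
+//	}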