Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

Commit 1c94b3d013: merge master, resolve conflicts
.github/workflows/release.yml (vendored): 2 lines changed

@@ -30,7 +30,7 @@ jobs:
           time: '30s'

       - name: Set BUILD_TIME env
-        run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}
+        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

       - name: Go Release Binaries
         uses: wangyoucao577/go-release-action@v1.17
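The only substantive change in this workflow is the BUILD_TIME layout, moving from a dashed stamp such as 2021-07-26-03-15 to a compact 20210726-0315. For reference, a small Go sketch of the two equivalent layout strings; the program and its sample values are illustrative and not part of the repository:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now().UTC()
	// old workflow format: date -u +%Y-%m-%d-%H-%M  (e.g. 2021-07-26-03-15)
	fmt.Println(now.Format("2006-01-02-15-04"))
	// new workflow format: date -u +%Y%m%d-%H%M  (e.g. 20210726-0315)
	fmt.Println(now.Format("20060102-1504"))
}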
README.md: 15 lines changed

@@ -3,7 +3,7 @@

 [![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
 [![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs)
-[![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs)
+[![Build Status](https://travis-ci.com/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.com/chrislusf/seaweedfs)
 [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
 [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki)
 [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
@@ -93,13 +93,6 @@ and these volume servers manage files and their metadata.
 This relieves concurrency pressure from the central master and spreads file metadata into volume servers,
 allowing faster file access (O(1), usually just one disk read operation).

-SeaweedFS can transparently integrate with the cloud.
-With hot data on local cluster, and warm data on the cloud with O(1) access time,
-SeaweedFS can achieve both fast local access time and elastic cloud storage capacity.
-What's more, the cloud storage access API cost is minimized.
-Faster and Cheaper than direct cloud storage!
-Signup for future managed SeaweedFS cluster offering at "seaweedfilesystem at gmail dot com".
-
 There is only 40 bytes of disk storage overhead for each file's metadata.
 It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.

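The O(1) read claim above rests on each volume server keeping a compact in-memory index from file (needle) id to the file's offset and size inside one large volume file, so serving a read is a map lookup plus a single positioned disk read. A minimal illustrative Go sketch of that idea follows; the type and field names are invented for the example and are not SeaweedFS's actual code:

package volumesketch

import (
	"fmt"
	"os"
)

// needleValue is what the in-memory index stores per file: where the bytes
// live inside the big volume file and how many bytes to read.
type needleValue struct {
	offset int64
	size   uint32
}

// volume pairs one large append-only data file with its in-memory index.
type volume struct {
	data  *os.File
	index map[uint64]needleValue
}

// read serves a file in O(1): one map lookup, then one positioned disk read.
func (v *volume) read(needleID uint64) ([]byte, error) {
	nv, ok := v.index[needleID]
	if !ok {
		return nil, fmt.Errorf("needle %d not found", needleID)
	}
	buf := make([]byte, nv.size)
	if _, err := v.data.ReadAt(buf, nv.offset); err != nil {
		return nil, err
	}
	return buf, nil
}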
@@ -115,6 +108,12 @@ For any distributed key value stores, the large values can be offloaded to Seawe
 With the fast access speed and linearly scalable capacity,
 SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore].

+SeaweedFS can transparently integrate with the cloud.
+With hot data on local cluster, and warm data on the cloud with O(1) access time,
+SeaweedFS can achieve both fast local access time and elastic cloud storage capacity.
+What's more, the cloud storage access API cost is minimized.
+Faster and Cheaper than direct cloud storage!
+
 [Back to TOC](#table-of-contents)

 ## Additional Features ##
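One way to apply the Key-Large-Value idea: the key-value store keeps only the short SeaweedFS file id, while the large value itself is uploaded to SeaweedFS. A hedged Go sketch of that pattern, assuming the usual assign-then-upload HTTP flow against a master; the master address, function names, and the value.bin filename are illustrative, and error handling is abbreviated:

package kvoffload

import (
	"bytes"
	"encoding/json"
	"fmt"
	"mime/multipart"
	"net/http"
)

// assignResult holds the fields we need from the master's /dir/assign reply.
type assignResult struct {
	Fid string `json:"fid"`
	URL string `json:"url"`
}

// StoreLargeValue uploads value to SeaweedFS and returns the short file id
// that the key-value store should keep in place of the large value itself.
func StoreLargeValue(master string, value []byte) (string, error) {
	// Ask the master for a file id and a volume server to write to.
	resp, err := http.Get("http://" + master + "/dir/assign")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var a assignResult
	if err := json.NewDecoder(resp.Body).Decode(&a); err != nil {
		return "", err
	}

	// Upload the value to the assigned volume server as a multipart file.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, err := w.CreateFormFile("file", "value.bin")
	if err != nil {
		return "", err
	}
	if _, err := part.Write(value); err != nil {
		return "", err
	}
	w.Close()

	uploadURL := fmt.Sprintf("http://%s/%s", a.URL, a.Fid)
	up, err := http.Post(uploadURL, w.FormDataContentType(), &body)
	if err != nil {
		return "", err
	}
	up.Body.Close()

	return a.Fid, nil // store this short fid as the value in the KV store
}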
@@ -25,6 +25,9 @@ dev_tls: build certstrap

 dev_mount: build
 	docker-compose -f compose/local-mount-compose.yml -p seaweedfs up

+run_image: build
+	docker run --rm -ti --device /dev/fuse --cap-add SYS_ADMIN --entrypoint /bin/sh chrislusf/seaweedfs:local
+
 profile_mount: build
 	docker-compose -f compose/local-mount-profile-compose.yml -p seaweedfs up
go.mod: 30 lines changed

@@ -3,11 +3,8 @@ module github.com/chrislusf/seaweedfs

 go 1.16

 require (
 	cloud.google.com/go v0.58.0 // indirect
 	cloud.google.com/go/pubsub v1.3.1
 	cloud.google.com/go/storage v1.9.0
 	github.com/Azure/azure-amqp-common-go/v2 v2.1.0 // indirect
 	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
 	github.com/Azure/azure-storage-blob-go v0.9.0
 	github.com/OneOfOne/xxhash v1.2.2
 	github.com/Shopify/sarama v1.23.1
@@ -17,7 +14,6 @@ require (
 	github.com/cespare/xxhash v1.1.0
 	github.com/chrislusf/raft v1.0.7
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/disintegration/imaging v1.6.2
 	github.com/dustin/go-humanize v1.0.0
 	github.com/eapache/go-resiliency v1.2.0 // indirect
@@ -33,8 +29,9 @@ require (
 	github.com/go-sql-driver/mysql v1.5.0
 	github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
 	github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
 	github.com/golang-jwt/jwt v3.2.1+incompatible
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
 	github.com/golang/protobuf v1.4.2
 	github.com/golang/protobuf v1.4.3
 	github.com/google/btree v1.0.0
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/mux v1.7.4
@@ -43,8 +40,7 @@ require (
 	github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
 	github.com/jcmturner/gofork v1.0.0 // indirect
 	github.com/jinzhu/copier v0.2.8
 	github.com/json-iterator/go v1.1.10
 	github.com/karlseguin/ccache v2.0.3+incompatible // indirect
 	github.com/json-iterator/go v1.1.11
 	github.com/karlseguin/ccache/v2 v2.0.7
 	github.com/klauspost/compress v1.10.9 // indirect
 	github.com/klauspost/cpuid v1.2.1 // indirect
@@ -52,17 +48,15 @@ require (
 	github.com/klauspost/reedsolomon v1.9.2
 	github.com/kurin/blazer v0.5.3
 	github.com/lib/pq v1.10.0
 	github.com/lunixbochs/vtclean v1.0.0 // indirect
 	github.com/magiconair/properties v1.8.1 // indirect
 	github.com/mattn/go-colorable v0.1.2 // indirect
 	github.com/mattn/go-runewidth v0.0.4 // indirect
 	github.com/olivere/elastic/v7 v7.0.19
 	github.com/peterh/liner v1.1.0
 	github.com/pierrec/lz4 v2.2.7+incompatible // indirect
 	github.com/pquerna/cachecontrol v0.1.0
 	github.com/prometheus/client_golang v1.3.0
 	github.com/prometheus/client_golang v1.11.0
 	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
 	github.com/seaweedfs/fuse v1.1.6
 	github.com/seaweedfs/fuse v1.1.8
 	github.com/seaweedfs/goexif v1.0.2
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
@@ -72,8 +66,8 @@ require (
 	github.com/stretchr/testify v1.6.1
 	github.com/syndtr/goleveldb v1.0.0
 	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
 	github.com/tidwall/gjson v1.3.2
 	github.com/tidwall/match v1.0.1
 	github.com/tidwall/gjson v1.8.1
 	github.com/tidwall/match v1.0.3
 	github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
 	github.com/valyala/bytebufferpool v1.0.0
 	github.com/viant/assertly v0.5.4 // indirect
@@ -81,24 +75,22 @@ require (
 	github.com/viant/toolbox v0.33.2 // indirect
 	github.com/willf/bitset v1.1.10 // indirect
 	github.com/willf/bloom v2.0.3+incompatible
 	github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
 	go.etcd.io/etcd v3.3.15+incompatible
 	go.mongodb.org/mongo-driver v1.3.2
 	go.mongodb.org/mongo-driver v1.7.0
 	gocloud.dev v0.20.0
 	gocloud.dev/pubsub/natspubsub v0.20.0
 	gocloud.dev/pubsub/rabbitpubsub v0.20.0
 	golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
 	golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
 	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
 	golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40
 	golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78
 	google.golang.org/api v0.26.0
 	google.golang.org/grpc v1.29.1
 	google.golang.org/protobuf v1.24.0
 	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 	google.golang.org/protobuf v1.26.0-rc.1
 	gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
 	gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
 	gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect
 	modernc.org/sqlite v1.10.7
 	github.com/posener/complete v1.2.3
 )

 // replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
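The commented-out replace line at the end is the standard local-development override for the fuse dependency: uncommented, it points the Go module system at a working copy on disk instead of the published module, and it is kept commented so builds on other machines are unaffected. Uncommented it would read as below; the path is the one shown in the comment and is specific to the author's machine:

replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse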
go.sum: 286 lines changed

Checksum entries updated to track the dependency changes in go.mod above, covering the bumped modules (golang/protobuf, json-iterator, karlseguin/ccache/v2, prometheus/client_golang, seaweedfs/fuse, tidwall/gjson, tidwall/match, go.mongodb.org/mongo-driver, golang.org/x/sys, google.golang.org/protobuf, posener/complete) and their transitive dependencies (Azure, AWS, gRPC, nats-io, chrislusf/raft, and related modules).
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
|
||||
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
|
@ -693,35 +622,20 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
|||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/seaweedfs/fuse v1.0.8 h1:HBPJTC77OlxwSd2JiTwvLPn8bWTElqQp3xs9vf3C15s=
|
||||
github.com/seaweedfs/fuse v1.0.8/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
|
||||
github.com/seaweedfs/fuse v1.0.9 h1:3JZoGsW7cmmrd/U5KYcIGR2+EqyBvCYCoCpEdZAz/Dc=
|
||||
github.com/seaweedfs/fuse v1.0.9/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/fuse v1.1.0 h1:cL1qPHFNtFv0UuJTLjKKgWDzfJ4iZzTa4Y7ipc2acGw=
|
||||
github.com/seaweedfs/fuse v1.1.0/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/fuse v1.1.1 h1:WD51YFJcBViOx8I89jeqPD+vAKl4EowzBy9GUw0plb0=
|
||||
github.com/seaweedfs/fuse v1.1.1/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/fuse v1.1.3 h1:0DddotXwSRGbYG2kynoJyr8GHCy30Z2SpdhP3vdyijY=
|
||||
github.com/seaweedfs/fuse v1.1.3/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/fuse v1.1.4 h1:YYqkK86agMhXRSwR+wFbRI8ikMgk3kL6PNTna1MAHyQ=
|
||||
github.com/seaweedfs/fuse v1.1.4/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/fuse v1.1.5 h1:wyuRh/mDvrvt8ZLDS7YdPSe6nczniSx4sQFs/Jonveo=
|
||||
github.com/seaweedfs/fuse v1.1.5/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/fuse v1.1.6 h1:kvCqaIsCEaYOBw5r8kJPUs9GcbwlIKcScnkPLT7HLuQ=
|
||||
github.com/seaweedfs/fuse v1.1.6/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/fuse v1.1.8 h1:YFSDPotG4uhQzV7ooDUvQ8BRVy5rM1XCFPJAmAsZz68=
|
||||
github.com/seaweedfs/fuse v1.1.8/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
|
||||
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
|
||||
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
|
||||
github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw=
|
||||
github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
|
||||
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
|
@ -735,14 +649,12 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ
|
|||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.3.1 h1:GPTpEAuNr98px18yNQ66JllNil98wfRZ/5Ukny8FeQA=
|
||||
github.com/spf13/afero v1.3.1/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
|
@ -751,24 +663,18 @@ github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
|||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 h1:2MR0pKUzlP3SGgj5NYJe/zRYDwOu9ku6YHy+Iw7l5DM=
|
||||
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
@ -776,26 +682,23 @@ github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFd
|
|||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
|
||||
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
|
||||
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
|
||||
github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
|
||||
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
|
||||
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/gjson v1.8.1 h1:8j5EE9Hrh3l9Od1OIEDAb7IpezNA20UdRngNAj5N0WU=
|
||||
github.com/tidwall/gjson v1.8.1/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk=
|
||||
github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
|
||||
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8=
|
||||
github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 h1:6iRwZdrFUzbcVYZwa8dXTIILGIxmmhjyUPJEcwzPGaU=
|
||||
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365/go.mod h1:zj0GJHGvyf1ed3Jm/Tb4830c/ZKDq+YoLsCt2rGQuT0=
|
||||
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.20.0 h1:olTmcnLQeZrkBc4TVgE/BatTo1NE/IvW050AuD8SW+U=
|
||||
github.com/valyala/fasthttp v1.20.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/viant/assertly v0.5.4 h1:5Hh4U3pLZa6uhCFAGpYOxck/8l9TZczEzoHNfJAhHEQ=
|
||||
github.com/viant/assertly v0.5.4/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/ptrie v0.3.0 h1:SDaRd7Gqr1+ItCNz0GpTxRdK21nOfqjV6YtBm9jGlMY=
|
||||
|
@ -808,30 +711,33 @@ github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX
|
|||
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
|
||||
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
|
||||
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=
|
||||
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
|
||||
github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
|
||||
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
|
||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 h1:s71VGheLtWmCYsnNjf+s7XE8HsrZnd3EYGrLGWVm7nY=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM=
|
||||
go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug=
|
||||
go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
|
||||
go.mongodb.org/mongo-driver v1.7.0 h1:hHrvOBWlWB2c7+8Gh/Xi5jj82AgidK/t7KVXBZ+IyUA=
|
||||
go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
|
||||
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
|
@ -839,33 +745,24 @@ go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
|
|||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw=
|
||||
go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
|
||||
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM=
|
||||
gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0=
|
||||
gocloud.dev v0.20.0 h1:mbEKMfnyPV7W1Rj35R1xXfjszs9dXkwSOq2KoFr25g8=
|
||||
gocloud.dev v0.20.0/go.mod h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc=
|
||||
gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI=
|
||||
gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI=
|
||||
gocloud.dev/pubsub/natspubsub v0.20.0 h1:DsOXYKfcSTh0SHKwuhpQAJmPLDj3+ARvYgBIupVPClE=
|
||||
gocloud.dev/pubsub/natspubsub v0.20.0/go.mod h1:Zh7v7Q1DZjAoBwsavZLdvinMIO1eYE0PJTllMuX3VGA=
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E=
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM=
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.20.0 h1:hwupxLvWG8jTPNQ+9Q/YWZzyMagL9blTwWYYhoW4pco=
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.20.0/go.mod h1:xYCXmI3ixWuCW4s1KqyZpgKT90MMjdXdMlb0Kgmd7TM=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
|
@ -876,15 +773,13 @@ golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaE
|
|||
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
|
@ -899,7 +794,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
|
@ -911,7 +805,6 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
|
|||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
|
@ -950,7 +843,6 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
@ -962,14 +854,13 @@ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
|
@ -979,14 +870,12 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
|
||||
golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -996,7 +885,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1006,8 +894,6 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1018,10 +904,10 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1036,22 +922,22 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd h1:WgqgiQvkiZWz7XLhphjt2GI2GcGCTIZs9jqXMWmH+oc=
|
||||
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1080,7 +966,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
|
@ -1089,7 +974,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
|
@ -1109,26 +993,19 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY
|
|||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509 h1:MI14dOfl3OG6Zd32w3ugsrvcUO810fDZdWakTq39dH4=
|
||||
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78 h1:M8tBwCtWD/cZV9DZpFYRUgaymAYAr+aIUTWzDaM3uPs=
|
||||
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
|
||||
google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
|
@ -1145,10 +1022,7 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
|
|||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
|
||||
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
|
@ -1159,9 +1033,7 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn
|
|||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
|
@ -1182,7 +1054,6 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG
|
|||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 h1:i+Aiej6cta/Frzp13/swvwz5O00kYcSe0A/C5Wd7zX8=
|
||||
|
@ -1194,12 +1065,9 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
|
|||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
|
@ -1213,12 +1081,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
|
|||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
@ -1226,7 +1093,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
|
|||
gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14 h1:tHqNpm9sPaE6BSuMLXBzgTwukQLdBEt4OYU2coQjEQQ=
|
||||
gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14/go.mod h1:nzmlZQ+UqB5+55CRTV/dOaiK8OrPl6Co96Ob8lH4Wxw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
|
@ -1237,24 +1103,20 @@ gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN
|
|||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4=
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
|
||||
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
|
||||
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
|
||||
gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw=
|
||||
gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -1265,7 +1127,6 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
|
||||
|
@ -1278,11 +1139,11 @@ modernc.org/cc/v3 v3.33.5/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878=
|
|||
modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo=
|
||||
modernc.org/ccgo/v3 v3.9.4 h1:mt2+HyTZKxva27O6T4C9//0xiNQ/MornL3i8itM5cCs=
|
||||
modernc.org/ccgo/v3 v3.9.4/go.mod h1:19XAY9uOrYnDhOgfHwCABasBvK69jgC4I8+rizbk3Bc=
|
||||
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
|
||||
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
|
||||
modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
|
||||
modernc.org/libc v1.9.5 h1:zv111ldxmP7DJ5mOIqzRbza7ZDl3kh4ncKfASB2jIYY=
|
||||
modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
|
||||
modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE=
|
||||
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY=
|
||||
modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
|
@ -1294,12 +1155,13 @@ modernc.org/sqlite v1.10.7 h1:B4ITfAx3HxSxOOKZqKhw4vnrhM+kTY1HoJf2L7PQBCQ=
|
|||
modernc.org/sqlite v1.10.7/go.mod h1:GXpJIZPNgRGqG0inyYDW18j9YpBpFUBn/weGI63hLLs=
|
||||
modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=
|
||||
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/tcl v1.5.2 h1:sYNjGr4zK6cDH74USl8wVJRrvDX6UOLpG0j4lFvR0W0=
|
||||
modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
|
||||
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
|
||||
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
|
||||
modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc=
|
||||
modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
|
||||
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
|
|
|
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.49"
-version: 2.49
+appVersion: "2.62"
+version: "2.62"
@@ -110,7 +110,7 @@ spec:
          -dataCenter={{ .Values.volume.dataCenter }} \
        {{- end }}
          -ip.bind={{ .Values.volume.ipBind }} \
-         -read.redirect={{ .Values.volume.readRedirect }} \
+         -readMode={{ .Values.volume.readMode }} \
        {{- if .Values.volume.whiteList }}
          -whiteList={{ .Values.volume.whiteList }} \
        {{- end }}
@@ -4,7 +4,6 @@ global:
   registry: ""
   repository: ""
   imageName: chrislusf/seaweedfs
-  # imageTag: "2.49" - started using {.Chart.appVersion}
   imagePullPolicy: IfNotPresent
   imagePullSecrets: imagepullsecret
   restartPolicy: Always
@@ -175,8 +174,8 @@ volume:
   # Volume server's data center name
   dataCenter: null

-  # Redirect moved or non-local volumes. (default true)
-  readRedirect: true
+  # Redirect moved or non-local volumes. (default proxy)
+  readMode: proxy

   # Comma separated Ip addresses having write permission. No limit if empty.
   whiteList: null
BIN note/SeaweedFS_Filer_Backup.png (new file, 95 KiB)
Binary file not shown.
@@ -5,7 +5,7 @@

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.6.6</version>
    <version>1.6.7</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.6.6</version>
    <version>1.6.7</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.6.6</version>
    <version>1.6.7</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
@@ -10,7 +10,7 @@ import java.util.List;

public class ByteBufferPool {

    private static final int MIN_BUFFER_SIZE = 8 * 1024 * 1024;
    private static final int MIN_BUFFER_SIZE = 1 * 1024 * 1024;
    private static final Logger LOG = LoggerFactory.getLogger(ByteBufferPool.class);

    private static final List<ByteBuffer> bufferList = new ArrayList<>();
@@ -0,0 +1,23 @@
package seaweedfs.client;

import java.io.IOException;

public class RemoteUtil {
    public static boolean isInRemoteOnly(FilerProto.Entry entry) {
        if (entry.getChunksList() == null || entry.getChunksList().isEmpty()) {
            return entry.getRemoteEntry() != null && entry.getRemoteEntry().getRemoteSize() > 0;
        }
        return false;
    }

    public static FilerProto.Entry downloadRemoteEntry(FilerClient filerClient, String fullpath, FilerProto.Entry entry) throws IOException {
        String dir = SeaweedOutputStream.getParentDirectory(fullpath);
        String name = SeaweedOutputStream.getFileName(fullpath);

        final FilerProto.DownloadToLocalResponse downloadToLocalResponse = filerClient.getBlockingStub()
                .downloadToLocal(FilerProto.DownloadToLocalRequest.newBuilder()
                        .setDirectory(dir).setName(name).build());

        return downloadToLocalResponse.getEntry();
    }
}
@@ -19,9 +19,9 @@ public class SeaweedInputStream extends InputStream {

    private final FilerClient filerClient;
    private final String path;
    private final FilerProto.Entry entry;
    private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
    private final long contentLength;
    private FilerProto.Entry entry;

    private long position = 0; // cursor of the file

@@ -35,10 +35,14 @@ public class SeaweedInputStream extends InputStream {
        this.entry = filerClient.lookupEntry(
                SeaweedOutputStream.getParentDirectory(fullpath),
                SeaweedOutputStream.getFileName(fullpath));
        if(entry == null){
        if (entry == null) {
            throw new FileNotFoundException();
        }

        if (RemoteUtil.isInRemoteOnly(entry)) {
            entry = RemoteUtil.downloadRemoteEntry(filerClient, fullpath, entry);
        }

        this.contentLength = SeaweedRead.fileSize(entry);

        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());
@@ -54,6 +58,11 @@ public class SeaweedInputStream extends InputStream {
        this.filerClient = filerClient;
        this.path = path;
        this.entry = entry;

        if (RemoteUtil.isInRemoteOnly(entry)) {
            this.entry = RemoteUtil.downloadRemoteEntry(filerClient, path, entry);
        }

        this.contentLength = SeaweedRead.fileSize(entry);

        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());
@@ -111,8 +120,8 @@ public class SeaweedInputStream extends InputStream {
        long bytesRead = 0;
        int len = buf.remaining();
        int start = (int) this.position;
        if (start+len <= entry.getContent().size()) {
            entry.getContent().substring(start, start+len).copyTo(buf);
        if (start + len <= entry.getContent().size()) {
            entry.getContent().substring(start, start + len).copyTo(buf);
        } else {
            bytesRead = SeaweedRead.read(this.filerClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
        }
@@ -32,10 +32,10 @@ public class SeaweedOutputStream extends OutputStream {
    private long lastTotalAppendOffset = 0;
    private ByteBuffer buffer;
    private long outputIndex;
    private String replication = "000";
    private String replication = "";

    public SeaweedOutputStream(FilerClient filerClient, final String fullpath) {
        this(filerClient, fullpath, "000");
        this(filerClient, fullpath, "");
    }

    public SeaweedOutputStream(FilerClient filerClient, final String fullpath, final String replication) {
@@ -67,6 +67,8 @@ service SeaweedFiler {
  rpc KvPut (KvPutRequest) returns (KvPutResponse) {
  }

  rpc DownloadToLocal (DownloadToLocalRequest) returns (DownloadToLocalResponse) {
  }
}

//////////////////////////////////////////////////
@@ -92,6 +94,13 @@ message ListEntriesResponse {
  Entry entry = 1;
}

message RemoteEntry {
  string storage_name = 1;
  int64 local_mtime = 2;
  string remote_e_tag = 3;
  int64 remote_mtime = 4;
  int64 remote_size = 5;
}
message Entry {
  string name = 1;
  bool is_directory = 2;
@@ -101,6 +110,8 @@ message Entry {
  bytes hard_link_id = 7;
  int32 hard_link_counter = 8; // only exists in hard link meta data
  bytes content = 9; // if not empty, the file content

  RemoteEntry remote_entry = 10;
}

message FullEntry {
@@ -208,6 +219,7 @@ message AtomicRenameEntryRequest {
  string old_name = 2;
  string new_directory = 3;
  string new_name = 4;
  repeated int32 signatures = 5;
}

message AtomicRenameEntryResponse {
@@ -292,6 +304,7 @@ message GetFilerConfigurationResponse {
  int32 signature = 8;
  string metrics_address = 9;
  int32 metrics_interval_sec = 10;
  string version = 11;
}

message SubscribeMetadataRequest {
@@ -334,7 +347,9 @@ message LocateBrokerResponse {
  repeated Resource resources = 2;
}

/////////////////////////
// Key-Value operations
/////////////////////////
message KvGetRequest {
  bytes key = 1;
}
@@ -350,7 +365,9 @@ message KvPutResponse {
  string error = 1;
}

/////////////////////////
// path-based configurations
/////////////////////////
message FilerConf {
  int32 version = 1;
  message PathConf {
@@ -361,6 +378,35 @@ message FilerConf {
    string disk_type = 5;
    bool fsync = 6;
    uint32 volume_growth_count = 7;
    bool read_only = 8;
  }
  repeated PathConf locations = 2;
}

/////////////////////////
// Remote Storage related
/////////////////////////
message RemoteConf {
  string type = 1;
  string name = 2;
  string s3_access_key = 4;
  string s3_secret_key = 5;
  string s3_region = 6;
  string s3_endpoint = 7;
}

message RemoteStorageMapping {
  map<string,RemoteStorageLocation> mappings = 1;
}
message RemoteStorageLocation {
  string name = 1;
  string bucket = 2;
  string path = 3;
}
message DownloadToLocalRequest {
  string directory = 1;
  string name = 2;
}
message DownloadToLocalResponse {
  Entry entry = 1;
}
@@ -11,13 +11,13 @@
    <dependency>
        <groupId>com.github.chrislusf</groupId>
        <artifactId>seaweedfs-client</artifactId>
        <version>1.6.6</version>
        <version>1.6.7</version>
        <scope>compile</scope>
    </dependency>
    <dependency>
        <groupId>com.github.chrislusf</groupId>
        <artifactId>seaweedfs-hadoop2-client</artifactId>
        <version>1.6.6</version>
        <version>1.6.7</version>
        <scope>compile</scope>
    </dependency>
    <dependency>
@@ -301,7 +301,7 @@
    </snapshotRepository>
  </distributionManagement>
  <properties>
    <seaweedfs.client.version>1.6.6</seaweedfs.client.version>
    <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
    <hadoop.version>2.9.2</hadoop.version>
  </properties>
</project>
@@ -5,7 +5,7 @@
  <modelVersion>4.0.0</modelVersion>

  <properties>
    <seaweedfs.client.version>1.6.6</seaweedfs.client.version>
    <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
    <hadoop.version>2.9.2</hadoop.version>
  </properties>

@@ -309,7 +309,7 @@
    </snapshotRepository>
  </distributionManagement>
  <properties>
    <seaweedfs.client.version>1.6.6</seaweedfs.client.version>
    <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
    <hadoop.version>3.1.1</hadoop.version>
  </properties>
</project>
@@ -5,7 +5,7 @@
  <modelVersion>4.0.0</modelVersion>

  <properties>
    <seaweedfs.client.version>1.6.6</seaweedfs.client.version>
    <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
    <hadoop.version>3.1.1</hadoop.version>
  </properties>

@ -1805,6 +1805,336 @@
|
|||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "graph",
|
||||
"title": "Process memory",
|
||||
"gridPos": {
|
||||
"x": 0,
|
||||
"y": 40,
|
||||
"w": 24,
|
||||
"h": 7
|
||||
},
|
||||
"aliasColors": {},
|
||||
"dashLength": 10,
|
||||
"datasource": "${DS_PROMETHEUS-DEV}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"links": []
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"id": 66,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"paceLength": 10,
|
||||
"pluginVersion": "7.4.5",
|
||||
"pointradius": 5,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [
|
||||
{
|
||||
"$$hashKey": "object:258",
|
||||
"alias": "max limit",
|
||||
"color": "#890f02"
|
||||
}
|
||||
],
|
||||
"spaceLength": 10,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "process_resident_memory_bytes{exported_job=\"filer\"}",
|
||||
"legendFormat": "resident {{exported_instance}}",
|
||||
"interval": "",
|
||||
"format": "time_series",
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"intervalFactor": 2,
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"value"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"step": 120,
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"expr": "process_virtual_memory_bytes{exported_job=\"filer\"}",
|
||||
"legendFormat": "virtual {{exported_instance}}",
|
||||
"interval": "",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 2,
|
||||
"refId": "B",
|
||||
"step": 120
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeRegions": [],
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 2,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:265",
|
||||
"format": "bytes",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:266",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
},
|
||||
"bars": false,
|
||||
"dashes": false,
|
||||
"fillGradient": 0,
|
||||
"hiddenSeries": false,
|
||||
"percentage": false,
|
||||
"points": false,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"timeFrom": null,
|
||||
"timeShift": null
|
||||
},
|
||||
{
|
||||
"type": "graph",
|
||||
"title": "Open File Descriptor",
|
||||
"gridPos": {
|
||||
"x": 0,
|
||||
"y": 47,
|
||||
"w": 24,
|
||||
"h": 7
|
||||
},
|
||||
"aliasColors": {},
|
||||
"dashLength": 10,
|
||||
"datasource": "${DS_PROMETHEUS-DEV}",
|
||||
"editable": true,
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"links": []
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"id": 68,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "connected",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"paceLength": 10,
|
||||
"pluginVersion": "7.4.5",
|
||||
"pointradius": 5,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "process_open_fds{exported_job=\"filer\"}",
|
||||
"legendFormat": "{{exported_instance}}",
|
||||
"interval": "",
|
||||
"format": "time_series",
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"intervalFactor": 2,
|
||||
"metric": "",
|
||||
"policy": "default",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"value"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"step": 240,
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"expr": "process_max_fds{exported_job=\"filer\"}",
|
||||
"legendFormat": "{{exported_instance}}",
|
||||
"interval": "",
|
||||
"format": "time_series",
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"intervalFactor": 2,
|
||||
"metric": "",
|
||||
"policy": "default",
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"value"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"step": 240,
|
||||
"tags": [],
|
||||
"hide": false
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeRegions": [],
|
||||
"tooltip": {
|
||||
"msResolution": false,
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:1087",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:1088",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
},
|
||||
"bars": false,
|
||||
"dashes": false,
|
||||
"error": false,
|
||||
"fillGradient": 0,
|
||||
"hiddenSeries": false,
|
||||
"percentage": false,
|
||||
"points": false,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"timeFrom": null,
|
||||
"timeShift": null
|
||||
}
|
||||
],
|
||||
"repeat": null,
|
||||
|
|
BIN test/data/187.idx (new file)
Binary file not shown.
@@ -1,21 +1,21 @@
package main

import (
    "context"
    "flag"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "google.golang.org/grpc"
    "io"
    "strconv"
    "time"
)

var (
    dir       = flag.String("dir", "/tmp", "directory to create files")
    n         = flag.Int("n", 100, "the number of metadata")
    tailFiler = flag.String("filer", "localhost:8888", "the filer address")
    isWrite = flag.Bool("write", false, "only write")
    isWrite   = flag.Bool("write", false, "only write")
)

func main() {
@@ -33,7 +33,7 @@ func main() {
            return nil
        }
        name := event.EventNotification.NewEntry.Name
        fmt.Printf("=> %s\n", name)
        glog.V(0).Infof("=> %s ts:%+v", name, time.Unix(0, event.TsNs))
        id := name[4:]
        if x, err := strconv.Atoi(id); err == nil {
            if x != expected {
@@ -43,6 +43,7 @@ func main() {
        } else {
            return err
        }
        time.Sleep(10 * time.Millisecond)
        return nil
    })

@@ -71,37 +72,9 @@ func startGenerateMetadata() {

func startSubscribeMetadata(eachEntryFunc func(event *filer_pb.SubscribeMetadataResponse) error) {

    lastTsNs := int64(0)
    tailErr := pb.FollowMetadata(*tailFiler, grpc.WithInsecure(), "tail",
        *dir, 0, 0, eachEntryFunc, false)

    tailErr := pb.WithFilerClient(*tailFiler, grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error {

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
            ClientName: "tail",
            PathPrefix: *dir,
            SinceNs:    lastTsNs,
        })
        if err != nil {
            return fmt.Errorf("listen: %v", err)
        }

        for {
            resp, listenErr := stream.Recv()
            if listenErr == io.EOF {
                return nil
            }
            if listenErr != nil {
                return listenErr
            }
            if err = eachEntryFunc(resp); err != nil {
                return err
            }
            lastTsNs = resp.TsNs
        }

    })
    if tailErr != nil {
        fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
    }
@@ -60,7 +60,7 @@ func walkMetaFile(dst *os.File) error {

        fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
        for i, chunk := range fullEntry.Entry.Chunks {
            fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String())
            fmt.Fprintf(os.Stdout, " chunk: %d %v %d,%x%08x\n", i+1, chunk, chunk.Fid.VolumeId, chunk.Fid.FileKey, chunk.Fid.Cookie)
        }

    }
|
@ -16,7 +16,7 @@ debug_shell:
|
|||
|
||||
debug_mount:
|
||||
go build -gcflags="all=-N -l"
|
||||
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
|
||||
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/
|
||||
|
||||
debug_server:
|
||||
go build -gcflags="all=-N -l"
|
||||
|
@@ -37,3 +37,7 @@ debug_s3:
debug_filer_copy:
	go build -gcflags="all=-N -l"
	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h

debug_filer_remote_sync:
	go build -gcflags="all=-N -l"
	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=10000h
109 weed/command/autocomplete.go (new file)
@ -0,0 +1,109 @@
|
|||
package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
|
||||
"github.com/posener/complete"
|
||||
completeinstall "github.com/posener/complete/cmd/install"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func AutocompleteMain(commands []*Command) bool {
|
||||
subCommands := make(map[string]complete.Command)
|
||||
helpSubCommands := make(map[string]complete.Command)
|
||||
for _, cmd := range commands {
|
||||
flags := make(map[string]complete.Predictor)
|
||||
cmd.Flag.VisitAll(func(flag *flag.Flag) {
|
||||
flags["-"+flag.Name] = complete.PredictAnything
|
||||
})
|
||||
|
||||
subCommands[cmd.Name()] = complete.Command{
|
||||
Flags: flags,
|
||||
}
|
||||
helpSubCommands[cmd.Name()] = complete.Command{}
|
||||
}
|
||||
subCommands["help"] = complete.Command{Sub: helpSubCommands}
|
||||
|
||||
globalFlags := make(map[string]complete.Predictor)
|
||||
flag.VisitAll(func(flag *flag.Flag) {
|
||||
globalFlags["-"+flag.Name] = complete.PredictAnything
|
||||
})
|
||||
|
||||
weedCmd := complete.Command{
|
||||
Sub: subCommands,
|
||||
Flags: globalFlags,
|
||||
GlobalFlags: complete.Flags{"-h": complete.PredictNothing},
|
||||
}
|
||||
cmp := complete.New("weed", weedCmd)
|
||||
|
||||
return cmp.Complete()
|
||||
}
|
||||
|
||||
func installAutoCompletion() bool {
|
||||
if runtime.GOOS == "windows" {
|
||||
fmt.Println("windows is not supported")
|
||||
return false
|
||||
}
|
||||
|
||||
err := completeinstall.Install("weed")
|
||||
if err != nil {
|
||||
fmt.Printf("install failed! %s\n", err)
|
||||
return false
|
||||
}
|
||||
fmt.Printf("autocompletion is enabled. Please restart your shell.\n")
|
||||
return true
|
||||
}
|
||||
|
||||
func uninstallAutoCompletion() bool {
|
||||
if runtime.GOOS == "windows" {
|
||||
fmt.Println("windows is not supported")
|
||||
return false
|
||||
}
|
||||
|
||||
err := completeinstall.Uninstall("weed")
|
||||
if err != nil {
|
||||
fmt.Printf("uninstall failed! %s\n", err)
|
||||
return false
|
||||
}
|
||||
fmt.Printf("autocompletion is disable. Please restart your shell.\n")
|
||||
return true
|
||||
}
|
||||
|
||||
var cmdAutocomplete = &Command{
|
||||
Run: runAutocomplete,
|
||||
UsageLine: "autocomplete",
|
||||
Short: "install autocomplete",
|
||||
Long: `weed autocomplete is installed in the shell.
|
||||
|
||||
Supported shells are bash, zsh, and fish.
|
||||
Windows is not supported.
|
||||
|
||||
`,
|
||||
}
|
||||
|
||||
func runAutocomplete(cmd *Command, args []string) bool {
|
||||
if len(args) != 0 {
|
||||
cmd.Usage()
|
||||
}
|
||||
|
||||
return installAutoCompletion()
|
||||
}
|
||||
|
||||
var cmdUnautocomplete = &Command{
|
||||
Run: runUnautocomplete,
|
||||
UsageLine: "autocomplete.uninstall",
|
||||
Short: "uninstall autocomplete",
|
||||
Long: `weed autocomplete is uninstalled in the shell.
|
||||
|
||||
Windows is not supported.
|
||||
|
||||
`,
|
||||
}
|
||||
|
||||
func runUnautocomplete(cmd *Command, args []string) bool {
|
||||
if len(args) != 0 {
|
||||
cmd.Usage()
|
||||
}
|
||||
|
||||
return uninstallAutoCompletion()
|
||||
}
|
|
@@ -8,17 +8,20 @@ import (
)

var Commands = []*Command{
    cmdBenchmark,
    cmdAutocomplete,
    cmdUnautocomplete,
    cmdBackup,
    cmdBenchmark,
    cmdCompact,
    cmdCopy,
    cmdDownload,
    cmdExport,
    cmdFiler,
    cmdFilerBackup,
    cmdFilerCat,
    cmdFilerCopy,
    cmdFilerMetaBackup,
    cmdFilerMetaTail,
    cmdFilerRemoteSynchronize,
    cmdFilerReplicate,
    cmdFilerSynchronize,
    cmdFix,
@@ -50,6 +50,8 @@ type FilerOptions struct {
    saveToFilerLimit        *int
    defaultLevelDbDirectory *string
    concurrentUploadLimitMB *int
    debug                   *bool
    debugPort               *int
}

func init() {
@@ -73,6 +75,8 @@ func init() {
    f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
    f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
    f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
    f.debug = cmdFiler.Flag.Bool("debug", false, "serves runtime profiling data, e.g., http://localhost:<debug.port>/debug/pprof/goroutine?debug=2")
    f.debugPort = cmdFiler.Flag.Int("debug.port", 6060, "http port for debugging")

    // start s3 on filer
    filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")
@@ -122,6 +126,9 @@ var cmdFiler = &Command{
}

func runFiler(cmd *Command, args []string) bool {
    if *f.debug {
        go http.ListenAndServe(fmt.Sprintf(":%d", *f.debugPort), nil)
    }

    util.LoadConfiguration("security", false)

@@ -1,16 +1,13 @@
package command

import (
    "context"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/pb"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/replication/source"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
    "google.golang.org/grpc"
    "io"
    "time"
)

@@ -52,11 +49,11 @@ var cmdFilerBackup = &Command{

func runFilerBackup(cmd *Command, args []string) bool {

    grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

    util.LoadConfiguration("security", false)
    util.LoadConfiguration("replication", true)

    grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

    for {
        err := doFilerBackup(grpcDialOption, &filerBackupOptions)
        if err != nil {
@@ -110,48 +107,12 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti

    processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)

    return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
            ClientName: "backup_" + dataSink.GetName(),
            PathPrefix: sourcePath,
            SinceNs:    startFrom.UnixNano(),
        })
        if err != nil {
            return fmt.Errorf("listen: %v", err)
        }

        var counter int64
        var lastWriteTime time.Time
        for {
            resp, listenErr := stream.Recv()

            if listenErr == io.EOF {
                return nil
            }
            if listenErr != nil {
                return listenErr
            }

            if err := processEventFn(resp); err != nil {
                return fmt.Errorf("processEventFn: %v", err)
            }

            counter++
            if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
                glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
                counter = 0
                lastWriteTime = time.Now()
                if err := setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), resp.TsNs); err != nil {
                    return fmt.Errorf("setOffset: %v", err)
                }
            }

        }

    })

    processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
        glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
        return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs)
    })

    return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_"+dataSink.GetName(),
        sourcePath, startFrom.UnixNano(), 0, processEventFnWithOffset, false)

}
@ -52,21 +52,21 @@ type CopyOptions struct {
|
|||
}
|
||||
|
||||
func init() {
|
||||
cmdCopy.Run = runCopy // break init cycle
|
||||
cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information")
|
||||
copy.include = cmdCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
|
||||
copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
|
||||
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
|
||||
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
|
||||
copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
||||
copy.maxMB = cmdCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
|
||||
copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
|
||||
copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
|
||||
copy.checkSize = cmdCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file")
|
||||
copy.verbose = cmdCopy.Flag.Bool("verbose", false, "print out details during copying")
|
||||
cmdFilerCopy.Run = runCopy // break init cycle
|
||||
cmdFilerCopy.IsDebug = cmdFilerCopy.Flag.Bool("debug", false, "verbose debug information")
|
||||
copy.include = cmdFilerCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
|
||||
copy.replication = cmdFilerCopy.Flag.String("replication", "", "replication type")
|
||||
copy.collection = cmdFilerCopy.Flag.String("collection", "", "optional collection name")
|
||||
copy.ttl = cmdFilerCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
|
||||
copy.diskType = cmdFilerCopy.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
||||
copy.maxMB = cmdFilerCopy.Flag.Int("maxMB", 4, "split files larger than the limit")
|
||||
copy.concurrenctFiles = cmdFilerCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
|
||||
copy.concurrenctChunks = cmdFilerCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file")
|
||||
copy.checkSize = cmdFilerCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file")
|
||||
copy.verbose = cmdFilerCopy.Flag.Bool("verbose", false, "print out details during copying")
|
||||
}
|
||||
|
||||
var cmdCopy = &Command{
|
||||
var cmdFilerCopy = &Command{
|
||||
UsageLine: "filer.copy file_or_dir1 [file_or_dir2 file_or_dir3] http://localhost:8888/path/to/a/folder/",
|
||||
Short: "copy one or a list of files to a filer folder",
|
||||
Long: `copy one or a list of files, or batch copy one whole folder recursively, to a filer folder
|
||||
|
@ -154,7 +154,7 @@ func runCopy(cmd *Command, args []string) bool {
|
|||
}
|
||||
copy.ttlSec = int32(ttl.Minutes()) * 60
|
||||
|
||||
if *cmdCopy.IsDebug {
|
||||
if *cmdFilerCopy.IsDebug {
|
||||
grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
|
||||
}
|
||||
|
||||
|
@ -213,11 +213,15 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
|
|||
|
||||
mode := fi.Mode()
|
||||
uid, gid := util.GetFileUidGid(fi)
|
||||
fileSize := fi.Size()
|
||||
if mode.IsDir() {
|
||||
fileSize = 0
|
||||
}
|
||||
|
||||
fileCopyTaskChan <- FileCopyTask{
|
||||
sourceLocation: fileOrDir,
|
||||
destinationUrlPath: destPath,
|
||||
fileSize: fi.Size(),
|
||||
fileSize: fileSize,
|
||||
fileMode: fi.Mode(),
|
||||
uid: uid,
|
||||
gid: gid,
|
||||
|
@ -377,6 +381,9 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
|
|||
if assignResult.Error != "" {
|
||||
return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
|
||||
}
|
||||
if assignResult.Url == "" {
|
||||
return fmt.Errorf("assign volume failure %v: %v", request, assignResult)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/spf13/viper"
|
||||
"google.golang.org/grpc"
|
||||
"io"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
|
@ -53,6 +52,7 @@ The backup writes to another filer store specified in a backup_filer.toml.
|
|||
|
||||
func runFilerMetaBackup(cmd *Command, args []string) bool {
|
||||
|
||||
util.LoadConfiguration("security", false)
|
||||
metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
// load backup_filer.toml
|
||||
|
@ -189,48 +189,15 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
tailErr := pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
|
||||
ClientName: "meta_backup",
|
||||
PathPrefix: *metaBackup.filerDirectory,
|
||||
SinceNs: startTime.UnixNano(),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("listen: %v", err)
|
||||
}
|
||||
|
||||
var counter int64
|
||||
var lastWriteTime time.Time
|
||||
for {
|
||||
resp, listenErr := stream.Recv()
|
||||
if listenErr == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if listenErr != nil {
|
||||
return listenErr
|
||||
}
|
||||
if err = eachEntryFunc(resp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
counter++
|
||||
if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
|
||||
glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
|
||||
counter = 0
|
||||
lastWriteTime = time.Now()
|
||||
if err2 := metaBackup.setOffset(lastWriteTime); err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
|
||||
lastTime := time.Unix(0, lastTsNs)
|
||||
glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
|
||||
return metaBackup.setOffset(lastTime)
|
||||
})
|
||||
return tailErr
|
||||
|
||||
return pb.FollowMetadata(*metaBackup.filerAddress, metaBackup.grpcDialOption, "meta_backup",
|
||||
*metaBackup.filerDirectory, startTime.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
|
||||
}
|
||||
|
||||
func (metaBackup *FilerMetaBackupOptions) getOffset() (lastWriteTime time.Time, err error) {
|
||||
|
|
|
@ -3,16 +3,15 @@ package command
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/olivere/elastic/v7"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/security"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
|
@ -45,6 +44,7 @@ var (
|
|||
|
||||
func runFilerMetaTail(cmd *Command, args []string) bool {
|
||||
|
||||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
var filterFunc func(dir, fname string) bool
|
||||
|
@ -103,37 +103,18 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
|
||||
ClientName: "tail",
|
||||
PathPrefix: *tailTarget,
|
||||
SinceNs: time.Now().Add(-*tailStart).UnixNano(),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("listen: %v", err)
|
||||
}
|
||||
|
||||
for {
|
||||
resp, listenErr := stream.Recv()
|
||||
if listenErr == io.EOF {
|
||||
tailErr := pb.FollowMetadata(*tailFiler, grpcDialOption, "tail",
|
||||
*tailTarget, time.Now().Add(-*tailStart).UnixNano(), 0,
|
||||
func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
if !shouldPrint(resp) {
|
||||
return nil
|
||||
}
|
||||
if listenErr != nil {
|
||||
return listenErr
|
||||
}
|
||||
if !shouldPrint(resp) {
|
||||
continue
|
||||
}
|
||||
if err = eachEntryFunc(resp); err != nil {
|
||||
if err := eachEntryFunc(resp); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}, false)
|
||||
|
||||
})
|
||||
if tailErr != nil {
|
||||
fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
|
||||
}
|
||||
|
|
257 weed/command/filer_remote_sync.go (new file)
@ -0,0 +1,257 @@
|
|||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||
"github.com/chrislusf/seaweedfs/weed/replication/source"
|
||||
"github.com/chrislusf/seaweedfs/weed/security"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc"
|
||||
"time"
|
||||
)
|
||||
|
||||
type RemoteSyncOptions struct {
|
||||
filerAddress *string
|
||||
grpcDialOption grpc.DialOption
|
||||
readChunkFromFiler *bool
|
||||
debug *bool
|
||||
timeAgo *time.Duration
|
||||
dir *string
|
||||
}
|
||||
|
||||
const (
|
||||
RemoteSyncKeyPrefix = "remote.sync."
|
||||
)
|
||||
|
||||
var _ = filer_pb.FilerClient(&RemoteSyncOptions{})
|
||||
|
||||
func (option *RemoteSyncOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
|
||||
return pb.WithFilerClient(*option.filerAddress, option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
return fn(client)
|
||||
})
|
||||
}
|
||||
func (option *RemoteSyncOptions) AdjustedUrl(location *filer_pb.Location) string {
|
||||
return location.Url
|
||||
}
|
||||
|
||||
var (
|
||||
remoteSyncOptions RemoteSyncOptions
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
|
||||
remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
|
||||
remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "/", "a mounted directory on filer")
|
||||
remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
|
||||
remoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool("debug", false, "debug mode to print out filer updated remote files")
|
||||
remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
|
||||
}
|
||||
|
||||
var cmdFilerRemoteSynchronize = &Command{
|
||||
UsageLine: "filer.remote.sync -filer=<filerHost>:<filerPort> -dir=/mount/s3_on_cloud",
|
||||
Short: "resumable continuously write back updates to remote storage if the directory is mounted to the remote storage",
|
||||
Long: `resumable continuously write back updates to remote storage if the directory is mounted to the remote storage
|
||||
|
||||
filer.remote.sync listens on filer update events.
|
||||
If any mounted remote file is updated, it will fetch the updated content,
|
||||
and write to the remote storage.
|
||||
`,
|
||||
}
|
||||
|
||||
func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
|
||||
|
||||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
remoteSyncOptions.grpcDialOption = grpcDialOption
|
||||
|
||||
// read filer remote storage mount mappings
|
||||
mappings, readErr := filer.ReadMountMappings(grpcDialOption, *remoteSyncOptions.filerAddress)
|
||||
if readErr != nil {
|
||||
fmt.Printf("read mount mapping: %v", readErr)
|
||||
return false
|
||||
}
|
||||
|
||||
filerSource := &source.FilerSource{}
|
||||
filerSource.DoInitialize(
|
||||
*remoteSyncOptions.filerAddress,
|
||||
pb.ServerToGrpcAddress(*remoteSyncOptions.filerAddress),
|
||||
"/", // does not matter
|
||||
*remoteSyncOptions.readChunkFromFiler,
|
||||
)
|
||||
|
||||
var found bool
|
||||
for dir, remoteStorageMountLocation := range mappings.Mappings {
|
||||
if *remoteSyncOptions.dir == dir {
|
||||
found = true
|
||||
storageConf, readErr := filer.ReadRemoteStorageConf(grpcDialOption, *remoteSyncOptions.filerAddress, remoteStorageMountLocation.Name)
|
||||
if readErr != nil {
|
||||
fmt.Printf("read remote storage configuration for %s: %v", dir, readErr)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("synchronize %s to remote storage...\n", *remoteSyncOptions.dir)
|
||||
if err := util.Retry("filer.remote.sync "+dir, func() error {
|
||||
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir, storageConf, remoteStorageMountLocation)
|
||||
}); err != nil {
|
||||
fmt.Printf("synchronize %s: %v\n", *remoteSyncOptions.dir, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
fmt.Printf("directory %s is not mounted to any remote storage\n", *remoteSyncOptions.dir)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string, remoteStorage *filer_pb.RemoteConf, remoteStorageMountLocation *filer_pb.RemoteStorageLocation) error {
|
||||
|
||||
dirHash := util.HashStringToLong(mountedDir)
|
||||
|
||||
// 1. specified by timeAgo
|
||||
// 2. last offset timestamp for this directory
|
||||
// 3. directory creation time
|
||||
var lastOffsetTs time.Time
|
||||
if *option.timeAgo == 0 {
|
||||
mountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))
|
||||
if err != nil {
|
||||
return fmt.Errorf("lookup %s: %v", mountedDir, err)
|
||||
}
|
||||
|
||||
lastOffsetTsNs, err := getOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash))
|
||||
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
|
||||
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
|
||||
glog.V(0).Infof("resume from %v", lastOffsetTs)
|
||||
} else {
|
||||
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
|
||||
}
|
||||
} else {
|
||||
lastOffsetTs = time.Now().Add(-*option.timeAgo)
|
||||
}
|
||||
|
||||
client, err := remote_storage.GetRemoteStorage(remoteStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
message := resp.EventNotification
|
||||
if message.OldEntry == nil && message.NewEntry == nil {
|
||||
return nil
|
||||
}
|
||||
if message.OldEntry == nil && message.NewEntry != nil {
|
||||
if len(message.NewEntry.Chunks) == 0 {
|
||||
return nil
|
||||
}
|
||||
fmt.Printf("create: %+v\n", resp)
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
fmt.Printf("skipping creating: %+v\n", resp)
|
||||
return nil
|
||||
}
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
|
||||
if message.NewEntry.IsDirectory {
|
||||
return client.WriteDirectory(dest, message.NewEntry)
|
||||
}
|
||||
reader := filer.NewChunkStreamReader(filerSource, message.NewEntry.Chunks)
|
||||
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry == nil {
|
||||
fmt.Printf("delete: %+v\n", resp)
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||
return client.DeleteFile(dest)
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry != nil {
|
||||
oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
|
||||
if !shouldSendToRemote(message.NewEntry) {
|
||||
fmt.Printf("skipping updating: %+v\n", resp)
|
||||
return nil
|
||||
}
|
||||
if message.NewEntry.IsDirectory {
|
||||
return client.WriteDirectory(dest, message.NewEntry)
|
||||
}
|
||||
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
|
||||
if isSameChunks(message.OldEntry.Chunks, message.NewEntry.Chunks) {
|
||||
fmt.Printf("update meta: %+v\n", resp)
|
||||
return client.UpdateFileMetadata(dest, message.NewEntry)
|
||||
}
|
||||
}
|
||||
fmt.Printf("update: %+v\n", resp)
|
||||
if err := client.DeleteFile(oldDest); err != nil {
|
||||
return err
|
||||
}
|
||||
reader := filer.NewChunkStreamReader(filerSource, message.NewEntry.Chunks)
|
||||
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
|
||||
if writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
|
||||
lastTime := time.Unix(0, lastTsNs)
|
||||
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
|
||||
return setOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash), lastTsNs)
|
||||
})
|
||||
|
||||
return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption,
|
||||
"filer.remote.sync", mountedDir, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
|
||||
}
|
||||
|
||||
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *filer_pb.RemoteStorageLocation) *filer_pb.RemoteStorageLocation {
|
||||
source := string(sourcePath[len(mountDir):])
|
||||
dest := util.FullPath(remoteMountLocation.Path).Child(source)
|
||||
return &filer_pb.RemoteStorageLocation{
|
||||
Name: remoteMountLocation.Name,
|
||||
Bucket: remoteMountLocation.Bucket,
|
||||
Path: string(dest),
|
||||
}
|
||||
}
|
||||
|
||||
func isSameChunks(a, b []*filer_pb.FileChunk) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(a); i++ {
|
||||
x, y := a[i], b[i]
|
||||
if !proto.Equal(x, y) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func shouldSendToRemote(entry *filer_pb.Entry) bool {
|
||||
if entry.RemoteEntry == nil {
|
||||
return true
|
||||
}
|
||||
if entry.RemoteEntry.LocalMtime < entry.Attributes.Mtime {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {
|
||||
entry.RemoteEntry = remoteEntry
|
||||
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
|
||||
Directory: dir,
|
||||
Entry: entry,
|
||||
})
|
||||
return err
|
||||
})
|
||||
}
|
|
@@ -7,12 +7,6 @@ import (
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/replication"
    "github.com/chrislusf/seaweedfs/weed/replication/sink"
    _ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
    _ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
    _ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
    _ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
    _ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
    _ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
    "github.com/chrislusf/seaweedfs/weed/replication/sub"
    "github.com/chrislusf/seaweedfs/weed/util"
)
@ -15,7 +15,6 @@ import (
|
|||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"github.com/chrislusf/seaweedfs/weed/util/grace"
|
||||
"google.golang.org/grpc"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
@ -71,8 +70,8 @@ func init() {
|
|||
|
||||
var cmdFilerSynchronize = &Command{
|
||||
UsageLine: "filer.sync -a=<oneFilerHost>:<oneFilerPort> -b=<otherFilerHost>:<otherFilerPort>",
|
||||
Short: "resumeable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
|
||||
Long: `resumeable continuous synchronization for file changes between two active-active or active-passive filers
|
||||
Short: "resumable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
|
||||
Long: `resumable continuous synchronization for file changes between two active-active or active-passive filers
|
||||
|
||||
filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content,
|
||||
and write to the other destination. Different from filer.replicate:
|
||||
|
@ -89,6 +88,7 @@ var cmdFilerSynchronize = &Command{
|
|||
|
||||
func runFilerSynchronize(cmd *Command, args []string) bool {
|
||||
|
||||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
grace.SetupProfiling(*syncCpuProfile, *syncMemProfile)
|
||||
|
@ -165,50 +165,14 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
|
|||
return persistEventFn(resp)
|
||||
}
|
||||
|
||||
return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
|
||||
ClientName: "syncTo_" + targetFiler,
|
||||
PathPrefix: sourcePath,
|
||||
SinceNs: sourceFilerOffsetTsNs,
|
||||
Signature: targetFilerSignature,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("listen: %v", err)
|
||||
}
|
||||
|
||||
var counter int64
|
||||
var lastWriteTime time.Time
|
||||
for {
|
||||
resp, listenErr := stream.Recv()
|
||||
if listenErr == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if listenErr != nil {
|
||||
return listenErr
|
||||
}
|
||||
|
||||
if err := processEventFn(resp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
counter++
|
||||
if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
|
||||
glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
|
||||
counter = 0
|
||||
lastWriteTime = time.Now()
|
||||
if err := setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, resp.TsNs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
|
||||
glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
|
||||
return setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, lastTsNs)
|
||||
})
|
||||
|
||||
return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+targetFiler,
|
||||
sourcePath, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
|
||||
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -359,16 +323,19 @@ func genProcessFunction(sourcePath string, targetPath string, dataSink sink.Repl
|
|||
return processEventFn
|
||||
}
|
||||
|
||||
func buildKey(dataSink sink.ReplicationSink, message *filer_pb.EventNotification, targetPath string, sourceKey util.FullPath, sourcePath string) string {
|
||||
func buildKey(dataSink sink.ReplicationSink, message *filer_pb.EventNotification, targetPath string, sourceKey util.FullPath, sourcePath string) (key string) {
|
||||
if !dataSink.IsIncremental() {
|
||||
return util.Join(targetPath, string(sourceKey)[len(sourcePath):])
|
||||
key = util.Join(targetPath, string(sourceKey)[len(sourcePath):])
|
||||
} else {
|
||||
var mTime int64
|
||||
if message.NewEntry != nil {
|
||||
mTime = message.NewEntry.Attributes.Mtime
|
||||
} else if message.OldEntry != nil {
|
||||
mTime = message.OldEntry.Attributes.Mtime
|
||||
}
|
||||
dateKey := time.Unix(mTime, 0).Format("2006-01-02")
|
||||
key = util.Join(targetPath, dateKey, string(sourceKey)[len(sourcePath):])
|
||||
}
|
||||
var mTime int64
|
||||
if message.NewEntry != nil {
|
||||
mTime = message.NewEntry.Attributes.Mtime
|
||||
} else if message.OldEntry != nil {
|
||||
mTime = message.OldEntry.Attributes.Mtime
|
||||
}
|
||||
dateKey := time.Unix(mTime, 0).Format("2006-01-02")
|
||||
return util.Join(targetPath, dateKey, string(sourceKey)[len(sourcePath):])
|
||||
|
||||
return escapeKey(key)
|
||||
}
|
||||
|
|
7 weed/command/filer_sync_std.go (new file)
@@ -0,0 +1,7 @@
// +build !windows

package command

func escapeKey(key string) string {
    return key
}
12 weed/command/filer_sync_windows.go (new file)
@@ -0,0 +1,12 @@
package command

import (
    "strings"
)

func escapeKey(key string) string {
    if strings.Contains(key, ":") {
        return strings.ReplaceAll(key, ":", "")
    }
    return key
}
@@ -2,140 +2,232 @@ package command

import (
	"fmt"
	"strings"
	"strconv"
	"time"
	"os"
	"strconv"
	"strings"
	"time"
)

func init() {
	cmdFuse.Run = runFuse // break init cycle
}

type parameter struct {
	name  string
	value string
}

func runFuse(cmd *Command, args []string) bool {
	argsLen := len(args)
	options := []string{}
	rawArgs := strings.Join(args, " ")
	rawArgsLen := len(rawArgs)
	option := strings.Builder{}
	options := []parameter{}
	masterProcess := true
	fusermountPath := ""

	// at least target mount path should be passed
	if argsLen < 1 {
		return false
	// first parameter
	i := 0
	for i = 0; i < rawArgsLen && rawArgs[i] != ' '; i++ {
		option.WriteByte(rawArgs[i])
	}
	options = append(options, parameter{"arg0", option.String()})
	option.Reset()

	for i++; i < rawArgsLen; i++ {

		// space separator check for filled option
		if rawArgs[i] == ' ' {
			if option.Len() > 0 {
				options = append(options, parameter{option.String(), "true"})
				option.Reset()
			}

		// dash separator read option until next space
		} else if rawArgs[i] == '-' {
			for i++; i < rawArgsLen && rawArgs[i] != ' '; i++ {
				option.WriteByte(rawArgs[i])
			}
			options = append(options, parameter{option.String(), "true"})
			option.Reset()

		// equal separator start option with pending value
		} else if rawArgs[i] == '=' {
			name := option.String()
			option.Reset()

			for i++; i < rawArgsLen && rawArgs[i] != ',' && rawArgs[i] != ' '; i++ {
				// double quote separator read option until next double quote
				if rawArgs[i] == '"' {
					for i++; i < rawArgsLen && rawArgs[i] != '"'; i++ {
						option.WriteByte(rawArgs[i])
					}

				// single quote separator read option until next single quote
				} else if rawArgs[i] == '\'' {
					for i++; i < rawArgsLen && rawArgs[i] != '\''; i++ {
						option.WriteByte(rawArgs[i])
					}

				// add chars before comma
				} else if rawArgs[i] != ' ' {
					option.WriteByte(rawArgs[i])
				}
			}

			options = append(options, parameter{name, option.String()})
			option.Reset()

		// comma separator just read current option
		} else if rawArgs[i] == ',' {
			options = append(options, parameter{option.String(), "true"})
			option.Reset()

		// what is not a separator fill option buffer
		} else {
			option.WriteByte(rawArgs[i])
		}
	}

	// first option is always target mount path
	mountOptions.dir = &args[0]
	// get residual option data
	if option.Len() > 0 {
		// add value to pending option
		options = append(options, parameter{option.String(), "true"})
		option.Reset()
	}

	// scan parameters looking for one or more -o options
	// -o options receive parameters on format key=value[,key=value]...
	for i := 0; i < argsLen; i++ {
		if args[i] == "-o" && i+1 <= argsLen {
			options = strings.Split(args[i+1], ",")
	// scan each parameter
	for i := 0; i < len(options); i++ {
		parameter := options[i]

		switch parameter.name {
		case "child":
			masterProcess = false
		case "arg0":
			mountOptions.dir = &parameter.value
		case "filer":
			mountOptions.filer = &parameter.value
		case "filer.path":
			mountOptions.filerMountRootPath = &parameter.value
		case "dirAutoCreate":
			if parsed, err := strconv.ParseBool(parameter.value); err == nil {
				mountOptions.dirAutoCreate = &parsed
			} else {
				panic(fmt.Errorf("dirAutoCreate: %s", err))
			}
		case "collection":
			mountOptions.collection = &parameter.value
		case "replication":
			mountOptions.replication = &parameter.value
		case "disk":
			mountOptions.diskType = &parameter.value
		case "ttl":
			if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil {
				intValue := int(parsed)
				mountOptions.ttlSec = &intValue
			} else {
				panic(fmt.Errorf("ttl: %s", err))
			}
		case "chunkSizeLimitMB":
			if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil {
				intValue := int(parsed)
				mountOptions.chunkSizeLimitMB = &intValue
			} else {
				panic(fmt.Errorf("chunkSizeLimitMB: %s", err))
			}
		case "concurrentWriters":
			i++
			if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil {
				intValue := int(parsed)
				mountOptions.concurrentWriters = &intValue
			} else {
				panic(fmt.Errorf("concurrentWriters: %s", err))
			}
		case "cacheDir":
			mountOptions.cacheDir = &parameter.value
		case "cacheCapacityMB":
			if parsed, err := strconv.ParseInt(parameter.value, 0, 64); err == nil {
				mountOptions.cacheSizeMB = &parsed
			} else {
				panic(fmt.Errorf("cacheCapacityMB: %s", err))
			}
		case "dataCenter":
			mountOptions.dataCenter = &parameter.value
		case "allowOthers":
			if parsed, err := strconv.ParseBool(parameter.value); err == nil {
				mountOptions.allowOthers = &parsed
			} else {
				panic(fmt.Errorf("allowOthers: %s", err))
			}
		case "umask":
			mountOptions.umaskString = &parameter.value
		case "nonempty":
			if parsed, err := strconv.ParseBool(parameter.value); err == nil {
				mountOptions.nonempty = &parsed
			} else {
				panic(fmt.Errorf("nonempty: %s", err))
			}
		case "volumeServerAccess":
			mountOptions.volumeServerAccess = &parameter.value
		case "map.uid":
			mountOptions.uidMap = &parameter.value
		case "map.gid":
			mountOptions.gidMap = &parameter.value
		case "readOnly":
			if parsed, err := strconv.ParseBool(parameter.value); err == nil {
				mountOptions.readOnly = &parsed
			} else {
				panic(fmt.Errorf("readOnly: %s", err))
			}
		case "cpuprofile":
			mountCpuProfile = &parameter.value
		case "memprofile":
			mountMemProfile = &parameter.value
		case "readRetryTime":
			if parsed, err := time.ParseDuration(parameter.value); err == nil {
				mountReadRetryTime = &parsed
			} else {
				panic(fmt.Errorf("readRetryTime: %s", err))
			}
		case "fusermount.path":
			fusermountPath = parameter.value
		}
	}

	// for each option passed with -o
	for _, option := range options {
		// split just first = character
		parts := strings.SplitN(option, "=", 2)

		// if doesn't key and value skip
		if len(parts) != 2 {
			continue
	// the master start the child, release it then finish himself
	if masterProcess {
		arg0, err := os.Executable()
		if err != nil {
			panic(err)
		}

		key, value := parts[0], parts[1]
		argv := append(os.Args, "-o", "child")

		// switch key keeping "weed mount" parameters
		switch key {
		case "filer":
			mountOptions.filer = &value
		case "filer.path":
			mountOptions.filerMountRootPath = &value
		case "dirAutoCreate":
			if parsed, err := strconv.ParseBool(value); err != nil {
				mountOptions.dirAutoCreate = &parsed
			} else {
				panic(fmt.Errorf("dirAutoCreate: %s", err))
			}
		case "collection":
			mountOptions.collection = &value
		case "replication":
			mountOptions.replication = &value
		case "disk":
			mountOptions.diskType = &value
		case "ttl":
			if parsed, err := strconv.ParseInt(value, 0, 32); err != nil {
				intValue := int(parsed)
				mountOptions.ttlSec = &intValue
			} else {
				panic(fmt.Errorf("ttl: %s", err))
			}
		case "chunkSizeLimitMB":
			if parsed, err := strconv.ParseInt(value, 0, 32); err != nil {
				intValue := int(parsed)
				mountOptions.chunkSizeLimitMB = &intValue
			} else {
				panic(fmt.Errorf("chunkSizeLimitMB: %s", err))
			}
		case "concurrentWriters":
			if parsed, err := strconv.ParseInt(value, 0, 32); err != nil {
				intValue := int(parsed)
				mountOptions.concurrentWriters = &intValue
			} else {
				panic(fmt.Errorf("concurrentWriters: %s", err))
			}
		case "cacheDir":
			mountOptions.cacheDir = &value
		case "cacheCapacityMB":
			if parsed, err := strconv.ParseInt(value, 0, 64); err != nil {
				mountOptions.cacheSizeMB = &parsed
			} else {
				panic(fmt.Errorf("cacheCapacityMB: %s", err))
			}
		case "dataCenter":
			mountOptions.dataCenter = &value
		case "allowOthers":
			if parsed, err := strconv.ParseBool(value); err != nil {
				mountOptions.allowOthers = &parsed
			} else {
				panic(fmt.Errorf("allowOthers: %s", err))
			}
		case "umask":
			mountOptions.umaskString = &value
		case "nonempty":
			if parsed, err := strconv.ParseBool(value); err != nil {
				mountOptions.nonempty = &parsed
			} else {
				panic(fmt.Errorf("nonempty: %s", err))
			}
		case "volumeServerAccess":
			mountOptions.volumeServerAccess = &value
		case "map.uid":
			mountOptions.uidMap = &value
		case "map.gid":
			mountOptions.gidMap = &value
		case "readOnly":
			if parsed, err := strconv.ParseBool(value); err != nil {
				mountOptions.readOnly = &parsed
			} else {
				panic(fmt.Errorf("readOnly: %s", err))
			}
		case "cpuprofile":
			mountCpuProfile = &value
		case "memprofile":
			mountMemProfile = &value
		case "readRetryTime":
			if parsed, err := time.ParseDuration(value); err != nil {
				mountReadRetryTime = &parsed
			} else {
				panic(fmt.Errorf("readRetryTime: %s", err))
			}
		attr := os.ProcAttr{}
		attr.Env = os.Environ()

		child, err := os.StartProcess(arg0, argv, &attr)

		if err != nil {
			panic(fmt.Errorf("master process can not start child process: %s", err))
		}

		err = child.Release()

		if err != nil {
			panic(fmt.Errorf("master process can not release child process: %s", err))
		}

		return true
	}

	// I don't know why PATH environment variable is lost
	if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"); err != nil {
		panic(fmt.Errorf("setenv: %s", err))
	if fusermountPath != "" {
		if err := os.Setenv("PATH", fusermountPath); err != nil {
			panic(fmt.Errorf("setenv: %s", err))
		}
	} else if os.Getenv("PATH") == "" {
		if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin"); err != nil {
			panic(fmt.Errorf("setenv: %s", err))
		}
	}

	// just call "weed mount" command

@@ -144,7 +236,7 @@ func runFuse(cmd *Command, args []string) bool {

var cmdFuse = &Command{
	UsageLine: "fuse /mnt/mount/point -o \"filer=localhost:8888,filer.path=/\"",
	Short:     "Allow use weed with linux's mount command",
	Short:     "Allow use weed with linux's mount command",
	Long: `Allow use weed with linux's mount command

You can use -t weed on mount command:

@@ -160,6 +252,9 @@ var cmdFuse = &Command{
mount -t fuse./home/user/bin/weed fuse /mnt -o "filer=localhost:8888,filer.path=/"
mount -t fuse "/home/user/bin/weed#fuse" /mnt -o "filer=localhost:8888,filer.path=/"

To pass more than one parameter use quotes, example:
mount -t weed fuse /mnt -o "filer='192.168.0.1:8888,192.168.0.2:8888',filer.path=/"

To check valid options look "weed mount --help"
`,
}
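The rewritten runFuse above tokenizes the whole mount(8)-style argument string itself, so a quoted value (for example a comma-separated filer list) is not split apart at its inner commas. A deliberately simplified, self-contained sketch of that splitting concern; this is an illustration, not the actual parser:

```go
package main

import "fmt"

// splitMountOptions splits a mount(8)-style option string such as
//   filer='192.168.0.1:8888,192.168.0.2:8888',filer.path=/
// on commas, but keeps commas that appear inside single or double quotes.
func splitMountOptions(s string) []string {
	var parts []string
	var cur []byte
	var quote byte // 0 when not inside quotes
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case quote != 0:
			if c == quote {
				quote = 0 // closing quote: drop it
			} else {
				cur = append(cur, c)
			}
		case c == '\'' || c == '"':
			quote = c // opening quote: drop it
		case c == ',':
			parts = append(parts, string(cur))
			cur = cur[:0]
		default:
			cur = append(cur, c)
		}
	}
	return append(parts, string(cur))
}

func main() {
	for _, p := range splitMountOptions(`filer='192.168.0.1:8888,192.168.0.2:8888',filer.path=/,nonempty`) {
		fmt.Println(p)
	}
	// filer=192.168.0.1:8888,192.168.0.2:8888
	// filer.path=/
	// nonempty
}
```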
@@ -49,6 +49,7 @@ func (iamopt *IamOptions) startIamServer() bool {
		return false
	}

	util.LoadConfiguration("security", false)
	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
	for {
		err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
30	weed/command/imports.go	Normal file
@@ -0,0 +1,30 @@
package command

import (
	_ "net/http/pprof"

	_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3"

	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"

	_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
	_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
	_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
	_ "github.com/chrislusf/seaweedfs/weed/filer/hbase"
	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3"
	_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
	_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
	_ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
	_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
	_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
	_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
	_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
	_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
)
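The new imports.go pulls these packages in for their side effects only: each one registers a filer store or replication sink from its init function. A self-contained sketch of that blank-import registration pattern; the registry and store names here are illustrative, not SeaweedFS's actual types:

```go
package main

import "fmt"

// A tiny registry, analogous to how filer stores announce themselves.
var stores = map[string]func() string{}

// RegisterStore is what each backend would call from its init().
func RegisterStore(name string, factory func() string) {
	stores[name] = factory
}

// In SeaweedFS each backend lives in its own package and registers itself
// in init(); a blank import like `_ ".../filer/leveldb2"` is enough to make
// the backend selectable at runtime. Here we register inline for brevity.
func init() {
	RegisterStore("leveldb2", func() string { return "leveldb2 store ready" })
}

func main() {
	if factory, ok := stores["leveldb2"]; ok {
		fmt.Println(factory())
	}
}
```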
@@ -5,15 +5,18 @@ package command
import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
	"os"
	"os/user"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/chrislusf/seaweedfs/weed/storage/types"

	"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"

	"github.com/seaweedfs/fuse"

@@ -49,6 +52,21 @@ func runMount(cmd *Command, args []string) bool {
	return RunMount(&mountOptions, os.FileMode(umask))
}

func getParentInode(mountDir string) (uint64, error) {
	parentDir := filepath.Clean(filepath.Join(mountDir, ".."))
	fi, err := os.Stat(parentDir)
	if err != nil {
		return 0, err
	}

	stat, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, nil
	}

	return stat.Ino, nil
}

func RunMount(option *MountOptions, umask os.FileMode) bool {

	filers := strings.Split(*option.filer, ",")

@@ -85,13 +103,19 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {

	filerMountRootPath := *option.filerMountRootPath
	dir := util.ResolvePath(*option.dir)
	chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
	parentInode, err := getParentInode(dir)
	if err != nil {
		glog.Errorf("failed to retrieve inode for parent directory of %s: %v", dir, err)
		return true
	}

	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
	if dir == "" {
		fmt.Printf("Please specify the mount directory via \"-dir\"")
		return false
	}

	chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB
	if chunkSizeLimitMB <= 0 {
		fmt.Printf("Please specify a reasonable buffer size.")
		return false

@@ -199,6 +223,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
		MountMode:          mountMode,
		MountCtime:         fileInfo.ModTime(),
		MountMtime:         time.Now(),
		MountParentInode:   parentInode,
		Umask:              umask,
		VolumeServerAccess: *mountOptions.volumeServerAccess,
		Cipher:             cipher,

@@ -221,6 +246,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
	glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir)
	server := fs.New(c, nil)
	seaweedFileSystem.Server = server
	seaweedFileSystem.StartBackgroundTasks()
	err = server.Serve(seaweedFileSystem)

	// check if the mount process has an error to report
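RunMount now records the inode of the mount point's parent directory (MountParentInode), obtained via getParentInode above. A standalone, Unix-only sketch of the same stat/type-assertion pattern; the directory path below is just an example:

```go
// +build !windows

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// parentInode mirrors getParentInode: stat the parent directory and,
// on Unix, pull the inode number out of the platform-specific Stat_t.
func parentInode(mountDir string) (uint64, error) {
	parentDir := filepath.Clean(filepath.Join(mountDir, ".."))
	fi, err := os.Stat(parentDir)
	if err != nil {
		return 0, err
	}
	stat, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, nil // non-Unix stat: no inode available
	}
	return stat.Ino, nil
}

func main() {
	ino, err := parentInode("/tmp/weed-mount") // example path
	fmt.Println(ino, err)
}
```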
@@ -1,6 +1,8 @@
package command

import (
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/command/scaffold"
	"io/ioutil"
	"path/filepath"
)

@@ -35,17 +37,17 @@ func runScaffold(cmd *Command, args []string) bool {
	content := ""
	switch *config {
	case "filer":
		content = FILER_TOML_EXAMPLE
		content = scaffold.Filer
	case "notification":
		content = NOTIFICATION_TOML_EXAMPLE
		content = scaffold.Notification
	case "replication":
		content = REPLICATION_TOML_EXAMPLE
		content = scaffold.Replication
	case "security":
		content = SECURITY_TOML_EXAMPLE
		content = scaffold.Security
	case "master":
		content = MASTER_TOML_EXAMPLE
		content = scaffold.Master
	case "shell":
		content = SHELL_TOML_EXAMPLE
		content = scaffold.Shell
	}
	if content == "" {
		println("need a valid -config option")

@@ -55,519 +57,7 @@ func runScaffold(cmd *Command, args []string) bool {
	if *outputPath != "" {
		ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
	} else {
		println(content)
		fmt.Println(content)
	}
	return true
}

const (
|
||||
FILER_TOML_EXAMPLE = `
|
||||
# A sample TOML config file for SeaweedFS filer store
|
||||
# Used with "weed filer" or "weed server -filer"
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./filer.toml
|
||||
# $HOME/.seaweedfs/filer.toml
|
||||
# /etc/seaweedfs/filer.toml
|
||||
|
||||
####################################################
|
||||
# Customizable filer server options
|
||||
####################################################
|
||||
[filer.options]
|
||||
# with http DELETE, by default the filer would check whether a folder is empty.
|
||||
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
|
||||
recursive_delete = false
|
||||
# directories under this folder will be automatically creating a separate bucket
|
||||
buckets_folder = "/buckets"
|
||||
|
||||
####################################################
|
||||
# The following are filer store options
|
||||
####################################################
|
||||
|
||||
[leveldb2]
|
||||
# local on disk, mostly for simple single-machine setup, fairly scalable
|
||||
# faster than previous leveldb, recommended.
|
||||
enabled = true
|
||||
dir = "./filerldb2" # directory to store level db files
|
||||
|
||||
[leveldb3]
|
||||
# similar to leveldb2.
|
||||
# each bucket has its own meta store.
|
||||
enabled = false
|
||||
dir = "./filerldb3" # directory to store level db files
|
||||
|
||||
[rocksdb]
|
||||
# local on disk, similar to leveldb
|
||||
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
|
||||
enabled = false
|
||||
dir = "./filerrdb" # directory to store rocksdb files
|
||||
|
||||
[sqlite]
|
||||
# local on disk, similar to leveldb
|
||||
enabled = false
|
||||
dbFile = "./filer.db" # sqlite db file
|
||||
|
||||
[mysql] # or memsql, tidb
|
||||
# CREATE TABLE IF NOT EXISTS filemeta (
|
||||
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
|
||||
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
|
||||
# directory TEXT COMMENT 'full path to parent directory',
|
||||
# meta LONGBLOB,
|
||||
# PRIMARY KEY (dirhash, name)
|
||||
# ) DEFAULT CHARSET=utf8;
|
||||
|
||||
enabled = false
|
||||
hostname = "localhost"
|
||||
port = 3306
|
||||
username = "root"
|
||||
password = ""
|
||||
database = "" # create or use an existing database
|
||||
connection_max_idle = 2
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
interpolateParams = false
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
|
||||
|
||||
[mysql2] # or memsql, tidb
|
||||
enabled = false
|
||||
createTable = """
|
||||
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
|
||||
dirhash BIGINT,
|
||||
name VARCHAR(1000) BINARY,
|
||||
directory TEXT,
|
||||
meta LONGBLOB,
|
||||
PRIMARY KEY (dirhash, name)
|
||||
) DEFAULT CHARSET=utf8;
|
||||
"""
|
||||
hostname = "localhost"
|
||||
port = 3306
|
||||
username = "root"
|
||||
password = ""
|
||||
database = "" # create or use an existing database
|
||||
connection_max_idle = 2
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
interpolateParams = false
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
|
||||
|
||||
[postgres] # or cockroachdb, YugabyteDB
|
||||
# CREATE TABLE IF NOT EXISTS filemeta (
|
||||
# dirhash BIGINT,
|
||||
# name VARCHAR(65535),
|
||||
# directory VARCHAR(65535),
|
||||
# meta bytea,
|
||||
# PRIMARY KEY (dirhash, name)
|
||||
# );
|
||||
enabled = false
|
||||
hostname = "localhost"
|
||||
port = 5432
|
||||
username = "postgres"
|
||||
password = ""
|
||||
database = "postgres" # create or use an existing database
|
||||
schema = ""
|
||||
sslmode = "disable"
|
||||
connection_max_idle = 100
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
|
||||
|
||||
[postgres2]
|
||||
enabled = false
|
||||
createTable = """
|
||||
CREATE TABLE IF NOT EXISTS "%s" (
|
||||
dirhash BIGINT,
|
||||
name VARCHAR(65535),
|
||||
directory VARCHAR(65535),
|
||||
meta bytea,
|
||||
PRIMARY KEY (dirhash, name)
|
||||
);
|
||||
"""
|
||||
hostname = "localhost"
|
||||
port = 5432
|
||||
username = "postgres"
|
||||
password = ""
|
||||
database = "postgres" # create or use an existing database
|
||||
schema = ""
|
||||
sslmode = "disable"
|
||||
connection_max_idle = 100
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
|
||||
|
||||
[cassandra]
|
||||
# CREATE TABLE filemeta (
|
||||
# directory varchar,
|
||||
# name varchar,
|
||||
# meta blob,
|
||||
# PRIMARY KEY (directory, name)
|
||||
# ) WITH CLUSTERING ORDER BY (name ASC);
|
||||
enabled = false
|
||||
keyspace="seaweedfs"
|
||||
hosts=[
|
||||
"localhost:9042",
|
||||
]
|
||||
username=""
|
||||
password=""
|
||||
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
|
||||
superLargeDirectories = []
|
||||
|
||||
[hbase]
|
||||
enabled = false
|
||||
zkquorum = ""
|
||||
table = "seaweedfs"
|
||||
|
||||
[redis2]
|
||||
enabled = false
|
||||
address = "localhost:6379"
|
||||
password = ""
|
||||
database = 0
|
||||
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
|
||||
superLargeDirectories = []
|
||||
|
||||
[redis_cluster2]
|
||||
enabled = false
|
||||
addresses = [
|
||||
"localhost:30001",
|
||||
"localhost:30002",
|
||||
"localhost:30003",
|
||||
"localhost:30004",
|
||||
"localhost:30005",
|
||||
"localhost:30006",
|
||||
]
|
||||
password = ""
|
||||
# allows reads from slave servers or the master, but all writes still go to the master
|
||||
readOnly = false
|
||||
# automatically use the closest Redis server for reads
|
||||
routeByLatency = false
|
||||
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
|
||||
superLargeDirectories = []
|
||||
|
||||
[etcd]
|
||||
enabled = false
|
||||
servers = "localhost:2379"
|
||||
timeout = "3s"
|
||||
|
||||
[mongodb]
|
||||
enabled = false
|
||||
uri = "mongodb://localhost:27017"
|
||||
option_pool_size = 0
|
||||
database = "seaweedfs"
|
||||
|
||||
[elastic7]
|
||||
enabled = false
|
||||
servers = [
|
||||
"http://localhost1:9200",
|
||||
"http://localhost2:9200",
|
||||
"http://localhost3:9200",
|
||||
]
|
||||
username = ""
|
||||
password = ""
|
||||
sniff_enabled = false
|
||||
healthcheck_enabled = false
|
||||
# increase the value is recommend, be sure the value in Elastic is greater or equal here
|
||||
index.max_result_window = 10000
|
||||
|
||||
|
||||
|
||||
##########################
|
||||
##########################
|
||||
# To add path-specific filer store:
|
||||
#
|
||||
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
|
||||
# 2. Add a location configuraiton. E.g., location = "/tmp/"
|
||||
# 3. Copy and customize all other configurations.
|
||||
# Make sure they are not the same if using the same store type!
|
||||
# 4. Set enabled to true
|
||||
#
|
||||
# The following is just using redis as an example
|
||||
##########################
|
||||
[redis2.tmp]
|
||||
enabled = false
|
||||
location = "/tmp/"
|
||||
address = "localhost:6379"
|
||||
password = ""
|
||||
database = 1
|
||||
|
||||
`
|
||||
|
||||
NOTIFICATION_TOML_EXAMPLE = `
|
||||
# A sample TOML config file for SeaweedFS filer store
|
||||
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./notification.toml
|
||||
# $HOME/.seaweedfs/notification.toml
|
||||
# /etc/seaweedfs/notification.toml
|
||||
|
||||
####################################################
|
||||
# notification
|
||||
# send and receive filer updates for each file to an external message queue
|
||||
####################################################
|
||||
[notification.log]
|
||||
# this is only for debugging perpose and does not work with "weed filer.replicate"
|
||||
enabled = false
|
||||
|
||||
|
||||
[notification.kafka]
|
||||
enabled = false
|
||||
hosts = [
|
||||
"localhost:9092"
|
||||
]
|
||||
topic = "seaweedfs_filer"
|
||||
offsetFile = "./last.offset"
|
||||
offsetSaveIntervalSeconds = 10
|
||||
|
||||
|
||||
[notification.aws_sqs]
|
||||
# experimental, let me know if it works
|
||||
enabled = false
|
||||
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
region = "us-east-2"
|
||||
sqs_queue_name = "my_filer_queue" # an existing queue name
|
||||
|
||||
|
||||
[notification.google_pub_sub]
|
||||
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
|
||||
enabled = false
|
||||
google_application_credentials = "/path/to/x.json" # path to json credential file
|
||||
project_id = "" # an existing project id
|
||||
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
|
||||
|
||||
[notification.gocdk_pub_sub]
|
||||
# The Go Cloud Development Kit (https://gocloud.dev).
|
||||
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
|
||||
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
|
||||
enabled = false
|
||||
# This URL will Dial the RabbitMQ server at the URL in the environment
|
||||
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
|
||||
# The exchange must have already been created by some other means, like
|
||||
# the RabbitMQ management plugin. Сreate myexchange of type fanout and myqueue then
|
||||
# create binding myexchange => myqueue
|
||||
topic_url = "rabbit://myexchange"
|
||||
sub_url = "rabbit://myqueue"
|
||||
`
|
||||
|
||||
REPLICATION_TOML_EXAMPLE = `
|
||||
# A sample TOML config file for replicating SeaweedFS filer
|
||||
# Used with "weed filer.backup"
|
||||
# Using with "weed filer.replicate" is deprecated.
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./replication.toml
|
||||
# $HOME/.seaweedfs/replication.toml
|
||||
# /etc/seaweedfs/replication.toml
|
||||
|
||||
[source.filer] # deprecated. Only useful with "weed filer.replicate"
|
||||
enabled = true
|
||||
grpcAddress = "localhost:18888"
|
||||
# all files under this directory tree are replicated.
|
||||
# this is not a directory on your hard drive, but on your filer.
|
||||
# i.e., all files with this "prefix" are sent to notification message queue.
|
||||
directory = "/buckets"
|
||||
|
||||
[sink.local]
|
||||
enabled = false
|
||||
directory = "/data"
|
||||
# all replicated files are under modified time as yyyy-mm-dd directories
|
||||
# so each date directory contains all new and updated files.
|
||||
is_incremental = false
|
||||
|
||||
[sink.filer]
|
||||
enabled = false
|
||||
grpcAddress = "localhost:18888"
|
||||
# all replicated files are under this directory tree
|
||||
# this is not a directory on your hard drive, but on your filer.
|
||||
# i.e., all received files will be "prefixed" to this directory.
|
||||
directory = "/backup"
|
||||
replication = ""
|
||||
collection = ""
|
||||
ttlSec = 0
|
||||
is_incremental = false
|
||||
|
||||
[sink.s3]
|
||||
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
|
||||
# default loads credentials from the shared credentials file (~/.aws/credentials).
|
||||
enabled = false
|
||||
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
region = "us-east-2"
|
||||
bucket = "your_bucket_name" # an existing bucket
|
||||
directory = "/" # destination directory
|
||||
endpoint = ""
|
||||
is_incremental = false
|
||||
|
||||
[sink.google_cloud_storage]
|
||||
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
|
||||
enabled = false
|
||||
google_application_credentials = "/path/to/x.json" # path to json credential file
|
||||
bucket = "your_bucket_seaweedfs" # an existing bucket
|
||||
directory = "/" # destination directory
|
||||
is_incremental = false
|
||||
|
||||
[sink.azure]
|
||||
# experimental, let me know if it works
|
||||
enabled = false
|
||||
account_name = ""
|
||||
account_key = ""
|
||||
container = "mycontainer" # an existing container
|
||||
directory = "/" # destination directory
|
||||
is_incremental = false
|
||||
|
||||
[sink.backblaze]
|
||||
enabled = false
|
||||
b2_account_id = ""
|
||||
b2_master_application_key = ""
|
||||
bucket = "mybucket" # an existing bucket
|
||||
directory = "/" # destination directory
|
||||
is_incremental = false
|
||||
|
||||
`
|
||||
|
||||
SECURITY_TOML_EXAMPLE = `
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./security.toml
|
||||
# $HOME/.seaweedfs/security.toml
|
||||
# /etc/seaweedfs/security.toml
|
||||
# this file is read by master, volume server, and filer
|
||||
|
||||
# the jwt signing key is read by master and volume server.
|
||||
# a jwt defaults to expire after 10 seconds.
|
||||
[jwt.signing]
|
||||
key = ""
|
||||
expires_after_seconds = 10 # seconds
|
||||
|
||||
# jwt for read is only supported with master+volume setup. Filer does not support this mode.
|
||||
[jwt.signing.read]
|
||||
key = ""
|
||||
expires_after_seconds = 10 # seconds
|
||||
|
||||
# all grpc tls authentications are mutual
|
||||
# the values for the following ca, cert, and key are paths to the PERM files.
|
||||
# the host name is not checked, so the PERM files can be shared.
|
||||
[grpc]
|
||||
ca = ""
|
||||
# Set wildcard domain for enable TLS authentication by common names
|
||||
allowed_wildcard_domain = "" # .mycompany.com
|
||||
|
||||
[grpc.volume]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
[grpc.master]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
[grpc.filer]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
[grpc.msg_broker]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
# use this for any place needs a grpc client
|
||||
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
|
||||
[grpc.client]
|
||||
cert = ""
|
||||
key = ""
|
||||
|
||||
# volume server https options
|
||||
# Note: work in progress!
|
||||
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
|
||||
[https.client]
|
||||
enabled = true
|
||||
[https.volume]
|
||||
cert = ""
|
||||
key = ""
|
||||
|
||||
|
||||
`
|
||||
|
||||
MASTER_TOML_EXAMPLE = `
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./master.toml
|
||||
# $HOME/.seaweedfs/master.toml
|
||||
# /etc/seaweedfs/master.toml
|
||||
# this file is read by master
|
||||
|
||||
[master.maintenance]
|
||||
# periodically run these scripts are the same as running them from 'weed shell'
|
||||
scripts = """
|
||||
lock
|
||||
ec.encode -fullPercent=95 -quietFor=1h
|
||||
ec.rebuild -force
|
||||
ec.balance -force
|
||||
volume.balance -force
|
||||
volume.fix.replication
|
||||
unlock
|
||||
"""
|
||||
sleep_minutes = 17 # sleep minutes between each script execution
|
||||
|
||||
[master.filer]
|
||||
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
|
||||
|
||||
|
||||
[master.sequencer]
|
||||
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
|
||||
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
|
||||
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
|
||||
sequencer_etcd_urls = "http://127.0.0.1:2379"
|
||||
|
||||
|
||||
# configurations for tiered cloud storage
|
||||
# old volumes are transparently moved to cloud for cost efficiency
|
||||
[storage.backend]
|
||||
[storage.backend.s3.default]
|
||||
enabled = false
|
||||
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
region = "us-east-2"
|
||||
bucket = "your_bucket_name" # an existing bucket
|
||||
endpoint = ""
|
||||
|
||||
# create this number of logical volumes if no more writable volumes
|
||||
# count_x means how many copies of data.
|
||||
# e.g.:
|
||||
# 000 has only one copy, copy_1
|
||||
# 010 and 001 has two copies, copy_2
|
||||
# 011 has only 3 copies, copy_3
|
||||
[master.volume_growth]
|
||||
copy_1 = 7 # create 1 x 7 = 7 actual volumes
|
||||
copy_2 = 6 # create 2 x 6 = 12 actual volumes
|
||||
copy_3 = 3 # create 3 x 3 = 9 actual volumes
|
||||
copy_other = 1 # create n x 1 = n actual volumes
|
||||
|
||||
# configuration flags for replication
|
||||
[master.replication]
|
||||
# any replication counts should be considered minimums. If you specify 010 and
|
||||
# have 3 different racks, that's still considered writable. Writes will still
|
||||
# try to replicate to all available volumes. You should only use this option
|
||||
# if you are doing your own replication or periodic sync of volumes.
|
||||
treat_replication_as_minimums = false
|
||||
|
||||
`
|
||||
SHELL_TOML_EXAMPLE = `
|
||||
|
||||
[cluster]
|
||||
default = "c1"
|
||||
|
||||
[cluster.c1]
|
||||
master = "localhost:9333" # comma-separated master servers
|
||||
filer = "localhost:8888" # filer host and port
|
||||
|
||||
[cluster.c2]
|
||||
master = ""
|
||||
filer = ""
|
||||
|
||||
`
|
||||
)
|
||||
|
|
21
weed/command/scaffold/example.go
Normal file
21
weed/command/scaffold/example.go
Normal file
|
@ -0,0 +1,21 @@
|
|||
package scaffold
|
||||
|
||||
import _ "embed"
|
||||
|
||||
//go:embed filer.toml
|
||||
var Filer string
|
||||
|
||||
//go:embed notification.toml
|
||||
var Notification string
|
||||
|
||||
//go:embed replication.toml
|
||||
var Replication string
|
||||
|
||||
//go:embed security.toml
|
||||
var Security string
|
||||
|
||||
//go:embed master.toml
|
||||
var Master string
|
||||
|
||||
//go:embed shell.toml
|
||||
var Shell string
|
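With this change the scaffold TOML examples live as real files and are compiled into the binary via go:embed (Go 1.16+), which is what lets `weed scaffold -config=filer` print them without any files on disk. A minimal sketch of the same pattern, assuming an example.toml sitting next to the source file:

```go
package main

import (
	_ "embed"
	"fmt"
	"io/ioutil"
)

//go:embed example.toml
var exampleConfig string // contents of example.toml, baked in at build time

func main() {
	// Print the embedded template, or write it out the way `weed scaffold -output=.` does.
	fmt.Println(exampleConfig)
	_ = ioutil.WriteFile("example.generated.toml", []byte(exampleConfig), 0644)
}
```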
232
weed/command/scaffold/filer.toml
Normal file
232
weed/command/scaffold/filer.toml
Normal file
|
@ -0,0 +1,232 @@
|
|||
# A sample TOML config file for SeaweedFS filer store
|
||||
# Used with "weed filer" or "weed server -filer"
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./filer.toml
|
||||
# $HOME/.seaweedfs/filer.toml
|
||||
# /etc/seaweedfs/filer.toml
|
||||
|
||||
####################################################
|
||||
# Customizable filer server options
|
||||
####################################################
|
||||
[filer.options]
|
||||
# with http DELETE, by default the filer would check whether a folder is empty.
|
||||
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
|
||||
recursive_delete = false
|
||||
|
||||
####################################################
|
||||
# The following are filer store options
|
||||
####################################################
|
||||
|
||||
[leveldb2]
|
||||
# local on disk, mostly for simple single-machine setup, fairly scalable
|
||||
# faster than previous leveldb, recommended.
|
||||
enabled = true
|
||||
dir = "./filerldb2" # directory to store level db files
|
||||
|
||||
[leveldb3]
|
||||
# similar to leveldb2.
|
||||
# each bucket has its own meta store.
|
||||
enabled = false
|
||||
dir = "./filerldb3" # directory to store level db files
|
||||
|
||||
[rocksdb]
|
||||
# local on disk, similar to leveldb
|
||||
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
|
||||
enabled = false
|
||||
dir = "./filerrdb" # directory to store rocksdb files
|
||||
|
||||
[sqlite]
|
||||
# local on disk, similar to leveldb
|
||||
enabled = false
|
||||
dbFile = "./filer.db" # sqlite db file
|
||||
|
||||
[mysql] # or memsql, tidb
|
||||
# CREATE TABLE IF NOT EXISTS filemeta (
|
||||
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
|
||||
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
|
||||
# directory TEXT COMMENT 'full path to parent directory',
|
||||
# meta LONGBLOB,
|
||||
# PRIMARY KEY (dirhash, name)
|
||||
# ) DEFAULT CHARSET=utf8;
|
||||
|
||||
enabled = false
|
||||
hostname = "localhost"
|
||||
port = 3306
|
||||
username = "root"
|
||||
password = ""
|
||||
database = "" # create or use an existing database
|
||||
connection_max_idle = 2
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
interpolateParams = false
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
|
||||
|
||||
[mysql2] # or memsql, tidb
|
||||
enabled = false
|
||||
createTable = """
|
||||
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
|
||||
dirhash BIGINT,
|
||||
name VARCHAR(1000) BINARY,
|
||||
directory TEXT,
|
||||
meta LONGBLOB,
|
||||
PRIMARY KEY (dirhash, name)
|
||||
) DEFAULT CHARSET=utf8;
|
||||
"""
|
||||
hostname = "localhost"
|
||||
port = 3306
|
||||
username = "root"
|
||||
password = ""
|
||||
database = "" # create or use an existing database
|
||||
connection_max_idle = 2
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
interpolateParams = false
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
|
||||
|
||||
[postgres] # or cockroachdb, YugabyteDB
|
||||
# CREATE TABLE IF NOT EXISTS filemeta (
|
||||
# dirhash BIGINT,
|
||||
# name VARCHAR(65535),
|
||||
# directory VARCHAR(65535),
|
||||
# meta bytea,
|
||||
# PRIMARY KEY (dirhash, name)
|
||||
# );
|
||||
enabled = false
|
||||
hostname = "localhost"
|
||||
port = 5432
|
||||
username = "postgres"
|
||||
password = ""
|
||||
database = "postgres" # create or use an existing database
|
||||
schema = ""
|
||||
sslmode = "disable"
|
||||
connection_max_idle = 100
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
|
||||
|
||||
[postgres2]
|
||||
enabled = false
|
||||
createTable = """
|
||||
CREATE TABLE IF NOT EXISTS "%s" (
|
||||
dirhash BIGINT,
|
||||
name VARCHAR(65535),
|
||||
directory VARCHAR(65535),
|
||||
meta bytea,
|
||||
PRIMARY KEY (dirhash, name)
|
||||
);
|
||||
"""
|
||||
hostname = "localhost"
|
||||
port = 5432
|
||||
username = "postgres"
|
||||
password = ""
|
||||
database = "postgres" # create or use an existing database
|
||||
schema = ""
|
||||
sslmode = "disable"
|
||||
connection_max_idle = 100
|
||||
connection_max_open = 100
|
||||
connection_max_lifetime_seconds = 0
|
||||
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
|
||||
enableUpsert = true
|
||||
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
|
||||
|
||||
[cassandra]
|
||||
# CREATE TABLE filemeta (
|
||||
# directory varchar,
|
||||
# name varchar,
|
||||
# meta blob,
|
||||
# PRIMARY KEY (directory, name)
|
||||
# ) WITH CLUSTERING ORDER BY (name ASC);
|
||||
enabled = false
|
||||
keyspace = "seaweedfs"
|
||||
hosts = [
|
||||
"localhost:9042",
|
||||
]
|
||||
username = ""
|
||||
password = ""
|
||||
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
|
||||
superLargeDirectories = []
|
||||
# Name of the datacenter local to this filer, used as host selection fallback.
|
||||
localDC = ""
|
||||
|
||||
[hbase]
|
||||
enabled = false
|
||||
zkquorum = ""
|
||||
table = "seaweedfs"
|
||||
|
||||
[redis2]
|
||||
enabled = false
|
||||
address = "localhost:6379"
|
||||
password = ""
|
||||
database = 0
|
||||
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
|
||||
superLargeDirectories = []
|
||||
|
||||
[redis_cluster2]
|
||||
enabled = false
|
||||
addresses = [
|
||||
"localhost:30001",
|
||||
"localhost:30002",
|
||||
"localhost:30003",
|
||||
"localhost:30004",
|
||||
"localhost:30005",
|
||||
"localhost:30006",
|
||||
]
|
||||
password = ""
|
||||
# allows reads from slave servers or the master, but all writes still go to the master
|
||||
readOnly = false
|
||||
# automatically use the closest Redis server for reads
|
||||
routeByLatency = false
|
||||
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
|
||||
superLargeDirectories = []
|
||||
|
||||
[etcd]
|
||||
enabled = false
|
||||
servers = "localhost:2379"
|
||||
timeout = "3s"
|
||||
|
||||
[mongodb]
|
||||
enabled = false
|
||||
uri = "mongodb://localhost:27017"
|
||||
option_pool_size = 0
|
||||
database = "seaweedfs"
|
||||
|
||||
[elastic7]
|
||||
enabled = false
|
||||
servers = [
|
||||
"http://localhost1:9200",
|
||||
"http://localhost2:9200",
|
||||
"http://localhost3:9200",
|
||||
]
|
||||
username = ""
|
||||
password = ""
|
||||
sniff_enabled = false
|
||||
healthcheck_enabled = false
|
||||
# increase the value is recommend, be sure the value in Elastic is greater or equal here
|
||||
index.max_result_window = 10000
|
||||
|
||||
|
||||
|
||||
##########################
|
||||
##########################
|
||||
# To add path-specific filer store:
|
||||
#
|
||||
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
|
||||
# 2. Add a location configuraiton. E.g., location = "/tmp/"
|
||||
# 3. Copy and customize all other configurations.
|
||||
# Make sure they are not the same if using the same store type!
|
||||
# 4. Set enabled to true
|
||||
#
|
||||
# The following is just using redis as an example
|
||||
##########################
|
||||
[redis2.tmp]
|
||||
enabled = false
|
||||
location = "/tmp/"
|
||||
address = "localhost:6379"
|
||||
password = ""
|
||||
database = 1
|
63
weed/command/scaffold/master.toml
Normal file
63
weed/command/scaffold/master.toml
Normal file
|
@ -0,0 +1,63 @@
|
|||
# Put this file to one of the location, with descending priority
|
||||
# ./master.toml
|
||||
# $HOME/.seaweedfs/master.toml
|
||||
# /etc/seaweedfs/master.toml
|
||||
# this file is read by master
|
||||
|
||||
[master.maintenance]
|
||||
# periodically run these scripts are the same as running them from 'weed shell'
|
||||
scripts = """
|
||||
lock
|
||||
ec.encode -fullPercent=95 -quietFor=1h
|
||||
ec.rebuild -force
|
||||
ec.balance -force
|
||||
volume.deleteEmpty -quietFor=24h -force
|
||||
volume.balance -force
|
||||
volume.fix.replication
|
||||
unlock
|
||||
"""
|
||||
sleep_minutes = 17 # sleep minutes between each script execution
|
||||
|
||||
[master.filer]
|
||||
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
|
||||
|
||||
|
||||
[master.sequencer]
|
||||
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
|
||||
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
|
||||
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
|
||||
sequencer_etcd_urls = "http://127.0.0.1:2379"
|
||||
# when sequencer.type = snowflake, the snowflake id must be different from other masters
|
||||
sequencer_snowflake_id = 0 # any number between 1~1023
|
||||
|
||||
|
||||
# configurations for tiered cloud storage
|
||||
# old volumes are transparently moved to cloud for cost efficiency
|
||||
[storage.backend]
|
||||
[storage.backend.s3.default]
|
||||
enabled = false
|
||||
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
region = "us-east-2"
|
||||
bucket = "your_bucket_name" # an existing bucket
|
||||
endpoint = ""
|
||||
|
||||
# create this number of logical volumes if no more writable volumes
|
||||
# count_x means how many copies of data.
|
||||
# e.g.:
|
||||
# 000 has only one copy, copy_1
|
||||
# 010 and 001 has two copies, copy_2
|
||||
# 011 has only 3 copies, copy_3
|
||||
[master.volume_growth]
|
||||
copy_1 = 7 # create 1 x 7 = 7 actual volumes
|
||||
copy_2 = 6 # create 2 x 6 = 12 actual volumes
|
||||
copy_3 = 3 # create 3 x 3 = 9 actual volumes
|
||||
copy_other = 1 # create n x 1 = n actual volumes
|
||||
|
||||
# configuration flags for replication
|
||||
[master.replication]
|
||||
# any replication counts should be considered minimums. If you specify 010 and
|
||||
# have 3 different racks, that's still considered writable. Writes will still
|
||||
# try to replicate to all available volumes. You should only use this option
|
||||
# if you are doing your own replication or periodic sync of volumes.
|
||||
treat_replication_as_minimums = false
|
54
weed/command/scaffold/notification.toml
Normal file
54
weed/command/scaffold/notification.toml
Normal file
|
@ -0,0 +1,54 @@
|
|||
# A sample TOML config file for SeaweedFS filer store
|
||||
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./notification.toml
|
||||
# $HOME/.seaweedfs/notification.toml
|
||||
# /etc/seaweedfs/notification.toml
|
||||
|
||||
####################################################
|
||||
# notification
|
||||
# send and receive filer updates for each file to an external message queue
|
||||
####################################################
|
||||
[notification.log]
|
||||
# this is only for debugging perpose and does not work with "weed filer.replicate"
|
||||
enabled = false
|
||||
|
||||
|
||||
[notification.kafka]
|
||||
enabled = false
|
||||
hosts = [
|
||||
"localhost:9092"
|
||||
]
|
||||
topic = "seaweedfs_filer"
|
||||
offsetFile = "./last.offset"
|
||||
offsetSaveIntervalSeconds = 10
|
||||
|
||||
|
||||
[notification.aws_sqs]
|
||||
# experimental, let me know if it works
|
||||
enabled = false
|
||||
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
region = "us-east-2"
|
||||
sqs_queue_name = "my_filer_queue" # an existing queue name
|
||||
|
||||
|
||||
[notification.google_pub_sub]
|
||||
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
|
||||
enabled = false
|
||||
google_application_credentials = "/path/to/x.json" # path to json credential file
|
||||
project_id = "" # an existing project id
|
||||
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
|
||||
|
||||
[notification.gocdk_pub_sub]
|
||||
# The Go Cloud Development Kit (https://gocloud.dev).
|
||||
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
|
||||
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
|
||||
enabled = false
|
||||
# This URL will Dial the RabbitMQ server at the URL in the environment
|
||||
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
|
||||
# The exchange must have already been created by some other means, like
|
||||
# the RabbitMQ management plugin. Сreate myexchange of type fanout and myqueue then
|
||||
# create binding myexchange => myqueue
|
||||
topic_url = "rabbit://myexchange"
|
||||
sub_url = "rabbit://myqueue"
|
71
weed/command/scaffold/replication.toml
Normal file
71
weed/command/scaffold/replication.toml
Normal file
|
@ -0,0 +1,71 @@
|
|||
# A sample TOML config file for replicating SeaweedFS filer
|
||||
# Used with "weed filer.backup"
|
||||
# Using with "weed filer.replicate" is deprecated.
|
||||
# Put this file to one of the location, with descending priority
|
||||
# ./replication.toml
|
||||
# $HOME/.seaweedfs/replication.toml
|
||||
# /etc/seaweedfs/replication.toml
|
||||
|
||||
[source.filer] # deprecated. Only useful with "weed filer.replicate"
|
||||
enabled = true
|
||||
grpcAddress = "localhost:18888"
|
||||
# all files under this directory tree are replicated.
|
||||
# this is not a directory on your hard drive, but on your filer.
|
||||
# i.e., all files with this "prefix" are sent to notification message queue.
|
||||
directory = "/buckets"
|
||||
|
||||
[sink.local]
|
||||
enabled = false
|
||||
directory = "/data"
|
||||
# all replicated files are under modified time as yyyy-mm-dd directories
|
||||
# so each date directory contains all new and updated files.
|
||||
is_incremental = false
|
||||
|
||||
[sink.filer]
|
||||
enabled = false
|
||||
grpcAddress = "localhost:18888"
|
||||
# all replicated files are under this directory tree
|
||||
# this is not a directory on your hard drive, but on your filer.
|
||||
# i.e., all received files will be "prefixed" to this directory.
|
||||
directory = "/backup"
|
||||
replication = ""
|
||||
collection = ""
|
||||
ttlSec = 0
|
||||
is_incremental = false
|
||||
|
||||
[sink.s3]
|
||||
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
|
||||
# default loads credentials from the shared credentials file (~/.aws/credentials).
|
||||
enabled = false
|
||||
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
|
||||
region = "us-east-2"
|
||||
bucket = "your_bucket_name" # an existing bucket
|
||||
directory = "/" # destination directory
|
||||
endpoint = ""
|
||||
is_incremental = false
|
||||
|
||||
[sink.google_cloud_storage]
|
||||
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
|
||||
enabled = false
|
||||
google_application_credentials = "/path/to/x.json" # path to json credential file
|
||||
bucket = "your_bucket_seaweedfs" # an existing bucket
|
||||
directory = "/" # destination directory
|
||||
is_incremental = false
|
||||
|
||||
[sink.azure]
|
||||
# experimental, let me know if it works
|
||||
enabled = false
|
||||
account_name = ""
|
||||
account_key = ""
|
||||
container = "mycontainer" # an existing container
|
||||
directory = "/" # destination directory
|
||||
is_incremental = false
|
||||
|
||||
[sink.backblaze]
|
||||
enabled = false
|
||||
b2_account_id = ""
|
||||
b2_master_application_key = ""
|
||||
bucket = "mybucket" # an existing bucket
|
||||
directory = "/" # destination directory
|
||||
is_incremental = false
|
60
weed/command/scaffold/security.toml
Normal file
60
weed/command/scaffold/security.toml
Normal file
|
@ -0,0 +1,60 @@
|
|||
# Put this file to one of the location, with descending priority
|
||||
# ./security.toml
|
||||
# $HOME/.seaweedfs/security.toml
|
||||
# /etc/seaweedfs/security.toml
|
||||
# this file is read by master, volume server, and filer
|
||||
|
||||
# the jwt signing key is read by master and volume server.
|
||||
# a jwt defaults to expire after 10 seconds.
|
||||
[jwt.signing]
|
||||
key = ""
|
||||
expires_after_seconds = 10 # seconds
|
||||
|
||||
# jwt for read is only supported with master+volume setup. Filer does not support this mode.
|
||||
[jwt.signing.read]
|
||||
key = ""
|
||||
expires_after_seconds = 10 # seconds
|
||||
|
||||
# all grpc tls authentications are mutual
|
||||
# the values for the following ca, cert, and key are paths to the PERM files.
|
||||
# the host name is not checked, so the PERM files can be shared.
|
||||
[grpc]
|
||||
ca = ""
|
||||
# Set wildcard domain for enable TLS authentication by common names
|
||||
allowed_wildcard_domain = "" # .mycompany.com
|
||||
|
||||
[grpc.volume]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
[grpc.master]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
[grpc.filer]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
[grpc.msg_broker]
|
||||
cert = ""
|
||||
key = ""
|
||||
allowed_commonNames = "" # comma-separated SSL certificate common names
|
||||
|
||||
# use this for any place needs a grpc client
|
||||
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
|
||||
[grpc.client]
|
||||
cert = ""
|
||||
key = ""
|
||||
|
||||
# volume server https options
|
||||
# Note: work in progress!
|
||||
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
|
||||
[https.client]
|
||||
enabled = true
|
||||
[https.volume]
|
||||
cert = ""
|
||||
key = ""
|
||||
|
10
weed/command/scaffold/shell.toml
Normal file
10
weed/command/scaffold/shell.toml
Normal file
|
@ -0,0 +1,10 @@
|
|||
[cluster]
|
||||
default = "c1"
|
||||
|
||||
[cluster.c1]
|
||||
master = "localhost:9333" # comma-separated master servers
|
||||
filer = "localhost:8888" # filer host and port
|
||||
|
||||
[cluster.c2]
|
||||
master = ""
|
||||
filer = ""
|
|
@@ -3,6 +3,7 @@ package command
import (
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/util/grace"
	"net/http"
	"os"
	"strings"
	"time"

@@ -16,6 +17,8 @@ import (
type ServerOptions struct {
	cpuprofile *string
	memprofile *string
	debug      *bool
	debugPort  *int
	v          VolumeServerOptions
}

@@ -78,6 +81,8 @@ var (
func init() {
	serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file")
	serverOptions.memprofile = cmdServer.Flag.String("memprofile", "", "memory profile output file")
	serverOptions.debug = cmdServer.Flag.Bool("debug", false, "serves runtime profiling data, e.g., http://localhost:6060/debug/pprof/goroutine?debug=2")
	serverOptions.debugPort = cmdServer.Flag.Int("debug.port", 6060, "http port for debugging")

	masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
	masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")

@@ -107,10 +112,11 @@ func init() {
	serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
	serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
	serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
	serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
	serverOptions.v.readMode = cmdServer.Flag.String("volume.readMode", "proxy", "[local|proxy|redirect] how to deal with non-local volume: 'not found|read in remote node|redirect volume location'.")
	serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
	serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
	serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
	serverOptions.v.concurrentDownloadLimitMB = cmdServer.Flag.Int("volume.concurrentDownloadLimitMB", 64, "limit total concurrent download size")
	serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
	serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
	serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")

@@ -139,6 +145,10 @@ func init() {

func runServer(cmd *Command, args []string) bool {

	if *serverOptions.debug {
		go http.ListenAndServe(fmt.Sprintf(":%d", *serverOptions.debugPort), nil)
	}

	util.LoadConfiguration("security", false)
	util.LoadConfiguration("master", false)

@@ -245,7 +255,7 @@ func runServer(cmd *Command, args []string) bool {

	// start volume server
	if *isStartingVolumeServer {
		minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
		minFreeSpaces := util.MustParseMinFreeSpace(*volumeMinFreeSpace, *volumeMinFreeSpacePercent)
		go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, minFreeSpaces)
	}
@ -55,6 +55,7 @@ func runShell(command *Command, args []string) bool {
|
|||
|
||||
var err error
|
||||
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
|
||||
shellOptions.FilerAddress = *shellInitialFiler
|
||||
if err != nil {
|
||||
fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
|
||||
return false
|
||||
|
|
|
@ -71,20 +71,20 @@ func runUpload(cmd *Command, args []string) bool {
|
|||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
defaultCollection, err := readMasterConfiguration(grpcDialOption, *upload.master)
|
||||
defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
|
||||
if err != nil {
|
||||
fmt.Printf("upload: %v", err)
|
||||
return false
|
||||
}
|
||||
if *upload.replication == "" {
|
||||
*upload.replication = defaultCollection
|
||||
*upload.replication = defaultReplication
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
if *upload.dir == "" {
|
||||
return false
|
||||
}
|
||||
filepath.Walk(util.ResolvePath(*upload.dir), func(path string, info os.FileInfo, err error) error {
|
||||
err = filepath.Walk(util.ResolvePath(*upload.dir), func(path string, info os.FileInfo, err error) error {
|
||||
if err == nil {
|
||||
if !info.IsDir() {
|
||||
if *upload.include != "" {
|
||||
|
@ -108,12 +108,21 @@ func runUpload(cmd *Command, args []string) bool {
|
|||
}
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
parts, e := operation.NewFileParts(args)
|
||||
if e != nil {
|
||||
fmt.Println(e.Error())
|
||||
return false
|
||||
}
|
||||
results, err := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return false
|
||||
}
|
||||
results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
|
||||
bytes, _ := json.Marshal(results)
|
||||
fmt.Println(string(bytes))
|
||||
}
|
||||
|
|
|
@ -35,31 +35,32 @@ var (
|
|||
)
|
||||
|
||||
type VolumeServerOptions struct {
|
||||
port *int
|
||||
publicPort *int
|
||||
folders []string
|
||||
folderMaxLimits []int
|
||||
idxFolder *string
|
||||
ip *string
|
||||
publicUrl *string
|
||||
bindIp *string
|
||||
masters *string
|
||||
idleConnectionTimeout *int
|
||||
dataCenter *string
|
||||
rack *string
|
||||
whiteList []string
|
||||
indexType *string
|
||||
diskType *string
|
||||
fixJpgOrientation *bool
|
||||
readRedirect *bool
|
||||
cpuProfile *string
|
||||
memProfile *string
|
||||
compactionMBPerSecond *int
|
||||
fileSizeLimitMB *int
|
||||
concurrentUploadLimitMB *int
|
||||
pprof *bool
|
||||
preStopSeconds *int
|
||||
metricsHttpPort *int
|
||||
port *int
|
||||
publicPort *int
|
||||
folders []string
|
||||
folderMaxLimits []int
|
||||
idxFolder *string
|
||||
ip *string
|
||||
publicUrl *string
|
||||
bindIp *string
|
||||
masters *string
|
||||
idleConnectionTimeout *int
|
||||
dataCenter *string
|
||||
rack *string
|
||||
whiteList []string
|
||||
indexType *string
|
||||
diskType *string
|
||||
fixJpgOrientation *bool
|
||||
readMode *string
|
||||
cpuProfile *string
|
||||
memProfile *string
|
||||
compactionMBPerSecond *int
|
||||
fileSizeLimitMB *int
|
||||
concurrentUploadLimitMB *int
|
||||
concurrentDownloadLimitMB *int
|
||||
pprof *bool
|
||||
preStopSeconds *int
|
||||
metricsHttpPort *int
|
||||
// pulseSeconds *int
|
||||
enableTcp *bool
|
||||
}
|
||||
|
@ -80,12 +81,13 @@ func init() {
|
|||
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
|
||||
v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
||||
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
|
||||
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
|
||||
v.readMode = cmdVolume.Flag.String("readMode", "proxy", "[local|proxy|redirect] how to deal with non-local volume: 'not found|proxy to remote node|redirect volume location'.")
|
||||
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
|
||||
v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
|
||||
v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
|
||||
v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory")
|
||||
v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
|
||||
v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 256, "limit total concurrent upload size")
|
||||
v.concurrentDownloadLimitMB = cmdVolume.Flag.Int("concurrentDownloadLimitMB", 256, "limit total concurrent download size")
|
||||
v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
|
||||
v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
|
||||
v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
|
||||
|
@ -228,10 +230,11 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
|||
volumeNeedleMapKind,
|
||||
strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
|
||||
v.whiteList,
|
||||
*v.fixJpgOrientation, *v.readRedirect,
|
||||
*v.fixJpgOrientation, *v.readMode,
|
||||
*v.compactionMBPerSecond,
|
||||
*v.fileSizeLimitMB,
|
||||
int64(*v.concurrentUploadLimitMB)*1024*1024,
|
||||
int64(*v.concurrentDownloadLimitMB)*1024*1024,
|
||||
)
|
||||
// starting grpc server
|
||||
grpcS := v.startGrpcService(volumeServer)
|
||||
|
@ -259,6 +262,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
|
|||
|
||||
// Stop heartbeats
|
||||
if !volumeServer.StopHeartbeat() {
|
||||
volumeServer.SetStopping()
|
||||
glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
|
||||
time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
|
||||
}
|
||||
|
|
|
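Both the server and volume commands above replace the readRedirect boolean with a three-way readMode flag. The flag help text maps the modes to 'not found | proxy to remote node | redirect volume location'; the following is a hypothetical sketch of that decision, not the volume server's actual handler:

package main

import "fmt"

// handleNonLocalRead sketches what each -readMode value implies when a read
// arrives at a volume server that does not host the requested volume.
func handleNonLocalRead(readMode string) string {
	switch readMode {
	case "local":
		return "respond with 404 not found"
	case "proxy":
		return "fetch the data from the owning volume server and relay it"
	case "redirect":
		return "respond with an HTTP redirect to the owning volume server"
	default:
		return "unknown readMode: " + readMode
	}
}

func main() {
	for _, mode := range []string{"local", "proxy", "redirect"} {
		fmt.Println(mode+":", handleNonLocalRead(mode))
	}
}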
@ -32,6 +32,9 @@ type AbstractSqlStore struct {
|
|||
dbsLock sync.Mutex
|
||||
}
|
||||
|
||||
func (store *AbstractSqlStore) CanDropWholeBucket() bool {
|
||||
return store.SupportBucketTable
|
||||
}
|
||||
func (store *AbstractSqlStore) OnBucketCreation(bucket string) {
|
||||
store.dbsLock.Lock()
|
||||
defer store.dbsLock.Unlock()
|
||||
|
@ -277,7 +280,6 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
|
|||
}
|
||||
|
||||
glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
|
||||
|
||||
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
|
||||
|
@ -287,7 +289,6 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
|
|||
if err != nil {
|
||||
return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ func (store *CassandraStore) Initialize(configuration util.Configuration, prefix
|
|||
configuration.GetString(prefix+"username"),
|
||||
configuration.GetString(prefix+"password"),
|
||||
configuration.GetStringSlice(prefix+"superLargeDirectories"),
|
||||
configuration.GetString(prefix+"localDC"),
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -40,13 +41,19 @@ func (store *CassandraStore) isSuperLargeDirectory(dir string) (dirHash string,
|
|||
return
|
||||
}
|
||||
|
||||
func (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string) (err error) {
|
||||
func (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string, localDC string) (err error) {
|
||||
store.cluster = gocql.NewCluster(hosts...)
|
||||
if username != "" && password != "" {
|
||||
store.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password}
|
||||
}
|
||||
store.cluster.Keyspace = keyspace
|
||||
fallback := gocql.RoundRobinHostPolicy()
|
||||
if localDC != "" {
|
||||
fallback = gocql.DCAwareRoundRobinPolicy(localDC)
|
||||
}
|
||||
store.cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)
|
||||
store.cluster.Consistency = gocql.LocalQuorum
|
||||
|
||||
store.session, err = store.cluster.CreateSession()
|
||||
if err != nil {
|
||||
glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
|
||||
|
@ -117,7 +124,7 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPa
|
|||
var data []byte
|
||||
if err := store.session.Query(
|
||||
"SELECT meta FROM filemeta WHERE directory=? AND name=?",
|
||||
dir, name).Consistency(gocql.One).Scan(&data); err != nil {
|
||||
dir, name).Scan(&data); err != nil {
|
||||
if err != gocql.ErrNotFound {
|
||||
return nil, filer_pb.ErrNotFound
|
||||
}
|
||||
|
|
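The Cassandra hunks above add an optional localDC so the driver prefers coordinators in the local data center while staying token-aware. A minimal standalone sketch of the same policy selection with gocql, using placeholder hosts and keyspace:

package main

import "github.com/gocql/gocql"

func newSession(hosts []string, keyspace, localDC string) (*gocql.Session, error) {
	cluster := gocql.NewCluster(hosts...)
	cluster.Keyspace = keyspace
	fallback := gocql.RoundRobinHostPolicy()
	if localDC != "" {
		// prefer nodes in the configured local data center
		fallback = gocql.DCAwareRoundRobinPolicy(localDC)
	}
	// token awareness routes each query to a replica that owns the partition
	cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)
	cluster.Consistency = gocql.LocalQuorum
	return cluster.CreateSession()
}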
|
@ -42,6 +42,7 @@ type Entry struct {
|
|||
HardLinkId HardLinkId
|
||||
HardLinkCounter int32
|
||||
Content []byte
|
||||
Remote *filer_pb.RemoteEntry
|
||||
}
|
||||
|
||||
func (entry *Entry) Size() uint64 {
|
||||
|
@ -56,20 +57,55 @@ func (entry *Entry) Timestamp() time.Time {
|
|||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) ShallowClone() *Entry {
|
||||
if entry == nil {
|
||||
return nil
|
||||
}
|
||||
newEntry := &Entry{}
|
||||
newEntry.FullPath = entry.FullPath
|
||||
newEntry.Attr = entry.Attr
|
||||
newEntry.Chunks = entry.Chunks
|
||||
newEntry.Extended = entry.Extended
|
||||
newEntry.HardLinkId = entry.HardLinkId
|
||||
newEntry.HardLinkCounter = entry.HardLinkCounter
|
||||
newEntry.Content = entry.Content
|
||||
newEntry.Remote = entry.Remote
|
||||
|
||||
return newEntry
|
||||
}
|
||||
|
||||
func (entry *Entry) ToProtoEntry() *filer_pb.Entry {
|
||||
if entry == nil {
|
||||
return nil
|
||||
}
|
||||
return &filer_pb.Entry{
|
||||
Name: entry.FullPath.Name(),
|
||||
IsDirectory: entry.IsDirectory(),
|
||||
Attributes: EntryAttributeToPb(entry),
|
||||
Chunks: entry.Chunks,
|
||||
Extended: entry.Extended,
|
||||
HardLinkId: entry.HardLinkId,
|
||||
HardLinkCounter: entry.HardLinkCounter,
|
||||
Content: entry.Content,
|
||||
message := &filer_pb.Entry{}
|
||||
message.Name = entry.FullPath.Name()
|
||||
entry.ToExistingProtoEntry(message)
|
||||
return message
|
||||
}
|
||||
|
||||
func (entry *Entry) ToExistingProtoEntry(message *filer_pb.Entry) {
|
||||
if entry == nil {
|
||||
return
|
||||
}
|
||||
message.IsDirectory = entry.IsDirectory()
|
||||
message.Attributes = EntryAttributeToPb(entry)
|
||||
message.Chunks = entry.Chunks
|
||||
message.Extended = entry.Extended
|
||||
message.HardLinkId = entry.HardLinkId
|
||||
message.HardLinkCounter = entry.HardLinkCounter
|
||||
message.Content = entry.Content
|
||||
message.RemoteEntry = entry.Remote
|
||||
}
|
||||
|
||||
func FromPbEntryToExistingEntry(message *filer_pb.Entry, fsEntry *Entry) {
|
||||
fsEntry.Attr = PbToEntryAttribute(message.Attributes)
|
||||
fsEntry.Chunks = message.Chunks
|
||||
fsEntry.Extended = message.Extended
|
||||
fsEntry.HardLinkId = HardLinkId(message.HardLinkId)
|
||||
fsEntry.HardLinkCounter = message.HardLinkCounter
|
||||
fsEntry.Content = message.Content
|
||||
fsEntry.Remote = message.RemoteEntry
|
||||
}
|
||||
|
||||
func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
|
||||
|
@ -83,26 +119,11 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
|
|||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Clone() *Entry {
|
||||
return &Entry{
|
||||
FullPath: entry.FullPath,
|
||||
Attr: entry.Attr,
|
||||
Chunks: entry.Chunks,
|
||||
Extended: entry.Extended,
|
||||
HardLinkId: entry.HardLinkId,
|
||||
HardLinkCounter: entry.HardLinkCounter,
|
||||
}
|
||||
}
|
||||
|
||||
func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
|
||||
return &Entry{
|
||||
FullPath: util.NewFullPath(dir, entry.Name),
|
||||
Attr: PbToEntryAttribute(entry.Attributes),
|
||||
Chunks: entry.Chunks,
|
||||
HardLinkId: HardLinkId(entry.HardLinkId),
|
||||
HardLinkCounter: entry.HardLinkCounter,
|
||||
Content: entry.Content,
|
||||
}
|
||||
t := &Entry{}
|
||||
t.FullPath = util.NewFullPath(dir, entry.Name)
|
||||
FromPbEntryToExistingEntry(entry, t)
|
||||
return t
|
||||
}
|
||||
|
||||
func maxUint64(x, y uint64) uint64 {
|
||||
|
|
|
@ -12,14 +12,8 @@ import (
|
|||
)
|
||||
|
||||
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
|
||||
message := &filer_pb.Entry{
|
||||
Attributes: EntryAttributeToPb(entry),
|
||||
Chunks: entry.Chunks,
|
||||
Extended: entry.Extended,
|
||||
HardLinkId: entry.HardLinkId,
|
||||
HardLinkCounter: entry.HardLinkCounter,
|
||||
Content: entry.Content,
|
||||
}
|
||||
message := &filer_pb.Entry{}
|
||||
entry.ToExistingProtoEntry(message)
|
||||
return proto.Marshal(message)
|
||||
}
|
||||
|
||||
|
@ -31,15 +25,7 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error {
|
|||
return fmt.Errorf("decoding value blob for %s: %v", entry.FullPath, err)
|
||||
}
|
||||
|
||||
entry.Attr = PbToEntryAttribute(message.Attributes)
|
||||
|
||||
entry.Extended = message.Extended
|
||||
|
||||
entry.Chunks = message.Chunks
|
||||
|
||||
entry.HardLinkId = message.HardLinkId
|
||||
entry.HardLinkCounter = message.HardLinkCounter
|
||||
entry.Content = message.Content
|
||||
FromPbEntryToExistingEntry(message, entry)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -129,6 +115,9 @@ func EqualEntry(a, b *Entry) bool {
|
|||
if !bytes.Equal(a.Content, b.Content) {
|
||||
return false
|
||||
}
|
||||
if !proto.Equal(a.Remote, b.Remote) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
|
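The entry refactor above funnels every Entry-to-protobuf copy through ToExistingProtoEntry and FromPbEntryToExistingEntry, so a newly added field such as Remote is serialized in one place instead of being repeated (and forgotten) in each caller. A small round-trip sketch through the codec functions shown above; the path and remote size are made up:

package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	entry := &filer.Entry{FullPath: util.FullPath("/buckets/b1/hello.txt")}
	entry.Remote = &filer_pb.RemoteEntry{RemoteSize: 1024}

	blob, err := entry.EncodeAttributesAndChunks() // marshals via ToExistingProtoEntry
	if err != nil {
		log.Fatal(err)
	}

	restored := &filer.Entry{FullPath: entry.FullPath}
	if err := restored.DecodeAttributesAndChunks(blob); err != nil { // fills fields via FromPbEntryToExistingEntry
		log.Fatal(err)
	}
	log.Printf("remote size survives the round trip: %d", restored.Remote.RemoteSize)
}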
@ -16,7 +16,7 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
ManifestBatch = 1000
|
||||
ManifestBatch = 10000
|
||||
)
|
||||
|
||||
func HasChunkManifest(chunks []*filer_pb.FileChunk) bool {
|
||||
|
@ -39,9 +39,14 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa
|
|||
return
|
||||
}
|
||||
|
||||
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
||||
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset, stopOffset int64) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
||||
// TODO maybe parallel this
|
||||
for _, chunk := range chunks {
|
||||
|
||||
if max(chunk.Offset, startOffset) >= min(chunk.Offset+int64(chunk.Size), stopOffset) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !chunk.IsChunkManifest {
|
||||
dataChunks = append(dataChunks, chunk)
|
||||
continue
|
||||
|
@ -54,7 +59,7 @@ func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chun
|
|||
|
||||
manifestChunks = append(manifestChunks, chunk)
|
||||
// recursive
|
||||
dchunks, mchunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks)
|
||||
dchunks, mchunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks, startOffset, stopOffset)
|
||||
if subErr != nil {
|
||||
return chunks, nil, subErr
|
||||
}
|
||||
|
|
|
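ResolveChunkManifest above now takes startOffset/stopOffset so manifest chunks that lie entirely outside the requested range are skipped instead of fetched. The skip condition is the usual half-open interval test; a self-contained sketch, with made-up chunk values, of the overlap predicate it negates:

package main

import "fmt"

type chunk struct {
	Offset int64
	Size   uint64
}

// overlaps reports whether [c.Offset, c.Offset+c.Size) intersects [start, stop).
// It is the inverse of the "continue" test added to ResolveChunkManifest above.
func overlaps(c chunk, start, stop int64) bool {
	return max64(c.Offset, start) < min64(c.Offset+int64(c.Size), stop)
}

func max64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func min64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	chunks := []chunk{{0, 100}, {100, 100}, {200, 100}}
	for _, c := range chunks {
		fmt.Println(c.Offset, overlaps(c, 150, 250)) // only the chunks at 100 and 200 overlap [150,250)
	}
}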
@ -53,7 +53,7 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
|
|||
|
||||
func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
|
||||
|
||||
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
|
||||
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)
|
||||
|
||||
fileIds := make(map[string]bool)
|
||||
for _, interval := range visibles {
|
||||
|
@ -72,11 +72,11 @@ func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks
|
|||
|
||||
func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
|
||||
|
||||
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
|
||||
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64)
|
||||
if aErr != nil {
|
||||
return nil, aErr
|
||||
}
|
||||
bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs)
|
||||
bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64)
|
||||
if bErr != nil {
|
||||
return nil, bErr
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ func (cv *ChunkView) IsFullChunk() bool {
|
|||
|
||||
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
|
||||
|
||||
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
|
||||
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)
|
||||
|
||||
return ViewFromVisibleIntervals(visibles, offset, size)
|
||||
|
||||
|
@ -221,9 +221,9 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
|
|||
|
||||
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
|
||||
// If the file chunk content is a chunk manifest
|
||||
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
|
||||
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) {
|
||||
|
||||
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
|
||||
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
|
||||
|
||||
sort.Slice(chunks, func(i, j int) bool {
|
||||
if chunks[i].Mtime == chunks[j].Mtime {
|
||||
|
|
|
@ -90,7 +90,7 @@ func TestRandomFileChunksCompact(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
visibles, _ := NonOverlappingVisibleIntervals(nil, chunks)
|
||||
visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64)
|
||||
|
||||
for _, v := range visibles {
|
||||
for x := v.start; x < v.stop; x++ {
|
||||
|
@ -227,7 +227,7 @@ func TestIntervalMerging(t *testing.T) {
|
|||
|
||||
for i, testcase := range testcases {
|
||||
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
|
||||
intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks)
|
||||
intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64)
|
||||
for x, interval := range intervals {
|
||||
log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
|
||||
i, x, interval.start, interval.stop, interval.fileId)
|
||||
|
|
|
@ -42,6 +42,7 @@ type Filer struct {
|
|||
MetaAggregator *MetaAggregator
|
||||
Signature int32
|
||||
FilerConf *FilerConf
|
||||
RemoteStorage *FilerRemoteStorage
|
||||
}
|
||||
|
||||
func NewFiler(masters []string, grpcDialOption grpc.DialOption,
|
||||
|
@ -51,8 +52,9 @@ func NewFiler(masters []string, grpcDialOption grpc.DialOption,
|
|||
fileIdDeletionQueue: util.NewUnboundedQueue(),
|
||||
GrpcDialOption: grpcDialOption,
|
||||
FilerConf: NewFilerConf(),
|
||||
RemoteStorage: NewFilerRemoteStorage(),
|
||||
}
|
||||
f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn)
|
||||
f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, notifyFn)
|
||||
f.metaLogCollection = collection
|
||||
f.metaLogReplication = replication
|
||||
|
||||
|
@ -207,7 +209,7 @@ func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, di
|
|||
Attr: Attr{
|
||||
Mtime: now,
|
||||
Crtime: now,
|
||||
Mode: os.ModeDir | entry.Mode | 0110,
|
||||
Mode: os.ModeDir | entry.Mode | 0111,
|
||||
Uid: entry.Uid,
|
||||
Gid: entry.Gid,
|
||||
Collection: entry.Collection,
|
||||
|
|
|
@ -3,6 +3,7 @@ package filer
|
|||
import (
|
||||
"context"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
|
@ -78,6 +79,9 @@ func (f *Filer) isBucket(entry *Entry) bool {
|
|||
if parent != f.DirBucketsPath {
|
||||
return false
|
||||
}
|
||||
if strings.HasPrefix(dirName, ".") {
|
||||
return false
|
||||
}
|
||||
|
||||
f.buckets.RLock()
|
||||
defer f.buckets.RUnlock()
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
const (
|
||||
DirectoryEtcRoot = "/etc"
|
||||
DirectoryEtcSeaweedFS = "/etc/seaweedfs"
|
||||
DirectoryEtcRemote = "/etc/remote"
|
||||
FilerConfName = "filer.conf"
|
||||
IamConfigDirecotry = "/etc/iam"
|
||||
IamIdentityFile = "identity.json"
|
||||
|
@ -126,6 +127,9 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
|
|||
if b.VolumeGrowthCount > 0 {
|
||||
a.VolumeGrowthCount = b.VolumeGrowthCount
|
||||
}
|
||||
if b.ReadOnly {
|
||||
a.ReadOnly = b.ReadOnly
|
||||
}
|
||||
}
|
||||
|
||||
func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
|
||||
|
|
|
@ -24,6 +24,18 @@ func TestFilerConf(t *testing.T) {
|
|||
LocationPrefix: "/buckets/",
|
||||
Replication: "001",
|
||||
},
|
||||
{
|
||||
LocationPrefix: "/buckets",
|
||||
ReadOnly: false,
|
||||
},
|
||||
{
|
||||
LocationPrefix: "/buckets/xxx",
|
||||
ReadOnly: true,
|
||||
},
|
||||
{
|
||||
LocationPrefix: "/buckets/xxx/yyy",
|
||||
ReadOnly: false,
|
||||
},
|
||||
}}
|
||||
fc.doLoadConf(conf)
|
||||
|
||||
|
@ -31,4 +43,7 @@ func TestFilerConf(t *testing.T) {
|
|||
assert.Equal(t, "abcd", fc.MatchStorageRule("/buckets/abcd/jasdf").Collection)
|
||||
assert.Equal(t, "001", fc.MatchStorageRule("/buckets/abc/jasdf").Replication)
|
||||
|
||||
assert.Equal(t, true, fc.MatchStorageRule("/buckets/xxx/yyy/zzz").ReadOnly)
|
||||
assert.Equal(t, false, fc.MatchStorageRule("/buckets/other").ReadOnly)
|
||||
|
||||
}
|
||||
|
|
|
@ -71,7 +71,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
|
|||
|
||||
lastFileName := ""
|
||||
includeLastFile := false
|
||||
if !isDeletingBucket {
|
||||
if !isDeletingBucket || !f.Store.CanDropWholeBucket() {
|
||||
for {
|
||||
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
|
||||
if err != nil {
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
// onMetadataChangeEvent is triggered after filer processed change events from local or remote filers
|
||||
func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) {
|
||||
f.maybeReloadFilerConfiguration(event)
|
||||
f.maybeReloadRemoteStorageConfigurationAndMapping(event)
|
||||
f.onBucketEvents(event)
|
||||
}
|
||||
|
||||
|
@ -24,10 +25,14 @@ func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) {
|
|||
}
|
||||
if f.DirBucketsPath == event.Directory {
|
||||
if message.OldEntry == nil && message.NewEntry != nil {
|
||||
f.Store.OnBucketCreation(message.NewEntry.Name)
|
||||
if message.NewEntry.IsDirectory {
|
||||
f.Store.OnBucketCreation(message.NewEntry.Name)
|
||||
}
|
||||
}
|
||||
if message.OldEntry != nil && message.NewEntry == nil {
|
||||
f.Store.OnBucketDeletion(message.OldEntry.Name)
|
||||
if message.OldEntry.IsDirectory {
|
||||
f.Store.OnBucketDeletion(message.OldEntry.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -80,3 +85,16 @@ func (f *Filer) LoadFilerConf() {
|
|||
}
|
||||
f.FilerConf = fc
|
||||
}
|
||||
|
||||
////////////////////////////////////
|
||||
// load and maintain remote storages
|
||||
////////////////////////////////////
|
||||
func (f *Filer) LoadRemoteStorageConfAndMapping() {
|
||||
if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil {
|
||||
glog.Errorf("read remote conf and mapping: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
func (f *Filer) maybeReloadRemoteStorageConfigurationAndMapping(event *filer_pb.SubscribeMetadataResponse) {
|
||||
// FIXME add reloading
|
||||
}
|
||||
|
|
197 weed/filer/filer_remote_storage.go Normal file
@@ -0,0 +1,197 @@
|
|||
package filer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/viant/ptrie"
|
||||
)
|
||||
|
||||
const REMOTE_STORAGE_CONF_SUFFIX = ".conf"
|
||||
const REMOTE_STORAGE_MOUNT_FILE = "mount.mapping"
|
||||
|
||||
type FilerRemoteStorage struct {
|
||||
rules ptrie.Trie
|
||||
storageNameToConf map[string]*filer_pb.RemoteConf
|
||||
}
|
||||
|
||||
func NewFilerRemoteStorage() (rs *FilerRemoteStorage) {
|
||||
rs = &FilerRemoteStorage{
|
||||
rules: ptrie.New(),
|
||||
storageNameToConf: make(map[string]*filer_pb.RemoteConf),
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *Filer) (err error) {
|
||||
// execute this on filer
|
||||
|
||||
entries, _, err := filer.ListDirectoryEntries(context.Background(), DirectoryEtcRemote, "", false, math.MaxInt64, "", "", "")
|
||||
if err != nil {
|
||||
if err == filer_pb.ErrNotFound {
|
||||
return nil
|
||||
}
|
||||
glog.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.Name() == REMOTE_STORAGE_MOUNT_FILE {
|
||||
if err := rs.loadRemoteStorageMountMapping(entry.Content); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !strings.HasSuffix(entry.Name(), REMOTE_STORAGE_CONF_SUFFIX) {
|
||||
return nil
|
||||
}
|
||||
conf := &filer_pb.RemoteConf{}
|
||||
if err := proto.Unmarshal(entry.Content, conf); err != nil {
|
||||
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, entry.Name(), err)
|
||||
}
|
||||
rs.storageNameToConf[conf.Name] = conf
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err error) {
|
||||
mappings := &filer_pb.RemoteStorageMapping{}
|
||||
if err := proto.Unmarshal(data, mappings); err != nil {
|
||||
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, err)
|
||||
}
|
||||
for dir, storageLocation := range mappings.Mappings {
|
||||
rs.mapDirectoryToRemoteStorage(util.FullPath(dir), storageLocation)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *filer_pb.RemoteStorageLocation) {
|
||||
rs.rules.Put([]byte(dir+"/"), loc)
|
||||
}
|
||||
|
||||
func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *filer_pb.RemoteStorageLocation) {
|
||||
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
|
||||
mountDir = util.FullPath(string(key[:len(key)-1]))
|
||||
remoteLocation = value.(*filer_pb.RemoteStorageLocation)
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
|
||||
var storageLocation *filer_pb.RemoteStorageLocation
|
||||
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
|
||||
storageLocation = value.(*filer_pb.RemoteStorageLocation)
|
||||
return true
|
||||
})
|
||||
|
||||
if storageLocation == nil {
|
||||
found = false
|
||||
return
|
||||
}
|
||||
|
||||
return rs.GetRemoteStorageClient(storageLocation.Name)
|
||||
}
|
||||
|
||||
func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
|
||||
remoteConf, found = rs.storageNameToConf[storageName]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
if client, err = remote_storage.GetRemoteStorage(remoteConf); err == nil {
|
||||
found = true
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.RemoteStorageMapping, err error) {
|
||||
mappings = &filer_pb.RemoteStorageMapping{
|
||||
Mappings: make(map[string]*filer_pb.RemoteStorageLocation),
|
||||
}
|
||||
if len(oldContent) > 0 {
|
||||
if err = proto.Unmarshal(oldContent, mappings); err != nil {
|
||||
glog.Warningf("unmarshal existing mappings: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func AddRemoteStorageMapping(oldContent []byte, dir string, storageLocation *filer_pb.RemoteStorageLocation) (newContent []byte, err error) {
|
||||
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
|
||||
if unmarshalErr != nil {
|
||||
// skip
|
||||
}
|
||||
|
||||
// set the new mapping
|
||||
mappings.Mappings[dir] = storageLocation
|
||||
|
||||
if newContent, err = proto.Marshal(mappings); err != nil {
|
||||
return oldContent, fmt.Errorf("marshal mappings: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func RemoveRemoteStorageMapping(oldContent []byte, dir string) (newContent []byte, err error) {
|
||||
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
|
||||
if unmarshalErr != nil {
|
||||
return nil, unmarshalErr
|
||||
}
|
||||
|
||||
// set the new mapping
|
||||
delete(mappings.Mappings, dir)
|
||||
|
||||
if newContent, err = proto.Marshal(mappings); err != nil {
|
||||
return oldContent, fmt.Errorf("marshal mappings: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *filer_pb.RemoteStorageMapping, readErr error) {
|
||||
var oldContent []byte
|
||||
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
|
||||
return readErr
|
||||
}); readErr != nil {
|
||||
return nil, readErr
|
||||
}
|
||||
|
||||
mappings, readErr = UnmarshalRemoteStorageMappings(oldContent)
|
||||
if readErr != nil {
|
||||
return nil, fmt.Errorf("unmarshal mappings: %v", readErr)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *filer_pb.RemoteConf, readErr error) {
|
||||
var oldContent []byte
|
||||
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX)
|
||||
return readErr
|
||||
}); readErr != nil {
|
||||
return nil, readErr
|
||||
}
|
||||
|
||||
// unmarshal storage configuration
|
||||
conf = &filer_pb.RemoteConf{}
|
||||
if unMarshalErr := proto.Unmarshal(oldContent, conf); unMarshalErr != nil {
|
||||
readErr = fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
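A short usage sketch of the mount-mapping helpers defined in the new file above; the directory and remote location are invented, and the resulting bytes are what would be stored in /etc/remote/mount.mapping:

package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// start from an empty mapping file and mount a local directory onto a remote bucket path
	content, err := filer.AddRemoteStorageMapping(nil, "/buckets/b1", &filer_pb.RemoteStorageLocation{
		Name:   "s7",
		Bucket: "some",
		Path:   "/dir",
	})
	if err != nil {
		log.Fatal(err)
	}

	mappings, err := filer.UnmarshalRemoteStorageMappings(content)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("mounted dirs: %d", len(mappings.Mappings)) // 1
}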
34 weed/filer/filer_remote_storage_test.go Normal file
@@ -0,0 +1,34 @@
|
|||
package filer
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
|
||||
conf := &filer_pb.RemoteConf{
|
||||
Name: "s7",
|
||||
Type: "s3",
|
||||
}
|
||||
rs := NewFilerRemoteStorage()
|
||||
rs.storageNameToConf[conf.Name] = conf
|
||||
|
||||
rs.mapDirectoryToRemoteStorage("/a/b/c", &filer_pb.RemoteStorageLocation{
|
||||
Name: "s7",
|
||||
Bucket: "some",
|
||||
Path: "/dir",
|
||||
})
|
||||
|
||||
_, _, found := rs.FindRemoteStorageClient("/a/b/c/d/e/f")
|
||||
assert.Equal(t, true, found, "find storage client")
|
||||
|
||||
_, _, found2 := rs.FindRemoteStorageClient("/a/b")
|
||||
assert.Equal(t, false, found2, "should not find storage client")
|
||||
|
||||
_, _, found3 := rs.FindRemoteStorageClient("/a/b/c")
|
||||
assert.Equal(t, false, found3, "should not find storage client")
|
||||
|
||||
_, _, found4 := rs.FindRemoteStorageClient("/a/b/cc")
|
||||
assert.Equal(t, false, found4, "should not find storage client")
|
||||
}
|
|
@ -3,6 +3,7 @@ package filer
|
|||
import (
|
||||
"context"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
@ -27,6 +28,10 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, start
|
|||
return true
|
||||
})
|
||||
|
||||
if limit == math.MaxInt64 {
|
||||
limit = math.MaxInt64 - 1
|
||||
}
|
||||
|
||||
hasMore = int64(len(entries)) >= limit+1
|
||||
if hasMore {
|
||||
entries = entries[:limit]
|
||||
|
|
|
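The clamp above exists because the hasMore check reads one entry past the requested limit; if a caller passes math.MaxInt64, limit+1 would overflow and wrap negative, so the limit is reduced by one first. A tiny illustration of the wraparound:

package main

import (
	"fmt"
	"math"
)

func main() {
	var limit int64 = math.MaxInt64
	fmt.Println(limit + 1) // -9223372036854775808: signed overflow wraps negative
	limit = math.MaxInt64 - 1 // the clamp applied in ListDirectoryEntries
	fmt.Println(int64(3) >= limit+1) // false, as expected for a small result set
}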
@ -43,4 +43,5 @@ type FilerStore interface {
|
|||
type BucketAware interface {
|
||||
OnBucketCreation(bucket string)
|
||||
OnBucketDeletion(bucket string)
|
||||
CanDropWholeBucket() bool
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ type VirtualFilerStore interface {
|
|||
AddPathSpecificStore(path string, storeId string, store FilerStore)
|
||||
OnBucketCreation(bucket string)
|
||||
OnBucketDeletion(bucket string)
|
||||
CanDropWholeBucket() bool
|
||||
}
|
||||
|
||||
type FilerStoreWrapper struct {
|
||||
|
@ -42,6 +43,13 @@ func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
|
|||
}
|
||||
}
|
||||
|
||||
func (fsw *FilerStoreWrapper) CanDropWholeBucket() bool {
|
||||
if ba, ok := fsw.defaultStore.(BucketAware); ok {
|
||||
return ba.CanDropWholeBucket()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (fsw *FilerStoreWrapper) OnBucketCreation(bucket string) {
|
||||
for _, store := range fsw.storeIdToStore {
|
||||
if ba, ok := store.(BucketAware); ok {
|
||||
|
|
|
@ -5,13 +5,15 @@ import (
|
|||
"context"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
|
@ -47,6 +49,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
|
|||
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
|
||||
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
|
||||
CompactionTableSizeMultiplier: 4,
|
||||
Filter: filter.NewBloomFilter(8), // false positive rate 0.02
|
||||
}
|
||||
|
||||
for d := 0; d < dbCount; d++ {
|
||||
|
|
|
@ -5,15 +5,17 @@ import (
|
|||
"context"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
|
@ -62,17 +64,19 @@ func (store *LevelDB3Store) initialize(dir string) (err error) {
|
|||
}
|
||||
|
||||
func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
|
||||
|
||||
bloom := filter.NewBloomFilter(8) // false positive rate 0.02
|
||||
opts := &opt.Options{
|
||||
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
|
||||
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
|
||||
CompactionTableSizeMultiplier: 4,
|
||||
Filter: bloom,
|
||||
}
|
||||
if name != DEFAULT {
|
||||
opts = &opt.Options{
|
||||
BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
|
||||
WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
|
||||
CompactionTableSizeMultiplier: 4,
|
||||
Filter: bloom,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
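Both LevelDB stores above attach a bloom filter with 8 bits per key, and the comment claims a false positive rate of roughly 0.02. That figure follows from the standard bloom filter approximation p ≈ 0.6185^(bits per key); a quick check:

package main

import (
	"fmt"
	"math"
)

func main() {
	bitsPerKey := 8.0
	fp := math.Pow(0.6185, bitsPerKey) // ~0.021, matching the "false positive rate 0.02" comment
	fmt.Printf("approx false positive rate: %.4f\n", fp)
}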
|
@ -34,7 +34,7 @@ func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAgg
|
|||
grpcDialOption: grpcDialOption,
|
||||
}
|
||||
t.ListenersCond = sync.NewCond(&t.ListenersLock)
|
||||
t.MetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, nil, func() {
|
||||
t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, func() {
|
||||
t.ListenersCond.Broadcast()
|
||||
})
|
||||
return t
|
||||
|
@ -118,6 +118,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
|
|||
}
|
||||
|
||||
for {
|
||||
glog.V(4).Infof("subscribing remote %s meta change: %v", peer, time.Unix(0, lastTsNs))
|
||||
err := pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
)
|
||||
|
||||
func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
|
||||
|
@ -15,11 +16,11 @@ func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte)
|
|||
|
||||
c := store.connect.Database(store.database).Collection(store.collectionName)
|
||||
|
||||
_, err = c.InsertOne(ctx, Model{
|
||||
Directory: dir,
|
||||
Name: name,
|
||||
Meta: value,
|
||||
})
|
||||
opts := options.Update().SetUpsert(true)
|
||||
filter := bson.D{{"directory", dir}, {"name", name}}
|
||||
update := bson.D{{"$set", bson.D{{"meta", value}}}}
|
||||
|
||||
_, err = c.UpdateOne(ctx, filter, update, opts)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("kv put: %v", err)
|
||||
|
|
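The KvPut change above swaps InsertOne for an upsert, so writing the same key twice updates the value in place instead of inserting duplicates (or failing, if the collection enforces a unique (directory, name) index). A hedged, self-contained sketch of the same upsert with the official mongo-driver; the collection and field names follow the hunk above:

package main

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// upsertMeta stores value under (dir, name), inserting the document if it is
// missing and overwriting its meta field if it already exists.
func upsertMeta(ctx context.Context, c *mongo.Collection, dir, name string, value []byte) error {
	opts := options.Update().SetUpsert(true)
	filter := bson.D{{Key: "directory", Value: dir}, {Key: "name", Value: name}}
	update := bson.D{{Key: "$set", Value: bson.D{{Key: "meta", Value: value}}}}
	_, err := c.UpdateOne(ctx, filter, update, opts)
	return err
}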
45 weed/filer/read_remote.go Normal file
@@ -0,0 +1,45 @@
|
|||
package filer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
func (entry *Entry) IsInRemoteOnly() bool {
|
||||
return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
|
||||
}
|
||||
|
||||
func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data []byte, err error) {
|
||||
client, _, found := f.RemoteStorage.GetRemoteStorageClient(entry.Remote.StorageName)
|
||||
if !found {
|
||||
return nil, fmt.Errorf("remote storage %v not found", entry.Remote.StorageName)
|
||||
}
|
||||
|
||||
mountDir, remoteLoation := f.RemoteStorage.FindMountDirectory(entry.FullPath)
|
||||
|
||||
sourceLoc := MapFullPathToRemoteStorageLocation(mountDir, remoteLoation, entry.FullPath)
|
||||
|
||||
return client.ReadFile(sourceLoc, offset, size)
|
||||
}
|
||||
|
||||
func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, fp util.FullPath) *filer_pb.RemoteStorageLocation {
|
||||
remoteLocation := &filer_pb.RemoteStorageLocation{
|
||||
Name: remoteMountedLocation.Name,
|
||||
Bucket: remoteMountedLocation.Bucket,
|
||||
Path: remoteMountedLocation.Path,
|
||||
}
|
||||
remoteLocation.Path += string(fp)[len(localMountedDir):]
|
||||
return remoteLocation
|
||||
}
|
||||
|
||||
func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *filer_pb.RemoteConf, remoteLocation *filer_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
|
||||
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
|
||||
Directory: string(parent),
|
||||
Name: entry.Name,
|
||||
})
|
||||
return err
|
||||
})
|
||||
}
|
|
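MapFullPathToRemoteStorageLocation above appends the part of the path below the mount point to the remote base path. A small sketch with invented mount and remote values to show the resulting remote location:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	mountDir := util.FullPath("/buckets/b1")
	remote := &filer_pb.RemoteStorageLocation{Name: "s7", Bucket: "some", Path: "/dir"}

	loc := filer.MapFullPathToRemoteStorageLocation(mountDir, remote, util.FullPath("/buckets/b1/photos/2021.jpg"))
	fmt.Println(loc.Path) // "/dir/photos/2021.jpg": the suffix below the mount point is appended to the remote path
}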
@ -2,13 +2,9 @@ package filer
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"github.com/chrislusf/seaweedfs/weed/wdclient"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -31,50 +27,17 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed
|
|||
|
||||
}
|
||||
|
||||
func ReadContent(filerAddress string, dir, name string) ([]byte, error) {
|
||||
|
||||
target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name)
|
||||
|
||||
data, _, err := util.Get(target)
|
||||
|
||||
return data, err
|
||||
}
|
||||
|
||||
func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error {
|
||||
var target string
|
||||
if port == 0 {
|
||||
target = fmt.Sprintf("http://%s%s/%s", host, dir, name)
|
||||
} else {
|
||||
target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
|
||||
func ReadInsideFiler(filerClient filer_pb.SeaweedFilerClient, dir, name string) (content []byte, err error) {
|
||||
request := &filer_pb.LookupDirectoryEntryRequest{
|
||||
Directory: dir,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// set the HTTP method, url, and request body
|
||||
req, err := http.NewRequest(http.MethodPut, target, byteBuffer)
|
||||
respLookupEntry, err := filer_pb.LookupEntry(filerClient, request)
|
||||
if err != nil {
|
||||
return err
|
||||
return
|
||||
}
|
||||
|
||||
// set the request header Content-Type for json
|
||||
if contentType != "" {
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer util.CloseResponse(resp)
|
||||
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return fmt.Errorf("%s: %s %v", target, resp.Status, string(b))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
content = respLookupEntry.Entry.Content
|
||||
return
|
||||
}
|
||||
|
||||
func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, content []byte) error {
|
||||
|
|
|
@ -24,18 +24,21 @@ func init() {
|
|||
|
||||
type options struct {
|
||||
opt *gorocksdb.Options
|
||||
bto *gorocksdb.BlockBasedTableOptions
|
||||
ro *gorocksdb.ReadOptions
|
||||
wo *gorocksdb.WriteOptions
|
||||
}
|
||||
|
||||
func (opt *options) init() {
|
||||
opt.opt = gorocksdb.NewDefaultOptions()
|
||||
opt.bto = gorocksdb.NewDefaultBlockBasedTableOptions()
|
||||
opt.ro = gorocksdb.NewDefaultReadOptions()
|
||||
opt.wo = gorocksdb.NewDefaultWriteOptions()
|
||||
}
|
||||
|
||||
func (opt *options) close() {
|
||||
opt.opt.Destroy()
|
||||
opt.bto.Destroy()
|
||||
opt.ro.Destroy()
|
||||
opt.wo.Destroy()
|
||||
}
|
||||
|
@ -69,6 +72,11 @@ func (store *RocksDBStore) initialize(dir string) (err error) {
|
|||
store.opt.SetCompactionFilter(NewTTLFilter())
|
||||
// store.opt.SetMaxBackgroundCompactions(2)
|
||||
|
||||
// https://github.com/tecbot/gorocksdb/issues/132
|
||||
store.bto.SetFilterPolicy(gorocksdb.NewBloomFilterFull(8))
|
||||
store.opt.SetBlockBasedTableFactory(store.bto)
|
||||
// store.opt.EnableStatistics()
|
||||
|
||||
store.db, err = gorocksdb.OpenDb(store.opt, dir)
|
||||
|
||||
return
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"bytes"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"io"
|
||||
)
|
||||
|
||||
|
@ -14,7 +15,7 @@ func ParseS3ConfigurationFromBytes(content []byte, config *iam_pb.S3ApiConfigura
|
|||
return nil
|
||||
}
|
||||
|
||||
func S3ConfigurationToText(writer io.Writer, config *iam_pb.S3ApiConfiguration) error {
|
||||
func ProtoToText(writer io.Writer, config proto.Message) error {
|
||||
|
||||
m := jsonpb.Marshaler{
|
||||
EmitDefaults: false,
|
||||
|
|
|
@ -44,7 +44,7 @@ func TestS3Conf(t *testing.T) {
|
|||
},
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err := S3ConfigurationToText(&buf, s3Conf)
|
||||
err := ProtoToText(&buf, s3Conf)
|
||||
assert.Equal(t, err, nil)
|
||||
s3ConfSaved := &iam_pb.S3ApiConfiguration{}
|
||||
err = ParseS3ConfigurationFromBytes(buf.Bytes(), s3ConfSaved)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// +build !linux,!darwin,!windows
|
||||
// +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64
|
||||
|
||||
// limited GOOS due to modernc.org/libc/unistd
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -88,16 +89,37 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
|
|||
|
||||
// ---------------- ChunkStreamReader ----------------------------------
|
||||
type ChunkStreamReader struct {
|
||||
chunkViews []*ChunkView
|
||||
logicOffset int64
|
||||
buffer []byte
|
||||
bufferOffset int64
|
||||
bufferPos int
|
||||
chunkIndex int
|
||||
lookupFileId wdclient.LookupFileIdFunctionType
|
||||
chunkViews []*ChunkView
|
||||
totalSize int64
|
||||
logicOffset int64
|
||||
buffer []byte
|
||||
bufferOffset int64
|
||||
bufferPos int
|
||||
nextChunkViewIndex int
|
||||
lookupFileId wdclient.LookupFileIdFunctionType
|
||||
}
|
||||
|
||||
var _ = io.ReadSeeker(&ChunkStreamReader{})
|
||||
var _ = io.ReaderAt(&ChunkStreamReader{})
|
||||
|
||||
func doNewChunkStreamReader(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
|
||||
|
||||
chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
|
||||
sort.Slice(chunkViews, func(i, j int) bool {
|
||||
return chunkViews[i].LogicOffset < chunkViews[j].LogicOffset
|
||||
})
|
||||
|
||||
var totalSize int64
|
||||
for _, chunk := range chunkViews {
|
||||
totalSize += int64(chunk.Size)
|
||||
}
|
||||
|
||||
return &ChunkStreamReader{
|
||||
chunkViews: chunkViews,
|
||||
lookupFileId: lookupFileIdFn,
|
||||
totalSize: totalSize,
|
||||
}
|
||||
}
|
||||
|
||||
func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
|
||||
|
||||
|
@ -105,39 +127,39 @@ func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks [
|
|||
return masterClient.LookupFileId(fileId)
|
||||
}
|
||||
|
||||
chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
|
||||
|
||||
return &ChunkStreamReader{
|
||||
chunkViews: chunkViews,
|
||||
lookupFileId: lookupFileIdFn,
|
||||
}
|
||||
return doNewChunkStreamReader(lookupFileIdFn, chunks)
|
||||
}
|
||||
|
||||
func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
|
||||
|
||||
lookupFileIdFn := LookupFn(filerClient)
|
||||
|
||||
chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64)
|
||||
return doNewChunkStreamReader(lookupFileIdFn, chunks)
|
||||
}
|
||||
|
||||
return &ChunkStreamReader{
|
||||
chunkViews: chunkViews,
|
||||
lookupFileId: lookupFileIdFn,
|
||||
func (c *ChunkStreamReader) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if err = c.prepareBufferFor(c.logicOffset); err != nil {
|
||||
return
|
||||
}
|
||||
return c.Read(p)
|
||||
}
|
||||
|
||||
func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
|
||||
for n < len(p) {
|
||||
if c.isBufferEmpty() {
|
||||
if c.chunkIndex >= len(c.chunkViews) {
|
||||
if c.nextChunkViewIndex >= len(c.chunkViews) {
|
||||
return n, io.EOF
|
||||
}
|
||||
chunkView := c.chunkViews[c.chunkIndex]
|
||||
c.fetchChunkToBuffer(chunkView)
|
||||
c.chunkIndex++
|
||||
chunkView := c.chunkViews[c.nextChunkViewIndex]
|
||||
if err = c.fetchChunkToBuffer(chunkView); err != nil {
|
||||
return
|
||||
}
|
||||
c.nextChunkViewIndex++
|
||||
}
|
||||
t := copy(p[n:], c.buffer[c.bufferPos:])
|
||||
c.bufferPos += t
|
||||
n += t
|
||||
c.logicOffset += int64(t)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -148,38 +170,55 @@ func (c *ChunkStreamReader) isBufferEmpty() bool {
|
|||
|
||||
func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
|
||||
|
||||
var totalSize int64
|
||||
for _, chunk := range c.chunkViews {
|
||||
totalSize += int64(chunk.Size)
|
||||
}
|
||||
|
||||
var err error
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
case io.SeekCurrent:
|
||||
offset += c.bufferOffset + int64(c.bufferPos)
|
||||
offset += c.logicOffset
|
||||
case io.SeekEnd:
|
||||
offset = totalSize + offset
|
||||
offset = c.totalSize + offset
|
||||
}
|
||||
if offset > totalSize {
|
||||
if offset > c.totalSize {
|
||||
err = io.ErrUnexpectedEOF
|
||||
} else {
|
||||
c.logicOffset = offset
|
||||
}
|
||||
|
||||
for i, chunk := range c.chunkViews {
|
||||
if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
|
||||
if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
|
||||
c.fetchChunkToBuffer(chunk)
|
||||
c.chunkIndex = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
c.bufferPos = int(offset - c.bufferOffset)
|
||||
|
||||
return offset, err
|
||||
|
||||
}
|
||||
|
||||
func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
|
||||
// stay in the same chunk
|
||||
if !c.isBufferEmpty() {
|
||||
if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) {
|
||||
c.bufferPos = int(offset - c.bufferOffset)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// need to seek to a different chunk
|
||||
currentChunkIndex := sort.Search(len(c.chunkViews), func(i int) bool {
|
||||
return c.chunkViews[i].LogicOffset <= offset
|
||||
})
|
||||
if currentChunkIndex == len(c.chunkViews) {
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
// positioning within the new chunk
|
||||
chunk := c.chunkViews[currentChunkIndex]
|
||||
if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
|
||||
if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
|
||||
if err = c.fetchChunkToBuffer(chunk); err != nil {
|
||||
return
|
||||
}
|
||||
c.nextChunkViewIndex = currentChunkIndex + 1
|
||||
}
|
||||
c.bufferPos = int(offset - c.bufferOffset)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
|
||||
urlStrings, err := c.lookupFileId(chunkView.FileId)
|
||||
if err != nil {
|
||||
|
|
|
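prepareBufferFor above locates the chunk view that covers a logical offset before loading it into the buffer. A generic, self-contained sketch of that offset-to-chunk lookup over views sorted by LogicOffset; this is not the reader's exact code, only the search it performs:

package main

import (
	"fmt"
	"sort"
)

type view struct {
	LogicOffset int64
	Size        uint64
}

// chunkIndexFor returns the index of the view covering offset, or -1 if none does.
func chunkIndexFor(views []view, offset int64) int {
	// find the first view that starts beyond the offset; the candidate is the one just before it
	i := sort.Search(len(views), func(i int) bool { return views[i].LogicOffset > offset })
	if i == 0 {
		return -1
	}
	v := views[i-1]
	if offset < v.LogicOffset+int64(v.Size) {
		return i - 1
	}
	return -1 // offset falls into a hole between chunks
}

func main() {
	views := []view{{0, 100}, {100, 100}, {300, 50}}
	fmt.Println(chunkIndexFor(views, 150)) // 1
	fmt.Println(chunkIndexFor(views, 250)) // -1, in the gap before the chunk at 300
}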
@ -54,28 +54,27 @@ func (dir *Dir) Id() uint64 {
|
|||
|
||||
func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
|
||||
|
||||
// https://github.com/bazil/fuse/issues/196
|
||||
attr.Valid = time.Second
|
||||
|
||||
if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
|
||||
dir.setRootDirAttributes(attr)
|
||||
glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
|
||||
return nil
|
||||
}
|
||||
|
||||
entry, err := dir.maybeLoadEntry()
|
||||
if err != nil {
|
||||
glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
|
||||
glog.V(3).Infof("dir Attr %s, err: %+v", dir.FullPath(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
// https://github.com/bazil/fuse/issues/196
|
||||
attr.Valid = time.Second
|
||||
attr.Inode = dir.Id()
|
||||
attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir
|
||||
attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
|
||||
attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
|
||||
attr.Ctime = time.Unix(entry.Attributes.Crtime, 0)
|
||||
attr.Atime = time.Unix(entry.Attributes.Mtime, 0)
|
||||
attr.Gid = entry.Attributes.Gid
|
||||
attr.Uid = entry.Attributes.Uid
|
||||
|
||||
if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
|
||||
attr.BlockSize = blockSize
|
||||
}
|
||||
|
||||
glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
|
||||
|
||||
return nil
|
||||
|
@ -93,20 +92,6 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f
return getxattr(entry, req, resp)
}

func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
// attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
attr.Valid = time.Second
attr.Inode = dir.Id()
attr.Uid = dir.wfs.option.MountUid
attr.Gid = dir.wfs.option.MountGid
attr.Mode = dir.wfs.option.MountMode
attr.Crtime = dir.wfs.option.MountCtime
attr.Ctime = dir.wfs.option.MountCtime
attr.Mtime = dir.wfs.option.MountMtime
attr.Atime = dir.wfs.option.MountMtime
attr.BlockSize = blockSize
}

func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress

@ -156,7 +141,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
var node fs.Node
if isDirectory {
node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name))
return node, nil, nil
return node, node, nil
}

node = dir.newFile(req.Name)

@ -375,6 +360,28 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
glog.Errorf("list meta cache: %v", listErr)
return nil, fuse.EIO
}

// create proper . and .. directories
ret = append(ret, fuse.Dirent{
Inode: dirPath.AsInode(),
Name: ".",
Type: fuse.DT_Dir,
})

// return the correct parent inode for the mount root
var inode uint64
if string(dirPath) == dir.wfs.option.FilerMountRootPath {
inode = dir.wfs.option.MountParentInode
} else {
inode = util.FullPath(dir.parent.FullPath()).AsInode()
}

ret = append(ret, fuse.Dirent{
Inode: inode,
Name: "..",
Type: fuse.DT_Dir,
})

return
}
@ -436,7 +443,10 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
dir.wfs.handlesLock.Lock()
defer dir.wfs.handlesLock.Unlock()
inodeId := filePath.AsInode()
delete(dir.wfs.handles, inodeId)
if fh, ok := dir.wfs.handles[inodeId]; ok {
delete(dir.wfs.handles, inodeId)
fh.isDeleted = true
}

return nil

@ -2,6 +2,9 @@ package filesys

import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"math"

"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"

@ -37,6 +40,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
OldName: req.OldName,
NewDirectory: newDir.FullPath(),
NewName: req.NewName,
Signatures: []int32{dir.wfs.signature},
}

_, err := client.AtomicRenameEntry(ctx, request)

@ -53,46 +57,118 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
return fuse.EIO
}

// TODO: replicate renaming logic on filer
if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
err = dir.moveEntry(context.Background(), util.FullPath(dir.FullPath()), oldEntry, util.FullPath(newDir.FullPath()), req.NewName)
if err != nil {
glog.V(0).Infof("dir local Rename %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
oldEntry.FullPath = newPath
if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {

return nil
}

func (dir *Dir) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error {

oldName := entry.Name()

oldPath := oldParent.Child(oldName)
newPath := newParent.Child(newName)
if err := dir.moveSelfEntry(ctx, oldParent, entry, newParent, newName, func() error {

oldFsNode := NodeWithId(oldPath.AsInode())
newFsNode := NodeWithId(newPath.AsInode())
newDirNode, found := dir.wfs.Server.FindInternalNode(NodeWithId(newParent.AsInode()))
var newDir *Dir
if found {
newDir = newDirNode.(*Dir)
}
dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
if file, ok := internalNode.(*File); ok {
glog.V(4).Infof("internal file node %s", oldParent.Child(oldName))
file.Name = newName
file.id = uint64(newFsNode)
if found {
file.dir = newDir
}
}
if dir, ok := internalNode.(*Dir); ok {
glog.V(4).Infof("internal dir node %s", oldParent.Child(oldName))
dir.name = newName
dir.id = uint64(newFsNode)
if found {
dir.parent = newDir
}
}
})

// change file handle
inodeId := oldPath.AsInode()
dir.wfs.handlesLock.Lock()
if existingHandle, found := dir.wfs.handles[inodeId]; found && existingHandle == nil {
glog.V(4).Infof("opened file handle %s => %s", oldPath, newPath)
delete(dir.wfs.handles, inodeId)
dir.wfs.handles[newPath.AsInode()] = existingHandle
}
dir.wfs.handlesLock.Unlock()

if entry.IsDirectory() {
if err := dir.moveFolderSubEntries(ctx, oldParent, oldName, newParent, newName); err != nil {
return err
}
}
return nil
}); err != nil {
return fmt.Errorf("fail to move %s => %s: %v", oldPath, newPath, err)
}

return nil
}

func (dir *Dir) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, oldName string, newParent util.FullPath, newName string) error {

currentDirPath := oldParent.Child(oldName)
newDirPath := newParent.Child(newName)

glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath)

var moveErr error
listErr := dir.wfs.metaCache.ListDirectoryEntries(ctx, currentDirPath, "", false, int64(math.MaxInt32), func(item *filer.Entry) bool {
moveErr = dir.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name())
if moveErr != nil {
return false
}
return true
})
if listErr != nil {
return listErr
}
if moveErr != nil {
return moveErr
}

return nil
}

func (dir *Dir) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error) error {

newPath := newParent.Child(newName)
oldPath := oldParent.Child(entry.Name())

entry.FullPath = newPath
if err := dir.wfs.metaCache.InsertEntry(ctx, entry); err != nil {
glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}

oldFsNode := NodeWithId(oldPath.AsInode())
newFsNode := NodeWithId(newPath.AsInode())
dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
if file, ok := internalNode.(*File); ok {
glog.V(4).Infof("internal file node %s", file.Name)
file.Name = req.NewName
file.id = uint64(newFsNode)
file.dir = newDir
if moveFolderSubEntries != nil {
if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
return moveChildrenErr
}
if dir, ok := internalNode.(*Dir); ok {
glog.V(4).Infof("internal dir node %s", dir.name)
dir.name = req.NewName
dir.id = uint64(newFsNode)
dir.parent = newDir
}
})

// change file handle
dir.wfs.handlesLock.Lock()
defer dir.wfs.handlesLock.Unlock()
inodeId := oldPath.AsInode()
existingHandle, found := dir.wfs.handles[inodeId]
glog.V(4).Infof("has open filehandle %s: %v", oldPath, found)
if !found || existingHandle == nil {
return nil
}
glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath)
delete(dir.wfs.handles, inodeId)
dir.wfs.handles[newPath.AsInode()] = existingHandle

if err := dir.wfs.metaCache.DeleteEntry(ctx, oldPath); err != nil {
glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}

return nil
}
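Aside, not part of the commit: stripped of the FUSE node invalidation and file-handle bookkeeping, the local move above boils down to inserting the entry under its new path, recursing into children if it is a directory, then deleting the old path. A toy, map-based illustration of that order; the cache and entry type are hypothetical stand-ins for the mount's meta cache.

package main

import (
    "fmt"
    "strings"
)

type entry struct {
    isDir bool
}

// cache maps full paths to entries, standing in for the local meta cache.
var cache = map[string]entry{
    "/a":     {isDir: true},
    "/a/f1":  {},
    "/a/sub": {isDir: true},
}

// move re-inserts the entry under newPath, moves children for directories,
// and finally removes the old key, mirroring the order used above.
func move(oldPath, newPath string) {
    e := cache[oldPath]
    cache[newPath] = e
    if e.isDir {
        for p := range cache {
            if strings.HasPrefix(p, oldPath+"/") {
                move(p, newPath+strings.TrimPrefix(p, oldPath))
            }
        }
    }
    delete(cache, oldPath)
}

func main() {
    move("/a", "/b")
    fmt.Println(len(cache), cache["/b/sub"].isDir) // 3 true
}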
@ -97,7 +97,7 @@ func (pages *TempFileDirtyPages) saveExistingPagesToStorage() {

for _, list := range pages.writtenIntervals.lists {
listStopOffset := list.Offset() + list.Size()
for uploadedOffset:=int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
for uploadedOffset := int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
start, stop := max(list.Offset(), uploadedOffset), min(listStopOffset, uploadedOffset+pageSize)
if start >= stop {
continue
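Aside, not part of the commit: the loop above walks each written interval in fixed-size pages and clamps every page against the interval's own bounds. A small self-contained sketch of that slicing, with a made-up 4-byte page size.

package main

import "fmt"

const pageSize = int64(4) // illustrative; the real page size is much larger

func max(a, b int64) int64 {
    if a > b {
        return a
    }
    return b
}

func min(a, b int64) int64 {
    if a < b {
        return a
    }
    return b
}

// pageRanges splits the written interval [offset, offset+size) into
// page-aligned [start, stop) ranges, the same clamping as the upload loop.
func pageRanges(offset, size int64) [][2]int64 {
    var out [][2]int64
    stopOffset := offset + size
    for uploaded := int64(0); uploaded < stopOffset; uploaded += pageSize {
        start, stop := max(offset, uploaded), min(stopOffset, uploaded+pageSize)
        if start >= stop {
            continue
        }
        out = append(out, [2]int64{start, stop})
    }
    return out
}

func main() {
    fmt.Println(pageRanges(3, 7)) // [[3 4] [4 8] [8 10]]
}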
@ -54,7 +54,7 @@ func (list *WrittenIntervalLinkedList) ReadData(buf []byte, start, stop int64) {
nodeStart, nodeStop := max(start, t.DataOffset), min(stop, t.DataOffset+t.Size)
if nodeStart < nodeStop {
// glog.V(4).Infof("copying start=%d stop=%d t=[%d,%d) => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.DataOffset, t.DataOffset+t.Size, len(buf), nodeStart, nodeStop)
list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset + nodeStart - t.DataOffset)
list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset+nodeStart-t.DataOffset)
}

if t.Next == nil {
@ -115,16 +115,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
if err != nil {
return err
}
if file.isOpen > 0 {
file.wfs.handlesLock.Lock()
fileHandle := file.wfs.handles[file.Id()]
file.wfs.handlesLock.Unlock()

if fileHandle != nil {
fileHandle.Lock()
defer fileHandle.Unlock()
}
}

if req.Valid.Size() {

@ -154,17 +144,17 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.dirtyMetadata = true
}

if req.Valid.Mode() {
if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode) {
entry.Attributes.FileMode = uint32(req.Mode)
file.dirtyMetadata = true
}

if req.Valid.Uid() {
if req.Valid.Uid() && entry.Attributes.Uid != req.Uid {
entry.Attributes.Uid = req.Uid
file.dirtyMetadata = true
}

if req.Valid.Gid() {
if req.Valid.Gid() && entry.Attributes.Gid != req.Gid {
entry.Attributes.Gid = req.Gid
file.dirtyMetadata = true
}

@ -174,7 +164,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.dirtyMetadata = true
}

if req.Valid.Mtime() {
if req.Valid.Mtime() && entry.Attributes.Mtime != req.Mtime.Unix() {
entry.Attributes.Mtime = req.Mtime.Unix()
file.dirtyMetadata = true
}

@ -207,6 +197,11 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
if err := setxattr(entry, req); err != nil {
return err
}
file.dirtyMetadata = true

if file.isOpen > 0 {
return nil
}

return file.saveEntry(entry)

@ -224,6 +219,11 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
if err := removexattr(entry, req); err != nil {
return err
}
file.dirtyMetadata = true

if file.isOpen > 0 {
return nil
}

return file.saveEntry(entry)

@ -351,6 +351,8 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {

file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))

file.dirtyMetadata = false

return nil
})
}

@ -358,3 +360,30 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
func (file *File) getEntry() *filer_pb.Entry {
return file.entry
}

func (file *File) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) {
err := file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

request := &filer_pb.DownloadToLocalRequest{
Directory: file.dir.FullPath(),
Name: entry.Name,
}

glog.V(4).Infof("download entry: %v", request)
resp, err := client.DownloadToLocal(context.Background(), request)
if err != nil {
glog.Errorf("DownloadToLocal file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}

entry = resp.Entry

file.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, resp.Entry))

file.dirtyMetadata = false

return nil
})

return entry, err
}
@ -33,11 +33,12 @@ type FileHandle struct {
Uid uint32 // user ID of process making request
Gid uint32 // group ID of process making request
writeOnly bool
isDeleted bool
}

func newFileHandle(file *File, uid, gid uint32, writeOnly bool) *FileHandle {
fh := &FileHandle{
f: file,
f: file,
// dirtyPages: newContinuousDirtyPages(file, writeOnly),
dirtyPages: newTempFileDirtyPages(file, writeOnly),
Uid: uid,

@ -113,6 +114,16 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
return 0, io.EOF
}

if entry.IsInRemoteOnly() {
glog.V(4).Infof("download remote entry %s", fh.f.fullpath())
newEntry, err := fh.f.downloadRemoteEntry(entry)
if err != nil {
glog.V(1).Infof("download remote entry %s: %v", fh.f.fullpath(), err)
return 0, err
}
entry = newEntry
}

fileSize := int64(filer.FileSize(entry))
fileFullPath := fh.f.fullpath()

@ -129,7 +140,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {

var chunkResolveErr error
if fh.entryViewCache == nil {
fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks, 0, math.MaxInt64)
if chunkResolveErr != nil {
return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
}

@ -222,6 +233,11 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {

glog.V(4).Infof("Flush %v fh %d", fh.f.fullpath(), fh.handle)

if fh.isDeleted {
glog.V(4).Infof("Flush %v fh %d skip deleted", fh.f.fullpath(), fh.handle)
return nil
}

fh.Lock()
defer fh.Unlock()
@ -3,14 +3,12 @@ package meta_cache
import (
"context"
"fmt"
"os"
"sync"

"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/leveldb"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
"os"
)

// need to have logic similar to FilerStoreWrapper

@ -18,7 +16,7 @@ import (

type MetaCache struct {
localStore filer.VirtualFilerStore
sync.RWMutex
// sync.RWMutex
visitedBoundary *bounded_tree.BoundedTree
uidGidMapper *UidGidMapper
invalidateFunc func(util.FullPath)

@ -54,8 +52,8 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore {
}

func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
return mc.doInsertEntry(ctx, entry)
}

@ -64,8 +62,8 @@ func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) erro
}

func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()

oldDir, _ := oldPath.DirAndName()
if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) {

@ -97,14 +95,14 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
}

func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
return mc.localStore.UpdateEntry(ctx, entry)
}

func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) {
mc.RLock()
defer mc.RUnlock()
//mc.RLock()
//defer mc.RUnlock()
entry, err = mc.localStore.FindEntry(ctx, fp)
if err != nil {
return nil, err

@ -114,14 +112,14 @@ func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *fi
}

func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
return mc.localStore.DeleteEntry(ctx, fp)
}

func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error {
mc.RLock()
defer mc.RUnlock()
//mc.RLock()
//defer mc.RUnlock()

if !mc.visitedBoundary.HasVisited(dirPath) {
return fmt.Errorf("unsynchronized dir: %v", dirPath)

@ -138,8 +136,8 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full
}

func (mc *MetaCache) Shutdown() {
mc.Lock()
defer mc.Unlock()
//mc.Lock()
//defer mc.Unlock()
mc.localStore.Shutdown()
}

@ -43,5 +43,5 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
}

func IsHiddenSystemEntry(dir, name string) bool {
return dir == "/" && name == "topics"
return dir == "/" && (name == "topics" || name == "etc")
}
@ -2,12 +2,9 @@ package meta_cache

import (
"context"
"fmt"
"io"
"time"

"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)

@ -40,47 +37,30 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
newEntry = filer.FromPbEntry(dir, message.NewEntry)
}
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
if err == nil && message.OldEntry != nil && message.NewEntry != nil {
key := util.NewFullPath(dir, message.NewEntry.Name)
mc.invalidateFunc(key)
if err == nil {
if message.OldEntry != nil && message.NewEntry != nil {
if message.OldEntry.Name == message.NewEntry.Name {
// no need to invalidate
} else {
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
mc.invalidateFunc(oldKey)
newKey := util.NewFullPath(dir, message.NewEntry.Name)
mc.invalidateFunc(newKey)
}
} else if message.OldEntry == nil && message.NewEntry != nil {
// no need to invaalidate
} else if message.OldEntry != nil && message.NewEntry == nil {
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
mc.invalidateFunc(oldKey)
}
}

return err

}

for {
err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
ClientName: "mount",
PathPrefix: dir,
SinceNs: lastTsNs,
Signature: selfSignature,
})
if err != nil {
return fmt.Errorf("subscribe: %v", err)
}
return util.Retry("followMetaUpdates", func() error {
return pb.WithFilerClientFollowMetadata(client, "mount", dir, lastTsNs, selfSignature, processEventFn, true)
})

for {
resp, listenErr := stream.Recv()
if listenErr == io.EOF {
return nil
}
if listenErr != nil {
return listenErr
}

if err := processEventFn(resp); err != nil {
glog.Fatalf("process %v: %v", resp, err)
}
lastTsNs = resp.TsNs
}
})
if err != nil {
glog.Errorf("subscribing filer meta change: %v", err)
}
time.Sleep(time.Second)
}
}
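Aside, not part of the commit: the hand-rolled Recv loop is replaced by a retrying follower. The sketch below shows the general shape of such a retry wrapper; it is a simplified stand-in and not the actual util.Retry or pb.WithFilerClientFollowMetadata signatures.

package main

import (
    "errors"
    "fmt"
    "time"
)

// retry keeps re-running job until it succeeds, pausing between attempts.
func retry(name string, job func() error) {
    for attempt := 1; ; attempt++ {
        if err := job(); err != nil {
            fmt.Printf("%s attempt %d failed: %v\n", name, attempt, err)
            time.Sleep(time.Second)
            continue
        }
        return
    }
}

func main() {
    calls := 0
    retry("followMetaUpdates", func() error {
        calls++
        if calls < 3 {
            return errors.New("transient")
        }
        return nil
    })
    fmt.Println("succeeded after", calls, "attempts")
}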
@ -3,9 +3,6 @@ package filesys
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"math"
"math/rand"
"os"

@ -14,6 +11,10 @@ import (
"sync"
"time"

"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/wdclient"

"google.golang.org/grpc"

"github.com/chrislusf/seaweedfs/weed/util/grace"

@ -46,11 +47,12 @@ type Option struct {
DataCenter string
Umask os.FileMode

MountUid uint32
MountGid uint32
MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
MountUid uint32
MountGid uint32
MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
MountParentInode uint64

VolumeServerAccess string // how to access volume servers
Cipher bool // whether encrypt data on volume server

@ -123,8 +125,6 @@ func NewSeaweedFileSystem(option *Option) *WFS {
glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
}
})
startTime := time.Now()
go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
grace.OnInterrupt(func() {
wfs.metaCache.Shutdown()
})

@ -139,6 +139,11 @@ func NewSeaweedFileSystem(option *Option) *WFS {
return wfs
}

func (wfs *WFS) StartBackgroundTasks() {
startTime := time.Now()
go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
}

func (wfs *WFS) Root() (fs.Node, error) {
return wfs.root, nil
}
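Aside, not part of the commit: with the meta-event subscription moved out of the constructor, the caller is expected to start background tasks explicitly before serving FUSE. A hypothetical wiring sketch; the mount point, the single Option field shown, and the error handling are illustrative only and nowhere near a complete mount command.

package main

import (
    "log"

    "github.com/seaweedfs/fuse"
    "github.com/seaweedfs/fuse/fs"

    "github.com/chrislusf/seaweedfs/weed/filesys"
)

func main() {
    conn, err := fuse.Mount("/mnt/weed") // mount point is illustrative
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Build the filesystem first, then kick off background subscriptions.
    seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
        FilerMountRootPath: "/", // other options elided
    })
    seaweedFileSystem.StartBackgroundTasks()

    if err := fs.Serve(conn, seaweedFileSystem); err != nil {
        log.Fatal(err)
    }
}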
@ -113,6 +113,21 @@ func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err err
fullpath := util.NewFullPath(dir, name)
// glog.V(3).Infof("read entry cache miss %s", fullpath)

// return a valid entry for the mount root
if string(fullpath) == wfs.option.FilerMountRootPath {
return &filer_pb.Entry{
Name: wfs.option.FilerMountRootPath,
IsDirectory: true,
Attributes: &filer_pb.FuseAttributes{
Mtime: wfs.option.MountMtime.Unix(),
FileMode: uint32(wfs.option.MountMode),
Uid: wfs.option.MountUid,
Gid: wfs.option.MountGid,
Crtime: wfs.option.MountCtime.Unix(),
},
}, nil
}

// read from async meta cache
meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))
cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath)
@ -1,55 +1,13 @@
package iamapi

import (
"bytes"
"encoding/xml"
"fmt"
"strconv"

"net/http"
"net/url"
"time"

"github.com/aws/aws-sdk-go/service/iam"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"

"github.com/aws/aws-sdk-go/service/iam"
"net/http"
)

type mimeType string

const (
mimeNone mimeType = ""
mimeXML mimeType = "application/xml"
)

func setCommonHeaders(w http.ResponseWriter) {
w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
w.Header().Set("Accept-Ranges", "bytes")
}

// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
var bytesBuffer bytes.Buffer
bytesBuffer.WriteString(xml.Header)
e := xml.NewEncoder(&bytesBuffer)
e.Encode(response)
return bytesBuffer.Bytes()
}

// If none of the http routes match respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
}

func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) {
apiError := s3err.GetAPIError(errorCode)
errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
}

func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) {
errCode := err.Error()
errorResp := ErrorResponse{}

@ -64,42 +22,10 @@ func writeIamErrorResponse(w http.ResponseWriter, err error, object string, valu
case iam.ErrCodeNoSuchEntityException:
msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value)
errorResp.Error.Message = &msg
writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML)
s3err.WriteXMLResponse(w, http.StatusNotFound, errorResp)
case iam.ErrCodeServiceFailureException:
writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
default:
writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
s3err.WriteXMLResponse(w, http.StatusInternalServerError, errorResp)
}
}

func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse {
return s3err.RESTErrorResponse{
Code: err.Code,
Message: err.Description,
Resource: resource,
RequestID: fmt.Sprintf("%d", time.Now().UnixNano()),
}
}

func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
setCommonHeaders(w)
if response != nil {
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
}
if mType != mimeNone {
w.Header().Set("Content-Type", string(mType))
}
w.WriteHeader(statusCode)
if response != nil {
glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
_, err := w.Write(response)
if err != nil {
glog.V(0).Infof("write err: %v", err)
}
w.(http.Flusher).Flush()
}
}

func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
writeResponse(w, http.StatusOK, response, mimeXML)
}
@ -362,7 +362,7 @@ func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, valu

func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil {
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
return
}
values := r.PostForm

@ -370,7 +370,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
s3cfgLock.RLock()
s3cfg := &iam_pb.S3ApiConfiguration{}
if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil {
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
return
}
s3cfgLock.RUnlock()

@ -411,14 +411,14 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
response, err = iama.CreatePolicy(s3cfg, values)
if err != nil {
glog.Errorf("CreatePolicy: %+v", err)
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
return
}
case "PutUserPolicy":
response, err = iama.PutUserPolicy(s3cfg, values)
if err != nil {
glog.Errorf("PutUserPolicy: %+v", err)
writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
s3err.WriteErrorResponse(w, s3err.ErrInvalidRequest, r)
return
}
case "GetUserPolicy":

@ -437,7 +437,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
errorResponse := ErrorResponse{}
errorResponse.Error.Code = &errNotImplemented.Code
errorResponse.Error.Message = &errNotImplemented.Description
writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML)
s3err.WriteXMLResponse(w, errNotImplemented.HTTPStatusCode, errorResponse)
return
}
if changed {

@ -449,5 +449,5 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
return
}
}
writeSuccessResponseXML(w, encodeResponse(response))
s3err.WriteXMLResponse(w, http.StatusOK, response)
}
@ -12,6 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
"github.com/chrislusf/seaweedfs/weed/s3api"
. "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/chrislusf/seaweedfs/weed/wdclient"
"github.com/gorilla/mux"
"google.golang.org/grpc"

@ -71,7 +72,7 @@ func (iama *IamApiServer) registerRouter(router *mux.Router) {
apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
//
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
apiRouter.NotFoundHandler = http.HandlerFunc(s3err.NotFoundHandler)
}

func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {

@ -95,8 +96,8 @@ func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfigurat

func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
buf := bytes.Buffer{}
if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil {
return fmt.Errorf("S3ConfigurationToText: %s", err)
if err := filer.ProtoToText(&buf, s3cfg); err != nil {
return fmt.Errorf("ProtoToText: %s", err)
}
return pb.WithGrpcFilerClient(
iam.option.FilerGrpcAddress,
@ -11,6 +11,8 @@ import (
"github.com/disintegration/imaging"

"github.com/chrislusf/seaweedfs/weed/glog"

_ "golang.org/x/image/webp"
)

func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {

@ -47,6 +49,9 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
jpeg.Encode(&buf, dstImage, nil)
case ".gif":
gif.Encode(&buf, dstImage, nil)
case ".webp":
// Webp does not have golang encoder.
png.Encode(&buf, dstImage)
}
return bytes.NewReader(buf.Bytes()), dstImage.Bounds().Dx(), dstImage.Bounds().Dy()
} else {
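Aside, not part of the commit: the blank import of golang.org/x/image/webp registers a WebP decoder with the standard image package, but Go has no WebP encoder, which is why the resized output above falls back to PNG. A standalone sketch of that decode-then-encode round trip; the file names are illustrative.

package main

import (
    "image"
    "image/png"
    "log"
    "os"

    _ "golang.org/x/image/webp" // decode support only; there is no Go encoder
)

func main() {
    in, err := os.Open("sample2.webp")
    if err != nil {
        log.Fatal(err)
    }
    defer in.Close()

    img, format, err := image.Decode(in) // format will be "webp"
    if err != nil {
        log.Fatal(err)
    }
    log.Println("decoded", format)

    out, err := os.Create("sample2.png")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()

    // Re-encode as PNG, mirroring the fallback in Resized above.
    if err := png.Encode(out, img); err != nil {
        log.Fatal(err)
    }
}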
weed/images/resizing_test.go (new file, 23 lines)
@ -0,0 +1,23 @@
package images

import (
"bytes"
"io/ioutil"
"os"
"testing"
)

func TestResizing(t *testing.T) {
fname := "sample2.webp"

dat, _ := ioutil.ReadFile(fname)

resized, _, _ := Resized(".webp", bytes.NewReader(dat), 100, 30, "")
buf := new(bytes.Buffer)
buf.ReadFrom(resized)

ioutil.WriteFile("resized1.png", buf.Bytes(), 0644)

os.Remove("resized1.png")

}
Some files were not shown because too many files have changed in this diff.