mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00

Merge branch 'master' into support_ssd_volume
This commit is contained in commit 821c46edf1.
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 1 line changed)
@@ -9,6 +9,7 @@ assignees: ''
 Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
 Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs
+Please ask questions in https://github.com/chrislusf/seaweedfs/discussions

 example of a good issue report:
 https://github.com/chrislusf/seaweedfs/issues/1005
.github/workflows/cleanup.yml (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
name: Cleanup

on:
  push:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest

    steps:

    - name: Delete old release assets
      uses: mknejp/delete-release-assets@v1
      with:
        token: ${{ github.token }}
        tag: dev
        fail-if-no-assets: false
        assets: |
          weed-*
.github/workflows/release.yml (vendored, 22 lines changed)
@@ -11,33 +11,29 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        goos: [linux, windows, darwin, freebsd, netbsd, openbsd ]
-        goarch: ["386", amd64, arm]
+        goos: [linux, windows, darwin, freebsd ]
+        goarch: [amd64, arm]
         exclude:
           - goarch: arm
             goos: darwin
           - goarch: 386
             goos: freebsd
           - goarch: arm
             goos: windows

     steps:

       - name: Check out code into the Go module directory
         uses: actions/checkout@v2

-      - name: Delete old release assets
-        uses: mknejp/delete-release-assets@v1
-        with:
-          token: ${{ github.token }}
-          tag: dev
-          fail-if-no-assets: false
-          assets: |
-            weed-*
+      - name: Wait for the deletion
+        uses: jakejarvis/wait-action@master
+        with:
+          time: '30s'

       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}

       - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@feature/asset-name
+        uses: wangyoucao577/go-release-action@v1.14
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}

@@ -52,7 +48,7 @@ jobs:
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

       - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@feature/asset-name
+        uses: wangyoucao577/go-release-action@v1.14
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
README.md (29 lines changed)
@@ -1,11 +1,14 @@
 # SeaweedFS

+[![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
 [![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs)
 [![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs)
 [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
 [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki)
 [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)

 ![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)

 <h2 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h2>
@@ -67,7 +70,7 @@ Table of Contents
 * Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
 * Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.

-Also, to increase capacity, just add more volume servers by `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally or a different machine. That is it!
+Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, on a different machine, or on thousands of machines. That is it!
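Under the hood, every write in this quick start is the same two-step master/volume protocol: ask the master to assign a file id, then upload the bytes directly to the volume server it returns. A minimal Go sketch of that flow, assuming a local master at localhost:9333 (the struct and the `hello.txt` file name are illustrative; the JSON field names follow the `/dir/assign` response):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"mime/multipart"
	"net/http"
)

// assignment mirrors the JSON returned by the master's /dir/assign endpoint.
type assignment struct {
	Fid string `json:"fid"` // e.g. "3,01637037d6": volume id 3, then file key + cookie
	Url string `json:"url"` // volume server that will store the file
}

func main() {
	// Step 1: ask the master for a file id and a writable volume server.
	resp, err := http.Get("http://localhost:9333/dir/assign")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var a assignment
	if err := json.NewDecoder(resp.Body).Decode(&a); err != nil {
		panic(err)
	}

	// Step 2: upload the content directly to the assigned volume server.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, _ := w.CreateFormFile("file", "hello.txt")
	part.Write([]byte("hello, seaweedfs"))
	w.Close()

	up, err := http.Post("http://"+a.Url+"/"+a.Fid, w.FormDataContentType(), &buf)
	if err != nil {
		panic(err)
	}
	defer up.Body.Close()
	fmt.Println("stored as", a.Fid, "->", up.Status)
}
```

The `curl` equivalents (`curl http://localhost:9333/dir/assign`, then `curl -F file=@hello.txt http://<url>/<fid>`) behave the same way.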

 ## Introduction ##
@@ -76,15 +79,17 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:
 1. to store billions of files!
 2. to serve the files fast!

-SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation).
+SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages volumes on volume servers, and these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata across volume servers, allowing faster file access (O(1), usually just one disk read operation).

-SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity, without any client side changes.
+SeaweedFS can transparently integrate with the cloud. With hot data on the local cluster and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity. What's more, the cloud storage access API cost is minimized: faster and cheaper than direct cloud storage!

 There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.
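That O(1) read works because the file id embeds its volume: a client resolves the volume's location once (the answer is cacheable), then reads straight from the volume server, which finds the file through its in-memory index. A hedged sketch of the read path against the public HTTP API (assuming a master at localhost:9333; the fid value is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// lookupResult mirrors the master's /dir/lookup JSON response.
type lookupResult struct {
	Locations []struct {
		Url string `json:"url"`
	} `json:"locations"`
}

// readFile fetches a blob by file id: one volume lookup plus one GET.
func readFile(master, fid string) ([]byte, error) {
	volumeId := strings.SplitN(fid, ",", 2)[0] // "3,01637037d6" -> "3"

	resp, err := http.Get("http://" + master + "/dir/lookup?volumeId=" + volumeId)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var r lookupResult
	if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
		return nil, err
	}
	if len(r.Locations) == 0 {
		return nil, fmt.Errorf("volume %s not found", volumeId)
	}

	// The volume server resolves the fid via its in-memory index (the
	// ~40-byte metadata entry) and serves the content in one disk read.
	get, err := http.Get("http://" + r.Locations[0].Url + "/" + fid)
	if err != nil {
		return nil, err
	}
	defer get.Body.Close()
	return io.ReadAll(get.Body)
}

func main() {
	data, err := readFile("localhost:9333", "3,01637037d6")
	fmt.Println(len(data), err)
}
```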

 SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)

-On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Cassandra, Elastic Search, LevelDB, MemSql, TiDB, Etcd, CockroachDB, etc.
+On top of the object store, the optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, MemSql, TiDB, Etcd, CockroachDB, etc.

+For any distributed key value store, large values can be offloaded to SeaweedFS. With its fast access speed and linearly scalable capacity, SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore].

 [Back to TOC](#table-of-contents)
@@ -117,7 +122,10 @@ On top of the object store, optional [Filer] can support directories and POSIX attributes.
 * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
 * [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
 * [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.

+## Kubernetes ##
+* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)
+* [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator)

 [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
 [SuperLargeFiles]: https://github.com/chrislusf/seaweedfs/wiki/Data-Structure-for-Large-Files
@@ -134,6 +142,7 @@ On top of the object store, optional [Filer] can support directories and POSIX attributes.
 [SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver
 [ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization
 [FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication
+[KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store

 [Back to TOC](#table-of-contents)
@@ -335,7 +344,7 @@ Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers.

 With the O(1) access time, the network latency cost is kept at a minimum.

-If the hot~warm data is split as 20~80, with 20 servers, you can achieve storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput.
+If the hot/warm data is split 20/80, then with 20 servers you can achieve the storage capacity of 100 servers: the 20% of data that is hot stays on the 20 local servers, while the 80% that is warm is offloaded to the cloud. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data as well, and get 5X storage throughput.

 [Back to TOC](#table-of-contents)
@@ -365,7 +374,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fast, with a simple and flat architecture.

 * SeaweedFS optimizes for small files, ensuring O(1) disk seek operations, and can also handle large files.
 * SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd etc, and is easy to customized.
+* The SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd, etc., and is easy to customize.
 * The SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.

 | System | File Metadata | File Content Read | POSIX | REST API | Optimized for large number of small files |
@@ -403,11 +412,11 @@ SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects.

 Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated, with mixed reviews.

-Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS places data by assigned volumes.
+Ceph uses CRUSH hashing to automatically manage data placement, which is efficient for locating the data. But the data has to be placed according to the CRUSH algorithm, and any wrong configuration can cause data loss. SeaweedFS places data by assigning it to any writable volume; if a write to one volume fails, just pick another volume to write to. Adding more volumes is as simple as it can be.

 SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.

-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Elastic Search, Cassandra, MemSql, TiDB, CockroachCB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage.
+The SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachDB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage.

 | SeaweedFS | comparable to Ceph | advantage |
 | ------------- | ------------- | ---------------- |
@@ -436,9 +445,7 @@ MinIO has specific requirements on storage layout. It is not flexible to adjust capacity.

 ## Dev Plan ##

-* More tools and documentation, on how to manage and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc.
-* Integrate with Kubernetes. build [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator).
-* Add ftp server.
+* More tools and documentation, on how to manage and scale the system.
 * Read and write stream data.
 * Support structured data.
@@ -37,6 +37,8 @@ EXPOSE 19333
 EXPOSE 9333
 # s3 server http port
 EXPOSE 8333
+# webdav server http port
+EXPOSE 7333

 RUN mkdir -p /data/filerldb2
@@ -1,5 +1,5 @@
 FROM frolvlad/alpine-glibc as builder
-RUN apk add git go g++
+RUN apk add git go g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
 ARG BRANCH=${BRANCH:-master}

@@ -14,6 +14,7 @@ COPY --from=builder /root/go/bin/weed /usr/bin/
 RUN mkdir -p /etc/seaweedfs
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
+RUN apk add fuse # for weed mount

 # volume server grpc port
 EXPOSE 18080

@@ -29,6 +30,8 @@ EXPOSE 19333
 EXPOSE 9333
 # s3 server http port
 EXPOSE 8333
+# webdav server http port
+EXPOSE 7333

 RUN mkdir -p /data/filerldb2
@@ -1,5 +1,5 @@
 FROM frolvlad/alpine-glibc as builder
-RUN apk add git go g++
+RUN apk add git go g++ fuse
 RUN mkdir -p /go/src/github.com/chrislusf/
 RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
 ARG BRANCH=${BRANCH:-master}

@@ -14,6 +14,7 @@ COPY --from=builder /root/go/bin/weed /usr/bin/
 RUN mkdir -p /etc/seaweedfs
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
 COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
+RUN apk add fuse # for weed mount

 # volume server grpc port
 EXPOSE 18080

@@ -29,6 +30,8 @@ EXPOSE 19333
 EXPOSE 9333
 # s3 server http port
 EXPOSE 8333
+# webdav server http port
+EXPOSE 7333

 RUN mkdir -p /data/filerldb2
@@ -4,6 +4,7 @@ COPY ./weed /usr/bin/
 RUN mkdir -p /etc/seaweedfs
 COPY ./filer.toml /etc/seaweedfs/filer.toml
 COPY ./entrypoint.sh /entrypoint.sh
+RUN apk add fuse # for weed mount

 # volume server grpc port
 EXPOSE 18080

@@ -19,6 +20,8 @@ EXPOSE 19333
 EXPOSE 9333
 # s3 server http port
 EXPOSE 8333
+# webdav server http port
+EXPOSE 7333

 RUN mkdir -p /data/filerldb2
@@ -10,16 +10,28 @@ build:
 	rm ./weed

 dev: build
-	docker-compose -f local-dev-compose.yml -p seaweedfs up
+	docker-compose -f compose/local-dev-compose.yml -p seaweedfs up

+dev_mount: build
+	docker-compose -f compose/local-mount-compose.yml -p seaweedfs up

 k8s: build
-	docker-compose -f local-k8s-compose.yml -p seaweedfs up
+	docker-compose -f compose/local-k8s-compose.yml -p seaweedfs up

 dev_registry: build
-	docker-compose -f local-registry-compose.yml -p seaweedfs up
+	docker-compose -f compose/local-registry-compose.yml -p seaweedfs up

+dev_replicate: build
+	docker-compose -f compose/local-replicate-compose.yml -p seaweedfs up

 cluster: build
-	docker-compose -f local-cluster-compose.yml -p seaweedfs up
+	docker-compose -f compose/local-cluster-compose.yml -p seaweedfs up

+2clusters: build
+	docker-compose -f compose/local-clusters-compose.yml -p seaweedfs up

+filer_etcd: build
+	docker stack deploy -c compose/swarm-etcd.yml fs

 clean:
 	rm ./weed
@@ -19,12 +19,32 @@ services:
       - 9335:9335
       - 19335:19335
     command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m3"
-  volume:
+  volume1:
     image: chrislusf/seaweedfs:local
     ports:
       - 8080:8080
       - 18080:18080
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume -publicUrl=localhost:8080'
+    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080'
     depends_on:
       - master0
       - master1
       - master2
+  volume2:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8082:8082
+      - 18082:18082
+    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082'
+    depends_on:
+      - master0
+      - master1
+      - master2
+  volume3:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8083:8083
+      - 18083:18083
+    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083'
+    depends_on:
+      - master0
+      - master1

@@ -39,7 +59,8 @@ services:
       - master0
       - master1
       - master2
-      - volume
+      - volume1
+      - volume2
   s3:
     image: chrislusf/seaweedfs:local
     ports:

@@ -49,5 +70,6 @@ services:
       - master0
       - master1
       - master2
-      - volume
+      - volume1
+      - volume2
       - filer
docker/compose/local-clusters-compose.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
version: '2'

services:
  server1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
      - 8084:8080
      - 18084:18080
      - 8888:8888
      - 18888:18888
    command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1"
  server2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9333
      - 19334:19333
      - 8085:8080
      - 18085:18080
      - 8889:8888
      - 18889:18888
    command: "server -ip=server2 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1"
@@ -12,7 +12,7 @@ services:
     ports:
       - 8080:8080
       - 18080:18080
-    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
+    command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
     depends_on:
       - master
   filer:

@@ -33,3 +33,14 @@ services:
       - master
       - volume
       - filer
+  mount:
+    image: chrislusf/seaweedfs:local
+    privileged: true
+    cap_add:
+      - SYS_ADMIN
+    mem_limit: 4096m
+    command: '-v=4 mount -filer="filer:8888" -dirAutoCreate -dir=/mnt/seaweedfs -cacheCapacityMB=100 -concurrentWriters=128'
+    depends_on:
+      - master
+      - volume
+      - filer
docker/compose/local-k8s-compose.yml (new file, 94 lines)
@@ -0,0 +1,94 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  mysql:
    image: percona/percona-server:5.7
    ports:
      - 3306:3306
    volumes:
      - ./seaweedfs.sql:/docker-entrypoint-initdb.d/seaweedfs.sql
    environment:
      - MYSQL_ROOT_PASSWORD=secret
      - MYSQL_DATABASE=seaweedfs
      - MYSQL_PASSWORD=secret
      - MYSQL_USER=seaweedfs
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    environment:
      - WEED_MYSQL_HOSTNAME=mysql
      - WEED_MYSQL_PORT=3306
      - WEED_MYSQL_DATABASE=seaweedfs
      - WEED_MYSQL_USERNAME=seaweedfs
      - WEED_MYSQL_PASSWORD=secret
      - WEED_MYSQL_ENABLED=true
      - WEED_MYSQL_CONNECTION_MAX_IDLE=5
      - WEED_MYSQL_CONNECTION_MAX_OPEN=75
      # "refresh" connections every 10 minutes, eliminating mysql closing "old" connections
      - WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS=600
      # enable usage of memsql as filer backend
      - WEED_MYSQL_INTERPOLATEPARAMS=true
      - WEED_LEVELDB2_ENABLED=false
    command: '-v 9 filer -master="master:9333"'
    depends_on:
      - master
      - volume
      - mysql
  ingress:
    image: jwilder/nginx-proxy:alpine
    ports:
      - "80:80"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - ./nginx/proxy.conf:/etc/nginx/proxy.conf
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: '-v 9 s3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
    environment:
      - VIRTUAL_HOST=ingress
      - VIRTUAL_PORT=8333
  registry:
    image: registry:2
    environment:
      REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3
      REGISTRY_LOG_LEVEL: "debug"
      REGISTRY_STORAGE: "s3"
      REGISTRY_STORAGE_S3_REGION: "us-east-1"
      REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://ingress"
      REGISTRY_STORAGE_S3_BUCKET: "registry"
      REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
      REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
      REGISTRY_STORAGE_S3_V4AUTH: "true"
      REGISTRY_STORAGE_S3_SECURE: "false"
      REGISTRY_STORAGE_S3_SKIPVERIFY: "true"
      REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/"
      REGISTRY_STORAGE_DELETE_ENABLED: "true"
      REGISTRY_STORAGE_REDIRECT_DISABLE: "true"
      REGISTRY_VALIDATION_DISABLED: "true"
    ports:
      - 5001:5001
    depends_on:
      - s3
      - ingress
docker/compose/local-mount-compose.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 7455:8080
      - 9325:9325
    command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 9326:9326
    command: 'filer -master="master:9333" -metricsPort=9326'
    tty: true
    stdin_open: true
    depends_on:
      - master
      - volume
  mount_1:
    image: chrislusf/seaweedfs:local
    privileged: true
    entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"'
    depends_on:
      - master
      - volume
      - filer
  mount_2:
    image: chrislusf/seaweedfs:local
    privileged: true
    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAccess=publicUrl"'
    depends_on:
      - master
      - volume
      - filer
      - mount_1
docker/compose/local-replicate-compose.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1"
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: '-v=9 filer -master="master:9333"'
    restart: on-failure
    volumes:
      - ./notification.toml:/etc/seaweedfs/notification.toml
    depends_on:
      - master
      - volume
      - rabbitmq
      - replicate
    environment:
      RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
  replicate:
    image: chrislusf/seaweedfs:local
    command: '-v=9 filer.replicate'
    restart: on-failure
    volumes:
      - ./notification.toml:/etc/seaweedfs/notification.toml
      - ./replication.toml:/etc/seaweedfs/replication.toml
    depends_on:
      - rabbitmq
    environment:
      RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/"
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
  rabbitmq:
    image: rabbitmq:3.8.10-management-alpine
    ports:
      - 5672:5672
      - 15671:15671
      - 15672:15672
    environment:
      RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit log_levels [{connection,error},{queue,debug}]"
docker/compose/swarm-etcd.yml (new file, 84 lines)
@@ -0,0 +1,84 @@
# 2021-01-30 16:25:30
version: '3.8'

services:

  etcd:
    image: gasparekatapy/etcd
    networks:
      - net
    deploy:
      mode: replicated
      replicas: 3

  master:
    image: chrislusf/seaweedfs:local
    environment:
      WEED_MASTER_FILER_DEFAULT: "filer:8888"
      WEED_MASTER_SEQUENCER_TYPE: "raft"
    ports:
      - "9333:9333"
      - "19333:19333"
    networks:
      - net
    command:
      - 'master'
      - '-resumeState=true'
      - '-ip=master'
      - '-port=9333'
    deploy:
      mode: replicated
      replicas: 1

  filer:
    image: chrislusf/seaweedfs:local
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      WEED_ETCD_ENABLED: "true"
      WEED_ETCD_SERVERS: "etcd:2379"
    ports:
      - target: 8888
        published: 8888
        protocol: tcp
        mode: host
      - target: 18888
        published: 18888
        protocol: tcp
        mode: host
    networks:
      - net
    command:
      - 'filer'
      - '-ip=filer'
      - '-port=8888'
      - '-port.readonly=28888'
      - '-master=master:9333'
      - '-disableDirListing=true'
    deploy:
      mode: replicated
      replicas: 1

  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - target: 8080
        published: 8080
        protocol: tcp
        mode: host
      - target: 18080
        published: 18080
        protocol: tcp
        mode: host
    networks:
      - net
    command:
      - 'volume'
      - '-mserver=master:9333'
      - '-port=8080'
    deploy:
      mode: global

###########################################################################

networks:
  net:
@@ -1,65 +0,0 @@
-version: '2'
-
-services:
-  master:
-    image: chrislusf/seaweedfs:local
-    ports:
-      - 9333:9333
-      - 19333:19333
-    command: "master -ip=master"
-  volume:
-    image: chrislusf/seaweedfs:local
-    ports:
-      - 8080:8080
-      - 18080:18080
-    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
-    depends_on:
-      - master
-  mysql:
-    image: percona/percona-server:5.7
-    ports:
-      - 3306:3306
-    volumes:
-      - ./seaweedfs.sql:/docker-entrypoint-initdb.d/seaweedfs.sql
-    environment:
-      - MYSQL_ROOT_PASSWORD=secret
-      - MYSQL_DATABASE=seaweedfs
-      - MYSQL_PASSWORD=secret
-      - MYSQL_USER=seaweedfs
-  filer:
-    image: chrislusf/seaweedfs:local
-    ports:
-      - 8888:8888
-      - 18888:18888
-    environment:
-      - WEED_MYSQL_HOSTNAME=mysql
-      - WEED_MYSQL_PORT=3306
-      - WEED_MYSQL_DATABASE=seaweedfs
-      - WEED_MYSQL_USERNAME=seaweedfs
-      - WEED_MYSQL_PASSWORD=secret
-      - WEED_MYSQL_ENABLED=true
-      - WEED_LEVELDB2_ENABLED=false
-    command: 'filer -master="master:9333"'
-    depends_on:
-      - master
-      - volume
-      - mysql
-  ingress:
-    image: jwilder/nginx-proxy
-    ports:
-      - "80:80"
-    volumes:
-      - /var/run/docker.sock:/tmp/docker.sock:ro
-      - /tmp/nginx:/etc/nginx/conf.d
-  s3:
-    image: chrislusf/seaweedfs:local
-    ports:
-      - 8333:8333
-    command: 's3 -filer="filer:8888"'
-    depends_on:
-      - master
-      - volume
-      - filer
-    environment:
-      - VIRTUAL_HOST=s3
-      - VIRTUAL_PORT=8333
docker/nginx/proxy.conf (new file, 30 lines)
@@ -0,0 +1,30 @@
# HTTP 1.1 support
proxy_http_version 1.1;
#proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;

# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";

# aws default max_concurrent_requests 10
# aws default multipart_threshold 8MB
proxy_buffering on; # GET buffering; the "X-Accel-Buffering" response header can also enable or disable buffering
proxy_buffers 64 1m; # buffers used for reading a response from the proxied server, for a single connection
proxy_buffer_size 8k; # maximum size of the data that nginx can receive from the server at a time
proxy_busy_buffers_size 2m;

proxy_request_buffering on; # PUT buffering
client_body_buffer_size 64m; # buffer size for reading client request body
client_max_body_size 64m;

proxy_next_upstream error timeout non_idempotent http_500; # a failed PUT request should be passed to the next server
proxy_connect_timeout 200ms;
proxy_read_timeout 3s; # timeout is set only between two successive read operations
proxy_send_timeout 3s; # timeout is set only between two successive write operations
docker/notification.toml (new file, 17 lines)
@@ -0,0 +1,17 @@
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false


[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = true
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "swexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin: create swexchange of type fanout and swqueue,
# then create the binding swexchange => swqueue.
topic_url = "rabbit://swexchange"
sub_url = "rabbit://swqueue"
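For reference, the `gocdk_pub_sub` notification target above rides on the Go CDK pubsub API that go.mod pulls in below. A hedged sketch of what publishing through it looks like in Go (the rabbit:// URL and RABBIT_SERVER_URL handling follow the gocloud.dev rabbitpubsub driver; the message body is purely illustrative):

```go
package main

import (
	"context"
	"log"

	"gocloud.dev/pubsub"
	_ "gocloud.dev/pubsub/rabbitpubsub" // registers the rabbit:// URL scheme
)

func main() {
	ctx := context.Background()

	// The driver dials the broker named by $RABBIT_SERVER_URL and opens
	// the pre-created exchange "swexchange", matching notification.toml.
	topic, err := pubsub.OpenTopic(ctx, "rabbit://swexchange")
	if err != nil {
		log.Fatal(err)
	}
	defer topic.Shutdown(ctx)

	// weed filer publishes serialized filer event protobufs here; a plain
	// string body is used only for illustration.
	if err := topic.Send(ctx, &pubsub.Message{Body: []byte("hello")}); err != nil {
		log.Fatal(err)
	}
}
```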
docker/prometheus/prometheus.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
global:
  scrape_interval: 30s
  scrape_timeout: 10s

scrape_configs:
  - job_name: services
    metrics_path: /metrics
    static_configs:
      - targets:
          - 'prometheus:9090'
          - 'volume:9325'
          - 'filer:9326'
          - 's3:9327'
docker/replication.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
[source.filer]
enabled = true
grpcAddress = "filer:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"

[sink.local_incremental]
enabled = true
directory = "/data"
@@ -12,7 +12,8 @@ services:
     ports:
       - 8080:8080
       - 18080:18080
+      - 9325:9325
-    command: 'volume -mserver="master:9333" -port=8080'
+    command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325'
     depends_on:
       - master
   filer:

@@ -20,7 +21,8 @@ services:
     ports:
       - 8888:8888
       - 18888:18888
+      - 9326:9326
-    command: 'filer -master="master:9333"'
+    command: 'filer -master="master:9333" -metricsPort=9326'
     tty: true
     stdin_open: true
     depends_on:

@@ -40,8 +42,27 @@ services:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 8333:8333
+      - 9327:9327
-    command: 's3 -filer="filer:8888"'
+    command: 's3 -filer="filer:8888" -metricsPort=9327'
     depends_on:
       - master
       - volume
       - filer
+  webdav:
+    image: chrislusf/seaweedfs # use a remote image
+    ports:
+      - 7333:7333
+    command: 'webdav -filer="filer:8888"'
+    depends_on:
+      - master
+      - volume
+      - filer
+  prometheus:
+    image: prom/prometheus:v2.21.0
+    ports:
+      - 9000:9090
+    volumes:
+      - ./prometheus:/etc/prometheus
+    command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml
+    depends_on:
+      - s3
@@ -33,3 +33,12 @@ services:
       - master
       - volume
       - filer
+  webdav:
+    image: chrislusf/seaweedfs:dev # use a remote dev image
+    ports:
+      - 7333:7333
+    command: 'webdav -filer="filer:8888"'
+    depends_on:
+      - master
+      - volume
+      - filer
go.mod (39 lines changed)
@@ -3,15 +3,18 @@ module github.com/chrislusf/seaweedfs
 go 1.12

 require (
-	cloud.google.com/go v0.44.3
+	cloud.google.com/go v0.58.0
 	cloud.google.com/go/pubsub v1.3.1
 	cloud.google.com/go/storage v1.9.0
 	github.com/Azure/azure-amqp-common-go/v2 v2.1.0 // indirect
 	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
-	github.com/Azure/azure-storage-blob-go v0.8.0
+	github.com/Azure/azure-storage-blob-go v0.9.0
 	github.com/OneOfOne/xxhash v1.2.2
 	github.com/Shopify/sarama v1.23.1
-	github.com/aws/aws-sdk-go v1.33.5
+	github.com/aws/aws-sdk-go v1.34.30
 	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 	github.com/cespare/xxhash v1.1.0
-	github.com/chrislusf/raft v1.0.3
+	github.com/chrislusf/raft v1.0.4
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/disintegration/imaging v1.6.2

@@ -25,11 +28,11 @@ require (
 	github.com/fclairamb/ftpserverlib v0.8.0
 	github.com/frankban/quicktest v1.7.2 // indirect
 	github.com/go-errors/errors v1.1.1 // indirect
-	github.com/go-redis/redis v6.15.7+incompatible
+	github.com/go-redis/redis/v8 v8.4.4
 	github.com/go-sql-driver/mysql v1.5.0
 	github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
 	github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
-	github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
+	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
 	github.com/golang/protobuf v1.4.2
 	github.com/google/btree v1.0.0
 	github.com/google/uuid v1.1.1
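The go-redis bump from v6 to v8 here is source-incompatible: v8 moves to the /v8 import path and every command takes a context.Context. A hedged sketch of the new call shape (address and key are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8" // v6 lived at github.com/go-redis/redis
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// v6 was rdb.Set("key", "value", 0); v8 threads a context through every call.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
	val, err := rdb.Get(ctx, "key").Result()
	fmt.Println(val, err)
}
```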

@@ -39,7 +42,7 @@ require (
 	github.com/jcmturner/gofork v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.10
-	github.com/karlseguin/ccache v2.0.3+incompatible
-	github.com/karlseguin/expect v1.0.1 // indirect
+	github.com/karlseguin/ccache/v2 v2.0.7
 	github.com/klauspost/compress v1.10.9 // indirect
 	github.com/klauspost/cpuid v1.2.1 // indirect
 	github.com/klauspost/crc32 v1.2.0
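The ccache upgrade is a similar module-path move to /v2. A hedged sketch of the v2 API shape (the cache size and key are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v2" // v2.0.3+incompatible lived at github.com/karlseguin/ccache
)

func main() {
	// An LRU cache capped at 1000 items; the limit here is illustrative.
	cache := ccache.New(ccache.Configure().MaxSize(1000))

	cache.Set("vid:3", "127.0.0.1:8080", time.Minute)
	if item := cache.Get("vid:3"); item != nil && !item.Expired() {
		fmt.Println(item.Value())
	}
}
```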
@@ -48,26 +51,27 @@ require (
 	github.com/lib/pq v1.2.0
 	github.com/lunixbochs/vtclean v1.0.0 // indirect
 	github.com/magiconair/properties v1.8.1 // indirect
-	github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
+	github.com/mattn/go-colorable v0.1.2 // indirect
 	github.com/mattn/go-runewidth v0.0.4 // indirect
 	github.com/olivere/elastic/v7 v7.0.19
 	github.com/onsi/ginkgo v1.10.1 // indirect
 	github.com/onsi/gomega v1.7.0 // indirect
 	github.com/peterh/liner v1.1.0
 	github.com/pierrec/lz4 v2.2.7+incompatible // indirect
 	github.com/prometheus/client_golang v1.3.0
 	github.com/rakyll/statik v0.1.7
 	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
-	github.com/seaweedfs/fuse v1.0.8
+	github.com/seaweedfs/fuse v1.1.1
 	github.com/seaweedfs/goexif v1.0.2
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/viper v1.4.0
 	github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71
 	github.com/stretchr/testify v1.6.1
 	github.com/syndtr/goleveldb v1.0.0
 	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
 	github.com/tidwall/gjson v1.3.2
 	github.com/tidwall/match v1.0.1
+	github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
 	github.com/valyala/bytebufferpool v1.0.0
 	github.com/viant/assertly v0.5.4 // indirect
 	github.com/viant/ptrie v0.3.0
@@ -77,16 +81,15 @@ require (
 	github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
 	go.etcd.io/etcd v3.3.15+incompatible
 	go.mongodb.org/mongo-driver v1.3.2
-	gocloud.dev v0.16.0
-	gocloud.dev/pubsub/natspubsub v0.16.0
-	gocloud.dev/pubsub/rabbitpubsub v0.16.0
+	gocloud.dev v0.20.0
+	gocloud.dev/pubsub/natspubsub v0.20.0
+	gocloud.dev/pubsub/rabbitpubsub v0.20.0
 	golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
-	golang.org/x/net v0.0.0-20200202094626-16171245cfb2
+	golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
 	golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
 	golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
-	golang.org/x/tools v0.0.0-20200103221440-774c71fcf114
-	google.golang.org/api v0.9.0
-	google.golang.org/appengine v1.6.2 // indirect
+	golang.org/x/tools v0.0.0-20200608174601-1b747fd94509
+	google.golang.org/api v0.26.0
 	google.golang.org/grpc v1.29.1
 	google.golang.org/protobuf v1.24.0
 	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
go.sum (310 lines changed)
@@ -1,18 +1,52 @@
|
|||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ=
|
||||
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.55.0/go.mod h1:ZHmoY+/lIMNkN2+fBmuTiqZ4inFhvQad8ft7MT8IV5Y=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.58.0 h1:vtAfVc723K3xKq1BQydk/FyCldnaNFhGhpJxaJzgRMQ=
|
||||
cloud.google.com/go v0.58.0/go.mod h1:W+9FnSUw6nhVwXlFcp1eL+krq5+HQUJeUogSeJZZiWg=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.2.0/go.mod h1:iISCjWnTpnoJT1R287xRdjvQHJrxQOpeah4phb5D3h0=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.9.0 h1:oXnZyBjHB6hC8TnSle0AWW6pGJ29EuSo5ww+SFmdNBg=
|
||||
cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU=
|
||||
contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
|
||||
contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
|
||||
contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
|
||||
github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo=
|
||||
|
@ -21,18 +55,42 @@ github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot
|
|||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
|
||||
github.com/Azure/azure-service-bus-go v0.10.1/go.mod h1:E/FOceuKAFUfpbIJDKWz/May6guE+eGibfGT6q+n1to=
|
||||
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
|
||||
github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
|
||||
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
|
||||
github.com/Azure/azure-storage-blob-go v0.9.0 h1:kORqvzXP8ORhKbW13FflGUaSE5CMyDWun9UwMxY8gPs=
|
||||
github.com/Azure/azure-storage-blob-go v0.9.0/go.mod h1:8UBPbiOhrMQ4pLPi3gA1tXnpjrS76UYE/fo5A40vf4g=
|
||||
github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
|
||||
github.com/Azure/go-amqp v0.12.7/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
|
||||
github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
|
||||
github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
|
||||
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
|
||||
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
|
@ -59,8 +117,11 @@ github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo
|
|||
github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.31.13/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
|
||||
github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go v1.34.30 h1:izATc/E0+HcT5YHmaQVjn7GHCoqaBxn0PGo6Zq5UNFA=
|
||||
github.com/aws/aws-sdk-go v1.34.30/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
|
@ -84,8 +145,11 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
|||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chrislusf/raft v1.0.3 h1:11YrnzJtVa5z7m9lhY2p8VcPHoUlC1UswyoAo+U1m1k=
|
||||
github.com/chrislusf/raft v1.0.3/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
|
||||
github.com/chrislusf/raft v1.0.4 h1:THhbsVik2hxdE0/VXX834f64Wn9RzgVPp+E+XCWZdKM=
|
||||
github.com/chrislusf/raft v1.0.4/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
|
@ -114,6 +178,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
|||
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
@@ -157,10 +223,15 @@ github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3B
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg=
github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -170,8 +241,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v8 v8.4.4 h1:fGqgxCTR1sydaKI00oQf3OmkU/DIe/I/fYXvGklCIuc=
github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -179,6 +250,8 @@ github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gG
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -220,14 +293,23 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -251,8 +333,11 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=
github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=
@@ -263,6 +348,11 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -270,6 +360,8 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=
github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
github.com/google/wire v0.4.0 h1:kXcsA/rIGzJImVqPdhfnr6q0xsS9gU0515q1EPpJ9fE=
github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -328,6 +420,7 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
@@ -339,6 +432,10 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5i
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -349,12 +446,16 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU=
github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w=
github.com/karlseguin/ccache/v2 v2.0.7 h1:y5Pfi4eiyYCOD6LS/Kj+o6Nb4M5Ngpw9qFQs+v44ZYM=
github.com/karlseguin/ccache/v2 v2.0.7/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ=
github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -406,6 +507,8 @@ github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HN
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@@ -438,6 +541,8 @@ github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrW
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/jwt v1.0.1 h1:71ivoESdfT2K/qDiw5YwX/3W9/dR7c+m83xiGOj/EZ4=
github.com/nats-io/jwt v1.0.1/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M=
github.com/nats-io/nats-server/v2 v2.0.0/go.mod h1:RyVdsHHvY4B6c9pWG+uRLpZ0h0XsqiuKp2XCTurP5LI=
github.com/nats-io/nats-server/v2 v2.1.2 h1:i2Ly0B+1+rzNZHHWtD4ZwKi+OU5l+uQo1iDHZ2PmiIc=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
@@ -445,14 +550,21 @@ github.com/nats-io/nats.go v1.8.1 h1:6lF/f1/NN6kzUDBz6pyvQDEXO39jqXcWRLu/tKjtOUQ
github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nats.go v1.10.0 h1:L8qnKaofSfNFbXg0C5F71LdjPRnmQwSsA4ukmkt1TvY=
github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE=
github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.2.0 h1:WXKF7diOaPU9cJdLD7nuzwasQy9vT1tBqzXZZf3AMJM=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@@ -462,12 +574,15 @@ github.com/olivere/elastic/v7 v7.0.19/go.mod h1:4Jqt5xvjqpjCqgnTcHwl3j8TLs8mvoOK
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
@@ -539,6 +654,8 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhD
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -547,10 +664,14 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seaweedfs/fuse v1.0.7 h1:tESMXhI3gXzN+dlWsCUrkIZDiWA4dZX18rQMoqmvazw=
github.com/seaweedfs/fuse v1.0.7/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
github.com/seaweedfs/fuse v1.0.8 h1:HBPJTC77OlxwSd2JiTwvLPn8bWTElqQp3xs9vf3C15s=
github.com/seaweedfs/fuse v1.0.8/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
github.com/seaweedfs/fuse v1.0.9 h1:3JZoGsW7cmmrd/U5KYcIGR2+EqyBvCYCoCpEdZAz/Dc=
github.com/seaweedfs/fuse v1.0.9/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/fuse v1.1.0 h1:cL1qPHFNtFv0UuJTLjKKgWDzfJ4iZzTa4Y7ipc2acGw=
github.com/seaweedfs/fuse v1.1.0/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/fuse v1.1.1 h1:WD51YFJcBViOx8I89jeqPD+vAKl4EowzBy9GUw0plb0=
github.com/seaweedfs/fuse v1.1.1/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw=
@@ -562,6 +683,8 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -595,6 +718,8 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRN
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 h1:2MR0pKUzlP3SGgj5NYJe/zRYDwOu9ku6YHy+Iw7l5DM=
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
@@ -612,6 +737,8 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
@@ -621,6 +748,8 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 h1:6iRwZdrFUzbcVYZwa8dXTIILGIxmmhjyUPJEcwzPGaU=
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365/go.mod h1:zj0GJHGvyf1ed3Jm/Tb4830c/ZKDq+YoLsCt2rGQuT0=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -647,6 +776,8 @@ github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
@@ -662,8 +793,11 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw=
go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@@ -683,10 +817,16 @@ go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM=
gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0=
gocloud.dev v0.20.0 h1:mbEKMfnyPV7W1Rj35R1xXfjszs9dXkwSOq2KoFr25g8=
gocloud.dev v0.20.0/go.mod h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc=
gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI=
gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI=
gocloud.dev/pubsub/natspubsub v0.20.0 h1:DsOXYKfcSTh0SHKwuhpQAJmPLDj3+ARvYgBIupVPClE=
gocloud.dev/pubsub/natspubsub v0.20.0/go.mod h1:Zh7v7Q1DZjAoBwsavZLdvinMIO1eYE0PJTllMuX3VGA=
gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E=
gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM=
gocloud.dev/pubsub/rabbitpubsub v0.20.0 h1:hwupxLvWG8jTPNQ+9Q/YWZzyMagL9blTwWYYhoW4pco=
gocloud.dev/pubsub/rabbitpubsub v0.20.0/go.mod h1:xYCXmI3ixWuCW4s1KqyZpgKT90MMjdXdMlb0Kgmd7TM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -703,10 +843,24 @@ golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -715,11 +869,20 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -739,14 +902,33 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -755,6 +937,7 @@ golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -780,15 +963,41 @@ golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd h1:WgqgiQvkiZWz7XLhphjt2GI2GcGCTIZs9jqXMWmH+oc=
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
@@ -816,11 +1025,40 @@ golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -836,6 +1074,17 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.26.0 h1:VJZ8h6E8ip82FRpQl848c5vAadxlTXrUh8RzQzSRm08=
google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -844,6 +1093,8 @@ google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -855,8 +1106,30 @@ google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dT
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200325114520-5b2d0af7952b/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 h1:i+Aiej6cta/Frzp13/swvwz5O00kYcSe0A/C5Wd7zX8=
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -871,6 +1144,8 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -921,6 +1196,11 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -930,8 +1210,18 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o=
modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE=
modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=

@@ -12,9 +12,12 @@ with ENV.
### current instances config (AIO):
1 instance for each type (master/filer/volume/s3)

instances need node labels:
To avoid multiple volume servers on the same node, apply these node labels:
* sw-volume: true (for volume instance, specific tag)
* sw-backend: true (for all others, as they less resource demanding)
```
kubectl label node YOUR_NODE_NAME sw-volume=true,sw-backend=true
```

you can update the replicas count for each node type in values.yaml,
need to add more nodes with the corresponding label.

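For reference, the per-component replica counts mentioned above live in values.yaml. A minimal sketch of what that configuration could look like follows; the key names are assumptions for illustration, not the chart's verified schema, so check the chart's own values.yaml:

```
# hypothetical values.yaml excerpt -- key names are assumptions
master:
  replicas: 1        # scheduled on nodes labeled sw-backend=true
filer:
  replicas: 1        # scheduled on nodes labeled sw-backend=true
volume:
  replicas: 1        # scheduled on nodes labeled sw-volume=true
s3:
  replicas: 1        # scheduled on nodes labeled sw-backend=true
```

Per the note above, each additional volume replica needs one more node carrying the sw-volume=true label, since volume servers are kept one per labeled node.
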
@@ -1,4 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
version: 2.14
appVersion: "2.24"
version: 2.24

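With appVersion now driving the image tag (see the template changes below), a quick way to confirm the bump took effect is to render the chart locally and inspect the image lines. A command-line sketch, assuming the chart directory is ./seaweedfs and a Helm 3 client:

```
# render the chart without installing and check that images carry the 2.24 tag
helm template my-release ./seaweedfs | grep 'image:'
```
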
@@ -52,12 +52,12 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}

{{/* Return the proper postgresqlSchema image */}}
{{/* Return the proper dbSchema image */}}
{{- define "filer.dbSchema.image" -}}
{{- if .Values.filer.dbSchema.imageOverride -}}
{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
@@ -80,7 +80,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@@ -94,7 +94,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@@ -108,7 +108,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
@@ -122,7 +122,7 @@ Inject extra environment vars in the format key:value, if populated
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- $tag := .Chart.AppVersion | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}

@@ -6,6 +6,7 @@ metadata:
name: {{ include "seaweedfs.fullname" . }}-cronjob
spec:
schedule: "{{ .Values.cronjob.schedule }}"
startingDeadlineSeconds: 200
concurrencyPolicy: Forbid
failedJobsHistoryLimit: 2
successfulJobsHistoryLimit: 2
@@ -14,13 +15,13 @@ spec:
backoffLimit: 2
template:
spec:
{{- with .Values.cronjob.nodeSelector }}
{{- if .Values.cronjob.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{ tpl .Values.cronjob.nodeSelector . | indent 12 | trim }}
{{- end }}
{{- with .Values.cronjob.tolerations }}
{{- if .Values.cronjob.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{ tpl .Values.cronjob.tolerations . | nindent 12 | trim }}
{{- end }}
restartPolicy: OnFailure
containers:
@@ -35,10 +36,13 @@ spec:
- |
set -ex
echo -e "lock\n\
volume.balance -force\
volume.balance -force \
{{ if .Values.volume.dataCenter }} -dataCenter {{ .Values.volume.dataCenter }}{{ end }}\
{{ if .Values.cronjob.collection }} -collection {{ .Values.cronjob.collection }}{{ end }}\n\
volume.fix.replication\nunlock\n" | \
{{- if .Values.cronjob.enableFixReplication }}
volume.fix.replication {{ if .Values.cronjob.collectionPattern }} -collectionPattern={{ .Values.cronjob.collectionPattern }} {{ end }} \n\
{{- end }}
unlock\n" | \
/usr/bin/weed shell \
{{- if .Values.cronjob.master }}
-master {{ .Values.cronjob.master }} \

30
k8s/seaweedfs/templates/filer-service-client.yaml
Normal file
@@ -0,0 +1,30 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-filer-client
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "seaweedfs.name" . }}
component: filer
{{- if .Values.filer.metricsPort }}
monitoring: "true"
{{- end }}
spec:
ports:
- name: "swfs-filer"
port: {{ .Values.filer.port }}
targetPort: {{ .Values.filer.port }}
protocol: TCP
- name: "swfs-filer-grpc"
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
{{- if .Values.filer.metricsPort }}
- name: "metrics"
port: {{ .Values.filer.metricsPort }}
targetPort: {{ .Values.filer.metricsPort }}
protocol: TCP
{{- end }}
selector:
app: {{ template "seaweedfs.name" . }}
component: filer

@@ -1,6 +1,8 @@
apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
name: {{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
labels:
@@ -8,6 +10,7 @@ metadata:
component: filer
spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: "swfs-filer"
port: {{ .Values.filer.port }}
@@ -17,12 +20,6 @@ spec:
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
{{- if .Values.filer.metricsPort }}
- name: "swfs-filer-metrics"
port: {{ .Values.filer.metricsPort }}
targetPort: {{ .Values.filer.metricsPort }}
protocol: TCP
{{- end }}
selector:
app: {{ template "seaweedfs.name" . }}
component: filer

@@ -87,6 +87,12 @@ spec:
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
@@ -127,14 +133,36 @@ spec:
-encryptVolumeData \
{{- end }}
-ip=${POD_IP} \
{{- if .Values.filer.enable_peers }}
{{- if gt (.Values.filer.replicas | int) 1 }}
-peers={{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}
-peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; 's/^,//'; s/,,/,/;' ) \
{{- end }}
{{- end }}
{{- if .Values.filer.s3.enabled }}
-s3 \
-s3.port={{ .Values.filer.s3.port }} \
{{- if .Values.filer.s3.domainName }}
-s3.domainName={{ .Values.filer.s3.domainName }} \
{{- end }}
{{- if .Values.global.enableSecurity }}
-s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if .Values.filer.s3.allowEmptyFolder }}
-s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
-s3.config=/etc/sw/seaweedfs_s3_config \
{{- end }}
{{- end }}
-master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
{{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
volumeMounts:
- name: seaweedfs-filer-log-volume
mountPath: "/logs/"
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@@ -172,7 +200,7 @@ spec:
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 3
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /
@@ -182,7 +210,7 @@ spec:
periodSeconds: 30
successThreshold: 1
failureThreshold: 5
timeoutSeconds: 3
timeoutSeconds: 10
{{- if .Values.filer.resources }}
resources:
{{ tpl .Values.filer.resources . | nindent 12 | trim }}
@@ -192,6 +220,13 @@ spec:
hostPath:
path: /storage/logs/seaweedfs/filer
type: DirectoryOrCreate
- name: db-schema-config-volume
configMap:
name: seaweedfs-db-init-config
- name: config-users
secret:
defaultMode: 420
secretName: seaweedfs-s3-secret
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:

@@ -10,6 +10,7 @@ metadata:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: "swfs-master"
port: {{ .Values.master.port }}

@@ -76,6 +76,12 @@ spec:
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
@@ -157,7 +163,7 @@ spec:
periodSeconds: 45
successThreshold: 2
failureThreshold: 100
timeoutSeconds: 5
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /cluster/status
@@ -167,7 +173,7 @@ spec:
periodSeconds: 30
successThreshold: 1
failureThreshold: 4
timeoutSeconds: 5
timeoutSeconds: 10
{{- if .Values.master.resources }}
resources:
{{ tpl .Values.master.resources . | nindent 12 | trim }}

@@ -59,11 +59,17 @@ spec:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
- |
exec /usr/bin/weed \
exec /usr/bin/weed -logdir=/logs \
{{- if .Values.s3.loggingOverrideLevel }}
-v={{ .Values.s3.loggingOverrideLevel }} \
{{- else }}
@@ -81,9 +87,19 @@ spec:
{{- if .Values.s3.domainName }}
-domainName={{ .Values.s3.domainName }} \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.port }}
{{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }}
{{- if .Values.s3.allowEmptyFolder }}
-allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.s3.enableAuth }}
-config=/etc/sw/seaweedfs_s3_config \
{{- end }}
-filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }}
volumeMounts:
- name: logs
mountPath: "/logs/"
- mountPath: /etc/sw
name: config-users
readOnly: true
{{- if .Values.global.enableSecurity }}
- name: security-config
readOnly: true
@@ -106,7 +122,6 @@ spec:
mountPath: /usr/local/share/ca-certificates/client/
{{- end }}
{{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }}
{{- end }}
ports:
- containerPort: {{ .Values.s3.port }}
name: swfs-s3
@@ -119,7 +134,7 @@ spec:
periodSeconds: 15
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 3
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /
@@ -129,12 +144,22 @@ spec:
periodSeconds: 60
successThreshold: 1
failureThreshold: 20
timeoutSeconds: 3
timeoutSeconds: 10
{{- if .Values.s3.resources }}
resources:
{{ tpl .Values.s3.resources . | nindent 12 | trim }}
{{- end }}
volumes:
- name: config-users
secret:
defaultMode: 420
secretName: seaweedfs-s3-secret
{{- if eq .Values.s3.logs.type "hostPath" }}
- name: logs
hostPath:
path: /storage/logs/seaweedfs/s3
type: DirectoryOrCreate
{{- end }}
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:

@@ -9,15 +9,15 @@ metadata:
spec:
ports:
- name: "swfs-s3"
port: {{ .Values.s3.port }}
targetPort: {{ .Values.s3.port }}
port: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
targetPort: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
protocol: TCP
{{- if .Values.s3.metricsPort }}
- name: "swfs-s3-metrics"
{{- if and .Values.s3.enabled .Values.s3.metricsPort }}
- name: "metrics"
port: {{ .Values.s3.metricsPort }}
targetPort: {{ .Values.s3.metricsPort }}
protocol: TCP
{{- end }}
selector:
app: {{ template "seaweedfs.name" . }}
component: s3
component: {{ if .Values.s3.enabled }}s3{{ else }}filer{{ end }}

@@ -64,6 +64,12 @@ spec:
fieldPath: status.hostIP
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
@@ -80,6 +86,9 @@ spec:
-metricsPort {{ .Values.volume.metricsPort }} \
{{- end }}
-dir={{ .Values.volume.dir }} \
{{- if .Values.volume.dir_idx }}
-dir.idx={{ .Values.volume.dir_idx }} \
{{- end }}
-max={{ .Values.volume.maxVolumes }} \
{{- if .Values.volume.rack }}
-rack={{ .Values.volume.rack }} \
@@ -149,7 +158,7 @@ spec:
periodSeconds: 90
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 5
timeoutSeconds: 30
livenessProbe:
httpGet:
path: /status
@@ -159,7 +168,7 @@ spec:
periodSeconds: 90
successThreshold: 1
failureThreshold: 4
timeoutSeconds: 5
timeoutSeconds: 30
{{- if .Values.volume.resources }}
resources:
{{ tpl .Values.volume.resources . | nindent 12 | trim }}

@@ -4,7 +4,7 @@ global:
registry: ""
repository: ""
imageName: chrislusf/seaweedfs
imageTag: "2.16"
# imageTag: "2.24" - started using {.Chart.appVersion}
imagePullPolicy: IfNotPresent
imagePullSecrets: imagepullsecret
restartPolicy: Always
@@ -21,6 +21,10 @@ global:
# Y number of replica in other racks in the same data center
# Z number of replica in other servers in the same rack
replicationPlacment: "001"
extraEnvironmentVars:
WEED_CLUSTER_DEFAULT: "sw"
WEED_CLUSTER_SW_MASTER: "seaweedfs-master:9333"
WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client:8888"

image:
registry: ""
@@ -132,7 +136,7 @@ volume:
# limit file size to avoid out of memory, default 256mb
fileSizeLimitMB: null
# minimum free disk space (in percent); if free disk space drops below this value, all volumes are marked read-only
minFreeSpacePercent: 1
minFreeSpacePercent: 7

# limit background compaction or copying speed in megabytes per second
@@ -140,6 +144,8 @@ volume:

# Directories to store data files. dir[,dir]... (default "/tmp")
dir: "/data"
# Directories to store index files. dir[,dir]... (default "/tmp")
dir_idx: null

# Maximum numbers of volumes, count[,count]...
# If set to zero on non-windows OS, the limit will be auto configured. (default "7")
@@ -223,6 +229,8 @@ filer:
maxMB: null
# encrypt data on volume servers
encryptVolumeData: false
# enable metadata sync between peers, for leveldb (a local db for the filer, synced across replicas)
enable_peers: false

# Whether proxy or redirect to volume server during file GET request
redirectOnRead: false
@@ -292,8 +300,10 @@ filer:
WEED_MYSQL_HOSTNAME: "mysql-db-host"
WEED_MYSQL_PORT: "3306"
WEED_MYSQL_DATABASE: "sw_database"
WEED_MYSQL_CONNECTION_MAX_IDLE: "10"
WEED_MYSQL_CONNECTION_MAX_OPEN: "150"
WEED_MYSQL_CONNECTION_MAX_IDLE: "5"
WEED_MYSQL_CONNECTION_MAX_OPEN: "75"
# "refresh" connection every 10 minutes, eliminating mysql closing "old" connections
WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS: "600"
# enable usage of memsql as filer backend
WEED_MYSQL_INTERPOLATEPARAMS: "true"
WEED_LEVELDB2_ENABLED: "false"
@@ -303,8 +313,19 @@ filer:
# directories under this folder automatically become separate buckets
WEED_FILER_BUCKETS_FOLDER: "/buckets"

s3:
s3:
enabled: true
port: 8333
# allow empty folders
allowEmptyFolder: false
# Suffix of the host name, {bucket}.{domainName}
domainName: ""
# enable user & permission to s3 (need to inject to all services)
enableAuth: false
skipAuthSecretCreation: false

s3:
enabled: false
repository: null
imageName: null
imageTag: null
@@ -313,6 +334,11 @@ s3:
port: 8333
metricsPort: 9327
loggingOverrideLevel: null
# allow empty folders
allowEmptyFolder: true
# enable user & permission to s3 (need to inject to all services)
enableAuth: false
skipAuthSecretCreation: false

# Suffix of the host name, {bucket}.{domainName}
domainName: ""
@@ -343,18 +369,27 @@ s3:
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""

logs:
type: "hostPath"
size: ""
storageClass: ""

cronjob:
enabled: false
enabled: true
master: "seaweedfs-master:9333"
filer: "seaweedfs-filer-client:8888"
tolerations: ""
nodeSelector: |
sw-backend: "true"
replication:
enable: true
collectionPattern: ""
schedule: "*/7 * * * *"
resources: null
# balance all volumes among volume servers
# ALL|EACH_COLLECTION|<collection_name>
collection: ""
master: ""
filer: ""
tolerations: ""
nodeSelector: |
sw-backend: "true"

certificates:
commonName: "SeaweedFS CA"

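For context, a sketch of what the `global.extraEnvironmentVars` block above renders to in each container spec, via the `range` loops added to the master, filer, s3, and volume templates earlier in this diff (values shown are the chart defaults; Helm's `range` walks map keys in sorted order, so the rendered order may differ from values.yaml):

```
env:
  - name: WEED_CLUSTER_DEFAULT
    value: "sw"
  - name: WEED_CLUSTER_SW_FILER
    value: "seaweedfs-filer-client:8888"
  - name: WEED_CLUSTER_SW_MASTER
    value: "seaweedfs-master:9333"
```
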
@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.5.6</version>
<version>1.6.1</version>

<parent>
<groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.5.6</version>
<version>1.6.1</version>

<parent>
<groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.5.6</version>
<version>1.6.1</version>

<parent>
<groupId>org.sonatype.oss</groupId>

@@ -23,7 +23,7 @@ public class FileChunkManifest {
}

public static List<FilerProto.FileChunk> resolveChunkManifest(
final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> chunks) throws IOException {
final FilerClient filerClient, List<FilerProto.FileChunk> chunks) throws IOException {

List<FilerProto.FileChunk> dataChunks = new ArrayList<>();

@@ -35,30 +35,30 @@ public class FileChunkManifest {

// IsChunkManifest
LOG.debug("fetching chunk manifest:{}", chunk);
byte[] data = fetchChunk(filerGrpcClient, chunk);
byte[] data = fetchChunk(filerClient, chunk);
FilerProto.FileChunkManifest m = FilerProto.FileChunkManifest.newBuilder().mergeFrom(data).build();
List<FilerProto.FileChunk> resolvedChunks = new ArrayList<>();
for (FilerProto.FileChunk t : m.getChunksList()) {
// avoid deprecated chunk.getFileId()
resolvedChunks.add(t.toBuilder().setFileId(FilerClient.toFileId(t.getFid())).build());
}
dataChunks.addAll(resolveChunkManifest(filerGrpcClient, resolvedChunks));
dataChunks.addAll(resolveChunkManifest(filerClient, resolvedChunks));
}

return dataChunks;
}

private static byte[] fetchChunk(final FilerGrpcClient filerGrpcClient, FilerProto.FileChunk chunk) throws IOException {
private static byte[] fetchChunk(final FilerClient filerClient, FilerProto.FileChunk chunk) throws IOException {

String vid = "" + chunk.getFid().getVolumeId();
FilerProto.Locations locations = filerGrpcClient.vidLocations.get(vid);
FilerProto.Locations locations = filerClient.vidLocations.get(vid);
if (locations == null) {
FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder();
lookupRequest.addVolumeIds(vid);
FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
FilerProto.LookupVolumeResponse lookupResponse = filerClient
.getBlockingStub().lookupVolume(lookupRequest.build());
locations = lookupResponse.getLocationsMapMap().get(vid);
filerGrpcClient.vidLocations.put(vid, locations);
filerClient.vidLocations.put(vid, locations);
LOG.debug("fetchChunk vid:{} locations:{}", vid, locations);
}

@@ -74,7 +74,7 @@ public class FileChunkManifest {
byte[] chunkData = SeaweedRead.chunkCache.getChunk(chunkView.fileId);
if (chunkData == null) {
LOG.debug("doFetchFullChunkData:{}", chunkView);
chunkData = SeaweedRead.doFetchFullChunkData(chunkView, locations);
chunkData = SeaweedRead.doFetchFullChunkData(filerClient, chunkView, locations);
}
if (chunk.getIsChunkManifest()){
LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length);
@@ -86,7 +86,7 @@ public class FileChunkManifest {
}

public static List<FilerProto.FileChunk> maybeManifestize(
final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
final FilerClient filerClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
// the return variable
List<FilerProto.FileChunk> chunks = new ArrayList<>();

@@ -101,7 +101,7 @@ public class FileChunkManifest {

int remaining = dataChunks.size();
for (int i = 0; i + mergeFactor < dataChunks.size(); i += mergeFactor) {
FilerProto.FileChunk chunk = mergeIntoManifest(filerGrpcClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
FilerProto.FileChunk chunk = mergeIntoManifest(filerClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
chunks.add(chunk);
remaining -= mergeFactor;
}
@@ -113,7 +113,7 @@ public class FileChunkManifest {
return chunks;
}

private static FilerProto.FileChunk mergeIntoManifest(final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
private static FilerProto.FileChunk mergeIntoManifest(final FilerClient filerClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
// create and serialize the manifest
dataChunks = FilerClient.beforeEntrySerialization(dataChunks);
FilerProto.FileChunkManifest.Builder m = FilerProto.FileChunkManifest.newBuilder().addAllChunks(dataChunks);
@@ -127,8 +127,8 @@ public class FileChunkManifest {
}

FilerProto.FileChunk.Builder manifestChunk = SeaweedWrite.writeChunk(
filerGrpcClient.getReplication(),
filerGrpcClient,
filerClient.getReplication(),
filerClient,
minOffset,
data, 0, data.length, parentDirectory);
manifestChunk.setIsChunkManifest(true);

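To make the new signatures concrete, a minimal sketch of resolving manifest chunks with a FilerClient, assuming FileChunkManifest lives in the same seaweedfs.client package as the rest of the client; the host, port, and path are placeholders:

```java
import java.util.List;

import seaweedfs.client.FileChunkManifest;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;

public class ResolveManifestSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder filer address; 18888 is the gRPC port used in the examples below.
        FilerClient filerClient = new FilerClient("localhost", 18888);
        FilerProto.Entry entry = filerClient.lookupEntry("/", "test.zip");
        if (entry != null) {
            // Expands any manifest chunks into their underlying data chunks,
            // recursing exactly as resolveChunkManifest() above does.
            List<FilerProto.FileChunk> dataChunks =
                    FileChunkManifest.resolveChunkManifest(filerClient, entry.getChunksList());
            System.out.println("data chunks: " + dataChunks.size());
        }
    }
}
```
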
@@ -11,18 +11,12 @@ import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class FilerClient {
public class FilerClient extends FilerGrpcClient {

private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);

private final FilerGrpcClient filerGrpcClient;

public FilerClient(String host, int grpcPort) {
filerGrpcClient = new FilerGrpcClient(host, grpcPort);
}

public FilerClient(FilerGrpcClient filerGrpcClient) {
this.filerGrpcClient = filerGrpcClient;
super(host, grpcPort);
}

public static String toFileId(FilerProto.FileId fid) {
@@ -236,7 +230,7 @@ public class FilerClient {
}

public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit, boolean includeLastEntry) {
Iterator<FilerProto.ListEntriesResponse> iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
Iterator<FilerProto.ListEntriesResponse> iter = this.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
.setDirectory(path)
.setPrefix(entryPrefix)
.setStartFromFileName(lastEntryName)
@@ -253,7 +247,7 @@ public class FilerClient {

public FilerProto.Entry lookupEntry(String directory, String entryName) {
try {
FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
FilerProto.Entry entry = this.getBlockingStub().lookupDirectoryEntry(
FilerProto.LookupDirectoryEntryRequest.newBuilder()
.setDirectory(directory)
.setName(entryName)
@@ -274,7 +268,7 @@ public class FilerClient {
public boolean createEntry(String parent, FilerProto.Entry entry) {
try {
FilerProto.CreateEntryResponse createEntryResponse =
filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
this.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
.setDirectory(parent)
.setEntry(entry)
.build());
@@ -291,7 +285,7 @@ public class FilerClient {

public boolean updateEntry(String parent, FilerProto.Entry entry) {
try {
filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
this.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
.setDirectory(parent)
.setEntry(entry)
.build());
@@ -304,7 +298,7 @@ public class FilerClient {

public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive, boolean ignoreRecusiveError) {
try {
filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
this.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
.setDirectory(parent)
.setName(entryName)
.setIsDeleteData(isDeleteFileChunk)
@@ -320,7 +314,7 @@ public class FilerClient {

public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) {
try {
filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
this.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
.setOldDirectory(oldParent)
.setOldName(oldName)
.setNewDirectory(newParent)
@@ -334,7 +328,7 @@ public class FilerClient {
}

public Iterator<FilerProto.SubscribeMetadataResponse> watch(String prefix, String clientName, long sinceNs) {
return filerGrpcClient.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder()
return this.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder()
.setPathPrefix(prefix)
.setClientName(clientName)
.setSinceNs(sinceNs)

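Since FilerClient now extends FilerGrpcClient, one object carries both the gRPC plumbing and the higher-level entry operations. A minimal usage sketch built only from the constructor and listEntries signature shown above (host, port, and paths are placeholders):

```java
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;

public class FilerClientSketch {
    public static void main(String[] args) {
        // One constructor instead of wrapping a separate FilerGrpcClient.
        FilerClient filerClient = new FilerClient("localhost", 18888);

        // List up to 100 entries in the root directory.
        for (FilerProto.Entry entry : filerClient.listEntries("/", "", "", 100, false)) {
            System.out.println(entry.getName());
        }
    }
}
```
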
@@ -9,8 +9,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLException;
import java.util.Map;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class FilerGrpcClient {
@@ -26,6 +26,9 @@ public class FilerGrpcClient {
}
}

public final int VOLUME_SERVER_ACCESS_DIRECT = 0;
public final int VOLUME_SERVER_ACCESS_PUBLIC_URL = 1;
public final int VOLUME_SERVER_ACCESS_FILER_PROXY = 2;
public final Map<String, FilerProto.Locations> vidLocations = new HashMap<>();
private final ManagedChannel channel;
private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
@@ -34,6 +37,8 @@ public class FilerGrpcClient {
private boolean cipher = false;
private String collection = "";
private String replication = "";
private int volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT;
private String filerAddress;

public FilerGrpcClient(String host, int grpcPort) {
this(host, grpcPort, sslContext);
@@ -49,6 +54,8 @@ public class FilerGrpcClient {
.negotiationType(NegotiationType.TLS)
.sslContext(sslContext));

filerAddress = String.format("%s:%d", host, grpcPort - 10000);

FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
this.getBlockingStub().getFilerConfiguration(
FilerProto.GetFilerConfigurationRequest.newBuilder().build());
@@ -58,7 +65,7 @@ public class FilerGrpcClient {

}

public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
private FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
channel = channelBuilder.build();
blockingStub = SeaweedFilerGrpc.newBlockingStub(channel);
asyncStub = SeaweedFilerGrpc.newStub(channel);
@@ -93,4 +100,39 @@ public class FilerGrpcClient {
return futureStub;
}

public void setAccessVolumeServerDirectly() {
this.volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT;
}

public boolean isAccessVolumeServerDirectly() {
return this.volumeServerAccess == VOLUME_SERVER_ACCESS_DIRECT;
}

public void setAccessVolumeServerByPublicUrl() {
this.volumeServerAccess = VOLUME_SERVER_ACCESS_PUBLIC_URL;
}

public boolean isAccessVolumeServerByPublicUrl() {
return this.volumeServerAccess == VOLUME_SERVER_ACCESS_PUBLIC_URL;
}

public void setAccessVolumeServerByFilerProxy() {
this.volumeServerAccess = VOLUME_SERVER_ACCESS_FILER_PROXY;
}

public boolean isAccessVolumeServerByFilerProxy() {
return this.volumeServerAccess == VOLUME_SERVER_ACCESS_FILER_PROXY;
}

public String getChunkUrl(String chunkId, String url, String publicUrl) {
switch (this.volumeServerAccess) {
case VOLUME_SERVER_ACCESS_PUBLIC_URL:
return String.format("http://%s/%s", publicUrl, chunkId);
case VOLUME_SERVER_ACCESS_FILER_PROXY:
return String.format("http://%s/?proxyChunkId=%s", this.filerAddress, chunkId);
default:
return String.format("http://%s/%s", url, chunkId);
}
}

}

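A hedged sketch of the new volume-server access modes. With filer-proxy access, chunk traffic is routed through the filer (`/?proxyChunkId=...`) instead of contacting volume servers directly, which helps when clients cannot reach the volume-server network. Per the constructor above, the filer HTTP address is derived as the gRPC port minus 10000 (18888 becomes 8888); the addresses and chunk id below are placeholders:

```java
import seaweedfs.client.FilerClient;

public class AccessModeSketch {
    public static void main(String[] args) {
        FilerClient filerClient = new FilerClient("localhost", 18888);

        // Default mode: URLs point straight at the volume server.
        String direct = filerClient.getChunkUrl("3,01637037d6", "vol-host:8080", "vol-public:8080");
        // -> http://vol-host:8080/3,01637037d6

        filerClient.setAccessVolumeServerByFilerProxy();
        String proxied = filerClient.getChunkUrl("3,01637037d6", "vol-host:8080", "vol-public:8080");
        // -> http://localhost:8888/?proxyChunkId=3,01637037d6

        System.out.println(direct);
        System.out.println(proxied);
    }
}
```
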
@@ -1,28 +1,22 @@
package seaweed.hdfs;
package seaweedfs.client;

// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream

import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.List;

public class SeaweedInputStream extends FSInputStream implements ByteBufferReadable {
public class SeaweedInputStream extends InputStream {

private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);
private static final IOException EXCEPTION_STREAM_IS_CLOSED = new IOException("Stream is closed!");

private final FilerGrpcClient filerGrpcClient;
private final Statistics statistics;
private final FilerClient filerClient;
private final String path;
private final FilerProto.Entry entry;
private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
@@ -33,17 +27,31 @@ public class SeaweedInputStream extends FSInputStream implements ByteBufferReada
private boolean closed = false;

public SeaweedInputStream(
final FilerGrpcClient filerGrpcClient,
final Statistics statistics,
final FilerClient filerClient,
final String fullpath) throws IOException {
this.path = fullpath;
this.filerClient = filerClient;
this.entry = filerClient.lookupEntry(
SeaweedOutputStream.getParentDirectory(fullpath),
SeaweedOutputStream.getFileName(fullpath));
this.contentLength = SeaweedRead.fileSize(entry);

this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());

LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);

}

public SeaweedInputStream(
final FilerClient filerClient,
final String path,
final FilerProto.Entry entry) throws IOException {
this.filerGrpcClient = filerGrpcClient;
this.statistics = statistics;
this.filerClient = filerClient;
this.path = path;
this.entry = entry;
this.contentLength = SeaweedRead.fileSize(entry);

this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());
this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());

LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);

@@ -86,7 +94,6 @@ public class SeaweedInputStream extends FSInputStream implements ByteBufferReada
}

// implement ByteBufferReadable
@Override
public synchronized int read(ByteBuffer buf) throws IOException {

if (position < 0) {
@@ -102,7 +109,7 @@ public class SeaweedInputStream extends FSInputStream implements ByteBufferReada
if (start+len <= entry.getContent().size()) {
entry.getContent().substring(start, start+len).copyTo(buf);
} else {
bytesRead = SeaweedRead.read(this.filerGrpcClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
bytesRead = SeaweedRead.read(this.filerClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
}

if (bytesRead > Integer.MAX_VALUE) {
@@ -111,45 +118,32 @@ public class SeaweedInputStream extends FSInputStream implements ByteBufferReada

if (bytesRead > 0) {
this.position += bytesRead;
if (statistics != null) {
statistics.incrementBytesRead(bytesRead);
}
}

return (int) bytesRead;
}

/**
* Seek to given position in stream.
*
* @param n position to seek to
* @throws IOException if there is an error
* @throws EOFException if attempting to seek past end of file
*/
@Override
public synchronized void seek(long n) throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
throw EXCEPTION_STREAM_IS_CLOSED;
}
if (n < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
throw new EOFException("Cannot seek to a negative offset");
}
if (n > contentLength) {
throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
throw new EOFException("Attempted to seek or read past the end of the file");
}

this.position = n;

}

@Override
public synchronized long skip(long n) throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
throw EXCEPTION_STREAM_IS_CLOSED;
}
if (this.position == contentLength) {
if (n > 0) {
throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
throw new EOFException("Attempted to seek or read past the end of the file");
}
}
long newPos = this.position + n;
@@ -177,10 +171,9 @@ public class SeaweedInputStream extends FSInputStream implements ByteBufferReada
@Override
public synchronized int available() throws IOException {
if (closed) {
throw new IOException(
FSExceptionMessages.STREAM_IS_CLOSED);
throw EXCEPTION_STREAM_IS_CLOSED;
}
final long remaining = this.contentLength - this.getPos();
final long remaining = this.contentLength - this.position;
return remaining <= Integer.MAX_VALUE
? (int) remaining : Integer.MAX_VALUE;
}
@@ -195,65 +188,21 @@ public class SeaweedInputStream extends FSInputStream implements ByteBufferReada
*/
public long length() throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
throw EXCEPTION_STREAM_IS_CLOSED;
}
return contentLength;
}

/**
* Return the current offset from the start of the file
*
* @throws IOException throws {@link IOException} if there is an error
*/
@Override
public synchronized long getPos() throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
throw EXCEPTION_STREAM_IS_CLOSED;
}
return position;
}

/**
* Seeks a different copy of the data. Returns true if
* found a new source, false otherwise.
*
* @throws IOException throws {@link IOException} if there is an error
*/
@Override
public boolean seekToNewSource(long l) throws IOException {
return false;
}

@Override
public synchronized void close() throws IOException {
closed = true;
}

/**
* Not supported by this stream. Throws {@link UnsupportedOperationException}
*
* @param readlimit ignored
*/
@Override
public synchronized void mark(int readlimit) {
throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
}

/**
* Not supported by this stream. Throws {@link UnsupportedOperationException}
*/
@Override
public synchronized void reset() throws IOException {
throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
}

/**
* gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false.
*
* @return always {@code false}
*/
@Override
public boolean markSupported() {
return false;
}
}

@@ -1,16 +1,9 @@
package seaweed.hdfs;
package seaweedfs.client;

// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.ByteBufferPool;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedWrite;

import java.io.IOException;
import java.io.InterruptedIOException;
@@ -18,21 +11,19 @@ import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.*;

import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;

public class SeaweedOutputStream extends OutputStream {

private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);

private final FilerGrpcClient filerGrpcClient;
private final Path path;
protected final boolean supportFlush = true;
private final FilerClient filerClient;
private final String path;
private final int bufferSize;
private final int maxConcurrentRequestCount;
private final ThreadPoolExecutor threadExecutor;
private final ExecutorCompletionService<Void> completionService;
private final FilerProto.Entry.Builder entry;
private final boolean supportFlush = false; // true;
private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
private final boolean shouldSaveMetadata = false;
private FilerProto.Entry.Builder entry;
private long position;
private boolean closed;
private volatile IOException lastError;
@@ -42,9 +33,17 @@ public class SeaweedOutputStream extends OutputStream {
private long outputIndex;
private String replication = "000";

public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
public SeaweedOutputStream(FilerClient filerClient, final String fullpath) {
this(filerClient, fullpath, "000");
}

public SeaweedOutputStream(FilerClient filerClient, final String fullpath, final String replication) {
this(filerClient, fullpath, null, 0, 8 * 1024 * 1024, "000");
}

public SeaweedOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
final long position, final int bufferSize, final String replication) {
this.filerGrpcClient = filerGrpcClient;
this.filerClient = filerClient;
this.replication = replication;
this.path = path;
this.position = position;
@@ -67,12 +66,50 @@ public class SeaweedOutputStream extends OutputStream {
this.completionService = new ExecutorCompletionService<>(this.threadExecutor);

this.entry = entry;
if (this.entry == null) {
long now = System.currentTimeMillis() / 1000L;

this.entry = FilerProto.Entry.newBuilder()
.setName(getFileName(path))
.setIsDirectory(false)
.setAttributes(FilerProto.FuseAttributes.newBuilder()
.setFileMode(0755)
.setReplication(replication)
.setCrtime(now)
.setMtime(now)
.clearGroupName()
);
}

}

public static String getParentDirectory(String path) {
int protoIndex = path.indexOf("://");
if (protoIndex >= 0) {
int pathStart = path.indexOf("/", protoIndex+3);
path = path.substring(pathStart);
}
if (path.equals("/")) {
return path;
}
int lastSlashIndex = path.lastIndexOf("/");
if (lastSlashIndex == 0) {
return "/";
}
return path.substring(0, lastSlashIndex);
}

public static String getFileName(String path) {
if (path.indexOf("/") < 0) {
return path;
}
int lastSlashIndex = path.lastIndexOf("/");
return path.substring(lastSlashIndex + 1);
}

private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
try {
SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
} catch (Exception ex) {
throw new IOException(ex);
}
@@ -89,7 +126,9 @@ public class SeaweedOutputStream extends OutputStream {
throws IOException {
maybeThrowLastError();

Preconditions.checkArgument(data != null, "null data");
if (data == null) {
return;
}

if (off < 0 || length < 0 || length > data.length - off) {
throw new IndexOutOfBoundsException();
@@ -152,7 +191,7 @@ public class SeaweedOutputStream extends OutputStream {
flushInternal();
threadExecutor.shutdown();
} finally {
lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
lastError = new IOException("Stream is closed!");
ByteBufferPool.release(buffer);
buffer = null;
outputIndex = 0;
@@ -162,6 +201,7 @@ public class SeaweedOutputStream extends OutputStream {
threadExecutor.shutdownNow();
}
}

}

private synchronized void writeCurrentBufferToService() throws IOException {
@@ -185,7 +225,7 @@ public class SeaweedOutputStream extends OutputStream {
}
final Future<Void> job = completionService.submit(() -> {
// System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path.toUri().getPath());
SeaweedWrite.writeData(entry, replication, filerClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path);
// System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
ByteBufferPool.release(bufferToWrite);
return null;
@@ -239,13 +279,13 @@ public class SeaweedOutputStream extends OutputStream {
}
}

private synchronized void flushInternal() throws IOException {
protected synchronized void flushInternal() throws IOException {
maybeThrowLastError();
writeCurrentBufferToService();
flushWrittenBytesToService();
}

private synchronized void flushInternalAsync() throws IOException {
protected synchronized void flushInternalAsync() throws IOException {
maybeThrowLastError();
writeCurrentBufferToService();
flushWrittenBytesToServiceAsync();
@@ -278,10 +318,6 @@ public class SeaweedOutputStream extends OutputStream {
private final long length;

WriteOperation(final Future<Void> task, final long startOffset, final long length) {
Preconditions.checkNotNull(task, "task");
Preconditions.checkArgument(startOffset >= 0, "startOffset");
Preconditions.checkArgument(length >= 0, "length");

this.task = task;
this.startOffset = startOffset;
this.length = length;

@@ -23,7 +23,7 @@ public class SeaweedRead {
static VolumeIdCache volumeIdCache = new VolumeIdCache(4 * 1024);

// returns bytesRead
public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
public static long read(FilerClient filerClient, List<VisibleInterval> visibleIntervals,
final long position, final ByteBuffer buf, final long fileSize) throws IOException {

List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, buf.remaining());
@@ -42,7 +42,7 @@ public class SeaweedRead {
}

if (lookupRequest.getVolumeIdsCount() > 0) {
FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
FilerProto.LookupVolumeResponse lookupResponse = filerClient
.getBlockingStub().lookupVolume(lookupRequest.build());
Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
for (Map.Entry<String, FilerProto.Locations> entry : vid2Locations.entrySet()) {
@@ -71,7 +71,7 @@ public class SeaweedRead {
return 0;
}

int len = readChunkView(startOffset, buf, chunkView, locations);
int len = readChunkView(filerClient, startOffset, buf, chunkView, locations);

LOG.debug("read [{},{}) {} size {}", startOffset, startOffset + len, chunkView.fileId, chunkView.size);

@@ -93,12 +93,12 @@ public class SeaweedRead {
return readCount;
}

private static int readChunkView(long startOffset, ByteBuffer buf, ChunkView chunkView, FilerProto.Locations locations) throws IOException {
private static int readChunkView(FilerClient filerClient, long startOffset, ByteBuffer buf, ChunkView chunkView, FilerProto.Locations locations) throws IOException {

byte[] chunkData = chunkCache.getChunk(chunkView.fileId);

if (chunkData == null) {
chunkData = doFetchFullChunkData(chunkView, locations);
chunkData = doFetchFullChunkData(filerClient, chunkView, locations);
chunkCache.setChunk(chunkView.fileId, chunkData);
}

@@ -110,13 +110,13 @@ public class SeaweedRead {
return len;
}

public static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {
public static byte[] doFetchFullChunkData(FilerClient filerClient, ChunkView chunkView, FilerProto.Locations locations) throws IOException {

byte[] data = null;
IOException lastException = null;
for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) {
for (FilerProto.Location location : locations.getLocationsList()) {
String url = String.format("http://%s/%s", location.getUrl(), chunkView.fileId);
String url = filerClient.getChunkUrl(chunkView.fileId, location.getUrl(), location.getPublicUrl());
try {
data = doFetchOneFullChunkData(chunkView, url);
lastException = null;
@@ -145,7 +145,7 @@ public class SeaweedRead {

}

public static byte[] doFetchOneFullChunkData(ChunkView chunkView, String url) throws IOException {
private static byte[] doFetchOneFullChunkData(ChunkView chunkView, String url) throws IOException {

HttpGet request = new HttpGet(url);

@@ -221,9 +221,9 @@ public class SeaweedRead {
}

public static List<VisibleInterval> nonOverlappingVisibleIntervals(
final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> chunkList) throws IOException {
final FilerClient filerClient, List<FilerProto.FileChunk> chunkList) throws IOException {

chunkList = FileChunkManifest.resolveChunkManifest(filerGrpcClient, chunkList);
chunkList = FileChunkManifest.resolveChunkManifest(filerClient, chunkList);

FilerProto.FileChunk[] chunks = chunkList.toArray(new FilerProto.FileChunk[0]);
Arrays.sort(chunks, new Comparator<FilerProto.FileChunk>() {

@@ -23,41 +23,41 @@ public class SeaweedWrite {

public static void writeData(FilerProto.Entry.Builder entry,
final String replication,
final FilerGrpcClient filerGrpcClient,
final FilerClient filerClient,
final long offset,
final byte[] bytes,
final long bytesOffset, final long bytesLength,
final String path) throws IOException {
FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength, path);
replication, filerClient, offset, bytes, bytesOffset, bytesLength, path);
synchronized (entry) {
entry.addChunks(chunkBuilder);
}
}

public static FilerProto.FileChunk.Builder writeChunk(final String replication,
final FilerGrpcClient filerGrpcClient,
final FilerClient filerClient,
final long offset,
final byte[] bytes,
final long bytesOffset,
final long bytesLength,
final String path) throws IOException {
FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
FilerProto.AssignVolumeResponse response = filerClient.getBlockingStub().assignVolume(
FilerProto.AssignVolumeRequest.newBuilder()
.setCollection(filerGrpcClient.getCollection())
.setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
.setCollection(filerClient.getCollection())
.setReplication(replication == null ? filerClient.getReplication() : replication)
.setDataCenter("")
.setTtlSec(0)
.setPath(path)
.build());
String fileId = response.getFileId();
String url = response.getUrl();
String auth = response.getAuth();
String targetUrl = String.format("http://%s/%s", url, fileId);

String targetUrl = filerClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl());

ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
byte[] cipherKey = null;
if (filerGrpcClient.isCipher()) {
if (filerClient.isCipher()) {
cipherKey = genCipherKey();
cipherKeyString = ByteString.copyFrom(cipherKey);
}
@@ -75,15 +75,15 @@ public class SeaweedWrite {
.setCipherKey(cipherKeyString);
}

public static void writeMeta(final FilerGrpcClient filerGrpcClient,
public static void writeMeta(final FilerClient filerClient,
final String parentDirectory,
final FilerProto.Entry.Builder entry) throws IOException {

synchronized (entry) {
List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList(), parentDirectory);
List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerClient, entry.getChunksList(), parentDirectory);
entry.clearChunks();
entry.addAllChunks(chunks);
filerGrpcClient.getBlockingStub().createEntry(
filerClient.getBlockingStub().createEntry(
FilerProto.CreateEntryRequest.newBuilder()
.setDirectory(parentDirectory)
.setEntry(entry)

@@ -11,13 +11,13 @@
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.5.6</version>
<version>1.6.1</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-hadoop2-client</artifactId>
<version>1.5.6</version>
<version>1.6.1</version>
<scope>compile</scope>
</dependency>
<dependency>

@@ -1,8 +1,7 @@
 package com.seaweedfs.examples;

-import seaweed.hdfs.SeaweedInputStream;
 import seaweedfs.client.FilerClient;
-import seaweedfs.client.FilerGrpcClient;
+import seaweedfs.client.SeaweedInputStream;

 import java.io.FileInputStream;
 import java.io.IOException;
@@ -10,12 +9,11 @@ import java.io.InputStream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;

-public class UnzipFile {
+public class ExampleReadFile {

     public static void main(String[] args) throws IOException {

-        FilerGrpcClient filerGrpcClient = new FilerGrpcClient("localhost", 18888);
-        FilerClient filerClient = new FilerClient(filerGrpcClient);
+        FilerClient filerClient = new FilerClient("localhost", 18888);

         long startTime = System.currentTimeMillis();
         parseZip("/Users/chris/tmp/test.zip");
@@ -25,11 +23,7 @@ public class UnzipFile {
         long localProcessTime = startTime2 - startTime;

-        SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
-                filerGrpcClient,
-                new org.apache.hadoop.fs.FileSystem.Statistics(""),
-                "/",
-                filerClient.lookupEntry("/", "test.zip")
-        );
+        SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
+                filerClient, "/test.zip");
         parseZip(seaweedInputStream);

         long swProcessTime = System.currentTimeMillis() - startTime2;
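With the merged FilerClient, opening a file for reading no longer requires a FilerGrpcClient, a Hadoop Statistics object, or an explicit entry lookup. A minimal read loop against the two-argument constructor shown in this diff; the class name ExampleReadBytes is hypothetical:

package com.seaweedfs.examples;

import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedInputStream;

import java.io.IOException;
import java.io.InputStream;

public class ExampleReadBytes {
    public static void main(String[] args) throws IOException {
        // 18888 is the filer gRPC port used throughout these examples
        FilerClient filerClient = new FilerClient("localhost", 18888);
        try (InputStream in = new SeaweedInputStream(filerClient, "/test.zip")) {
            byte[] buffer = new byte[8 * 1024];
            long total = 0;
            int n;
            while ((n = in.read(buffer)) != -1) {
                total += n; // just count the bytes instead of processing them
            }
            System.out.println("read " + total + " bytes");
        }
    }
}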
@@ -7,7 +7,7 @@ import java.io.IOException;
 import java.util.Date;
 import java.util.Iterator;

-public class WatchFiles {
+public class ExampleWatchFileChanges {

     public static void main(String[] args) throws IOException {
@@ -0,0 +1,47 @@ (new file)
package com.seaweedfs.examples;

import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedInputStream;
import seaweedfs.client.SeaweedOutputStream;

import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

public class ExampleWriteFile {

    public static void main(String[] args) throws IOException {

        FilerClient filerClient = new FilerClient("localhost", 18888);

        SeaweedInputStream seaweedInputStream = new SeaweedInputStream(filerClient, "/test.zip");
        unZipFiles(filerClient, seaweedInputStream);

    }

    public static void unZipFiles(FilerClient filerClient, InputStream is) throws IOException {
        ZipInputStream zin = new ZipInputStream(is);
        ZipEntry ze;
        while ((ze = zin.getNextEntry()) != null) {

            String filename = ze.getName();
            if (filename.indexOf("/") >= 0) {
                filename = filename.substring(filename.lastIndexOf("/") + 1);
            }
            if (filename.length()==0) {
                continue;
            }

            SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerClient, "/test/"+filename);
            byte[] bytesIn = new byte[16 * 1024];
            int read = 0;
            while ((read = zin.read(bytesIn))!=-1) {
                seaweedOutputStream.write(bytesIn,0,read);
            }
            seaweedOutputStream.close();

            System.out.println(ze.getName());
        }
    }
}
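For the reverse direction, the same two-argument SeaweedOutputStream constructor used above is all that is needed to stream a local file into SeaweedFS. A minimal sketch; the class name and file paths are illustrative:

package com.seaweedfs.examples;

import seaweedfs.client.FilerClient;
import seaweedfs.client.SeaweedOutputStream;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ExampleCopyLocalFile {
    public static void main(String[] args) throws IOException {
        FilerClient filerClient = new FilerClient("localhost", 18888);

        try (InputStream in = new FileInputStream("/tmp/report.pdf"); // any local file
             SeaweedOutputStream out = new SeaweedOutputStream(filerClient, "/test/report.pdf")) {
            byte[] buffer = new byte[16 * 1024];
            int read;
            while ((read = in.read(buffer)) != -1) {
                out.write(buffer, 0, read); // close() flushes the chunks and commits the metadata
            }
        }
    }
}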
@@ -0,0 +1,25 @@ (new file)
package com.seaweedfs.examples;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.*;

public class HdfsCopyFile {
    public static void main(String[] args) throws IOException {
        Configuration configuration = new Configuration();

        configuration.set("fs.defaultFS", "seaweedfs://localhost:8888");
        configuration.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");

        FileSystem fs = FileSystem.get(configuration);
        String source = "/Users/chris/tmp/test.zip";
        String destination = "/buckets/spark/test01.zip";
        InputStream in = new BufferedInputStream(new FileInputStream(source));

        OutputStream out = fs.create(new Path(destination));
        IOUtils.copyBytes(in, out, 4096, true);
    }
}
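Reading the copied file back goes through the same Configuration; only FileSystem.open() differs. A hypothetical companion sketch using standard Hadoop APIs (class name and local path are assumptions):

package com.seaweedfs.examples;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class HdfsReadFile {
    public static void main(String[] args) throws IOException {
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "seaweedfs://localhost:8888");
        configuration.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");

        FileSystem fs = FileSystem.get(configuration);
        InputStream in = fs.open(new Path("/buckets/spark/test01.zip"));
        OutputStream out = new BufferedOutputStream(new FileOutputStream("/tmp/test01.zip"));
        IOUtils.copyBytes(in, out, 4096, true); // closes both streams when done
    }
}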
@@ -301,7 +301,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
@@ -26,6 +26,7 @@ public class SeaweedFileSystem extends FileSystem {
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
     public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
+    public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access";
     public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;

     private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
@@ -18,27 +18,31 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;

-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE;
-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE;
+import static seaweed.hdfs.SeaweedFileSystem.*;

 public class SeaweedFileSystemStore {

     private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);

-    private FilerGrpcClient filerGrpcClient;
     private FilerClient filerClient;
+    private Configuration conf;

     public SeaweedFileSystemStore(String host, int port, Configuration conf) {
         int grpcPort = 10000 + port;
-        filerGrpcClient = new FilerGrpcClient(host, grpcPort);
-        filerClient = new FilerClient(filerGrpcClient);
+        filerClient = new FilerClient(host, grpcPort);
+        this.conf = conf;
+        String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
+        if (volumeServerAccessMode.equals("publicUrl")) {
+            filerClient.setAccessVolumeServerByPublicUrl();
+        } else if (volumeServerAccessMode.equals("filerProxy")) {
+            filerClient.setAccessVolumeServerByFilerProxy();
+        }

     }

     public void close() {
         try {
-            this.filerGrpcClient.shutdown();
+            this.filerClient.shutdown();
         } catch (InterruptedException e) {
             e.printStackTrace();
         }
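The constructor above wires the new fs.seaweed.volume.server.access property into FilerClient. Selecting a mode from a Hadoop client is then a one-line configuration change; a minimal sketch based only on the keys and values visible in this diff (class name is hypothetical):

package com.seaweedfs.examples;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

import java.io.IOException;

public class HdfsAccessModeConfig {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "seaweedfs://localhost:8888");
        conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
        // "direct" (default), "publicUrl", or "filerProxy", per the constructor above
        conf.set("fs.seaweed.volume.server.access", "filerProxy");
        FileSystem fs = FileSystem.get(conf);
        System.out.println(fs.getUri());
    }
}

The "filerProxy" mode is useful when clients cannot reach volume servers directly; "publicUrl" is for clients outside the cluster network that can reach the volume servers' advertised public addresses.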
@@ -213,10 +217,10 @@ public class SeaweedFileSystemStore {
                 .clearGroupName()
                 .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
             );
-            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+            SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
         }

-        return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);
+        return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication);

     }

@@ -230,7 +234,7 @@ public class SeaweedFileSystemStore {
             throw new FileNotFoundException("read non-exist file " + path);
         }

-        return new SeaweedInputStream(filerGrpcClient,
+        return new SeaweedHadoopInputStream(filerClient,
             statistics,
             path.toUri().getPath(),
             entry);
@@ -0,0 +1,150 @@ (new file)
package seaweed.hdfs;

// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream

import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedInputStream;

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;

public class SeaweedHadoopInputStream extends FSInputStream implements ByteBufferReadable {

    private final SeaweedInputStream seaweedInputStream;
    private final Statistics statistics;

    public SeaweedHadoopInputStream(
            final FilerClient filerClient,
            final Statistics statistics,
            final String path,
            final FilerProto.Entry entry) throws IOException {
        this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry);
        this.statistics = statistics;
    }

    @Override
    public int read() throws IOException {
        return seaweedInputStream.read();
    }

    @Override
    public int read(final byte[] b, final int off, final int len) throws IOException {
        return seaweedInputStream.read(b, off, len);
    }

    // implement ByteBufferReadable
    @Override
    public synchronized int read(ByteBuffer buf) throws IOException {
        int bytesRead = seaweedInputStream.read(buf);

        if (bytesRead > 0) {
            if (statistics != null) {
                statistics.incrementBytesRead(bytesRead);
            }
        }

        return bytesRead;
    }

    /**
     * Seek to given position in stream.
     *
     * @param n position to seek to
     * @throws IOException  if there is an error
     * @throws EOFException if attempting to seek past end of file
     */
    @Override
    public synchronized void seek(long n) throws IOException {
        seaweedInputStream.seek(n);
    }

    @Override
    public synchronized long skip(long n) throws IOException {
        return seaweedInputStream.skip(n);
    }

    /**
     * Return the size of the remaining available bytes
     * if the size is less than or equal to {@link Integer#MAX_VALUE},
     * otherwise, return {@link Integer#MAX_VALUE}.
     * <p>
     * This is to match the behavior of DFSInputStream.available(),
     * which some clients may rely on (HBase write-ahead log reading in
     * particular).
     */
    @Override
    public synchronized int available() throws IOException {
        return seaweedInputStream.available();
    }

    /**
     * Returns the length of the file that this stream refers to. Note that the length returned is the length
     * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
     * they wont be reflected in the returned length.
     *
     * @return length of the file.
     * @throws IOException if the stream is closed
     */
    public long length() throws IOException {
        return seaweedInputStream.length();
    }

    /**
     * Return the current offset from the start of the file
     *
     * @throws IOException throws {@link IOException} if there is an error
     */
    @Override
    public synchronized long getPos() throws IOException {
        return seaweedInputStream.getPos();
    }

    /**
     * Seeks a different copy of the data. Returns true if
     * found a new source, false otherwise.
     *
     * @throws IOException throws {@link IOException} if there is an error
     */
    @Override
    public boolean seekToNewSource(long l) throws IOException {
        return false;
    }

    @Override
    public synchronized void close() throws IOException {
        seaweedInputStream.close();
    }

    /**
     * Not supported by this stream. Throws {@link UnsupportedOperationException}
     *
     * @param readlimit ignored
     */
    @Override
    public synchronized void mark(int readlimit) {
        throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
    }

    /**
     * Not supported by this stream. Throws {@link UnsupportedOperationException}
     */
    @Override
    public synchronized void reset() throws IOException {
        throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
    }

    /**
     * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false.
     *
     * @return always {@code false}
     */
    @Override
    public boolean markSupported() {
        return false;
    }
}
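The new wrapper keeps all Hadoop-specific concerns (FSInputStream, Statistics accounting) out of the client-side SeaweedInputStream and adds them by delegation. A generic sketch of the same pattern, with a plain counter standing in for FileSystem.Statistics; the class is illustrative, not part of the client library:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;

// Delegation wrapper: the transport stream stays framework-free and the
// wrapper layers on bookkeeping, mirroring SeaweedHadoopInputStream above.
class CountingInputStream extends FilterInputStream {
    private final AtomicLong bytesRead;

    CountingInputStream(InputStream in, AtomicLong bytesRead) {
        super(in);
        this.bytesRead = bytesRead;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int n = super.read(b, off, len);
        if (n > 0) {
            bytesRead.addAndGet(n); // count only bytes actually delivered
        }
        return n;
    }
}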
@@ -0,0 +1,16 @@ (new file)
package seaweed.hdfs;

// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream

import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedOutputStream;

public class SeaweedHadoopOutputStream extends SeaweedOutputStream {

    public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
                                     final long position, final int bufferSize, final String replication) {
        super(filerClient, path, entry, position, bufferSize, replication);
    }

}
@@ -309,7 +309,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.5.6</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.1</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
@@ -26,6 +26,7 @@ public class SeaweedFileSystem extends FileSystem {
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
     public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
+    public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access";
     public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;

     private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
@@ -18,27 +18,31 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;

-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_BUFFER_SIZE;
-import static seaweed.hdfs.SeaweedFileSystem.FS_SEAWEED_DEFAULT_BUFFER_SIZE;
+import static seaweed.hdfs.SeaweedFileSystem.*;

 public class SeaweedFileSystemStore {

     private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);

-    private FilerGrpcClient filerGrpcClient;
     private FilerClient filerClient;
+    private Configuration conf;

     public SeaweedFileSystemStore(String host, int port, Configuration conf) {
         int grpcPort = 10000 + port;
-        filerGrpcClient = new FilerGrpcClient(host, grpcPort);
-        filerClient = new FilerClient(filerGrpcClient);
+        filerClient = new FilerClient(host, grpcPort);
+        this.conf = conf;
+        String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
+        if (volumeServerAccessMode.equals("publicUrl")) {
+            filerClient.setAccessVolumeServerByPublicUrl();
+        } else if (volumeServerAccessMode.equals("filerProxy")) {
+            filerClient.setAccessVolumeServerByFilerProxy();
+        }

     }

     public void close() {
         try {
-            this.filerGrpcClient.shutdown();
+            this.filerClient.shutdown();
         } catch (InterruptedException e) {
             e.printStackTrace();
         }
@@ -213,10 +217,10 @@ public class SeaweedFileSystemStore {
                 .clearGroupName()
                 .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
             );
-            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
+            SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry);
         }

-        return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);
+        return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication);

    }

@@ -230,7 +234,7 @@ public class SeaweedFileSystemStore {
             throw new FileNotFoundException("read non-exist file " + path);
         }

-        return new SeaweedInputStream(filerGrpcClient,
+        return new SeaweedHadoopInputStream(filerClient,
             statistics,
             path.toUri().getPath(),
             entry);
@@ -0,0 +1,150 @@ (new file)
(The hadoop3 client adds its own copy of SeaweedHadoopInputStream; the 150 lines are identical to the hadoop2 SeaweedHadoopInputStream shown above.)
@@ -0,0 +1,64 @@ (new file)
package seaweed.hdfs;

// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream

import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedOutputStream;

import java.io.IOException;
import java.util.Locale;

public class SeaweedHadoopOutputStream extends SeaweedOutputStream implements Syncable, StreamCapabilities {

    public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
                                     final long position, final int bufferSize, final String replication) {
        super(filerClient, path, entry, position, bufferSize, replication);
    }

    /**
     * Similar to posix fsync, flush out the data in client's user buffer
     * all the way to the disk device (but the disk may have it in its cache).
     *
     * @throws IOException if error occurs
     */
    @Override
    public void hsync() throws IOException {
        if (supportFlush) {
            flushInternal();
        }
    }

    /**
     * Flush out the data in client's user buffer. After the return of
     * this call, new readers will see the data.
     *
     * @throws IOException if any error occurs
     */
    @Override
    public void hflush() throws IOException {
        if (supportFlush) {
            flushInternal();
        }
    }

    /**
     * Query the stream for a specific capability.
     *
     * @param capability string to query the stream support for.
     * @return true for hsync and hflush.
     */
    @Override
    public boolean hasCapability(String capability) {
        switch (capability.toLowerCase(Locale.ENGLISH)) {
            case StreamCapabilities.HSYNC:
            case StreamCapabilities.HFLUSH:
                return supportFlush;
            default:
                return false;
        }
    }

}
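Because supportFlush gates hsync(), hflush(), and hasCapability() alike, callers can probe the capability before relying on durability. A short sketch using Hadoop's standard StreamCapabilities probe; the class name and paths are illustrative:

package com.seaweedfs.examples;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

import java.io.IOException;

public class HsyncProbe {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "seaweedfs://localhost:8888");
        conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");

        FileSystem fs = FileSystem.get(conf);
        try (FSDataOutputStream out = fs.create(new Path("/buckets/spark/wal.log"))) {
            out.writeBytes("entry-1\n");
            // With supportFlush inherited as false, the capability reports false
            // and callers should not treat hsync() as a durability guarantee.
            if (out.hasCapability(StreamCapabilities.HSYNC)) {
                out.hsync();
            }
        }
    }
}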
@@ -1,259 +0,0 @@ (deleted file)
package seaweed.hdfs;

// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream

import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead;

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

public class SeaweedInputStream extends FSInputStream implements ByteBufferReadable {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);

    private final FilerGrpcClient filerGrpcClient;
    private final Statistics statistics;
    private final String path;
    private final FilerProto.Entry entry;
    private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
    private final long contentLength;

    private long position = 0; // cursor of the file

    private boolean closed = false;

    public SeaweedInputStream(
            final FilerGrpcClient filerGrpcClient,
            final Statistics statistics,
            final String path,
            final FilerProto.Entry entry) throws IOException {
        this.filerGrpcClient = filerGrpcClient;
        this.statistics = statistics;
        this.path = path;
        this.entry = entry;
        this.contentLength = SeaweedRead.fileSize(entry);

        this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerGrpcClient, entry.getChunksList());

        LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);

    }

    public String getPath() {
        return path;
    }

    @Override
    public int read() throws IOException {
        byte[] b = new byte[1];
        int numberOfBytesRead = read(b, 0, 1);
        if (numberOfBytesRead < 0) {
            return -1;
        } else {
            return (b[0] & 0xFF);
        }
    }

    @Override
    public int read(final byte[] b, final int off, final int len) throws IOException {

        if (b == null) {
            throw new IllegalArgumentException("null byte array passed in to read() method");
        }
        if (off >= b.length) {
            throw new IllegalArgumentException("offset greater than length of array");
        }
        if (len < 0) {
            throw new IllegalArgumentException("requested read length is less than zero");
        }
        if (len > (b.length - off)) {
            throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
        }

        ByteBuffer buf = ByteBuffer.wrap(b, off, len);
        return read(buf);

    }

    // implement ByteBufferReadable
    @Override
    public synchronized int read(ByteBuffer buf) throws IOException {

        if (position < 0) {
            throw new IllegalArgumentException("attempting to read from negative offset");
        }
        if (position >= contentLength) {
            return -1; // Hadoop prefers -1 to EOFException
        }

        long bytesRead = 0;
        int len = buf.remaining();
        int start = (int) this.position;
        if (start+len <= entry.getContent().size()) {
            entry.getContent().substring(start, start+len).copyTo(buf);
        } else {
            bytesRead = SeaweedRead.read(this.filerGrpcClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry));
        }

        if (bytesRead > Integer.MAX_VALUE) {
            throw new IOException("Unexpected Content-Length");
        }

        if (bytesRead > 0) {
            this.position += bytesRead;
            if (statistics != null) {
                statistics.incrementBytesRead(bytesRead);
            }
        }

        return (int) bytesRead;
    }

    /**
     * Seek to given position in stream.
     *
     * @param n position to seek to
     * @throws IOException  if there is an error
     * @throws EOFException if attempting to seek past end of file
     */
    @Override
    public synchronized void seek(long n) throws IOException {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        if (n < 0) {
            throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
        }
        if (n > contentLength) {
            throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
        }

        this.position = n;

    }

    @Override
    public synchronized long skip(long n) throws IOException {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        if (this.position == contentLength) {
            if (n > 0) {
                throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
            }
        }
        long newPos = this.position + n;
        if (newPos < 0) {
            newPos = 0;
            n = newPos - this.position;
        }
        if (newPos > contentLength) {
            newPos = contentLength;
            n = newPos - this.position;
        }
        seek(newPos);
        return n;
    }

    /**
     * Return the size of the remaining available bytes
     * if the size is less than or equal to {@link Integer#MAX_VALUE},
     * otherwise, return {@link Integer#MAX_VALUE}.
     * <p>
     * This is to match the behavior of DFSInputStream.available(),
     * which some clients may rely on (HBase write-ahead log reading in
     * particular).
     */
    @Override
    public synchronized int available() throws IOException {
        if (closed) {
            throw new IOException(
                    FSExceptionMessages.STREAM_IS_CLOSED);
        }
        final long remaining = this.contentLength - this.getPos();
        return remaining <= Integer.MAX_VALUE
                ? (int) remaining : Integer.MAX_VALUE;
    }

    /**
     * Returns the length of the file that this stream refers to. Note that the length returned is the length
     * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
     * they wont be reflected in the returned length.
     *
     * @return length of the file.
     * @throws IOException if the stream is closed
     */
    public long length() throws IOException {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        return contentLength;
    }

    /**
     * Return the current offset from the start of the file
     *
     * @throws IOException throws {@link IOException} if there is an error
     */
    @Override
    public synchronized long getPos() throws IOException {
        if (closed) {
            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
        }
        return position;
    }

    /**
     * Seeks a different copy of the data. Returns true if
     * found a new source, false otherwise.
     *
     * @throws IOException throws {@link IOException} if there is an error
     */
    @Override
    public boolean seekToNewSource(long l) throws IOException {
        return false;
    }

    @Override
    public synchronized void close() throws IOException {
        closed = true;
    }

    /**
     * Not supported by this stream. Throws {@link UnsupportedOperationException}
     *
     * @param readlimit ignored
     */
    @Override
    public synchronized void mark(int readlimit) {
        throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
    }

    /**
     * Not supported by this stream. Throws {@link UnsupportedOperationException}
     */
    @Override
    public synchronized void reset() throws IOException {
        throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
    }

    /**
     * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false.
     *
     * @return always {@code false}
     */
    @Override
    public boolean markSupported() {
        return false;
    }
}
@@ -1,337 +0,0 @@ (deleted file)
package seaweed.hdfs;

// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.ByteBufferPool;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedWrite;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Locale;
import java.util.concurrent.*;

import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;

public class SeaweedOutputStream extends OutputStream implements Syncable, StreamCapabilities {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);

    private final FilerGrpcClient filerGrpcClient;
    private final Path path;
    private final int bufferSize;
    private final int maxConcurrentRequestCount;
    private final ThreadPoolExecutor threadExecutor;
    private final ExecutorCompletionService<Void> completionService;
    private final FilerProto.Entry.Builder entry;
    private final boolean supportFlush = false; // true;
    private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private long position;
    private boolean closed;
    private volatile IOException lastError;
    private long lastFlushOffset;
    private long lastTotalAppendOffset = 0;
    private ByteBuffer buffer;
    private long outputIndex;
    private String replication = "000";

    public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
                               final long position, final int bufferSize, final String replication) {
        this.filerGrpcClient = filerGrpcClient;
        this.replication = replication;
        this.path = path;
        this.position = position;
        this.closed = false;
        this.lastError = null;
        this.lastFlushOffset = 0;
        this.bufferSize = bufferSize;
        this.buffer = ByteBufferPool.request(bufferSize);
        this.outputIndex = 0;
        this.writeOperations = new ConcurrentLinkedDeque<>();

        this.maxConcurrentRequestCount = Runtime.getRuntime().availableProcessors();

        this.threadExecutor
                = new ThreadPoolExecutor(maxConcurrentRequestCount,
                maxConcurrentRequestCount,
                120L,
                TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        this.completionService = new ExecutorCompletionService<>(this.threadExecutor);

        this.entry = entry;

    }

    private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
        try {
            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
        } catch (Exception ex) {
            throw new IOException(ex);
        }
        this.lastFlushOffset = offset;
    }

    @Override
    public void write(final int byteVal) throws IOException {
        write(new byte[]{(byte) (byteVal & 0xFF)});
    }

    @Override
    public synchronized void write(final byte[] data, final int off, final int length)
            throws IOException {
        maybeThrowLastError();

        Preconditions.checkArgument(data != null, "null data");

        if (off < 0 || length < 0 || length > data.length - off) {
            throw new IndexOutOfBoundsException();
        }

        // System.out.println(path + " write [" + (outputIndex + off) + "," + ((outputIndex + off) + length) + ")");

        int currentOffset = off;
        int writableBytes = bufferSize - buffer.position();
        int numberOfBytesToWrite = length;

        while (numberOfBytesToWrite > 0) {

            if (numberOfBytesToWrite < writableBytes) {
                buffer.put(data, currentOffset, numberOfBytesToWrite);
                outputIndex += numberOfBytesToWrite;
                break;
            }

            // System.out.println(path + " [" + (outputIndex + currentOffset) + "," + ((outputIndex + currentOffset) + writableBytes) + ") " + buffer.capacity());
            buffer.put(data, currentOffset, writableBytes);
            outputIndex += writableBytes;
            currentOffset += writableBytes;
            writeCurrentBufferToService();
            numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            writableBytes = bufferSize - buffer.position();
        }

    }

    /**
     * Flushes this output stream and forces any buffered output bytes to be
     * written out. If any data remains in the payload it is committed to the
     * service. Data is queued for writing and forced out to the service
     * before the call returns.
     */
    @Override
    public void flush() throws IOException {
        if (supportFlush) {
            flushInternalAsync();
        }
    }

    /**
     * Similar to posix fsync, flush out the data in client's user buffer
     * all the way to the disk device (but the disk may have it in its cache).
     *
     * @throws IOException if error occurs
     */
    @Override
    public void hsync() throws IOException {
        if (supportFlush) {
            flushInternal();
        }
    }

    /**
     * Flush out the data in client's user buffer. After the return of
     * this call, new readers will see the data.
     *
     * @throws IOException if any error occurs
     */
    @Override
    public void hflush() throws IOException {
        if (supportFlush) {
            flushInternal();
        }
    }

    /**
     * Query the stream for a specific capability.
     *
     * @param capability string to query the stream support for.
     * @return true for hsync and hflush.
     */
    @Override
    public boolean hasCapability(String capability) {
        switch (capability.toLowerCase(Locale.ENGLISH)) {
            case StreamCapabilities.HSYNC:
            case StreamCapabilities.HFLUSH:
                return supportFlush;
            default:
                return false;
        }
    }

    /**
     * Force all data in the output stream to be written to Azure storage.
     * Wait to return until this is complete. Close the access to the stream and
     * shutdown the upload thread pool.
     * If the blob was created, its lease will be released.
     * Any error encountered caught in threads and stored will be rethrown here
     * after cleanup.
     */
    @Override
    public synchronized void close() throws IOException {
        if (closed) {
            return;
        }

        LOG.debug("close path: {}", path);
        try {
            flushInternal();
            threadExecutor.shutdown();
        } finally {
            lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
            ByteBufferPool.release(buffer);
            buffer = null;
            outputIndex = 0;
            closed = true;
            writeOperations.clear();
            if (!threadExecutor.isShutdown()) {
                threadExecutor.shutdownNow();
            }
        }
    }

    private synchronized void writeCurrentBufferToService() throws IOException {
        if (buffer.position() == 0) {
            return;
        }

        position += submitWriteBufferToService(buffer, position);

        buffer = ByteBufferPool.request(bufferSize);

    }

    private synchronized int submitWriteBufferToService(final ByteBuffer bufferToWrite, final long writePosition) throws IOException {

        bufferToWrite.flip();
        int bytesLength = bufferToWrite.limit() - bufferToWrite.position();

        if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount) {
            waitForTaskToComplete();
        }
        final Future<Void> job = completionService.submit(() -> {
            // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path.toUri().getPath());
            // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            ByteBufferPool.release(bufferToWrite);
            return null;
        });

        writeOperations.add(new WriteOperation(job, writePosition, bytesLength));

        // Try to shrink the queue
        shrinkWriteOperationQueue();

        return bytesLength;

    }

    private void waitForTaskToComplete() throws IOException {
        boolean completed;
        for (completed = false; completionService.poll() != null; completed = true) {
            // keep polling until there is no data
        }

        if (!completed) {
            try {
                completionService.take();
            } catch (InterruptedException e) {
                lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e);
                throw lastError;
            }
        }
    }

    private void maybeThrowLastError() throws IOException {
        if (lastError != null) {
            throw lastError;
        }
    }

    /**
     * Try to remove the completed write operations from the beginning of write
     * operation FIFO queue.
     */
    private synchronized void shrinkWriteOperationQueue() throws IOException {
        try {
            while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) {
                writeOperations.peek().task.get();
                lastTotalAppendOffset += writeOperations.peek().length;
                writeOperations.remove();
            }
        } catch (Exception e) {
            lastError = new IOException(e);
            throw lastError;
        }
    }

    private synchronized void flushInternal() throws IOException {
        maybeThrowLastError();
        writeCurrentBufferToService();
        flushWrittenBytesToService();
    }

    private synchronized void flushInternalAsync() throws IOException {
        maybeThrowLastError();
        writeCurrentBufferToService();
        flushWrittenBytesToServiceAsync();
    }

    private synchronized void flushWrittenBytesToService() throws IOException {
        for (WriteOperation writeOperation : writeOperations) {
            try {
                writeOperation.task.get();
            } catch (Exception ex) {
                lastError = new IOException(ex);
                throw lastError;
            }
        }
        LOG.debug("flushWrittenBytesToService: {} position:{}", path, position);
        flushWrittenBytesToServiceInternal(position);
    }

    private synchronized void flushWrittenBytesToServiceAsync() throws IOException {
        shrinkWriteOperationQueue();

        if (this.lastTotalAppendOffset > this.lastFlushOffset) {
            this.flushWrittenBytesToServiceInternal(this.lastTotalAppendOffset);
        }
    }

    private static class WriteOperation {
        private final Future<Void> task;
        private final long startOffset;
        private final long length;

        WriteOperation(final Future<Void> task, final long startOffset, final long length) {
            Preconditions.checkNotNull(task, "task");
            Preconditions.checkArgument(startOffset >= 0, "startOffset");
            Preconditions.checkArgument(length >= 0, "length");

            this.task = task;
            this.startOffset = startOffset;
            this.length = length;
        }
    }

}
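The class deleted above is superseded by the SeaweedOutputStream now living in the seaweedfs-client module (the new SeaweedHadoopOutputStream subclasses it). Its core pattern survives the move: fill a fixed-size buffer, hand it to a bounded thread pool, and apply back-pressure through an ExecutorCompletionService. A stripped-down Java sketch of that pattern; the names are illustrative, not the relocated implementation:

import java.nio.ByteBuffer;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Buffer-and-submit upload pipeline with bounded concurrency.
class AsyncBufferedUploader {
    private final int maxConcurrent = Runtime.getRuntime().availableProcessors();
    private final ThreadPoolExecutor pool = new ThreadPoolExecutor(
            maxConcurrent, maxConcurrent, 120L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    private final ExecutorCompletionService<Void> completion = new ExecutorCompletionService<>(pool);

    void submit(final ByteBuffer buffer) throws InterruptedException {
        if (pool.getQueue().size() >= maxConcurrent) {
            completion.take(); // back-pressure: wait for one in-flight upload first
        }
        buffer.flip(); // switch the buffer from filling to draining
        completion.submit(() -> {
            // upload buffer contents here (SeaweedWrite.writeData in the real code)
            return null;
        });
    }

    void shutdown() {
        pool.shutdown();
    }
}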
@@ -168,7 +168,7 @@ func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int6
 			size: size,
 		}
 	}
-	if actual := offset.ToAcutalOffset(); actual > maxOffset {
+	if actual := offset.ToActualOffset(); actual > maxOffset {
 		maxOffset = actual
 	}
 	return nil
@@ -15,6 +15,8 @@ var Commands = []*Command{
 	cmdDownload,
 	cmdExport,
 	cmdFiler,
+	cmdFilerCat,
+	cmdFilerMetaTail,
 	cmdFilerReplicate,
 	cmdFilerSynchronize,
 	cmdFix,
@@ -25,7 +27,6 @@ var Commands = []*Command{
 	cmdScaffold,
 	cmdServer,
 	cmdShell,
-	cmdWatch,
 	cmdUpload,
 	cmdVersion,
 	cmdVolume,
@@ -113,7 +113,7 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
 	nv, ok := needleMap.Get(n.Id)
 	glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
 		n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
-	if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToAcutalOffset() == offset {
+	if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset {
 		if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
 			glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
 				n.LastModified, newerThanUnix)
@@ -3,6 +3,7 @@ package command
 import (
 	"fmt"
 	"net/http"
+	"os"
 	"strconv"
 	"strings"
 	"time"
@@ -22,6 +23,8 @@ var (
 	f                  FilerOptions
 	filerStartS3       *bool
 	filerS3Options     S3Options
+	filerStartWebDav   *bool
+	filerWebDavOptions WebDavOption
 )

 type FilerOptions struct {
@@ -42,7 +45,7 @@ type FilerOptions struct {
 	cipher                  *bool
 	peers                   *string
 	metricsHttpPort         *int
-	cacheToFilerLimit       *int
+	saveToFilerLimit        *int
 	defaultLevelDbDirectory *string
 }

@@ -64,7 +67,7 @@ func init() {
 	f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
 	f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
-	f.cacheToFilerLimit = cmdFiler.Flag.Int("cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
+	f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
 	f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")

 	// start s3 on filer
@@ -74,6 +77,16 @@ func init() {
 	filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
 	filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
 	filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
+	filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
+
+	// start webdav on filer
+	filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
+	filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
+	filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
+	filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
+	filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
+	filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
+	filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
 }

 var cmdFiler = &Command{
@@ -113,6 +126,15 @@ func runFiler(cmd *Command, args []string) bool {
 		}()
 	}

+	if *filerStartWebDav {
+		filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
+		filerWebDavOptions.filer = &filerAddress
+		go func() {
+			time.Sleep(2 * time.Second)
+			filerWebDavOptions.startWebDav()
+		}()
+	}
+
 	f.startFiler()

 	return true
@@ -148,7 +170,7 @@ func (fo *FilerOptions) startFiler() {
 		Host:              *fo.ip,
 		Port:              uint32(*fo.port),
 		Cipher:            *fo.cipher,
-		CacheToFilerLimit: int64(*fo.cacheToFilerLimit),
+		SaveToFilerLimit:  *fo.saveToFilerLimit,
 		Filers:            peers,
 	})
 	if nfs_err != nil {
118	weed/command/filer_cat.go	Normal file
@@ -0,0 +1,118 @@ (new file)
package command

import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
	"google.golang.org/grpc"
	"math"
	"net/url"
	"os"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var (
	filerCat FilerCatOptions
)

type FilerCatOptions struct {
	grpcDialOption grpc.DialOption
	filerAddress   string
	filerClient    filer_pb.SeaweedFilerClient
	output         *string
}

func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
	return func(fileId string) (targetUrls []string, err error) {
		vid := filer.VolumeId(fileId)
		resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
			VolumeIds: []string{vid},
		})
		if err != nil {
			return nil, err
		}
		locations := resp.LocationsMap[vid]
		for _, loc := range locations.Locations {
			targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId))
		}
		return
	}
}

func init() {
	cmdFilerCat.Run = runFilerCat // break init cycle
	filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout")
}

var cmdFilerCat = &Command{
	UsageLine: "filer.cat [-o <file>] http://localhost:8888/path/to/file",
	Short:     "copy one file to local",
	Long: `read one file to stdout or write to a file

`,
}

func runFilerCat(cmd *Command, args []string) bool {

	util.LoadConfiguration("security", false)

	if len(args) == 0 {
		return false
	}
	filerSource := args[len(args)-1]

	filerUrl, err := url.Parse(filerSource)
	if err != nil {
		fmt.Printf("The last argument should be a URL on filer: %v\n", err)
		return false
	}
	urlPath := filerUrl.Path
	if strings.HasSuffix(urlPath, "/") {
		fmt.Printf("The last argument should be a file: %v\n", err)
		return false
	}

	filerCat.filerAddress = filerUrl.Host
	filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

	dir, name := util.FullPath(urlPath).DirAndName()

	writer := os.Stdout
	if *filerCat.output != "" {

		fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output)

		f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
		if err != nil {
			fmt.Printf("open file %s: %v\n", *filerCat.output, err)
			return false
		}
		defer f.Close()
		writer = f
	}

	pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.LookupDirectoryEntryRequest{
			Name:      name,
			Directory: dir,
		}
		respLookupEntry, err := filer_pb.LookupEntry(client, request)
		if err != nil {
			return err
		}

		filerCat.filerClient = client

		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)

	})

	return true
}
@@ -94,7 +94,7 @@ func runCopy(cmd *Command, args []string) bool {
 	}
 	urlPath := filerUrl.Path
 	if !strings.HasSuffix(urlPath, "/") {
-		fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err)
+		fmt.Printf("The last argument should be a folder and end with \"/\"\n")
 		return false
 	}
211
weed/command/filer_meta_tail.go
Normal file
211
weed/command/filer_meta_tail.go
Normal file
|
@ -0,0 +1,211 @@
package command

import (
	"context"
	"fmt"
	"github.com/golang/protobuf/jsonpb"
	jsoniter "github.com/json-iterator/go"
	"github.com/olivere/elastic/v7"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
	cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle
}

var cmdFilerMetaTail = &Command{
	UsageLine: "filer.meta.tail [-filer=localhost:8888] [-pathPrefix=/]",
	Short:     "see recent changes on a filer",
	Long: `See recent changes on a filer.

	weed filer.meta.tail -timeAgo=30h | grep truncate
	weed filer.meta.tail -timeAgo=30h | jq .
	weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name

`,
}

var (
	tailFiler   = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port")
	tailTarget  = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
	tailStart   = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
	tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
	esServers   = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://<host:port>")
	esIndex     = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name")
)

func runFilerMetaTail(cmd *Command, args []string) bool {

	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

	var filterFunc func(dir, fname string) bool
	if *tailPattern != "" {
		if strings.Contains(*tailPattern, "/") {
			println("watch path pattern", *tailPattern)
			filterFunc = func(dir, fname string) bool {
				matched, err := filepath.Match(*tailPattern, dir+"/"+fname)
				if err != nil {
					fmt.Printf("error: %v", err)
				}
				return matched
			}
		} else {
			println("watch file pattern", *tailPattern)
			filterFunc = func(dir, fname string) bool {
				matched, err := filepath.Match(*tailPattern, fname)
				if err != nil {
					fmt.Printf("error: %v", err)
				}
				return matched
			}
		}
	}

	shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
		if filterFunc == nil {
			return true
		}
		if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
			return false
		}
		if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
			return true
		}
		if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
			return true
		}
		return false
	}

	jsonpbMarshaler := jsonpb.Marshaler{
		EmitDefaults: false,
	}
	eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
		jsonpbMarshaler.Marshal(os.Stdout, resp)
		fmt.Fprintln(os.Stdout)
		return nil
	}
	if *esServers != "" {
		var err error
		eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex)
		if err != nil {
			fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err)
			return false
		}
	}

	tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
			ClientName: "tail",
			PathPrefix: *tailTarget,
			SinceNs:    time.Now().Add(-*tailStart).UnixNano(),
		})
		if err != nil {
			return fmt.Errorf("listen: %v", err)
		}

		for {
			resp, listenErr := stream.Recv()
			if listenErr == io.EOF {
				return nil
			}
			if listenErr != nil {
				return listenErr
			}
			if !shouldPrint(resp) {
				continue
			}
			if err = eachEntryFunc(resp); err != nil {
				return err
			}
		}

	})
	if tailErr != nil {
		fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
	}

	return true
}

type EsDocument struct {
	Dir         string `json:"dir,omitempty"`
	Name        string `json:"name,omitempty"`
	IsDirectory bool   `json:"isDir,omitempty"`
	Size        uint64 `json:"size,omitempty"`
	Uid         uint32 `json:"uid,omitempty"`
	Gid         uint32 `json:"gid,omitempty"`
	UserName    string `json:"userName,omitempty"`
	Collection  string `json:"collection,omitempty"`
	Crtime      int64  `json:"crtime,omitempty"`
	Mtime       int64  `json:"mtime,omitempty"`
	Mime        string `json:"mime,omitempty"`
}

func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) {
	entry := event.NewEntry
	dir, name := event.NewParentPath, entry.Name
	id := util.Md5String([]byte(util.NewFullPath(dir, name)))
	esEntry := &EsDocument{
		Dir:         dir,
		Name:        name,
		IsDirectory: entry.IsDirectory,
		Size:        entry.Attributes.FileSize,
		Uid:         entry.Attributes.Uid,
		Gid:         entry.Attributes.Gid,
		UserName:    entry.Attributes.UserName,
		Collection:  entry.Attributes.Collection,
		Crtime:      entry.Attributes.Crtime,
		Mtime:       entry.Attributes.Mtime,
		Mime:        entry.Attributes.Mime,
	}
	return esEntry, id
}

func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) {
	options := []elastic.ClientOptionFunc{}
	options = append(options, elastic.SetURL(strings.Split(servers, ",")...))
	options = append(options, elastic.SetSniff(false))
	options = append(options, elastic.SetHealthcheck(false))
	client, err := elastic.NewClient(options...)
	if err != nil {
		return nil, err
	}
	return func(resp *filer_pb.SubscribeMetadataResponse) error {
		event := resp.EventNotification
		if event.OldEntry != nil &&
			(event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) {
			// delete or not update the same file
			dir, name := resp.Directory, event.OldEntry.Name
			id := util.Md5String([]byte(util.NewFullPath(dir, name)))
			println("delete", id)
			_, err := client.Delete().Index(esIndex).Id(id).Do(context.Background())
			return err
		}
		if event.NewEntry != nil {
			// add a new file or update the same file
			esEntry, id := toEsEntry(event)
			value, err := jsoniter.Marshal(esEntry)
			if err != nil {
				return err
			}
			println(string(value))
			_, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background())
			return err
		}
		return nil
	}, nil
}
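As a usage sketch (an editor's illustration, not part of the commit; the Elasticsearch address is a placeholder), the new command either prints events or mirrors them into Elasticsearch:

	weed filer.meta.tail -filer=localhost:8888 -pathPrefix=/buckets -timeAgo=30h -es=http://localhost:9200 -es.index=seaweedfs

Without -es, each SubscribeMetadataResponse is printed as a single JSON line, which is what makes the jq pipelines in the command's help text work.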
@@ -11,10 +11,10 @@ import (
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink"
	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
	"github.com/chrislusf/seaweedfs/weed/replication/sub"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/spf13/viper"
)

func init() {

@@ -98,13 +98,19 @@ func runFilerReplicate(cmd *Command, args []string) bool {
	replicator := replication.NewReplicator(config, "source.filer.", dataSink)

	for {
		key, m, err := notificationInput.ReceiveMessage()
		key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage()
		if err != nil {
			glog.Errorf("receive %s: %+v", key, err)
			if onFailureFn != nil {
				onFailureFn()
			}
			continue
		}
		if key == "" {
			// long poll received no messages
			if onSuccessFn != nil {
				onSuccessFn()
			}
			continue
		}
		if m.OldEntry != nil && m.NewEntry == nil {

@@ -116,14 +122,20 @@ func runFilerReplicate(cmd *Command, args []string) bool {
		}
		if err = replicator.Replicate(context.Background(), key, m); err != nil {
			glog.Errorf("replicate %s: %+v", key, err)
			if onFailureFn != nil {
				onFailureFn()
			}
		} else {
			glog.V(1).Infof("replicated %s", key)
			if onSuccessFn != nil {
				onSuccessFn()
			}
		}
	}

}

func validateOneEnabledInput(config *viper.Viper) {
func validateOneEnabledInput(config *util.ViperProxy) {
	enabledInput := ""
	for _, input := range sub.NotificationInputs {
		if config.GetBool("notification." + input.GetName() + ".enabled") {
|
|||
bDiskType *string
|
||||
aDebug *bool
|
||||
bDebug *bool
|
||||
aProxyByFiler *bool
|
||||
bProxyByFiler *bool
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -45,7 +47,7 @@ var (
|
|||
|
||||
func init() {
|
||||
cmdFilerSynchronize.Run = runFilerSynchronize // break init cycle
|
||||
syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one directional follow if true")
|
||||
syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one directional follow from A to B if true")
|
||||
syncOptions.filerA = cmdFilerSynchronize.Flag.String("a", "", "filer A in one SeaweedFS cluster")
|
||||
syncOptions.filerB = cmdFilerSynchronize.Flag.String("b", "", "filer B in the other SeaweedFS cluster")
|
||||
syncOptions.aPath = cmdFilerSynchronize.Flag.String("a.path", "/", "directory to sync on filer A")
|
||||
|
@ -58,6 +60,8 @@ func init() {
|
|||
syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B")
|
||||
syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd] choose between hard drive or solid state drive on filer A")
|
||||
syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd] choose between hard drive or solid state drive on filer B")
|
||||
syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers")
|
||||
syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers")
|
||||
syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files")
|
||||
syncOptions.bDebug = cmdFilerSynchronize.Flag.Bool("b.debug", false, "debug mode to print out filer B received files")
|
||||
syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file")
|
||||
|
@ -66,8 +70,8 @@ func init() {
|
|||
|
||||
var cmdFilerSynchronize = &Command{
|
||||
UsageLine: "filer.sync -a=<oneFilerHost>:<oneFilerPort> -b=<otherFilerHost>:<otherFilerPort>",
|
||||
Short: "continuously synchronize between two active-active or active-passive SeaweedFS clusters",
|
||||
Long: `continuously synchronize file changes between two active-active or active-passive filers
|
||||
Short: "resumeable continuous synchronization between two active-active or active-passive SeaweedFS clusters",
|
||||
Long: `resumeable continuous synchronization for file changes between two active-active or active-passive filers
|
||||
|
||||
filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content,
|
||||
and write to the other destination. Different from filer.replicate:
|
||||
|
@ -90,8 +94,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
|
|||
|
||||
go func() {
|
||||
for {
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.filerB,
|
||||
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bDiskType, *syncOptions.bDebug)
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
|
||||
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
|
||||
if err != nil {
|
||||
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
|
||||
time.Sleep(1747 * time.Millisecond)
|
||||
|
@ -102,8 +106,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
|
|||
if !*syncOptions.isActivePassive {
|
||||
go func() {
|
||||
for {
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.filerA,
|
||||
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aDiskType, *syncOptions.aDebug)
|
||||
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
|
||||
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
|
||||
if err != nil {
|
||||
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
|
||||
time.Sleep(2147 * time.Millisecond)
|
||||
|
@ -117,8 +121,8 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath, targetFiler, targetPath string,
|
||||
replicationStr, collection string, ttlSec int, diskType string, debug bool) error {
|
||||
func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
|
||||
replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
|
||||
|
||||
// read source filer signature
|
||||
sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler)
|
||||
|
@ -142,9 +146,9 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
|
|||
|
||||
// create filer sink
|
||||
filerSource := &source.FilerSource{}
|
||||
filerSource.DoInitialize(pb.ServerToGrpcAddress(sourceFiler), sourcePath)
|
||||
filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
|
||||
filerSink := &filersink.FilerSink{}
|
||||
filerSink.DoInitialize(pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption)
|
||||
filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
|
||||
filerSink.SetSourceFiler(filerSource)
|
||||
|
||||
processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {
|
||||
|
|
|
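As an illustrative invocation (the hostnames are placeholders), the new -a.filerProxy/-b.filerProxy flags route chunk reads and writes through the filers, which helps when volume servers are not directly reachable across clusters:

	weed filer.sync -a=filer-east:8888 -b=filer-west:8888 -b.filerProxy -isActivePassive

With -isActivePassive only the A-to-B direction runs; otherwise both directions are subscribed, as the two goroutines above show.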
@@ -22,7 +22,7 @@ type MountOptions struct {
	allowOthers *bool
	umaskString *string
	nonempty *bool
	outsideContainerClusterMode *bool
	volumeServerAccess *string
	uidMap *string
	gidMap *string
}

@@ -45,14 +45,14 @@ func init() {
	mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd] choose between hard drive or solid state drive")
	mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
	mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
	mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 0, "limit concurrent goroutine writers if not 0")
	mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 128, "limit concurrent goroutine writers if not 0")
	mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
	mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)")
	mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
	mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
	mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
	mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
	mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access volume servers with publicUrl")
	mountOptions.volumeServerAccess = cmdMount.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl|filerProxy]")
	mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated <local_uid>:<filer_uid>")
	mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated <local_gid>:<filer_gid>")

@@ -59,6 +59,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
	return true
}

	util.LoadConfiguration("security", false)
	// try to connect to filer, filerBucketsPath may be useful later
	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
	var cipher bool

@@ -79,8 +80,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
	dir := util.ResolvePath(*option.dir)
	chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB

	util.LoadConfiguration("security", false)

	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
	if dir == "" {
		fmt.Printf("Please specify the mount directory via \"-dir\"")

@@ -102,9 +101,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
	uid, gid := uint32(0), uint32(0)
	mountMode := os.ModeDir | 0777
	if err == nil {
		mountMode = os.ModeDir | fileInfo.Mode()
		mountMode = os.ModeDir | os.FileMode(0777)&^umask
		uid, gid = util.GetFileUidGid(fileInfo)
		fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode())
		fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, mountMode)
	} else {
		fmt.Printf("can not stat %s\n", dir)
		return false

@@ -152,6 +151,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
		fuse.MaxReadahead(1024 * 128),
		fuse.AsyncRead(),
		fuse.WritebackCache(),
		fuse.MaxBackground(128),
		fuse.CongestionThreshold(128),
	}

	options = append(options, osSpecificMountOptions()...)

@@ -175,6 +176,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
	}

	seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
		MountDirectory: dir,
		FilerAddress: filer,
		FilerGrpcAddress: filerGrpcAddress,
		GrpcDialOption: grpcDialOption,
		FilerMountRootPath: mountRoot,

@@ -194,7 +197,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
		MountCtime: fileInfo.ModTime(),
		MountMtime: time.Now(),
		Umask: umask,
		OutsideContainerClusterMode: *mountOptions.outsideContainerClusterMode,
		VolumeServerAccess: *mountOptions.volumeServerAccess,
		Cipher: cipher,
		UidGidMapper: uidGidMapper,
	})

@@ -213,7 +216,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
	})

	glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
	err = fs.Serve(c, seaweedFileSystem)
	server := fs.New(c, nil)
	seaweedFileSystem.Server = server
	err = server.Serve(seaweedFileSystem)

	// check if the mount process has an error to report
	<-c.Ready
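The mount point mode is now derived from the -umask flag rather than inherited from the existing directory. A quick check of the arithmetic, assuming the default umask of 022 (note that &^ binds tighter than | in Go):

	// os.FileMode(0777) &^ 0022 == 0755: the umask bits are cleared
	mode := os.ModeDir | os.FileMode(0777)&^os.FileMode(0022) // drwxr-xr-x

So a default mount shows up as drwxr-xr-x regardless of how the underlying directory was created.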
@@ -30,6 +30,7 @@ type S3Options struct {
	tlsPrivateKey *string
	tlsCertificate *string
	metricsHttpPort *int
	allowEmptyFolder *bool
}

func init() {

@@ -41,6 +42,7 @@ func init() {
	s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
	s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
	s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
	s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders")
}

var cmdS3 = &Command{

@@ -181,6 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
		DomainName: *s3opt.domainName,
		BucketsPath: filerBucketsPath,
		GrpcDialOption: grpcDialOption,
		AllowEmptyFolder: *s3opt.allowEmptyFolder,
	})
	if s3ApiServer_err != nil {
		glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
@@ -44,6 +44,8 @@ func runScaffold(cmd *Command, args []string) bool {
		content = SECURITY_TOML_EXAMPLE
	case "master":
		content = MASTER_TOML_EXAMPLE
	case "shell":
		content = SHELL_TOML_EXAMPLE
	}
	if content == "" {
		println("need a valid -config option")

@@ -85,9 +87,21 @@ buckets_folder = "/buckets"
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "."           # directory to store level db files
dir = "./filerldb2" # directory to store level db files

[mysql] # or tidb
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name VARCHAR(1000) COMMENT 'directory or file name',

@@ -104,9 +118,31 @@ password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false

[postgres] # or cockroachdb
[mysql2] # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS %s (
    dirhash BIGINT,
    name VARCHAR(1000),
    directory TEXT,
    meta LONGBLOB,
    PRIMARY KEY (dirhash, name)
  ) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false

[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash BIGINT,
#   name VARCHAR(65535),

@@ -119,7 +155,29 @@ hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "" # create or use an existing database
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS %s (
    dirhash BIGINT,
    name VARCHAR(65535),
    directory VARCHAR(65535),
    meta bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100

@@ -141,6 +199,11 @@ password=""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"

@@ -161,9 +224,9 @@ addresses = [
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = true
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = true
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

@@ -266,7 +329,8 @@ enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin.
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then
# create the binding myexchange => myqueue.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`

@@ -287,6 +351,16 @@ grpcAddress = "localhost:18888"
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"

[sink.local]
enabled = false
directory = "/data"

[sink.local_incremental]
# all replicated files are under modified time as yyyy-mm-dd directories
# so each date directory contains all new and updated files.
enabled = false
directory = "/backup"

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"

@@ -454,5 +528,19 @@ copy_other = 1 # create n x 1 = n actual volumes
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false

`
	SHELL_TOML_EXAMPLE = `

[cluster]
default = "c1"

[cluster.c1]
master = "localhost:9333" # comma-separated master servers
filer = "localhost:8888"  # filer host and port

[cluster.c2]
master = ""
filer = ""

`
)
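In the mysql2 and postgres2 sections above, createTable is a template. A hedged reading of the store code later in this commit (CreateTable calls GetSqlGenerator's GetSqlCreateTable(bucket)) is that %s is substituted with the per-bucket table name, so the statement runs once for each bucket under /buckets/. For example, a hypothetical bucket named photos would get:

	CREATE TABLE IF NOT EXISTS photos (
	  dirhash BIGINT,
	  name VARCHAR(1000),
	  directory TEXT,
	  meta LONGBLOB,
	  PRIMARY KEY (dirhash, name)
	) DEFAULT CHARSET=utf8;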
@@ -24,6 +24,7 @@ var (
	masterOptions MasterOptions
	filerOptions FilerOptions
	s3Options S3Options
	webdavOptions WebDavOption
	msgBrokerOptions MessageBrokerOptions
)

@@ -61,9 +62,11 @@ var (
	serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")

	// pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
	isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server")
	isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server")
	isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
	isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
	isStartingWebDav = cmdServer.Flag.Bool("webdav", false, "whether to start WebDAV gateway")
	isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker")

	serverWhiteList []string

@@ -94,7 +97,7 @@ func init() {
	filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
	filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
	filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
	filerOptions.cacheToFilerLimit = cmdServer.Flag.Int("filer.cacheToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
	filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")

	serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
	serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")

@@ -114,6 +117,14 @@ func init() {
	s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
	s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
	s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
	s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")

	webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
	webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
	webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file")
	webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
	webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
	webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")

	msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")

@@ -136,6 +147,9 @@ func runServer(cmd *Command, args []string) bool {
	if *isStartingS3 {
		*isStartingFiler = true
	}
	if *isStartingWebDav {
		*isStartingFiler = true
	}
	if *isStartingMsgBroker {
		*isStartingFiler = true
	}

@@ -170,6 +184,7 @@ func runServer(cmd *Command, args []string) bool {

	filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
	s3Options.filer = &filerAddress
	webdavOptions.filer = &filerAddress
	msgBrokerOptions.filer = &filerAddress

	runtime.GOMAXPROCS(runtime.NumCPU())

@@ -211,6 +226,15 @@ func runServer(cmd *Command, args []string) bool {
		}()
	}

	if *isStartingWebDav {
		go func() {
			time.Sleep(2 * time.Second)

			webdavOptions.startWebDav()

		}()
	}

	if *isStartingMsgBroker {
		go func() {
			time.Sleep(2 * time.Second)

@@ -224,7 +248,11 @@ func runServer(cmd *Command, args []string) bool {

	}

	startMaster(masterOptions, serverWhiteList)
	if *isStartingMasterServer {
		go startMaster(masterOptions, serverWhiteList)
	}

	select {}

	return true
}
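With start-up now gated per component, one weed server process can run any subset of services. A sketch of two such invocations (ports are the defaults shown above):

	weed server -s3 -webdav                 # master + volume + filer + S3 + WebDAV
	weed server -master=false -volume=true  # volume-only node, relying on an external master

Note that -s3, -webdav and -msgBroker each force -filer=true, since those gateways sit on top of the filer.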
@@ -11,12 +11,14 @@ import (
var (
	shellOptions shell.ShellOptions
	shellInitialFiler *string
	shellCluster *string
)

func init() {
	cmdShell.Run = runShell // break init cycle
	shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
	shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
	shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333")
	shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888")
	shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml")
}

var cmdShell = &Command{

@@ -24,6 +26,8 @@ var cmdShell = &Command{
	Short: "run interactive administrative commands",
	Long: `run interactive administrative commands.

	Generate shell.toml via "weed scaffold -config=shell"

`,
}

@@ -32,6 +36,23 @@ func runShell(command *Command, args []string) bool {
	util.LoadConfiguration("security", false)
	shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

	if *shellOptions.Masters == "" && *shellInitialFiler == "" {
		util.LoadConfiguration("shell", false)
		v := util.GetViper()
		cluster := v.GetString("cluster.default")
		if *shellCluster != "" {
			cluster = *shellCluster
		}
		if cluster == "" {
			*shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888"
		} else {
			*shellOptions.Masters = v.GetString("cluster." + cluster + ".master")
			*shellInitialFiler = v.GetString("cluster." + cluster + ".filer")
		}
	}

	fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)

	var err error
	shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
	if err != nil {
|
|||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
"google.golang.org/grpc"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
|
@ -67,6 +71,15 @@ func runUpload(cmd *Command, args []string) bool {
|
|||
util.LoadConfiguration("security", false)
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
defaultCollection, err := readMasterConfiguration(grpcDialOption, *upload.master)
|
||||
if err != nil {
|
||||
fmt.Printf("upload: %v", err)
|
||||
return false
|
||||
}
|
||||
if *upload.replication == "" {
|
||||
*upload.replication = defaultCollection
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
if *upload.dir == "" {
|
||||
return false
|
||||
|
@ -106,3 +119,15 @@ func runUpload(cmd *Command, args []string) bool {
|
|||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) {
|
||||
err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error {
|
||||
resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get master %s configuration: %v", masterAddress, err)
|
||||
}
|
||||
replication = resp.DefaultReplication
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
|
|
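One consequence of the hunks above: when -replication is left empty, weed upload now asks the master for its DefaultReplication instead of relying on a client-side default, so the CLI and the master stay in agreement. A usage sketch (file name is a placeholder):

	weed upload -master=localhost:9333 somefile.txt  # replication comes from the master's configuration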
@@ -1,113 +0,0 @@
package command

import (
	"context"
	"fmt"
	"io"
	"path/filepath"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
	cmdWatch.Run = runWatch // break init cycle
}

var cmdWatch = &Command{
	UsageLine: "watch [-filer=localhost:8888] [-target=/]",
	Short:     "see recent changes on a filer",
	Long: `See recent changes on a filer.

`,
}

var (
	watchFiler   = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port")
	watchTarget  = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer")
	watchStart   = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
	watchPattern = cmdWatch.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ")
)

func runWatch(cmd *Command, args []string) bool {

	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

	var filterFunc func(dir, fname string) bool
	if *watchPattern != "" {
		if strings.Contains(*watchPattern, "/") {
			println("watch path pattern", *watchPattern)
			filterFunc = func(dir, fname string) bool {
				matched, err := filepath.Match(*watchPattern, dir+"/"+fname)
				if err != nil {
					fmt.Printf("error: %v", err)
				}
				return matched
			}
		} else {
			println("watch file pattern", *watchPattern)
			filterFunc = func(dir, fname string) bool {
				matched, err := filepath.Match(*watchPattern, fname)
				if err != nil {
					fmt.Printf("error: %v", err)
				}
				return matched
			}
		}
	}

	shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
		if filterFunc == nil {
			return true
		}
		if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
			return false
		}
		if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
			return true
		}
		if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) {
			return true
		}
		return false
	}

	watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
			ClientName: "watch",
			PathPrefix: *watchTarget,
			SinceNs:    time.Now().Add(-*watchStart).UnixNano(),
		})
		if err != nil {
			return fmt.Errorf("listen: %v", err)
		}

		for {
			resp, listenErr := stream.Recv()
			if listenErr == io.EOF {
				return nil
			}
			if listenErr != nil {
				return listenErr
			}
			if !shouldPrint(resp) {
				continue
			}
			fmt.Printf("dir:%s %+v\n", resp.Directory, resp.EventNotification)
		}

	})
	if watchErr != nil {
		fmt.Printf("watch %s: %v\n", *watchFiler, watchErr)
	}

	return true
}
@@ -9,19 +9,33 @@ import (
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"strings"
	"sync"
)

type AbstractSqlStore struct {
	DB *sql.DB
	SqlInsert string
	SqlUpdate string
	SqlFind string
	SqlDelete string
	SqlDeleteFolderChildren string
	SqlListExclusive string
	SqlListInclusive string
type SqlGenerator interface {
	GetSqlInsert(bucket string) string
	GetSqlUpdate(bucket string) string
	GetSqlFind(bucket string) string
	GetSqlDelete(bucket string) string
	GetSqlDeleteFolderChildren(bucket string) string
	GetSqlListExclusive(bucket string) string
	GetSqlListInclusive(bucket string) string
	GetSqlCreateTable(bucket string) string
	GetSqlDropTable(bucket string) string
}

type AbstractSqlStore struct {
	SqlGenerator
	DB *sql.DB
	SupportBucketTable bool
	dbs map[string]bool
	dbsLock sync.Mutex
}

const (
	DEFAULT_TABLE = "filemeta"
)

type TxOrDB interface {
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row

@@ -52,16 +66,65 @@ func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
	return nil
}

func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) {

	shortPath = fullpath
	bucket = DEFAULT_TABLE

	if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
		return tx
		txOrDB = tx
	} else {
		txOrDB = store.DB
	}
	return store.DB

	if !store.SupportBucketTable {
		return
	}

	if !strings.HasPrefix(string(fullpath), "/buckets/") {
		return
	}

	// detect bucket
	bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
	t := strings.Index(bucketAndObjectKey, "/")
	if t < 0 && !isForChildren {
		return
	}
	bucket = bucketAndObjectKey
	shortPath = "/"
	if t > 0 {
		bucket = bucketAndObjectKey[:t]
		shortPath = util.FullPath(bucketAndObjectKey[t:])
	}

	if isValidBucket(bucket) {
		store.dbsLock.Lock()
		defer store.dbsLock.Unlock()

		if store.dbs == nil {
			store.dbs = make(map[string]bool)
		}

		if _, found := store.dbs[bucket]; !found {
			if err = store.CreateTable(ctx, bucket); err == nil {
				store.dbs[bucket] = true
			}
		}

	}

	return
}
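A worked example of the path split above, assuming SupportBucketTable is enabled: for the full path /buckets/photos/2021/a.jpg, bucketAndObjectKey is photos/2021/a.jpg, so bucket becomes photos and shortPath becomes /2021/a.jpg; a bare /buckets/photos (no inner slash) only maps to the photos bucket when isForChildren is true. As a sketch (the paths are hypothetical):

	// fullpath                      -> bucket,  shortPath
	// /buckets/photos/2021/a.jpg    -> photos,  /2021/a.jpg
	// /buckets/photos (children)    -> photos,  /
	// anything outside /buckets/... -> DEFAULT_TABLE ("filemeta"), path unchanged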
func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

	dir, name := entry.FullPath.DirAndName()
	db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
	}

	dir, name := shortPath.DirAndName()
	meta, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encode %s: %s", entry.FullPath, err)

@@ -71,7 +134,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
		meta = util.MaybeGzipData(meta)
	}

	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta)
	res, err := db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta)
	if err == nil {
		return
	}

@@ -84,7 +147,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
	// now the insert failed possibly due to duplication constraints
	glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)

	res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
	res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
	if err != nil {
		return fmt.Errorf("upsert %s: %s", entry.FullPath, err)
	}

@@ -99,13 +162,18 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent

func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

	dir, name := entry.FullPath.DirAndName()
	db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
	}

	dir, name := shortPath.DirAndName()
	meta, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
	}

	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
	res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
	if err != nil {
		return fmt.Errorf("update %s: %s", entry.FullPath, err)
	}

@@ -119,8 +187,13 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Ent

func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {

	dir, name := fullpath.DirAndName()
	row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir)
	db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
	if err != nil {
		return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
	}

	dir, name := shortPath.DirAndName()
	row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir)

	var data []byte
	if err := row.Scan(&data); err != nil {

@@ -142,9 +215,14 @@ func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.Full

func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {

	dir, name := fullpath.DirAndName()
	db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", fullpath, err)
	}

	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir)
	dir, name := shortPath.DirAndName()

	res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir)
	if err != nil {
		return fmt.Errorf("delete %s: %s", fullpath, err)
	}

@@ -159,7 +237,23 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.Fu

func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {

	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath)
	db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", fullpath, err)
	}

	if isValidBucket(bucket) && shortPath == "/" {
		if err = store.deleteTable(ctx, bucket); err == nil {
			store.dbsLock.Lock()
			delete(store.dbs, bucket)
			store.dbsLock.Unlock()
			return nil
		} else {
			return err
		}
	}

	res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), fullpath)
	if err != nil {
		return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
	}

@@ -172,15 +266,21 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
	return nil
}

func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
	sqlText := store.SqlListExclusive
	if inclusive {
		sqlText = store.SqlListInclusive
func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
	if err != nil {
		return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
	}

	rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), prefix+"%", limit)
	sqlText := store.GetSqlListExclusive(bucket)
	if includeStartFile {
		sqlText = store.GetSqlListInclusive(bucket)
	}

	rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1)
	if err != nil {
		return nil, fmt.Errorf("list %s : %v", fullpath, err)
		return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
	}
	defer rows.Close()

@@ -188,28 +288,52 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
		var name string
		var data []byte
		if err = rows.Scan(&name, &data); err != nil {
			glog.V(0).Infof("scan %s : %v", fullpath, err)
			return nil, fmt.Errorf("scan %s: %v", fullpath, err)
			glog.V(0).Infof("scan %s : %v", dirPath, err)
			return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
		}
		lastFileName = name

		entry := &filer.Entry{
			FullPath: util.NewFullPath(string(fullpath), name),
			FullPath: util.NewFullPath(string(dirPath), name),
		}
		if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
			glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
			return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
			return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
		}

		entries = append(entries, entry)
		if !eachEntryFunc(entry) {
			break
		}

	return entries, nil
	}

	return lastFileName, nil
}

func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
	return store.ListDirectoryPrefixedEntries(ctx, fullpath, startFileName, inclusive, limit, "")
func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
	return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}

func (store *AbstractSqlStore) Shutdown() {
	store.DB.Close()
}

func isValidBucket(bucket string) bool {
	return bucket != DEFAULT_TABLE && bucket != ""
}

func (store *AbstractSqlStore) CreateTable(ctx context.Context, bucket string) error {
	if !store.SupportBucketTable {
		return nil
	}
	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket))
	return err
}

func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error {
	if !store.SupportBucketTable {
		return nil
	}
	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket))
	return err
}
@@ -13,9 +13,14 @@ import (

func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

	db, _, _, err := store.getTxOrDB(ctx, "", false)
	if err != nil {
		return fmt.Errorf("findDB: %v", err)
	}

	dirStr, dirHash, name := genDirAndName(key)

	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, dirHash, name, dirStr, value)
	res, err := db.ExecContext(ctx, store.GetSqlInsert(DEFAULT_TABLE), dirHash, name, dirStr, value)
	if err == nil {
		return
	}

@@ -28,7 +33,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
	// now the insert failed possibly due to duplication constraints
	glog.V(1).Infof("kv insert falls back to update: %s", err)

	res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
	res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
	if err != nil {
		return fmt.Errorf("kv upsert: %s", err)
	}

@@ -43,8 +48,13 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by

func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

	db, _, _, err := store.getTxOrDB(ctx, "", false)
	if err != nil {
		return nil, fmt.Errorf("findDB: %v", err)
	}

	dirStr, dirHash, name := genDirAndName(key)
	row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, dirHash, name, dirStr)
	row := db.QueryRowContext(ctx, store.GetSqlFind(DEFAULT_TABLE), dirHash, name, dirStr)

	err = row.Scan(&value)

@@ -61,9 +71,14 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b

func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) {

	db, _, _, err := store.getTxOrDB(ctx, "", false)
	if err != nil {
		return fmt.Errorf("findDB: %v", err)
	}

	dirStr, dirHash, name := genDirAndName(key)

	res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, dirHash, name, dirStr)
	res, err := db.ExecContext(ctx, store.GetSqlDelete(DEFAULT_TABLE), dirHash, name, dirStr)
	if err != nil {
		return fmt.Errorf("kv delete: %s", err)
	}
@@ -168,41 +168,43 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath
	return nil
}

func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
	return nil, filer.ErrUnsupportedListDirectoryPrefixed
func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
	return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}

func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
	limit int) (entries []*filer.Entry, err error) {
func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

	if _, ok := store.isSuperLargeDirectory(string(fullpath)); ok {
	if _, ok := store.isSuperLargeDirectory(string(dirPath)); ok {
		return // nil, filer.ErrUnsupportedSuperLargeDirectoryListing
	}

	cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
	if inclusive {
	if includeStartFile {
		cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?"
	}

	var data []byte
	var name string
	iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter()
	iter := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter()
	for iter.Scan(&name, &data) {
		entry := &filer.Entry{
			FullPath: util.NewFullPath(string(fullpath), name),
			FullPath: util.NewFullPath(string(dirPath), name),
		}
		lastFileName = name
		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
			err = decodeErr
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		entries = append(entries, entry)
		if !eachEntryFunc(entry) {
			break
		}
	}
	if err := iter.Close(); err != nil {
		glog.V(0).Infof("list iterator close: %v", err)
	}

	return entries, err
	return lastFileName, err
}

func (store *CassandraStore) Shutdown() {
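The listing refactor that runs through all of these stores replaces the returned []*filer.Entry slice with a per-entry callback plus a lastFileName cursor; stores fetch limit+1 rows so the caller can tell whether another page exists. A sketch of the callback type as it is used here (the exact declaration lives in the filer package and is assumed):

	// ListEachEntryFunc receives one entry at a time; returning false stops the scan.
	type ListEachEntryFunc func(entry *filer.Entry) bool

Pagination then resumes by passing the returned lastFileName as the next call's startFileName with includeStartFile=false.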
@@ -2,7 +2,7 @@ package filer

import (
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/spf13/viper"
	"github.com/chrislusf/seaweedfs/weed/util"
	"os"
	"reflect"
	"strings"

@@ -12,7 +12,7 @@ var (
	Stores []FilerStore
)

func (f *Filer) LoadConfiguration(config *viper.Viper) {
func (f *Filer) LoadConfiguration(config *util.ViperProxy) {

	validateOneEnabledStore(config)

@@ -79,7 +79,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {

}

func validateOneEnabledStore(config *viper.Viper) {
func validateOneEnabledStore(config *util.ViperProxy) {
	enabledStore := ""
	for _, store := range Stores {
		if config.GetBool(store.GetName() + ".enabled") {
@@ -96,12 +96,12 @@ func (store *ElasticStore) RollbackTransaction(ctx context.Context) error {
 	return nil
 }

-func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
-	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+	return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
 }

 func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
-	index := getIndex(entry.FullPath)
+	index := getIndex(entry.FullPath, false)
 	dir, _ := entry.FullPath.DirAndName()
 	id := weed_util.Md5String([]byte(entry.FullPath))
 	esEntry := &ESEntry{
@@ -131,7 +131,7 @@ func (store *ElasticStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
 }

 func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) {
-	index := getIndex(fullpath)
+	index := getIndex(fullpath, false)
 	id := weed_util.Md5String([]byte(fullpath))
 	searchResult, err := store.client.Get().
 		Index(index).
@@ -154,7 +154,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 }

 func (store *ElasticStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) {
-	index := getIndex(fullpath)
+	index := getIndex(fullpath, false)
 	id := weed_util.Md5String([]byte(fullpath))
 	if strings.Count(string(fullpath), "/") == 1 {
 		return store.deleteIndex(ctx, index)
@@ -187,62 +187,30 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
 }

 func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
-	if entries, err := store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32); err == nil {
-		for _, entry := range entries {
-			store.DeleteEntry(ctx, entry.FullPath)
-		}
-	}
-	return nil
+	_, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
+		if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
+			glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
+			return false
+		}
+		return true
+	})
+	return
 }

-func (store *ElasticStore) ListDirectoryEntries(
-	ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
-) (entries []*filer.Entry, err error) {
-	if string(fullpath) == "/" {
-		return store.listRootDirectoryEntries(ctx, startFileName, inclusive, limit)
-	}
-	return store.listDirectoryEntries(ctx, fullpath, startFileName, inclusive, limit)
-}
-
-func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
-	indexResult, err := store.client.CatIndices().Do(ctx)
-	if err != nil {
-		glog.Errorf("list indices %v.", err)
-		return entries, err
-	}
-	for _, index := range indexResult {
-		if index.Index == indexKV {
-			continue
-		}
-		if strings.HasPrefix(index.Index, indexPrefix) {
-			if entry, err := store.FindEntry(ctx,
-				weed_util.FullPath("/"+strings.Replace(index.Index, indexPrefix, "", 1))); err == nil {
-				fileName := getFileName(entry.FullPath)
-				if fileName == startFileName && !inclusive {
-					continue
-				}
-				limit--
-				if limit < 0 {
-					break
-				}
-				entries = append(entries, entry)
-			}
-		}
-	}
-	return entries, nil
+func (store *ElasticStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+	return store.listDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc)
 }

 func (store *ElasticStore) listDirectoryEntries(
-	ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int,
-) (entries []*filer.Entry, err error) {
+	ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
 	first := true
-	index := getIndex(fullpath)
+	index := getIndex(fullpath, true)
 	nextStart := ""
 	parentId := weed_util.Md5String([]byte(fullpath))
-	if _, err := store.client.Refresh(index).Do(ctx); err != nil {
+	if _, err = store.client.Refresh(index).Do(ctx); err != nil {
 		if elastic.IsNotFound(err) {
 			store.client.CreateIndex(index).Do(ctx)
-			return entries, nil
+			return
 		}
 	}
 	for {
@@ -250,7 +218,7 @@ func (store *ElasticStore) listDirectoryEntries(
 		if (startFileName == "" && first) || inclusive {
 			if result, err = store.search(ctx, index, parentId); err != nil {
 				glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
-				return entries, err
+				return
 			}
 		} else {
 			fullPath := string(fullpath) + "/" + startFileName
@@ -260,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries(
 			after := weed_util.Md5String([]byte(fullPath))
 			if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
 				glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
-				return entries, err
+				return
 			}
 		}
 		first = false
@@ -272,21 +240,24 @@ func (store *ElasticStore) listDirectoryEntries(
 			if err := jsoniter.Unmarshal(hit.Source, esEntry); err == nil {
 				limit--
 				if limit < 0 {
-					return entries, nil
+					return lastFileName, nil
 				}
 				nextStart = string(esEntry.Entry.FullPath)
-				fileName := getFileName(esEntry.Entry.FullPath)
+				fileName := esEntry.Entry.FullPath.Name()
 				if fileName == startFileName && !inclusive {
 					continue
 				}
-				entries = append(entries, esEntry.Entry)
+				if !eachEntryFunc(esEntry.Entry) {
+					break
+				}
+				lastFileName = fileName
 			}
 		}
 		if len(result.Hits.Hits) < store.maxPageSize {
 			break
 		}
 	}
-	return entries, nil
+	return
 }

 func (store *ElasticStore) search(ctx context.Context, index, parentId string) (result *elastic.SearchResult, err error) {
@@ -321,18 +292,16 @@ func (store *ElasticStore) Shutdown() {
 	store.client.Stop()
 }

-func getIndex(fullpath weed_util.FullPath) string {
+func getIndex(fullpath weed_util.FullPath, isDirectory bool) string {
 	path := strings.Split(string(fullpath), "/")
-	if len(path) > 1 {
-		return indexPrefix + path[1]
-	}
-	return ""
-}
-
-func getFileName(fullpath weed_util.FullPath) string {
-	path := strings.Split(string(fullpath), "/")
-	if len(path) > 1 {
-		return path[len(path)-1]
-	}
+	if isDirectory && len(path) >= 2 {
+		return indexPrefix + strings.ToLower(path[1])
+	}
+	if len(path) > 2 {
+		return indexPrefix + strings.ToLower(path[1])
+	}
+	if len(path) == 2 {
+		return indexPrefix
+	}
 	return ""
 }
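The new getIndex(fullpath, isDirectory) maps a path to an Elasticsearch index named after its lower-cased top-level directory, with top-level files falling back to the bare prefix index. A runnable trace of the three branches, with FullPath simplified to string for self-containment and the indexPrefix value only assumed (the real constant is defined elsewhere in elastic_store.go):

    package main

    import (
        "fmt"
        "strings"
    )

    const indexPrefix = ".seaweedfs_" // assumed value; defined outside the shown hunks

    // getIndex is transcribed from the new version in the diff above.
    func getIndex(fullpath string, isDirectory bool) string {
        path := strings.Split(fullpath, "/")
        if isDirectory && len(path) >= 2 {
            return indexPrefix + strings.ToLower(path[1])
        }
        if len(path) > 2 {
            return indexPrefix + strings.ToLower(path[1])
        }
        if len(path) == 2 {
            return indexPrefix
        }
        return ""
    }

    func main() {
        fmt.Println(getIndex("/Bucket", true))       // .seaweedfs_bucket: a directory gets its own index
        fmt.Println(getIndex("/Bucket/file", false)) // .seaweedfs_bucket: nested files use the top directory's index
        fmt.Println(getIndex("/file", false))        // .seaweedfs_: top-level files share the bare prefix index
    }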
@@ -1,6 +1,7 @@
 package etcd

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"strings"
@@ -101,7 +102,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPa

 	resp, err := store.client.Get(ctx, string(key))
 	if err != nil {
-		return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
+		return nil, fmt.Errorf("get %s : %v", fullpath, err)
 	}

 	if len(resp.Kvs) == 0 {
@@ -139,25 +140,32 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_
 	return nil
 }

-func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*filer.Entry, err error) {
-	return nil, filer.ErrUnsupportedListDirectoryPrefixed
+func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+	return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
 }

-func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
-	directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
+func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+	directoryPrefix := genDirectoryKeyPrefix(dirPath, "")
+	lastFileStart := directoryPrefix
+	if startFileName != "" {
+		lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
+	}

-	resp, err := store.client.Get(ctx, string(directoryPrefix),
+	resp, err := store.client.Get(ctx, string(lastFileStart),
 		clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
 	if err != nil {
-		return nil, fmt.Errorf("list %s : %v", fullpath, err)
+		return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
 	}

 	for _, kv := range resp.Kvs {
+		if !bytes.HasPrefix(kv.Key, directoryPrefix) {
+			break
+		}
 		fileName := getNameFromKey(kv.Key)
 		if fileName == "" {
 			continue
 		}
-		if fileName == startFileName && !inclusive {
+		if fileName == startFileName && !includeStartFile {
 			continue
 		}
 		limit--
@@ -165,17 +173,20 @@ func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_
 			break
 		}
 		entry := &filer.Entry{
-			FullPath: weed_util.NewFullPath(string(fullpath), fileName),
+			FullPath: weed_util.NewFullPath(string(dirPath), fileName),
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
 			err = decodeErr
 			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
 			break
 		}
-		entries = append(entries, entry)
+		if !eachEntryFunc(entry) {
+			break
+		}
+		lastFileName = fileName
 	}

-	return entries, err
+	return lastFileName, err
 }

 func genKey(dirPath, fileName string) (key []byte) {
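The etcd listing now seeks directly to genDirectoryKeyPrefix(dirPath, startFileName) instead of always scanning from the directory prefix, and the new bytes.HasPrefix check stops the scan once keys leave the directory. A toy sketch of that seek-then-bound pattern over a sorted key list (the "<dir>/<name>" key layout is a simplification; genDirectoryKeyPrefix itself is not shown in this diff, and the includeStartFile check then decides whether the first hit is kept):

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // list seeks into a sorted keyspace, standing in for the etcd range read.
    func list(keys []string, dir, startFileName string) (names []string) {
        prefix := dir + "/"
        seek := prefix
        if startFileName != "" {
            seek = prefix + startFileName // start the range scan at the last seen file
        }
        i := sort.SearchStrings(keys, seek)
        for ; i < len(keys); i++ {
            if !strings.HasPrefix(keys[i], prefix) {
                break // left the directory: stop, as the bytes.HasPrefix check does
            }
            names = append(names, strings.TrimPrefix(keys[i], prefix))
        }
        return names
    }

    func main() {
        keys := []string{"/d/a", "/d/b", "/d/c", "/e/x"}
        fmt.Println(list(keys, "/d", ""))  // [a b c]
        fmt.Println(list(keys, "/d", "b")) // [b c]
    }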
@ -3,6 +3,7 @@ package filer
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/wdclient"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
|
@ -38,7 +39,7 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa
|
|||
return
|
||||
}
|
||||
|
||||
func ResolveChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
||||
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
||||
// TODO maybe parallel this
|
||||
for _, chunk := range chunks {
|
||||
if !chunk.IsChunkManifest {
|
||||
|
@ -63,7 +64,7 @@ func ResolveChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunks []*fil
|
|||
return
|
||||
}
|
||||
|
||||
func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
||||
func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
||||
if !chunk.IsChunkManifest {
|
||||
return
|
||||
}
|
||||
|
@ -84,7 +85,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
|
|||
}
|
||||
|
||||
// TODO fetch from cache for weed mount?
|
||||
func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
|
||||
func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
|
||||
urlStrings, err := lookupFileIdFn(fileId)
|
||||
if err != nil {
|
||||
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
|
||||
|
|
|
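LookupFileIdFunctionType moves from the filer package into wdclient, so both packages can refer to it without an import cycle. Its shape is inferred from the call site above (`urlStrings, err := lookupFileIdFn(fileId)`): a resolver from a file id to the candidate URLs it can be fetched from.

    package wdclient

    // LookupFileIdFunctionType resolves a file id such as "3,01637037d6" to the
    // volume-server URLs serving it; the signature is inferred from the call
    // sites in this diff (one string in, a slice of URLs and an error out).
    type LookupFileIdFunctionType func(fileId string) (targetUrls []string, err error)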
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"math"
 	"sort"
 	"sync"
@@ -52,7 +53,7 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
 	return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
 }

-func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
+func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {

 	visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)

@@ -71,7 +72,7 @@ func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_
 	return
 }

-func MinusChunks(lookupFileIdFn LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
+func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {

 	aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
 	if aErr != nil {
@@ -116,7 +117,7 @@ func (cv *ChunkView) IsFullChunk() bool {
 	return cv.Size == cv.ChunkSize
 }

-func ViewFromChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
+func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {

 	visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)

@@ -222,7 +223,7 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n

 // NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
 // If the file chunk content is a chunk manifest
-func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
+func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {

 	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
@@ -134,14 +134,57 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 		return nil
 	}

-	dirParts := strings.Split(string(entry.FullPath), "/")
-
-	// fmt.Printf("directory parts: %+v\n", dirParts)
-
-	var lastDirectoryEntry *Entry
-
-	for i := 1; i < len(dirParts); i++ {
-		dirPath := "/" + util.Join(dirParts[:i]...)
+	oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
+
+	/*
+		if !hasWritePermission(lastDirectoryEntry, entry) {
+			glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
+				lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
+			return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
+		}
+	*/
+
+	if oldEntry == nil {
+
+		dirParts := strings.Split(string(entry.FullPath), "/")
+		if err := f.ensureParentDirecotryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil {
+			return err
+		}
+
+		glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
+		if err := f.Store.InsertEntry(ctx, entry); err != nil {
+			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
+			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
+		}
+	} else {
+		if o_excl {
+			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
+		}
+		glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
+		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
+			glog.Errorf("update entry %s: %v", entry.FullPath, err)
+			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
+		}
+	}
+
+	f.maybeAddBucket(entry)
+	f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)
+
+	f.deleteChunksIfNotNew(oldEntry, entry)
+
+	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+
+	return nil
+}
+
+func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) {
+
+	if level == 0 {
+		return nil
+	}
+
+	dirPath := "/" + util.Join(dirParts[:level]...)
 	// fmt.Printf("%d directory: %+v\n", i, dirPath)

 	// check the store directly
@@ -151,6 +194,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 	// no such existing directory
 	if dirEntry == nil {

+		// ensure parent directory
+		if err = f.ensureParentDirecotryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil {
+			return err
+		}
+
 		// create the directory
 		now := time.Now()

@@ -186,53 +234,6 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 		return fmt.Errorf("%s is a file", dirPath)
 	}

-	// remember the direct parent directory entry
-	if i == len(dirParts)-1 {
-		lastDirectoryEntry = dirEntry
-	}
-
-	}
-
-	if lastDirectoryEntry == nil {
-		glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
-		return fmt.Errorf("parent folder not found: %v", entry.FullPath)
-	}
-
-	/*
-		if !hasWritePermission(lastDirectoryEntry, entry) {
-			glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
-				lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
-			return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
-		}
-	*/
-
-	oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
-
-	if oldEntry == nil {
-		glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
-		if err := f.Store.InsertEntry(ctx, entry); err != nil {
-			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
-			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
-		}
-	} else {
-		if o_excl {
-			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
-			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
-		}
-		glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
-		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
-			glog.Errorf("update entry %s: %v", entry.FullPath, err)
-			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
-		}
-	}
-
-	f.maybeAddBucket(entry)
-	f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)
-
-	f.deleteChunksIfNotNew(oldEntry, entry)
-
-	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
-
-	return nil
-}
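CreateEntry now checks for an existing entry first and only walks the directory chain when the entry is new; the old iterative loop over dirParts becomes the recursive ensureParentDirecotryEntry, which recurses toward the root before creating each missing level on the way back out. A stripped-down model of that recursion (the map is a hypothetical stand-in for the filer store):

    package main

    import (
        "fmt"
        "strings"
    )

    // ensureParents models ensureParentDirecotryEntry: recurse toward the root
    // first, then create the missing directory on the way back out.
    func ensureParents(store map[string]bool, dirParts []string, level int) {
        if level == 0 {
            return
        }
        dirPath := "/" + strings.Join(dirParts[1:level], "/") // dirParts[0] is "" for absolute paths
        if store[dirPath] {
            return // this level already exists, nothing to do
        }
        ensureParents(store, dirParts, level-1) // make sure the parent exists first
        store[dirPath] = true
        fmt.Println("created", dirPath)
    }

    func main() {
        store := map[string]bool{"/": true} // the root always exists
        dirParts := strings.Split("/a/b/c.txt", "/")
        ensureParents(store, dirParts, len(dirParts)-1) // created /a, then created /a/b
    }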
@@ -280,38 +281,19 @@ func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, e

 }

-func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) ([]*Entry, error) {
-	if strings.HasSuffix(string(p), "/") && len(p) > 1 {
-		p = p[0 : len(p)-1]
-	}
-
-	var makeupEntries []*Entry
-	entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix)
-	for expiredCount > 0 && err == nil {
-		makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix)
-		if err == nil {
-			entries = append(entries, makeupEntries...)
-		}
-	}
-
-	return entries, err
-}
-
-func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int, prefix string) (entries []*Entry, expiredCount int, lastFileName string, err error) {
-	listedEntries, listErr := f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix)
-	if listErr != nil {
-		return listedEntries, expiredCount, "", listErr
-	}
-	for _, entry := range listedEntries {
-		lastFileName = entry.Name()
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) {
+	lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
 		if entry.TtlSec > 0 {
 			if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
 				f.Store.DeleteOneEntry(ctx, entry)
 				expiredCount++
-				continue
+				return true
 			}
 		}
-		entries = append(entries, entry)
+		return eachEntryFunc(entry)
+	})
+	if err != nil {
+		return expiredCount, lastFileName, err
+	}
+	return
 }
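doListDirectoryEntries now filters TTL-expired entries inline: an expired entry is deleted, counted, and withheld from the caller by returning true (keep listing), and the caller (doListValidEntries, in the new filer_search.go further down) re-lists from the last name until no page comes up short. A compact model of that expire-and-refill loop (all names here are hypothetical; only the counting scheme follows the diff):

    package main

    import "fmt"

    // listOnce scans up to limit names after start; expired ones are "deleted",
    // counted, and withheld from the output, like doListDirectoryEntries.
    func listOnce(names []string, expired map[string]bool, start string, limit int) (out []string, expiredCount int, last string) {
        taken := 0
        for _, name := range names {
            if name <= start {
                continue
            }
            if taken == limit {
                break
            }
            taken++
            last = name
            if expired[name] {
                expiredCount++ // deleted and skipped, but it used up a slot
                continue
            }
            out = append(out, name)
        }
        return
    }

    func main() {
        names := []string{"a", "b", "c", "d", "e"}
        expired := map[string]bool{"b": true, "c": true}
        var all []string
        start, limit := "", 3
        for {
            out, miss, last := listOnce(names, expired, start, limit)
            all = append(all, out...)
            if miss == 0 {
                break
            }
            start, limit = last, miss // re-list just enough to make up the misses
        }
        fmt.Println(all) // [a d e]
    }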
@@ -27,9 +27,9 @@ func (f *Filer) LoadBuckets() {
 		buckets: make(map[BucketName]*BucketOption),
 	}

-	limit := math.MaxInt32
+	limit := int64(math.MaxInt32)

-	entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
+	entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "")

 	if err != nil {
 		glog.V(1).Infof("no buckets found: %v", err)
@@ -30,7 +30,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 		// delete the folder children, not including the folder itself
 		var dirChunks []*filer_pb.FileChunk
 		var dirHardLinkIds []HardLinkId
-		dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isFromOtherCluster, signatures)
+		dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures)
 		if err != nil {
 			glog.V(0).Infof("delete directory %s: %v", p, err)
 			return fmt.Errorf("delete directory %s: %v", p, err)
@@ -63,12 +63,13 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 	return nil
 }

-func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {

 	lastFileName := ""
 	includeLastFile := false
+	if !isDeletingBucket {
 	for {
-		entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
+		entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "")
 		if err != nil {
 			glog.Errorf("list folder %s: %v", entry.FullPath, err)
 			return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
@@ -84,7 +85,8 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 			var dirChunks []*filer_pb.FileChunk
 			var dirHardLinkIds []HardLinkId
 			if sub.IsDirectory() {
-				dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, false, nil)
+				subIsDeletingBucket := f.isBucket(sub)
+				dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil)
 				chunks = append(chunks, dirChunks...)
 				hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
 			} else {
@@ -105,6 +107,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 			break
 		}
 	}
+	}

 	glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
@@ -113,13 +113,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
 	sizeBuf := make([]byte, 4)
 	startTsNs := startTime.UnixNano()

-	dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "")
+	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "")
 	if listDayErr != nil {
 		return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
 	}
 	for _, dayEntry := range dayEntries {
 		// println("checking day", dayEntry.FullPath)
-		hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "")
+		hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "")
 		if listHourMinuteErr != nil {
 			return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
 		}
@@ -170,7 +170,7 @@ func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func
 		return lastTsNs, err
 	}
 	if logEntry.TsNs <= ns {
-		return lastTsNs, nil
+		continue
 	}
 	// println("each log: ", logEntry.TsNs)
 	if err := eachLogEntryFn(logEntry); err != nil {
90
weed/filer/filer_search.go
Normal file
@@ -0,0 +1,90 @@
+package filer
+
+import (
+	"context"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"path/filepath"
+	"strings"
+)
+
+func splitPattern(pattern string) (prefix string, restPattern string) {
+	position := strings.Index(pattern, "*")
+	if position >= 0 {
+		return pattern[:position], pattern[position:]
+	}
+	position = strings.Index(pattern, "?")
+	if position >= 0 {
+		return pattern[:position], pattern[position:]
+	}
+	return "", restPattern
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string) (entries []*Entry, hasMore bool, err error) {
+
+	_, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, func(entry *Entry) bool {
+		entries = append(entries, entry)
+		return true
+	})
+
+	hasMore = int64(len(entries)) >= limit+1
+	if hasMore {
+		entries = entries[:limit]
+	}
+
+	return entries, hasMore, err
+}
+
+// For now, prefix and namePattern are mutually exclusive
+func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+	if strings.HasSuffix(string(p), "/") && len(p) > 1 {
+		p = p[0 : len(p)-1]
+	}
+
+	prefixInNamePattern, restNamePattern := splitPattern(namePattern)
+	if prefixInNamePattern != "" {
+		prefix = prefixInNamePattern
+	}
+	var missedCount int64
+
+	missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, eachEntryFunc)
+
+	for missedCount > 0 && err == nil {
+		missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, eachEntryFunc)
+	}
+
+	return
+}
+
+func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
+
+	if len(restNamePattern) == 0 {
+		lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
+		return 0, lastFileName, err
+	}
+
+	lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
+		nameToTest := strings.ToLower(entry.Name())
+		if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && matched {
+			if !eachEntryFunc(entry) {
+				return false
+			}
+		} else {
+			missedCount++
+		}
+		return true
+	})
+	if err != nil {
+		return
+	}
+	return
+}
+
+func (f *Filer) doListValidEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+	var expiredCount int64
+	expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
+	for expiredCount > 0 && err == nil {
+		expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix, eachEntryFunc)
+	}
+	return
+}
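splitPattern peels a literal prefix off a wildcard pattern so the store-level prefix scan can narrow the candidates before filepath.Match runs on the remainder; note that a pattern with no wildcard at all yields ("", ""), so such a pattern falls through to a plain prefix listing. A quick demonstration, with splitPattern transcribed verbatim from the new file above:

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    func splitPattern(pattern string) (prefix string, restPattern string) {
        position := strings.Index(pattern, "*")
        if position >= 0 {
            return pattern[:position], pattern[position:]
        }
        position = strings.Index(pattern, "?")
        if position >= 0 {
            return pattern[:position], pattern[position:]
        }
        return "", restPattern // no wildcard: both results come back empty
    }

    func main() {
        prefix, rest := splitPattern("report-*.csv")
        fmt.Println(prefix, rest) // report- *.csv

        // The listing then matches the remainder against the name minus the prefix:
        name := "report-2020.csv"
        ok, _ := filepath.Match(rest, strings.ToLower(name)[len(prefix):])
        fmt.Println(ok) // true
    }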
@@ -13,6 +13,8 @@ var (
 	ErrKvNotFound = errors.New("kv: not found")
 )

+type ListEachEntryFunc func(entry *Entry) bool
+
 type FilerStore interface {
 	// GetName gets the name to locate the configuration in filer.toml file
 	GetName() string
@@ -24,8 +26,8 @@ type FilerStore interface {
 	FindEntry(context.Context, util.FullPath) (entry *Entry, err error)
 	DeleteEntry(context.Context, util.FullPath) (err error)
 	DeleteFolderChildren(context.Context, util.FullPath) (err error)
-	ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
-	ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error)
+	ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)
+	ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error)

 	BeginTransaction(ctx context.Context) (context.Context, error)
 	CommitTransaction(ctx context.Context) error
@@ -106,32 +106,24 @@ func (t *FilerStorePathTranlator) DeleteFolderChildren(ctx context.Context, fp u
 	return t.actualStore.DeleteFolderChildren(ctx, newFullPath)
 }

-func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
+func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) {

 	newFullPath := t.translatePath(dirPath)

-	entries, err := t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit)
-	if err != nil {
-		return nil, err
-	}
-	for _, entry := range entries {
+	return t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
 		entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
-	}
-	return entries, err
+		return eachEntryFunc(entry)
+	})
 }

-func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int, prefix string) ([]*Entry, error) {
+func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (string, error) {

 	newFullPath := t.translatePath(dirPath)

-	entries, err := t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix)
-	if err != nil {
-		return nil, err
-	}
-	for _, entry := range entries {
+	return t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
 		entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
-	}
-	return entries, nil
+		return eachEntryFunc(entry)
+	})
 }

 func (t *FilerStorePathTranlator) BeginTransaction(ctx context.Context) (context.Context, error) {
Some files were not shown because too many files have changed in this diff