mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

commit e67096656b
.github/ISSUE_TEMPLATE/bug_report.md (vendored): 14 lines changed

@@ -1,16 +1,28 @@
 ---
 name: Bug report
 about: Create a report to help us improve
 title: ''
 labels: ''
 assignees: ''
 ---

 Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
 Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs

 example of a good issue report:
 https://github.com/chrislusf/seaweedfs/issues/1005
 example of a bad issue report:
 https://github.com/chrislusf/seaweedfs/issues/1008

 **Describe the bug**
 A clear and concise description of what the bug is.

 **System Setup**
-List the command line to start "weed master", "weed volume", "weed filer", "weed s3", "weed mount".
+- List the command line to start "weed master", "weed volume", "weed filer", "weed s3", "weed mount".
+- OS version
+- output of `weed version`
+- if using filer, show the content of `filer.toml`

 **Expected behavior**
 A clear and concise description of what you expected to happen.
.github/workflows/go.yml (vendored, new file): 37 lines

@@ -0,0 +1,37 @@
name: Go

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:

    - name: Set up Go 1.x
      uses: actions/setup-go@v2
      with:
        go-version: ^1.13
      id: go

    - name: Check out code into the Go module directory
      uses: actions/checkout@v2

    - name: Get dependencies
      run: |
        cd weed; go get -v -t -d ./...
        if [ -f Gopkg.toml ]; then
            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
            dep ensure
        fi

    - name: Build
      run: cd weed; go build -v .

    - name: Test
      run: cd weed; go test -v .
.gitignore (vendored): 3 lines changed

@@ -80,3 +80,6 @@ build
 target
 *.class
 other/java/hdfs/dependency-reduced-pom.xml
+
+# binary file
+weed/weed
.travis.yml: 50 lines changed

@@ -1,18 +1,19 @@
 sudo: false
 language: go
 go:
-  - 1.10.x
-  - 1.11.x
-  - tip
+  - 1.12.x
+  - 1.13.x
+  - 1.14.x

 before_install:
   - export PATH=/home/travis/gopath/bin:$PATH

 install:
   - go get ./weed/...
   - export CGO_ENABLED="0"
   - go env

 script:
-  - go test ./weed/...
+  - env GO111MODULE=on go test ./weed/...

 before_deploy:
   - make release

@@ -22,23 +23,26 @@ deploy:
   api_key:
     secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI=
   file:
-    - build/linux_arm.tar.gz
-    - build/linux_arm64.tar.gz
-    - build/linux_386.tar.gz
-    - build/linux_amd64.tar.gz
-    - build/darwin_amd64.tar.gz
-    - build/windows_386.zip
-    - build/windows_amd64.zip
-    - build/freebsd_arm.tar.gz
-    - build/freebsd_amd64.tar.gz
-    - build/freebsd_386.tar.gz
-    - build/netbsd_arm.tar.gz
-    - build/netbsd_amd64.tar.gz
-    - build/netbsd_386.tar.gz
-    - build/openbsd_arm.tar.gz
-    - build/openbsd_amd64.tar.gz
-    - build/openbsd_386.tar.gz
+    - build/linux_arm.tar.gz
+    - build/linux_arm64.tar.gz
+    - build/linux_386.tar.gz
+    - build/linux_amd64.tar.gz
+    - build/linux_amd64_large_disk.tar.gz
+    - build/darwin_amd64.tar.gz
+    - build/darwin_amd64_large_disk.tar.gz
+    - build/windows_386.zip
+    - build/windows_amd64.zip
+    - build/windows_amd64_large_disk.zip
+    - build/freebsd_arm.tar.gz
+    - build/freebsd_amd64.tar.gz
+    - build/freebsd_386.tar.gz
+    - build/netbsd_arm.tar.gz
+    - build/netbsd_amd64.tar.gz
+    - build/netbsd_386.tar.gz
+    - build/openbsd_arm.tar.gz
+    - build/openbsd_amd64.tar.gz
+    - build/openbsd_386.tar.gz
   on:
     tags: true
     repo: chrislusf/seaweedfs
-    go: tip
+    go: 1.14.x
Makefile: 28 lines changed

@@ -8,10 +8,16 @@ appname := weed
 sources := $(wildcard *.go)

-build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
+COMMIT ?= $(shell git rev-parse --short HEAD)
+LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}
+
+build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
 tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
 zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)

+build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
+tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
+zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)

 all: build

@@ -24,17 +30,31 @@ clean:
 deps:
 	go get $(GO_FLAGS) -d $(SOURCE_DIR)
 	rm -rf /home/travis/gopath/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace
 	rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace

 build: deps
-	go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
+	go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)

 linux: deps
 	mkdir -p linux
-	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)
+	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)

-release: deps windows_build darwin_build linux_build bsd_build
+release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build

+##### LINUX BUILDS #####
+5_byte_linux_build:
+	$(call build_large,linux,amd64,)
+	$(call tar_large,linux,amd64)
+
+5_byte_darwin_build:
+	$(call build_large,darwin,amd64,)
+	$(call tar_large,darwin,amd64)
+
+5_byte_windows_build:
+	$(call build_large,windows,amd64,.exe)
+	$(call zip_large,windows,amd64,.exe)

 linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz

 build/linux_386.tar.gz: $(sources)
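The new LDFLAGS line works because Go's `-X` linker flag can overwrite a package-level string variable at link time. A minimal sketch of the receiving side, assuming a plain string variable named COMMIT in package weed/util; the helper function here is illustrative, not SeaweedFS's actual code:

```go
package util

// COMMIT is empty in a plain "go build" and is stamped at link time by:
//   go build -ldflags "-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)"
// The -X flag only works on plain, settable string variables like this one.
var COMMIT string

// VersionWithCommit is a hypothetical helper showing how such a
// variable is typically surfaced in "weed version"-style output.
func VersionWithCommit(version string) string {
	if COMMIT == "" {
		return version
	}
	return version + " " + COMMIT
}
```

The `*_large` targets compile with `-tags 5BytesOffset`, which presumably widens the on-disk offset field so a single volume can grow past the 32-bit offset limit; those binaries ship separately as the `_large_disk` archives added to .travis.yml above.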
README.md: 225 lines changed

@@ -4,24 +4,22 @@
 [![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs)
 [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
 [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki)
-[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=604800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
+[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=86400)](https://hub.docker.com/r/chrislusf/seaweedfs/)

 ![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)

 <h2 align="center">Supporting SeaweedFS</h2>

 SeaweedFS is an independent Apache-licensed open source project with its ongoing development made
 possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md).
 If you'd like to grow SeaweedFS even stronger, please consider joining our
 <a href="https://www.patreon.com/seaweedfs">sponsors on Patreon</a>.

 Platinum ($2500/month), Gold ($500/month): put your company logo on the SeaweedFS github page
 Generous Backer($50/month), Backer($10/month): put your name on the SeaweedFS backer page.

 Your support will be really appreciated by me and other supporters!

 <h3 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h3>

 <!--
 <h4 align="center">Platinum</h4>

 <p align="center">

@@ -45,61 +43,108 @@ Your support will be really appreciated by me and other supporters!
 </tbody>
 </table>

 -->

 ---

 - [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest)
-- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTc4MmVlYmFlNjBmZTgzZmJlYmI1MDE1YzkyNWYyZjkwZDFiM2RlMDdjNjVlNjdjYzc4NGFhZGIyYzEyMzJkYTA)
+- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
 - [SeaweedFS on Twitter](https://twitter.com/SeaweedFS)
 - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
 - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)
 - [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction)

 Table of Contents
 =================

-## Introduction
 * [Introduction](#introduction)
 * [Features](#features)
   * [Additional Features](#additional-features)
   * [Filer Features](#filer-features)
 * [Example Usage](#example-usage)
 * [Architecture](#architecture)
 * [Compared to Other File Systems](#compared-to-other-file-systems)
   * [Compared to HDFS](#compared-to-hdfs)
-  * [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph)
+  * [Compared to GlusterFS](#compared-to-glusterfs)
+  * [Compared to Ceph](#compared-to-ceph)
 * [Dev Plan](#dev-plan)
 * [Installation Guide](#installation-guide)
 * [Disk Related Topics](#disk-related-topics)
 * [Benchmark](#Benchmark)
 * [License](#license)

+## Introduction ##

 SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:

 1. to store billions of files!
 2. to serve the files fast!

-SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation).
+SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation).

 SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity, without any client side changes.

 There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.

-SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf).
+SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook's Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)

-SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Cassandra/LevelDB.
+On top of the object store, the optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc.

-## Additional Features
-* Can choose no replication or different replication levels, rack and data center aware
-* Automatic master servers failover - no single point of failure (SPOF)
-* Automatic Gzip compression depending on file mime type
-* Automatic compaction to reclaim disk space after deletion or update
-* Servers in the same cluster can have different disk spaces, file systems, OS etc.
-* Adding/Removing servers does **not** cause any data re-balancing
-* Optionally fix the orientation for jpeg pictures
-* Support Etag, Accept-Range, Last-Modified, etc.
-* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance.
-[Back to TOC](#table-of-contents)
-
-## Filer Features
+## Additional Features ##
+* Can choose no replication or different replication levels, rack and data center aware.
+* Automatic master servers failover - no single point of failure (SPOF).
+* Automatic Gzip compression depending on file mime type.
+* Automatic compaction to reclaim disk space after deletion or update.
+* [Automatic entry TTL expiration][VolumeServerTTL].
+* Any server with some disk space can add to the total storage space.
+* Adding/Removing servers does **not** cause any data re-balancing.
+* Optional picture resizing.
+* Support ETag, Accept-Range, Last-Modified, etc.
+* Support in-memory/leveldb/readonly mode tuning for memory/performance balance.
+* Support rebalancing the writable and readonly volumes.
+* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data.
+* [Erasure Coding for warm storage][ErasureCoding]: Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.

 [Back to TOC](#table-of-contents)

+## Filer Features ##
+* [filer server][Filer] provides "normal" directories and files via http.
+* [mount filer][Mount] to read and write files directly as a local directory via FUSE.
+* [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling.
+* [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
+* [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
+* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
+* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
+* [File TTL][FilerTTL] automatically purges file metadata and actual file data.
+* [Kubernetes CSI Driver][SeaweedFsCsiDriver]: a Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=604800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)

 [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
 [Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
 [AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API
 [BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud
 [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System
 [WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
 [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
 [CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier
 [FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption
 [FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores
 [VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live
 [SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver

 [Back to TOC](#table-of-contents)

-## Example Usage ##
+## Example Usage

 By default, the master node runs on port 9333, and the volume nodes run on port 8080.
 Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example.

 SeaweedFS uses HTTP REST operations to read, write, and delete. The responses are in JSON or JSONP format.

-### Start Master Server
+### Start Master Server ###

 ```
 > ./weed master

@@ -125,7 +170,7 @@ Second, to store the file content, send a HTTP multi-part POST request to `url + '/' + fid`:

 ```
 > curl -F file=@/home/chris/myphoto.jpg http://127.0.0.1:8080/3,01637037d6
-{"size": 43234}
+{"name":"myphoto.jpg","size":43234,"eTag":"1cc0118e"}
 ```

 To update, send another POST request with updated file content.

@@ -135,6 +180,7 @@ For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL:
 ```
 > curl -X DELETE http://127.0.0.1:8080/3,01637037d6
 ```

+### Save File Id ###

 Now, you can save the `fid`, 3,01637037d6 in this case, to a database field.
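Taken together, the write path above (ask the master for a file id, POST the bytes to the returned volume server, keep the fid) is easy to wrap in a few lines of Go. A minimal sketch, assuming the master's standard `/dir/assign` endpoint and the upload URL pattern shown in the curl examples; error handling is kept short and the struct fields mirror the JSON keys a typical assign response carries:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"mime/multipart"
	"net/http"
)

// assignResult mirrors the master's /dir/assign JSON response
// (fid and url are the two fields this flow relies on).
type assignResult struct {
	Fid string `json:"fid"`
	URL string `json:"url"`
}

func main() {
	// Step 1: ask the master (default port 9333) to assign a file id.
	resp, err := http.Get("http://localhost:9333/dir/assign")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var a assignResult
	if err := json.NewDecoder(resp.Body).Decode(&a); err != nil {
		panic(err)
	}

	// Step 2: multipart-POST the content to url + "/" + fid,
	// exactly like the curl -F example above.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, _ := w.CreateFormFile("file", "myphoto.jpg")
	part.Write([]byte("...file bytes here..."))
	w.Close()
	if _, err := http.Post("http://"+a.URL+"/"+a.Fid, w.FormDataContentType(), &buf); err != nil {
		panic(err)
	}

	// Step 3: save the fid (e.g. "3,01637037d6") in your database.
	fmt.Println("stored as", a.Fid)
}
```

The fid itself is just the volume id, a comma, and a hex string identifying the file inside that volume, which is why "3,01637037d6" can be split on the comma for the lookup step below.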
@@ -157,7 +203,7 @@ First look up the volume server's URLs by the file's volumeId:

 ```
 > curl http://localhost:9333/dir/lookup?volumeId=3
-{"locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]}
+{"volumeId":"3","locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]}
 ```

 Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read.
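A minimal read-path sketch in the same spirit, using the `/dir/lookup` endpoint shown above; the cache here is a plain map, which is enough precisely because volume locations rarely change:

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"strings"
)

type location struct {
	PublicURL string `json:"publicUrl"`
	URL       string `json:"url"`
}

type lookupResult struct {
	Locations []location `json:"locations"`
}

// volumeCache memoizes volumeId -> locations, per the advice above
// that lookup results can be cached most of the time.
var volumeCache = map[string][]location{}

func urlFor(fid string) (string, error) {
	volumeID := strings.SplitN(fid, ",", 2)[0]
	locs, ok := volumeCache[volumeID]
	if !ok {
		resp, err := http.Get("http://localhost:9333/dir/lookup?volumeId=" + volumeID)
		if err != nil {
			return "", err
		}
		defer resp.Body.Close()
		var r lookupResult
		if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
			return "", err
		}
		locs = r.Locations
		volumeCache[volumeID] = locs
	}
	if len(locs) == 0 {
		return "", fmt.Errorf("no locations for volume %s", volumeID)
	}
	// Randomly pick one replica location to read, as suggested above.
	loc := locs[rand.Intn(len(locs))]
	return "http://" + loc.URL + "/" + fid, nil
}

func main() {
	u, err := urlFor("3,01637037d6")
	if err != nil {
		panic(err)
	}
	fmt.Println("GET", u) // fetch the bytes with a plain HTTP GET
}
```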
@@ -213,7 +259,7 @@ More details about replication can be found [on the wiki][Replication].

 You can also set the default replication strategy when starting the master server.

-### Allocate File Key on specific data center ###
+### Allocate File Key on Specific Data Center ###

 Volume servers can be started with a specific data center name:

@@ -239,6 +285,8 @@ When requesting a file key, an optional "dataCenter" parameter can limit the assigned volume to the specific data center.

 [feat-3]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#upload-large-files
 [feat-4]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space

+[Back to TOC](#table-of-contents)

 ## Architecture ##

 Usually distributed file systems split each file into chunks, a central master keeps a mapping of filenames, chunk indices to chunk handles, and also which chunks each chunk server has.
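In SeaweedFS terms, the master's placement state is closer to one small map from volume id to server locations, while per-file metadata lives on the volume servers; a toy contrast, with illustrative types only:

```go
package placement

// What a chunk-based system's central master must track:
// per-file chunk lists, plus chunk handle -> server locations.
var chunkIndex map[string][]string
var chunkLocations map[string][]string

// What the SeaweedFS master tracks instead, per volume (compare the
// /dir/lookup response earlier): volume id -> replica server locations.
var volumeLocations map[uint32][]string
```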
@@ -279,12 +327,26 @@ Each individual file size is limited to the volume size.

 All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
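The 16-byte figure follows directly from the layout: 8 bytes of key plus 4 bytes of offset plus 4 bytes of size. A minimal sketch of such an entry, with illustrative names rather than SeaweedFS's actual needle-map types (the offset is presumably stored in padded units, which would be consistent with the "at most 8 unused bytes between files" remark later in this README and with the 5BytesOffset large-disk builds added in the Makefile above):

```go
package needle

// needleEntry is the in-memory index record described above:
// <64bit key, 32bit offset, 32bit size> = 8 + 4 + 4 = 16 bytes per file.
type needleEntry struct {
	Key    uint64 // file key within the volume
	Offset uint32 // needle position in the volume file (in padded units)
	Size   uint32 // size of the stored content
}
```

At 16 bytes per file, indexing ten million files on one volume server costs on the order of 160 MB of memory, which is why the text says disk space usually runs out before memory does.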
+### Tiered Storage to the cloud ###
+
+The local volume servers are much faster, while cloud storages have elastic capacity and are actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud.
+
+Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally uploads the older volumes to the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with limited local servers, and stays fast for new data.
+
+With the O(1) access time, the network latency cost is kept at a minimum.
+
+If the hot~warm data is split as 20~80, with 20 servers, you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput.
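To check that arithmetic, assuming every server holds about the same amount of data: if the hot 20% stays on the 20 local servers and the warm 80% moves to the cloud, those 20 machines front 20 / 0.2 = 100 machines' worth of logical capacity; keeping all 100 machines for fresh data instead multiplies ingest capacity by 100 / 20 = 5.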
 [Back to TOC](#table-of-contents)

 ## Compared to Other File Systems ##

 Most other distributed file systems seem more complicated than necessary.

 SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications.

 [Back to TOC](#table-of-contents)

 ### Compared to HDFS ###

 HDFS uses the chunk approach for each file, and is ideal for storing large files.

@@ -293,6 +355,7 @@ SeaweedFS is ideal for serving relatively smaller files quickly and concurrently.

 SeaweedFS can also store extra large files by splitting them into manageable data chunks, and store the file ids of the data chunks into a meta chunk. This is managed by the "weed upload/download" tool, and the weed master or volume servers are agnostic about it.

+[Back to TOC](#table-of-contents)

 ### Compared to GlusterFS, Ceph ###

@@ -300,7 +363,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fast.

 * SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files.
 * SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached.
-* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, MySql, Postgres, etc, and is easy to customize.
+* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customize.
 * SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc.

 | System | File Meta | File Content Read| POSIX | REST API | Optimized for small files |

@@ -309,6 +372,9 @@
 | SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes |
 | GlusterFS | hashing | | FUSE, NFS | | |
 | Ceph | hashing + rules | | FUSE | Yes | |
+| MooseFS | in memory | | FUSE | | No |

 [Back to TOC](#table-of-contents)

 ### Compared to GlusterFS ###

@@ -316,11 +382,21 @@ GlusterFS stores files, both directories and content, in configurable volumes called "bricks".

 GlusterFS hashes the path and filename into ids, which are assigned to virtual volumes and then mapped to "bricks".

 [Back to TOC](#table-of-contents)

+### Compared to MooseFS ###
+
+MooseFS chooses to neglect the small file issue. From the moosefs 3.0 manual, "even a small file will occupy 64KiB plus additionally 4KiB of checksums and 1KiB for the header", because it "was initially designed for keeping large amounts (like several thousands) of very big files".
+
+MooseFS Master Server keeps all meta data in memory. Same issue as the HDFS namenode.
+
+[Back to TOC](#table-of-contents)

 ### Compared to Ceph ###

 Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)

 SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.

 Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews.

@@ -328,7 +404,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement.

 SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read.

-SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Cassandra, to manage file directories. These stores are proven, scalable, and easier to manage.
+SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachDB, to manage file directories. These stores are proven, scalable, and easier to manage.

 | SeaweedFS | comparable to Ceph | advantage |
 | ------------- | ------------- | ---------------- |

@@ -336,16 +412,26 @@
 | Volume | OSD | optimized for small files |
 | Filer | Ceph FS | linearly scalable, Customizable, O(1) or O(logN) |

 [Back to TOC](#table-of-contents)

-## Dev plan ##
+## Dev Plan ##

 More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balance data, how to grow volumes, how to check system status, etc.
 Other key features include: Erasure Encoding, JWT security.

 This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)!

+BTW, we suggest running the code style check script `util/gostd` before you push your branch to remote; it will make SeaweedFS easy to review, maintain and develop:
+
+```
+$ ./util/gostd
+```
+
+[Back to TOC](#table-of-contents)

-## Installation guide for users who are not familiar with golang
+## Installation Guide ##
+
+> Installation guide for users who are not familiar with golang

 Step 1: install go on your machine and setup the environment by following the instructions at:

@@ -366,77 +452,87 @@ go get github.com/chrislusf/seaweedfs/weed

 Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory

+Note:
+* If you run into the following problem, try `rm -Rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace` and build again.
+```
+panic: /debug/requests is already registered. You may have two independent copies of golang.org/x/net/trace in your binary, trying to maintain separate state. This may involve a vendored copy of golang.org/x/net/trace.
+```
+
 Step 4: after you modify your code locally, you could start a local build by calling `go install` under

 ```
 $GOPATH/src/github.com/chrislusf/seaweedfs/weed
 ```

-## Disk Related topics ##
+[Back to TOC](#table-of-contents)
+
+## Disk Related Topics ##

 ### Hard Drive Performance ###

 When testing read performance on SeaweedFS, it basically becomes a performance test of your hard drive's random read speed. Hard drives usually get 100MB/s~200MB/s.

-### Solid State Disk
+### Solid State Disk ###

 To modify or delete small files, SSD must delete a whole block at a time, and move content in existing blocks to a new block. SSD is fast when brand new, but will get fragmented over time and you have to garbage collect, compacting blocks. SeaweedFS is friendly to SSD since it is append-only. Deletion and compaction are done at the volume level in the background, not slowing reads and not causing fragmentation.

-## Benchmark
+[Back to TOC](#table-of-contents)
+
+## Benchmark ##

 My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CPU: 1 Intel Core i7 2.6GHz.

 Write 1 million 1KB files:
 ```
 Concurrency Level:      16
-Time taken for tests:   88.796 seconds
+Time taken for tests:   66.753 seconds
 Complete requests:      1048576
 Failed requests:        0
-Total transferred:      1106764659 bytes
-Requests per second:    11808.87 [#/sec]
-Transfer rate:          12172.05 [Kbytes/sec]
+Total transferred:      1106789009 bytes
+Requests per second:    15708.23 [#/sec]
+Transfer rate:          16191.69 [Kbytes/sec]

 Connection Times (ms)
               min      avg        max      std
-Total:        0.2      1.3       44.8      0.9
+Total:        0.3      1.0       84.3      0.9

 Percentage of the requests served within a certain time (ms)
-   50%      1.1 ms
-   66%      1.3 ms
-   75%      1.5 ms
-   80%      1.7 ms
-   90%      2.1 ms
-   95%      2.6 ms
-   98%      3.7 ms
-   99%      4.6 ms
-  100%     44.8 ms
+   50%      0.8 ms
+   66%      1.0 ms
+   75%      1.1 ms
+   80%      1.2 ms
+   90%      1.4 ms
+   95%      1.7 ms
+   98%      2.1 ms
+   99%      2.6 ms
+  100%     84.3 ms
 ```

 Randomly read 1 million files:
 ```
 Concurrency Level:      16
-Time taken for tests:   34.263 seconds
+Time taken for tests:   22.301 seconds
 Complete requests:      1048576
 Failed requests:        0
-Total transferred:      1106762945 bytes
-Requests per second:    30603.34 [#/sec]
-Transfer rate:          31544.49 [Kbytes/sec]
+Total transferred:      1106812873 bytes
+Requests per second:    47019.38 [#/sec]
+Transfer rate:          48467.57 [Kbytes/sec]

 Connection Times (ms)
               min      avg        max      std
-Total:        0.0      0.5       20.7      0.7
+Total:        0.0      0.3       54.1      0.2

 Percentage of the requests served within a certain time (ms)
-   50%      0.4 ms
-   75%      0.5 ms
-   95%      0.6 ms
-   98%      0.8 ms
-   99%      1.2 ms
-  100%     20.7 ms
+   50%      0.3 ms
+   90%      0.4 ms
+   98%      0.6 ms
+   99%      0.7 ms
+  100%     54.1 ms
 ```

 [Back to TOC](#table-of-contents)

-## License
+## License ##

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -450,7 +546,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.

 The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-Sharealike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts).

-## Stargazers over time
+[Back to TOC](#table-of-contents)
+
+## Stargazers over time ##

 [![Stargazers over time](https://starcharts.herokuapp.com/chrislusf/seaweedfs.svg)](https://starcharts.herokuapp.com/chrislusf/seaweedfs)


backers.md

@@ -4,7 +4,9 @@
 <h2 align="center">Generous Backers ($50+)</h2>

+- [4Sight Imaging](https://www.4sightimaging.com/)
 - [Evercam Camera Management Software](https://evercam.io/)
+- [Admiral](https://getadmiral.com)

 <h2 align="center">Backers</h2>


docker/Dockerfile

@@ -1,9 +1,24 @@
-FROM frolvlad/alpine-glibc
+FROM alpine

 # Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format"
-RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
-  wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \
-  tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \
+RUN \
+  ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
+  elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
+  elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
+  elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
+  elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \
+  echo "Building for $ARCH" 1>&2 && \
+  SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
+  SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
+  SUPERCRONIC=supercronic-linux-$ARCH && \
+  # Install SeaweedFS and Supercronic ( for cron job mode )
+  apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
+  wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
+  tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
+  curl -fsSLO "$SUPERCRONIC_URL" && \
+  echo "${SUPERCRONIC_SHA1SUM}  ${SUPERCRONIC}" | sha1sum -c - && \
+  chmod +x "$SUPERCRONIC" && \
+  mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \
+  ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \
   apk del build-dependencies && \
   rm -rf /tmp/*

@@ -22,6 +37,8 @@ EXPOSE 9333
 # s3 server http port
 EXPOSE 8333

+RUN mkdir -p /data/filerldb2
+
 VOLUME /data

 COPY filer.toml /etc/seaweedfs/filer.toml


docker/Dockerfile.go_build

@@ -1,5 +1,15 @@
-FROM golang:latest
-RUN go get github.com/chrislusf/seaweedfs/weed
+FROM frolvlad/alpine-glibc as builder
+RUN apk add git go g++
+RUN mkdir -p /go/src/github.com/chrislusf/
+RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
+RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install
+
+FROM alpine AS final
+LABEL author="Chris Lu"
+COPY --from=builder /root/go/bin/weed /usr/bin/
+RUN mkdir -p /etc/seaweedfs
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
+COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh

 # volume server grpc port
 EXPOSE 18080

@@ -16,12 +26,10 @@ EXPOSE 9333
 # s3 server http port
 EXPOSE 8333

+RUN mkdir -p /data/filerldb2
+
 VOLUME /data

-RUN mkdir -p /etc/seaweedfs
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
-RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
 RUN chmod +x /entrypoint.sh
-RUN cp /go/bin/weed /usr/bin/

 ENTRYPOINT ["/entrypoint.sh"]
docker/Dockerfile.local (new file): 29 lines

@@ -0,0 +1,29 @@
FROM alpine AS final
LABEL author="Chris Lu"
COPY ./weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY ./filer.toml /etc/seaweedfs/filer.toml
COPY ./entrypoint.sh /entrypoint.sh

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333

RUN mkdir -p /data/filerldb2

VOLUME /data

RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
docker/Makefile (new file): 19 lines

@@ -0,0 +1,19 @@
all: gen

.PHONY : gen

gen: dev

build:
	cd ../weed; GOOS=linux go build; mv weed ../docker/
	docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
	rm ./weed

dev: build
	docker-compose -f local-dev-compose.yml -p seaweedfs up

cluster: build
	docker-compose -f local-cluster-compose.yml -p seaweedfs up

clean:
	rm ./weed
docker/README.md

@@ -11,11 +11,29 @@ docker-compose -f seaweedfs-compose.yml -p seaweedfs up

 ```

-## Development
+## Try latest tip
+
+```bash
+
+wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml
+
+docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up
+
+```
+
+## Local Development

 ```bash
 cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker
-
-docker-compose -f dev-compose.yml -p seaweedfs up
-
+make
 ```

+## Build and push a multiarch build
+
+Make sure that `docker buildx` is supported (might be an experimental docker feature)
+```bash
+BUILDER=$(docker buildx create --driver docker-container --use)
+docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 . -t chrislusf/seaweedfs
+docker buildx stop $BUILDER
+```


docker/dev-compose.yml (deleted file): 43 lines

@@ -1,43 +0,0 @@
version: '2'

services:
  master:
    build:
      context: .
      dockerfile: Dockerfile.go_build
    ports:
      - 9333:9333
      - 19333:19333
    command: "master"
  volume:
    build:
      context: .
      dockerfile: Dockerfile.go_build
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -max=5 -mserver="master:9333" -port=8080'
    depends_on:
      - master
  filer:
    build:
      context: .
      dockerfile: Dockerfile.go_build
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master:9333"'
    depends_on:
      - master
      - volume
  s3:
    build:
      context: .
      dockerfile: Dockerfile.go_build
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer


docker/entrypoint.sh

@@ -3,48 +3,46 @@
 case "$1" in

   'master')
-    ARGS="-ip `hostname -i` -mdir /data"
-    # Is this instance linked with another master? (Docker commandline "--link master1:master")
-    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
-      ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
-    fi
+    ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024"
     exec /usr/bin/weed $@ $ARGS
     ;;

   'volume')
-    ARGS="-ip `hostname -i` -dir /data"
-    # Is this instance linked with a master? (Docker commandline "--link master1:master")
-    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
-      ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
-    fi
+    ARGS="-dir=/data -max=0"
+    if [[ $@ == *"-max="* ]]; then
+      ARGS="-dir=/data"
+    fi
     exec /usr/bin/weed $@ $ARGS
     ;;

   'server')
-    ARGS="-ip `hostname -i` -dir /data"
-    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
-      ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
-    fi
+    ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024"
+    if [[ $@ == *"-volume.max="* ]]; then
+      ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024"
+    fi
     exec /usr/bin/weed $@ $ARGS
     ;;

   'filer')
-    ARGS="-ip `hostname -i` "
-    if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
-      ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
-    fi
-    mkdir -p /data/filerdb
+    ARGS=""
     exec /usr/bin/weed $@ $ARGS
     ;;

   's3')
     ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE"
     if [ -n "$FILER_PORT_8888_TCP_ADDR" ] ; then
       ARGS="$ARGS -filer=$FILER_PORT_8888_TCP_ADDR:$FILER_PORT_8888_TCP_PORT"
     fi
     exec /usr/bin/weed $@ $ARGS
     ;;

+  'cronjob')
+    MASTER=${WEED_MASTER-localhost:9333}
+    FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *}
+    echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab
+    BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *}
+    echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab
+    echo "Running Crontab:"
+    cat /crontab
+    exec supercronic /crontab
+    ;;

   *)
     exec /usr/bin/weed $@
     ;;


docker/filer.toml

@@ -1,3 +1,3 @@
-[leveldb]
+[leveldb2]
 enabled = true
-dir = "/data/filerdb"
+dir = "/data/filerldb2"
docker/local-cluster-compose.yml (new file): 53 lines

@@ -0,0 +1,53 @@
version: '2'

services:
  master0:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335"
  master1:
    image: chrislusf/seaweedfs:local
    ports:
      - 9334:9334
      - 19334:19334
    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335"
  master2:
    image: chrislusf/seaweedfs:local
    ports:
      - 9335:9335
      - 19335:19335
    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume'
    depends_on:
      - master0
      - master1
      - master2
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master0:9333,master1:9334,master2:9335"'
    depends_on:
      - master0
      - master1
      - master2
      - volume
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master0
      - master1
      - master2
      - volume
      - filer
docker/local-dev-compose.yml (new file): 35 lines

@@ -0,0 +1,35 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master:9333"'
    depends_on:
      - master
      - volume
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer


docker/seaweedfs-compose.yml

@@ -4,34 +4,44 @@ services:
   master:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 9333:9333
       - 19333:19333
-    command: "master"
+    command: "master -ip=master"
   volume:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 8080:8080
       - 18080:18080
-    command: 'volume -max=15 -mserver="master:9333" -port=8080'
+    command: 'volume -mserver="master:9333" -port=8080'
     depends_on:
       - master
   filer:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 8888:8888
       - 18888:18888
     command: 'filer -master="master:9333"'
     tty: true
     stdin_open: true
     depends_on:
       - master
       - volume
+  cronjob:
+    image: chrislusf/seaweedfs # use a remote image
+    command: 'cronjob'
+    environment:
+      # Run re-replication every 2 minutes
+      CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *'
+      WEED_MASTER: master:9333 # Default: localhost:9333
+    depends_on:
+      - master
+      - volume
   s3:
     image: chrislusf/seaweedfs # use a remote image
     ports:
       - 8333:8333
     command: 's3 -filer="filer:8888"'
     depends_on:
       - master
       - volume
       - filer
docker/seaweedfs-dev-compose.yml (new file): 35 lines

@@ -0,0 +1,35 @@
version: '2'

services:
  master:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 8080:8080
      - 18080:18080
    command: 'volume -mserver="master:9333" -port=8080 -ip=volume'
    depends_on:
      - master
  filer:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 8888:8888
      - 18888:18888
    command: 'filer -master="master:9333"'
    depends_on:
      - master
      - volume
  s3:
    image: chrislusf/seaweedfs:dev # use a remote dev image
    ports:
      - 8333:8333
    command: 's3 -filer="filer:8888"'
    depends_on:
      - master
      - volume
      - filer
go.mod (new file): 101 lines

@@ -0,0 +1,101 @@
module github.com/chrislusf/seaweedfs

go 1.12

require (
	cloud.google.com/go v0.44.3
	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
	github.com/Azure/azure-storage-blob-go v0.8.0
	github.com/DataDog/zstd v1.4.1 // indirect
	github.com/OneOfOne/xxhash v1.2.2
	github.com/Shopify/sarama v1.23.1
	github.com/aws/aws-sdk-go v1.23.13
	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
	github.com/cespare/xxhash v1.1.0
	github.com/chrislusf/raft v1.0.1
	github.com/coreos/bbolt v1.3.3 // indirect
	github.com/coreos/etcd v3.3.15+incompatible // indirect
	github.com/coreos/go-semver v0.3.0 // indirect
	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
	github.com/dgrijalva/jwt-go v3.2.0+incompatible
	github.com/disintegration/imaging v1.6.2
	github.com/dustin/go-humanize v1.0.0
	github.com/eapache/go-resiliency v1.2.0 // indirect
	github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
	github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
	github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4
	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
	github.com/frankban/quicktest v1.7.2 // indirect
	github.com/go-redis/redis v6.15.7+incompatible
	github.com/go-sql-driver/mysql v1.4.1
	github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
	github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
	github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
	github.com/golang/protobuf v1.4.2
	github.com/google/btree v1.0.0
	github.com/google/uuid v1.1.1
	github.com/gorilla/mux v1.7.3
	github.com/gorilla/websocket v1.4.1 // indirect
	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
	github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
	github.com/hashicorp/golang-lru v0.5.3 // indirect
	github.com/jcmturner/gofork v1.0.0 // indirect
	github.com/karlseguin/ccache v2.0.3+incompatible
	github.com/karlseguin/expect v1.0.1 // indirect
	github.com/klauspost/compress v1.10.9
	github.com/klauspost/cpuid v1.2.1 // indirect
	github.com/klauspost/crc32 v1.2.0
	github.com/klauspost/reedsolomon v1.9.2
	github.com/kurin/blazer v0.5.3
	github.com/lib/pq v1.2.0
	github.com/magiconair/properties v1.8.1 // indirect
	github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
	github.com/mattn/go-runewidth v0.0.4 // indirect
	github.com/nats-io/nats-server/v2 v2.0.4 // indirect
	github.com/onsi/ginkgo v1.10.1 // indirect
	github.com/onsi/gomega v1.7.0 // indirect
	github.com/peterh/liner v1.1.0
	github.com/pierrec/lz4 v2.2.7+incompatible // indirect
	github.com/prometheus/client_golang v1.1.0
	github.com/prometheus/procfs v0.0.4 // indirect
	github.com/rakyll/statik v0.1.7
	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
	github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff
	github.com/seaweedfs/goexif v1.0.2
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/afero v1.2.2 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/viper v1.4.0
	github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect
	github.com/stretchr/testify v1.4.0
	github.com/syndtr/goleveldb v1.0.0
	github.com/tidwall/gjson v1.3.2
	github.com/tidwall/match v1.0.1
	github.com/willf/bitset v1.1.10 // indirect
	github.com/willf/bloom v2.0.3+incompatible
	github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
	go.etcd.io/etcd v3.3.15+incompatible
	go.mongodb.org/mongo-driver v1.3.2
	gocloud.dev v0.16.0
	gocloud.dev/pubsub/natspubsub v0.16.0
	gocloud.dev/pubsub/rabbitpubsub v0.16.0
	golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
	golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
	golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
	golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5
	google.golang.org/api v0.9.0
	google.golang.org/appengine v1.6.2 // indirect
	google.golang.org/grpc v1.29.1
	google.golang.org/protobuf v1.24.0
	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
	gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
	gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
	gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect
	sigs.k8s.io/yaml v1.1.0 // indirect
)

replace (
	github.com/satori/go.uuid v1.2.0 => github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b
	go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
)
763
go.sum
Normal file
763
go.sum
Normal file
|
@ -0,0 +1,763 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=
contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs=
github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM=
github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA=
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg=
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo=
github.com/chrislusf/raft v1.0.0 h1:tRGtB3nWOg8VOPSoeZ4NfHMLJcjKgyNkUZQbcyKWf1Y=
github.com/chrislusf/raft v1.0.0/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
github.com/chrislusf/raft v1.0.1 h1:Wa4ffkmkysW7cX3T/gMC/Mk3PhnOXhsqOVwQJcMndhw=
github.com/chrislusf/raft v1.0.1/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M=
github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA=
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A=
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34=
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=
github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=
github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE=
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=
github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.11.0 h1:aT5ISUniaOTErogCQ+4pGoYNBB6rm6Fq3g1v8QwYGas=
github.com/grpc-ecosystem/grpc-gateway v1.11.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU=
github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w=
github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.9 h1:pPRt1Z78crspaHISkpSSHjDlx+Tt9suHe519dsI0vF4=
github.com/klauspost/compress v1.10.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I=
github.com/klauspost/crc32 v1.2.0/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/reedsolomon v1.9.2 h1:E9CMS2Pqbv+C7tsrYad4YC9MfhnMVWhMRsTi7U0UB18=
github.com/klauspost/reedsolomon v1.9.2/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kurin/blazer v0.5.3 h1:SAgYv0TKU0kN/ETfO5ExjNAPyMt2FocO2s/UlCHfjAk=
github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY=
github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4=
github.com/nats-io/jwt v0.2.14/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/nats-server/v2 v2.0.0/go.mod h1:RyVdsHHvY4B6c9pWG+uRLpZ0h0XsqiuKp2XCTurP5LI=
github.com/nats-io/nats-server/v2 v2.0.4 h1:XOMeQRbhl1lGNTIctPhih6pTa15NGif54Uas6ZW5q7g=
github.com/nats-io/nats-server/v2 v2.0.4/go.mod h1:AWdGEVbjKRS9ZIx4DSP5eKW48nfFm7q3uiSkP/1KD7M=
github.com/nats-io/nats.go v1.8.1 h1:6lF/f1/NN6kzUDBz6pyvQDEXO39jqXcWRLu/tKjtOUQ=
github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os=
github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc=
github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78=
github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0=
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 h1:s71VGheLtWmCYsnNjf+s7XE8HsrZnd3EYGrLGWVm7nY=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM=
|
||||
go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ=
|
||||
go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
|
||||
go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug=
|
||||
go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
|
||||
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4=
|
||||
go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
|
||||
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM=
|
||||
gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0=
|
||||
gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI=
|
||||
gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI=
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E=
|
||||
gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4=
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc=
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ=
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4=
gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw=
gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
23
k8s/README.md
Normal file
23
k8s/README.md
Normal file
@ -0,0 +1,23 @@
## SEAWEEDFS - helm chart (2.x)

### info:
* master/filer/volume are stateful sets with anti-affinity on the hostname,
so your deployment will be spread out and highly available.
* the chart uses memsql (MySQL-compatible) as the filer backend to enable HA (multiple filer instances),
plus the backup/HA guarantees memsql can provide.
* the mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer
via ENV.
* cert config exists and can be enabled, but it has not been tested.

### current instances config (AIO):
1 instance for each type (master/filer/volume/s3)

instances need node labels (see the sketch after this section):
* sw-volume: true (for the volume instance, a specific tag)
* sw-backend: true (for all others, as they are less resource-demanding)

you can update the replica count for each node type in values.yaml;
to scale further you need to add more nodes with the corresponding label.

most of the configuration is available through values.yaml
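A minimal sketch of the node-label scheme the README describes (the node name here is hypothetical, and in practice the labels would typically be applied with kubectl rather than by editing the Node object): a node meant to host both a volume server and the backend components would carry labels like these:

    apiVersion: v1
    kind: Node
    metadata:
      name: worker-1           # hypothetical node name
      labels:
        sw-volume: "true"      # allows the volume server to schedule here
        sw-backend: "true"     # allows master/filer/s3 to schedule here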
22
k8s/seaweedfs/.helmignore
Normal file
22
k8s/seaweedfs/.helmignore
Normal file
@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
4
k8s/seaweedfs/Chart.yaml
Normal file
4
k8s/seaweedfs/Chart.yaml
Normal file
@ -0,0 +1,4 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
version: 1.84
114
k8s/seaweedfs/templates/_helpers.tpl
Normal file
114
k8s/seaweedfs/templates/_helpers.tpl
Normal file
@ -0,0 +1,114 @@
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to
this (by the DNS naming spec). If release name contains chart name it will
be used as a full name.
*/}}
{{- define "seaweedfs.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "seaweedfs.chart" -}}
{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Expand the name of the chart.
*/}}
{{- define "seaweedfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Inject extra environment vars in the format key:value, if populated
*/}}
{{- define "seaweedfs.extraEnvironmentVars" -}}
{{- if .extraEnvironmentVars -}}
{{- range $key, $value := .extraEnvironmentVars }}
- name: {{ $key }}
  value: {{ $value | quote }}
{{- end -}}
{{- end -}}
{{- end -}}

{{/* Return the proper filer image */}}
{{- define "filer.image" -}}
{{- if .Values.filer.imageOverride -}}
{{- $imageOverride := .Values.filer.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}

{{/* Return the proper postgresqlSchema image */}}
{{- define "filer.dbSchema.image" -}}
{{- if .Values.filer.dbSchema.imageOverride -}}
{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.global.repository | toString -}}
{{- $name := .Values.filer.dbSchema.imageName | toString -}}
{{- $tag := .Values.filer.dbSchema.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}

{{/* Return the proper master image */}}
{{- define "master.image" -}}
{{- if .Values.master.imageOverride -}}
{{- $imageOverride := .Values.master.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}

{{/* Return the proper s3 image */}}
{{- define "s3.image" -}}
{{- if .Values.s3.imageOverride -}}
{{- $imageOverride := .Values.s3.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}

{{/* Return the proper volume image */}}
{{- define "volume.image" -}}
{{- if .Values.volume.imageOverride -}}
{{- $imageOverride := .Values.volume.imageOverride -}}
{{- printf "%s" $imageOverride -}}
{{- else -}}
{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}}
{{- $repositoryName := .Values.image.repository | toString -}}
{{- $name := .Values.global.imageName | toString -}}
{{- $tag := .Values.global.imageTag | toString -}}
{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}}
{{- end -}}
{{- end -}}
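For orientation, the image helpers above concatenate registry, repository, image name, and tag with no separators of their own (only the final ":" before the tag), so any non-empty registry or repository value is expected to carry its own trailing slash; "default" makes localRegistry win over the plain registry when it is set. A hedged values.yaml sketch under those assumptions (the values shown are illustrative, not necessarily the chart's shipped defaults):

    image:
      registry: ""                # e.g. "registry.example.com/", trailing slash included
      repository: ""
    global:
      localRegistry: ""           # when set, takes precedence over image.registry
      imageName: chrislusf/seaweedfs
      imageTag: "1.84"

With these values, "filer.image" would render simply as chrislusf/seaweedfs:1.84.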
14
k8s/seaweedfs/templates/ca-cert.yaml
Normal file
14
k8s/seaweedfs/templates/ca-cert.yaml
Normal file
@ -0,0 +1,14 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: {{ template "seaweedfs.name" . }}-ca-cert
  namespace: {{ .Release.Namespace }}
spec:
  secretName: {{ template "seaweedfs.name" . }}-ca-cert
  commonName: "{{ template "seaweedfs.name" . }}-root-ca"
  isCA: true
  issuerRef:
    name: {{ template "seaweedfs.name" . }}-clusterissuer
    kind: ClusterIssuer
{{- end }}
8
k8s/seaweedfs/templates/cert-clusterissuer.yaml
Normal file
8
k8s/seaweedfs/templates/cert-clusterissuer.yaml
Normal file
@ -0,0 +1,8 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
  name: {{ template "seaweedfs.name" . }}-clusterissuer
spec:
  selfSigned: {}
{{- end }}
33
k8s/seaweedfs/templates/client-cert.yaml
Normal file
33
k8s/seaweedfs/templates/client-cert.yaml
Normal file
@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: {{ template "seaweedfs.name" . }}-client-cert
  namespace: {{ .Release.Namespace }}
spec:
  secretName: {{ template "seaweedfs.name" . }}-client-cert
  issuerRef:
    name: {{ template "seaweedfs.name" . }}-clusterissuer
    kind: ClusterIssuer
  commonName: {{ .Values.certificates.commonName }}
  organization:
  - "SeaweedFS CA"
  dnsNames:
  - '*.{{ .Release.Namespace }}'
  - '*.{{ .Release.Namespace }}.svc'
  - '*.{{ .Release.Namespace }}.svc.cluster.local'
  - '*.{{ template "seaweedfs.name" . }}-master'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
  {{- if .Values.certificates.ipAddresses }}
  ipAddresses:
    {{- range .Values.certificates.ipAddresses }}
    - {{ . }}
    {{- end }}
  {{- end }}
  keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
  keySize: {{ .Values.certificates.keySize }}
  duration: {{ .Values.certificates.duration }}
  renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}
33
k8s/seaweedfs/templates/filer-cert.yaml
Normal file
33
k8s/seaweedfs/templates/filer-cert.yaml
Normal file
@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: {{ template "seaweedfs.name" . }}-filer-cert
  namespace: {{ .Release.Namespace }}
spec:
  secretName: {{ template "seaweedfs.name" . }}-filer-cert
  issuerRef:
    name: {{ template "seaweedfs.name" . }}-clusterissuer
    kind: ClusterIssuer
  commonName: {{ .Values.certificates.commonName }}
  organization:
  - "SeaweedFS CA"
  dnsNames:
  - '*.{{ .Release.Namespace }}'
  - '*.{{ .Release.Namespace }}.svc'
  - '*.{{ .Release.Namespace }}.svc.cluster.local'
  - '*.{{ template "seaweedfs.name" . }}-master'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
  {{- if .Values.certificates.ipAddresses }}
  ipAddresses:
    {{- range .Values.certificates.ipAddresses }}
    - {{ . }}
    {{- end }}
  {{- end }}
  keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
  keySize: {{ .Values.certificates.keySize }}
  duration: {{ .Values.certificates.duration }}
  renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}
22
k8s/seaweedfs/templates/filer-service.yaml
Normal file
22
k8s/seaweedfs/templates/filer-service.yaml
Normal file
@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-filer
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: filer
spec:
  clusterIP: None
  ports:
  - name: "swfs-filer"
    port: {{ .Values.filer.port }}
    targetPort: {{ .Values.filer.port }}
    protocol: TCP
  - name: "swfs-filer-grpc"
    port: {{ .Values.filer.grpcPort }}
    targetPort: {{ .Values.filer.grpcPort }}
    protocol: TCP
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: filer
207
k8s/seaweedfs/templates/filer-statefulset.yaml
Normal file
207
k8s/seaweedfs/templates/filer-statefulset.yaml
Normal file
@ -0,0 +1,207 @@
{{- if .Values.filer.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "seaweedfs.name" . }}-filer
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  serviceName: {{ template "seaweedfs.name" . }}-filer
  podManagementPolicy: Parallel
  replicas: {{ .Values.filer.replicas }}
  {{- if (gt (int .Values.filer.updatePartition) 0) }}
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      partition: {{ .Values.filer.updatePartition }}
  {{- end }}
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: filer
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: filer
    spec:
      restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }}
      {{- if .Values.filer.affinity }}
      affinity:
        {{ tpl .Values.filer.affinity . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.filer.tolerations }}
      tolerations:
        {{ tpl .Values.filer.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      serviceAccountName: seaweefds-rw-sa # hack to allow deleting the master pod after migration
      terminationGracePeriodSeconds: 60
      {{- if .Values.filer.priorityClassName }}
      priorityClassName: {{ .Values.filer.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      containers:
        - name: seaweedfs
          image: {{ template "filer.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: WEED_MYSQL_USERNAME
              valueFrom:
                secretKeyRef:
                  name: secret-seaweedfs-db
                  key: user
            - name: WEED_MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: secret-seaweedfs-db
                  key: password
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
            {{- if .Values.filer.extraEnvironmentVars }}
            {{- range $key, $value := .Values.filer.extraEnvironmentVars }}
            - name: {{ $key }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed -logdir=/logs \
              {{- if .Values.filer.loggingOverrideLevel }}
              -v={{ .Values.filer.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              filer \
              -port={{ .Values.filer.port }} \
              {{- if .Values.filer.disableHttp }}
              -disableHttp \
              {{- end }}
              {{- if .Values.filer.disableDirListing }}
              -disableDirListing \
              {{- end }}
              -dirListLimit={{ .Values.filer.dirListLimit }} \
              -ip=${POD_IP} \
              -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
          {{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }}
          volumeMounts:
            - name: seaweedfs-filer-log-volume
              mountPath: "/logs/"
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }}
          {{- end }}
          ports:
            - containerPort: {{ .Values.filer.port }}
              name: swfs-filer
            - containerPort: {{ .Values.filer.grpcPort }}
              #name: swfs-filer-grpc
          readinessProbe:
            httpGet:
              path: /
              port: {{ .Values.filer.port }}
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 15
            successThreshold: 1
            failureThreshold: 100
          livenessProbe:
            httpGet:
              path: /
              port: {{ .Values.filer.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 5
          {{- if .Values.filer.resources }}
          resources:
            {{ tpl .Values.filer.resources . | nindent 12 | trim }}
          {{- end }}
      volumes:
        - name: seaweedfs-filer-log-volume
          hostPath:
            path: /storage/logs/seaweedfs/filer
            type: DirectoryOrCreate
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }}
      {{- if .Values.filer.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }}
      {{- end }}
{{/* volumeClaimTemplates:*/}}
{{/* - metadata:*/}}
{{/*     name: data-{{ .Release.Namespace }}*/}}
{{/*   spec:*/}}
{{/*     accessModes:*/}}
{{/*       - ReadWriteOnce*/}}
{{/*     resources:*/}}
{{/*       requests:*/}}
{{/*         storage: {{ .Values.filer.storage }}*/}}
{{/*     {{- if .Values.filer.storageClass }}*/}}
{{/*     storageClassName: {{ .Values.filer.storageClass }}*/}}
{{/*     {{- end }}*/}}
{{- end }}
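To trace the -master flag template in the statefulset above: it iterates over the master replica count and joins each master pod's stable DNS name under the headless master service with commas. Assuming master.replicas is 3, master.port is 9333, and the release renders to "seaweedfs" (all three values illustrative), the flag would expand to:

    -master=seaweedfs-master-0.seaweedfs-master:9333,seaweedfs-master-1.seaweedfs-master:9333,seaweedfs-master-2.seaweedfs-master:9333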
59
k8s/seaweedfs/templates/ingress.yaml
Normal file
59
k8s/seaweedfs/templates/ingress.yaml
Normal file
@ -0,0 +1,59 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-{{ template "seaweedfs.name" . }}-filer
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/auth-type: "basic"
    nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
    nginx.ingress.kubernetes.io/service-upstream: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
      sub_filter '="/' '="./'; #make absolute paths to relative
      sub_filter '=/' '=./';
      sub_filter '/seaweedfsstatic' './seaweedfsstatic';
      sub_filter_once off;
spec:
  rules:
  - http:
      paths:
      - path: /sw-filer/?(.*)
        backend:
          serviceName: {{ template "seaweedfs.name" . }}-filer
          servicePort: {{ .Values.filer.port }}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-{{ template "seaweedfs.name" . }}-master
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/auth-type: "basic"
    nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
    nginx.ingress.kubernetes.io/service-upstream: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
      sub_filter '="/' '="./'; #make absolute paths to relative
      sub_filter '=/' '=./';
      sub_filter '/seaweedfsstatic' './seaweedfsstatic';
      sub_filter_once off;
spec:
  rules:
  - http:
      paths:
      - path: /sw-master/?(.*)
        backend:
          serviceName: {{ template "seaweedfs.name" . }}-master
          servicePort: {{ .Values.master.port }}
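A quick worked example of the rewrite rules above (the URL is purely illustrative): a request for /sw-filer/some/dir matches the regex path /sw-filer/?(.*), the capture group becomes $1, and rewrite-target forwards it to the filer service as /some/dir; the sub_filter directives then patch absolute links in the returned HTML so they resolve back under the /sw-filer/ prefix. The /sw-master/ ingress above works the same way.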
33
k8s/seaweedfs/templates/master-cert.yaml
Normal file
33
k8s/seaweedfs/templates/master-cert.yaml
Normal file
@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: {{ template "seaweedfs.name" . }}-master-cert
  namespace: {{ .Release.Namespace }}
spec:
  secretName: {{ template "seaweedfs.name" . }}-master-cert
  issuerRef:
    name: {{ template "seaweedfs.name" . }}-clusterissuer
    kind: ClusterIssuer
  commonName: {{ .Values.certificates.commonName }}
  organization:
  - "SeaweedFS CA"
  dnsNames:
  - '*.{{ .Release.Namespace }}'
  - '*.{{ .Release.Namespace }}.svc'
  - '*.{{ .Release.Namespace }}.svc.cluster.local'
  - '*.{{ template "seaweedfs.name" . }}-master'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
  {{- if .Values.certificates.ipAddresses }}
  ipAddresses:
    {{- range .Values.certificates.ipAddresses }}
    - {{ . }}
    {{- end }}
  {{- end }}
  keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
  keySize: {{ .Values.certificates.keySize }}
  duration: {{ .Values.certificates.duration }}
  renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}
24
k8s/seaweedfs/templates/master-service.yaml
Normal file
24
k8s/seaweedfs/templates/master-service.yaml
Normal file
@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-master
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: master
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  clusterIP: None
  ports:
  - name: "swfs-master"
    port: {{ .Values.master.port }}
    targetPort: {{ .Values.master.port }}
    protocol: TCP
  - name: "swfs-master-grpc"
    port: {{ .Values.master.grpcPort }}
    targetPort: {{ .Values.master.grpcPort }}
    protocol: TCP
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: master
199
k8s/seaweedfs/templates/master-statefulset.yaml
Normal file
199
k8s/seaweedfs/templates/master-statefulset.yaml
Normal file
@ -0,0 +1,199 @@
{{- if .Values.master.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "seaweedfs.name" . }}-master
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  serviceName: {{ template "seaweedfs.name" . }}-master
  podManagementPolicy: Parallel
  replicas: {{ .Values.master.replicas }}
  {{- if (gt (int .Values.master.updatePartition) 0) }}
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      partition: {{ .Values.master.updatePartition }}
  {{- end }}
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: master
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: master
    spec:
      restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }}
      {{- if .Values.master.affinity }}
      affinity:
        {{ tpl .Values.master.affinity . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.master.tolerations }}
      tolerations:
        {{ tpl .Values.master.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      terminationGracePeriodSeconds: 60
      {{- if .Values.master.priorityClassName }}
      priorityClassName: {{ .Values.master.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      containers:
        - name: seaweedfs
          image: {{ template "master.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed -logdir=/logs \
              {{- if .Values.master.loggingOverrideLevel }}
              -v={{ .Values.master.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              master \
              -port={{ .Values.master.port }} \
              -mdir=/data \
              -ip.bind={{ .Values.master.ipBind }} \
              {{- if .Values.master.volumePreallocate }}
              -volumePreallocate \
              {{- end }}
              {{- if .Values.global.monitoring.enabled }}
              -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \
              {{- end }}
              -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
              {{- if .Values.master.disableHttp }}
              -disableHttp \
              {{- end }}
              -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \
              -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
          volumeMounts:
            - name : data-{{ .Release.Namespace }}
              mountPath: /data
            - name: seaweedfs-master-log-volume
              mountPath: "/logs/"
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }}
          ports:
            - containerPort: {{ .Values.master.port }}
              name: swfs-master
            - containerPort: {{ .Values.master.grpcPort }}
              #name: swfs-master-grpc
          readinessProbe:
            httpGet:
              path: /cluster/status
              port: {{ .Values.master.port }}
              scheme: HTTP
            initialDelaySeconds: 5
            periodSeconds: 15
            successThreshold: 2
            failureThreshold: 100
          livenessProbe:
            httpGet:
              path: /cluster/status
              port: {{ .Values.master.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 6
          {{- if .Values.master.resources }}
          resources:
            {{ tpl .Values.master.resources . | nindent 12 | trim }}
          {{- end }}
      volumes:
        - name: seaweedfs-master-log-volume
          hostPath:
            path: /storage/logs/seaweedfs/master
            type: DirectoryOrCreate
        - name: data-{{ .Release.Namespace }}
          hostPath:
            path: /ssd/seaweed-master/
            type: DirectoryOrCreate
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{ tpl .Values.master.extraVolumes . | indent 8 | trim }}
      {{- if .Values.master.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.master.nodeSelector . | indent 8 | trim }}
      {{- end }}
{{/* volumeClaimTemplates:*/}}
{{/* - metadata:*/}}
{{/*     name: data-{{ .Release.Namespace }}*/}}
{{/*   spec:*/}}
{{/*     accessModes:*/}}
{{/*       - ReadWriteOnce*/}}
{{/*     resources:*/}}
{{/*       requests:*/}}
{{/*         storage: {{ .Values.master.storage }}*/}}
{{/*     {{- if .Values.master.storageClass }}*/}}
{{/*     storageClassName: {{ .Values.master.storageClass }}*/}}
{{/*     {{- end }}*/}}
{{- end }}
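To make the Raft wiring in the master statefulset concrete: -ip binds each master to its own stable per-pod DNS name under the headless service, and -peers lists every replica the same way, so the peer set survives pod restarts. With the same illustrative values as before (3 replicas, port 9333, release name "seaweedfs"), pod seaweedfs-master-1 would start with:

    -ip=seaweedfs-master-1.seaweedfs-master \
    -peers=seaweedfs-master-0.seaweedfs-master:9333,seaweedfs-master-1.seaweedfs-master:9333,seaweedfs-master-2.seaweedfs-master:9333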
158
k8s/seaweedfs/templates/s3-deployment.yaml
Normal file
158
k8s/seaweedfs/templates/s3-deployment.yaml
Normal file
@ -0,0 +1,158 @@
{{- if .Values.s3.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "seaweedfs.name" . }}-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  serviceName: {{ template "seaweedfs.name" . }}-s3
  replicas: {{ .Values.s3.replicas }}
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: s3
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: s3
    spec:
      restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }}
      {{- if .Values.s3.tolerations }}
      tolerations:
        {{ tpl .Values.s3.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      terminationGracePeriodSeconds: 10
      {{- if .Values.s3.priorityClassName }}
      priorityClassName: {{ .Values.s3.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      containers:
        - name: seaweedfs
          image: {{ template "s3.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed \
              {{- if .Values.s3.loggingOverrideLevel }}
              -v={{ .Values.s3.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              s3 \
              -port={{ .Values.s3.port }} \
              {{- if .Values.global.enableSecurity }}
              -cert.file=/usr/local/share/ca-certificates/client/tls.crt \
              -key.file=/usr/local/share/ca-certificates/client/tls.key \
              {{- end }}
              {{- if .Values.s3.domainName }}
              -domainName={{ .Values.s3.domainName }} \
              {{- end }}
              -filer={{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.port }}
          {{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }}
          volumeMounts:
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }}
          {{- end }}
          ports:
            - containerPort: {{ .Values.s3.port }}
              name: swfs-s3
          readinessProbe:
            httpGet:
              path: /
              port: {{ .Values.s3.port }}
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 15
            successThreshold: 1
            failureThreshold: 100
          livenessProbe:
            httpGet:
              path: /
              port: {{ .Values.s3.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 60
            successThreshold: 1
            failureThreshold: 20
          {{- if .Values.s3.resources }}
          resources:
            {{ tpl .Values.s3.resources . | nindent 12 | trim }}
          {{- end }}
      volumes:
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }}
      {{- if .Values.s3.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.s3.nodeSelector . | indent 8 | trim }}
      {{- end }}
{{- end }}
17
k8s/seaweedfs/templates/s3-service.yaml
Normal file
17
k8s/seaweedfs/templates/s3-service.yaml
Normal file
@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-s3
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: s3
spec:
  ports:
  - name: "swfs-s3"
    port: {{ .Values.s3.port }}
    targetPort: {{ .Values.s3.port }}
    protocol: TCP
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: s3
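Taken together with the deployment above, in-cluster clients reach the S3 gateway through this service's DNS name. Assuming the release renders to "seaweedfs", the chart is installed in the "default" namespace, and s3.port is 8333 (all three are assumptions for illustration), the endpoint would be:

    http://seaweedfs-s3.default.svc.cluster.local:8333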
1352
k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml
Normal file
1352
k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml
Normal file
File diff suppressed because it is too large
14
k8s/seaweedfs/templates/secret-seaweedfs-db.yaml
Normal file
14
k8s/seaweedfs/templates/secret-seaweedfs-db.yaml
Normal file
@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: secret-seaweedfs-db
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/resource-policy": keep
    "helm.sh/hook": "pre-install"
stringData:
  user: "YourSWUser"
  password: "HardCodedPassword"
  # better to randomly generate the password and create it in the DB
  # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }}
52
k8s/seaweedfs/templates/security-configmap.yaml
Normal file
@ -0,0 +1,52 @@
{{- if .Values.global.enableSecurity }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "seaweedfs.name" . }}-security-config
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
data:
  security.toml: |-
    # this file is read by master, volume server, and filer

    # the jwt signing key is read by master and volume server
    # a jwt expires in 10 seconds
    [jwt.signing]
    key = "{{ randAlphaNum 10 | b64enc }}"

    # all grpc tls authentications are mutual
    # the values for the following ca, cert, and key are paths to the PEM files.
    [grpc]
    ca = "/usr/local/share/ca-certificates/ca/tls.crt"

    [grpc.volume]
    cert = "/usr/local/share/ca-certificates/volume/tls.crt"
    key = "/usr/local/share/ca-certificates/volume/tls.key"

    [grpc.master]
    cert = "/usr/local/share/ca-certificates/master/tls.crt"
    key = "/usr/local/share/ca-certificates/master/tls.key"

    [grpc.filer]
    cert = "/usr/local/share/ca-certificates/filer/tls.crt"
    key = "/usr/local/share/ca-certificates/filer/tls.key"

    # use this for any place that needs a grpc client
    # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
    [grpc.client]
    cert = "/usr/local/share/ca-certificates/client/tls.crt"
    key = "/usr/local/share/ca-certificates/client/tls.key"

    # volume server https options
    # Note: work in progress!
    # this does not work with other clients, e.g., "weed filer|mount" etc, yet.
    [https.client]
    enabled = false

    [https.volume]
    cert = ""
    key = ""
{{- end }}

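For reference, the Java client added later in this commit reads exactly these keys with toml4j (see FilerSslContext below). A minimal sketch of that lookup, assuming a security.toml in the working directory; the class name here is hypothetical, for illustration only:

import com.moandjiezana.toml.Toml;

import java.io.File;

public class SecurityTomlDemo {
    public static void main(String[] args) {
        Toml toml = new Toml().read(new File("security.toml"));
        // [grpc] holds the shared CA; [grpc.client] holds the client key pair
        System.out.println("ca   = " + toml.getString("grpc.ca"));
        System.out.println("cert = " + toml.getString("grpc.client.cert"));
        System.out.println("key  = " + toml.getString("grpc.client.key"));
    }
}
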
29
k8s/seaweedfs/templates/service-account.yaml
Normal file
@ -0,0 +1,29 @@
# hack to allow deleting master pods after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: seaweefds-rw-cr
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: seaweefds-rw-sa
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: system:serviceaccount:seaweefds-rw-sa:default
subjects:
  - kind: ServiceAccount
    name: seaweefds-rw-sa
    namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: seaweefds-rw-cr

33
k8s/seaweedfs/templates/volume-cert.yaml
Normal file
@ -0,0 +1,33 @@
{{- if .Values.global.enableSecurity }}
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: {{ template "seaweedfs.name" . }}-volume-cert
  namespace: {{ .Release.Namespace }}
spec:
  secretName: {{ template "seaweedfs.name" . }}-volume-cert
  issuerRef:
    name: {{ template "seaweedfs.name" . }}-clusterissuer
    kind: ClusterIssuer
  commonName: {{ .Values.certificates.commonName }}
  organization:
  - "SeaweedFS CA"
  dnsNames:
  - '*.{{ .Release.Namespace }}'
  - '*.{{ .Release.Namespace }}.svc'
  - '*.{{ .Release.Namespace }}.svc.cluster.local'
  - '*.{{ template "seaweedfs.name" . }}-master'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc'
  - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local'
  {{- if .Values.certificates.ipAddresses }}
  ipAddresses:
    {{- range .Values.certificates.ipAddresses }}
    - {{ . }}
    {{- end }}
  {{- end }}
  keyAlgorithm: {{ .Values.certificates.keyAlgorithm }}
  keySize: {{ .Values.certificates.keySize }}
  duration: {{ .Values.certificates.duration }}
  renewBefore: {{ .Values.certificates.renewBefore }}
{{- end }}

22
k8s/seaweedfs/templates/volume-service.yaml
Normal file
@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-volume
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    component: volume
spec:
  clusterIP: None
  ports:
  - name: "swfs-volume"
    port: {{ .Values.volume.port }}
    targetPort: {{ .Values.volume.port }}
    protocol: TCP
  - name: "swfs-volume-18080"
    port: {{ .Values.volume.grpcPort }}
    targetPort: {{ .Values.volume.grpcPort }}
    protocol: TCP
  selector:
    app: {{ template "seaweedfs.name" . }}
    component: volume

184
k8s/seaweedfs/templates/volume-statefulset.yaml
Normal file
@ -0,0 +1,184 @@
{{- if .Values.volume.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "seaweedfs.name" . }}-volume
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ template "seaweedfs.name" . }}
    chart: {{ template "seaweedfs.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  serviceName: {{ template "seaweedfs.name" . }}-volume
  replicas: {{ .Values.volume.replicas }}
  selector:
    matchLabels:
      app: {{ template "seaweedfs.name" . }}
      chart: {{ template "seaweedfs.chart" . }}
      release: {{ .Release.Name }}
      component: volume
  template:
    metadata:
      labels:
        app: {{ template "seaweedfs.name" . }}
        chart: {{ template "seaweedfs.chart" . }}
        release: {{ .Release.Name }}
        component: volume
    spec:
      {{- if .Values.volume.affinity }}
      affinity:
        {{ tpl .Values.volume.affinity . | nindent 8 | trim }}
      {{- end }}
      restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }}
      {{- if .Values.volume.tolerations }}
      tolerations:
        {{ tpl .Values.volume.tolerations . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.global.imagePullSecrets }}
      imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
      terminationGracePeriodSeconds: 10
      {{- if .Values.volume.priorityClassName }}
      priorityClassName: {{ .Values.volume.priorityClassName | quote }}
      {{- end }}
      enableServiceLinks: false
      containers:
        - name: seaweedfs
          image: {{ template "volume.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
          command:
            - "/bin/sh"
            - "-ec"
            - |
              exec /usr/bin/weed -logdir=/logs \
              {{- if .Values.volume.loggingOverrideLevel }}
              -v={{ .Values.volume.loggingOverrideLevel }} \
              {{- else }}
              -v={{ .Values.global.loggingLevel }} \
              {{- end }}
              volume \
              -port={{ .Values.volume.port }} \
              -dir={{ .Values.volume.dir }} \
              -max={{ .Values.volume.maxVolumes }} \
              {{- if .Values.volume.rack }}
              -rack={{ .Values.volume.rack }} \
              {{- end }}
              {{- if .Values.volume.dataCenter }}
              -dataCenter={{ .Values.volume.dataCenter }} \
              {{- end }}
              -ip.bind={{ .Values.volume.ipBind }} \
              -read.redirect={{ .Values.volume.readRedirect }} \
              {{- if .Values.volume.whiteList }}
              -whiteList={{ .Values.volume.whiteList }} \
              {{- end }}
              -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \
              -compactionMBps={{ .Values.volume.compactionMBps }} \
              -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
          volumeMounts:
            - name: seaweedfs-volume-storage
              mountPath: "/data/"
            - name: seaweedfs-volume-log-volume
              mountPath: "/logs/"
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              readOnly: true
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
            - name: ca-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/ca/
            - name: master-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/master/
            - name: volume-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/volume/
            - name: filer-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/filer/
            - name: client-cert
              readOnly: true
              mountPath: /usr/local/share/ca-certificates/client/
            {{- end }}
            {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }}
          ports:
            - containerPort: {{ .Values.volume.port }}
              name: swfs-vol
            - containerPort: {{ .Values.volume.grpcPort }}
              #name: swfs-vol-grpc
          readinessProbe:
            httpGet:
              path: /status
              port: {{ .Values.volume.port }}
              scheme: HTTP
            initialDelaySeconds: 5
            periodSeconds: 15
            successThreshold: 1
            failureThreshold: 100
          livenessProbe:
            httpGet:
              path: /status
              port: {{ .Values.volume.port }}
              scheme: HTTP
            initialDelaySeconds: 20
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 10
          {{- if .Values.volume.resources }}
          resources:
            {{ tpl .Values.volume.resources . | nindent 12 | trim }}
          {{- end }}
      volumes:
        - name: seaweedfs-volume-log-volume
          hostPath:
            path: /storage/logs/seaweedfs/volume
            type: DirectoryOrCreate
        - name: seaweedfs-volume-storage
          hostPath:
            path: /storage/object_store/
            type: DirectoryOrCreate
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{- if .Values.volume.extraVolumes }}
        {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }}
        {{- end }}
      {{- if .Values.volume.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }}
      {{- end }}
{{- end }}

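A note on the -mserver flag in the command above: the range/until loop emits one master address per replica, separated by commas. The template renders shell-style ${SEAWEEDFS_FULLNAME} references that the container's /bin/sh expands at startup, so with master.replicas set to 3, master.port 9333, and a fullname that renders as `seaweedfs` (an assumed rendering, for illustration), the flag becomes `-mserver=seaweedfs-master-0.seaweedfs-master:9333,seaweedfs-master-1.seaweedfs-master:9333,seaweedfs-master-2.seaweedfs-master:9333`.
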
306
k8s/seaweedfs/values.yaml
Normal file
@ -0,0 +1,306 @@
# Available parameters and their default values for the SeaweedFS chart.

global:
  registry: ""
  repository: ""
  imageName: chrislusf/seaweedfs
  imageTag: "1.84"
  imagePullPolicy: IfNotPresent
  imagePullSecrets: imagepullsecret
  restartPolicy: Always
  loggingLevel: 1
  enableSecurity: false
  monitoring:
    enabled: false
    gatewayHost: null
    gatewayPort: null

image:
  registry: ""
  repository: ""

master:
  enabled: true
  repository: null
  imageName: null
  imageTag: null
  imageOverride: null
  restartPolicy: null
  replicas: 1
  port: 9333
  grpcPort: 19333
  ipBind: "0.0.0.0"
  volumePreallocate: false
  volumeSizeLimitMB: 30000
  loggingOverrideLevel: null

  # Disable HTTP requests; only gRPC operations are allowed
  disableHttp: false

  extraVolumes: ""
  extraVolumeMounts: ""

  # storage and storageClass are the settings for configuring stateful
  # storage for the master pods. storage should be set to the disk size of
  # the attached volume. storageClass is the class of storage which defaults
  # to null (the Kube cluster will pick the default).
  storage: 25Gi
  storageClass: null

  # Resource requests, limits, etc. for the master cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # updatePartition is used to control a careful rolling update of SeaweedFS
  # masters.
  updatePartition: 0

  # Affinity Settings
  # Commenting out or emptying the affinity variable allows
  # deployment to single-node clusters such as Minikube
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: {{ template "seaweedfs.name" . }}
              release: "{{ .Release.Name }}"
              component: master
          topologyKey: kubernetes.io/hostname

  # Toleration Settings for master pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for master pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-backend: "true"

  # used to assign priority to master pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""


volume:
  enabled: true
  repository: null
  imageName: null
  imageTag: null
  imageOverride: null
  restartPolicy: null
  port: 8080
  grpcPort: 18080
  ipBind: "0.0.0.0"
  replicas: 1
  loggingOverrideLevel: null

  # limit background compaction or copying speed in megabytes per second
  compactionMBps: "40"

  # Directories to store data files. dir[,dir]... (default "/tmp")
  dir: "/data"

  # Maximum number of volumes, count[,count]...
  # If set to zero on a non-Windows OS, the limit will be auto-configured. (default "7")
  maxVolumes: "0"

  # Volume server's rack name
  rack: null

  # Volume server's data center name
  dataCenter: null

  # Redirect moved or non-local volumes. (default true)
  readRedirect: true

  # Comma-separated IP addresses with write permission. No limit if empty.
  whiteList: null

  extraVolumes: ""
  extraVolumeMounts: ""

  # Affinity Settings
  # Commenting out or emptying the affinity variable allows
  # deployment to single-node clusters such as Minikube
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: {{ template "seaweedfs.name" . }}
              release: "{{ .Release.Name }}"
              component: volume
          topologyKey: kubernetes.io/hostname

  # Resource requests, limits, etc. for the server cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # Toleration Settings for server pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-volume: "true"

  # used to assign priority to server pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""


filer:
  enabled: true
  repository: null
  imageName: null
  imageTag: null
  imageOverride: null
  restartPolicy: null
  replicas: 1
  port: 8888
  grpcPort: 18888
  loggingOverrideLevel: null

  # Limit sub dir listing size (default 100000)
  dirListLimit: 100000

  # Turn off directory listing
  disableDirListing: false

  # Disable HTTP requests; only gRPC operations are allowed
  disableHttp: false

  # storage and storageClass are the settings for configuring stateful
  # storage for the filer pods. storage should be set to the disk size of
  # the attached volume. storageClass is the class of storage which defaults
  # to null (the Kube cluster will pick the default).
  storage: 25Gi
  storageClass: null

  extraVolumes: ""
  extraVolumeMounts: ""

  # Affinity Settings
  # Commenting out or emptying the affinity variable allows
  # deployment to single-node clusters such as Minikube
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: {{ template "seaweedfs.name" . }}
              release: "{{ .Release.Name }}"
              component: filer
          topologyKey: kubernetes.io/hostname

  # updatePartition is used to control a careful rolling update of SeaweedFS
  # filers.
  updatePartition: 0

  # Resource requests, limits, etc. for the server cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # Toleration Settings for server pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-backend: "true"

  # used to assign priority to server pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""

  dbSchema:
    imageName: db-schema
    imageTag: "development"
    imageOverride: ""

  # extraEnvironmentVars is a list of extra environment variables to set with the stateful set.
  extraEnvironmentVars:
    WEED_MYSQL_ENABLED: "true"
    WEED_MYSQL_HOSTNAME: "mysql-db-host"
    WEED_MYSQL_PORT: "3306"
    WEED_MYSQL_DATABASE: "sw_database"
    WEED_MYSQL_CONNECTION_MAX_IDLE: "10"
    WEED_MYSQL_CONNECTION_MAX_OPEN: "150"
    # enable usage of memsql as filer backend
    WEED_MYSQL_INTERPOLATEPARAMS: "true"
    WEED_LEVELDB2_ENABLED: "false"
    # with HTTP DELETE, by default the filer checks whether a folder is empty.
    # recursive_delete will delete all subfolders and files, similar to "rm -Rf"
    WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false"
    # directories under this folder will automatically become separate buckets
    WEED_FILER_BUCKETS_FOLDER: "/buckets"
    # directories under this folder will store message queue data
    WEED_FILER_QUEUES_FOLDER: "/queues"

s3:
  enabled: true
  repository: null
  imageName: null
  imageTag: null
  restartPolicy: null
  replicas: 1
  port: 8333
  loggingOverrideLevel: null

  # Suffix of the host name, {bucket}.{domainName}
  domainName: ""

  extraVolumes: ""
  extraVolumeMounts: ""

  # Resource requests, limits, etc. for the server cluster placement. This
  # should map directly to the value of the resources field for a PodSpec,
  # formatted as a multi-line string. By default no direct resource request
  # is made.
  resources: null

  # Toleration Settings for server pods
  # This should be a multi-line string matching the Toleration array
  # in a PodSpec.
  tolerations: ""

  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  # Example:
  # nodeSelector: |
  #   beta.kubernetes.io/arch: amd64
  nodeSelector: |
    sw-backend: "true"

  # used to assign priority to server pods
  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
  priorityClassName: ""

certificates:
  commonName: "SeaweedFS CA"
  ipAddresses: []
  keyAlgorithm: rsa
  keySize: 2048
  duration: 2160h # 90d
  renewBefore: 360h # 15d

@ -1,10 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.0.5</version>
    <version>1.2.9</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
@ -13,12 +14,18 @@
    </parent>

    <properties>
        <protobuf.version>3.5.1</protobuf.version>
        <grpc.version>1.16.1</grpc.version>
        <guava.version>26.0-jre</guava.version>
        <protobuf.version>3.9.1</protobuf.version>
        <!-- follow https://github.com/grpc/grpc-java -->
        <grpc.version>1.23.0</grpc.version>
        <guava.version>28.0-jre</guava.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.moandjiezana.toml</groupId>
            <artifactId>toml4j</artifactId>
            <version>0.7.2</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java -->
        <dependency>
            <groupId>com.google.protobuf</groupId>
@ -74,7 +81,7 @@
        <extension>
            <groupId>kr.motd.maven</groupId>
            <artifactId>os-maven-plugin</artifactId>
            <version>1.5.0.Final</version>
            <version>1.6.2</version>
        </extension>
    </extensions>
    <plugins>
@ -82,18 +89,20 @@
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <configuration>
                <source>7</source>
                <target>7</target>
                <source>8</source>
                <target>8</target>
            </configuration>
        </plugin>
        <plugin>
            <groupId>org.xolstice.maven.plugins</groupId>
            <artifactId>protobuf-maven-plugin</artifactId>
            <version>0.5.1</version>
            <version>0.6.1</version>
            <configuration>
                <protocArtifact>com.google.protobuf:protoc:${protobuf.version}-1:exe:${os.detected.classifier}</protocArtifact>
                <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
                </protocArtifact>
                <pluginId>grpc-java</pluginId>
                <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}</pluginArtifact>
                <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
                </pluginArtifact>
            </configuration>
            <executions>
                <execution>

139
other/java/client/pom_debug.xml
Normal file
@ -0,0 +1,139 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
    <version>1.2.9</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
        <artifactId>oss-parent</artifactId>
        <version>9</version>
    </parent>

    <properties>
        <protobuf.version>3.9.1</protobuf.version>
        <!-- follow https://github.com/grpc/grpc-java -->
        <grpc.version>1.23.0</grpc.version>
        <guava.version>28.0-jre</guava.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.moandjiezana.toml</groupId>
            <artifactId>toml4j</artifactId>
            <version>0.7.2</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java -->
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java</artifactId>
            <version>${protobuf.version}</version>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>${guava.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-netty-shaded</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-protobuf</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>io.grpc</groupId>
            <artifactId>grpc-stub</artifactId>
            <version>${grpc.version}</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.25</version>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpmime</artifactId>
            <version>4.5.6</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <extensions>
            <extension>
                <groupId>kr.motd.maven</groupId>
                <artifactId>os-maven-plugin</artifactId>
                <version>1.6.2</version>
            </extension>
        </extensions>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.xolstice.maven.plugins</groupId>
                <artifactId>protobuf-maven-plugin</artifactId>
                <version>0.6.1</version>
                <configuration>
                    <protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
                    </protocArtifact>
                    <pluginId>grpc-java</pluginId>
                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
                    </pluginArtifact>
                </configuration>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>compile-custom</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-source-plugin</artifactId>
                <version>2.2.1</version>
                <executions>
                    <execution>
                        <id>attach-sources</id>
                        <goals>
                            <goal>jar-no-fork</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
                <version>2.9.1</version>
                <executions>
                    <execution>
                        <id>attach-javadocs</id>
                        <goals>
                            <goal>jar</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>

@ -0,0 +1,27 @@
package seaweedfs.client;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.TimeUnit;

public class ChunkCache {

    private final Cache<String, byte[]> cache;

    public ChunkCache(int maxEntries) {
        this.cache = CacheBuilder.newBuilder()
                .maximumSize(maxEntries)
                .expireAfterAccess(1, TimeUnit.HOURS)
                .build();
    }

    public byte[] getChunk(String fileId) {
        return this.cache.getIfPresent(fileId);
    }

    public void setChunk(String fileId, byte[] data) {
        this.cache.put(fileId, data);
    }

}

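A quick usage sketch of the Guava-backed cache above; the fileId value is illustrative, and the snippet assumes it runs inside some method:

ChunkCache chunkCache = new ChunkCache(1000);         // at most 1000 entries, each evicted 1h after last access
byte[] cached = chunkCache.getChunk("3,01637037d6");  // returns null on a miss
if (cached == null) {
    byte[] fetched = new byte[]{1, 2, 3};             // in SeaweedRead this is fetched from a volume server
    chunkCache.setChunk("3,01637037d6", fetched);
}
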
@ -7,13 +7,14 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class FilerClient {

    private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);

    private FilerGrpcClient filerGrpcClient;
    private final FilerGrpcClient filerGrpcClient;

    public FilerClient(String host, int grpcPort) {
        filerGrpcClient = new FilerGrpcClient(host, grpcPort);
@ -34,13 +35,12 @@ public class FilerClient {

    public boolean mkdirs(String path, int mode, int uid, int gid, String userName, String[] groupNames) {

        Path pathObject = Paths.get(path);
        String parent = pathObject.getParent().toString();
        String name = pathObject.getFileName().toString();

        if ("/".equals(path)) {
            return true;
        }
        Path pathObject = Paths.get(path);
        String parent = pathObject.getParent().toString();
        String name = pathObject.getFileName().toString();

        mkdirs(parent, mode, uid, gid, userName, groupNames);

@ -51,23 +51,38 @@ public class FilerClient {
        }

        return createEntry(
                parent,
                newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build()
        );

    }

    public boolean rm(String path, boolean isRecursive) {
    public boolean mv(String oldPath, String newPath) {

        Path oldPathObject = Paths.get(oldPath);
        String oldParent = oldPathObject.getParent().toString();
        String oldName = oldPathObject.getFileName().toString();

        Path newPathObject = Paths.get(newPath);
        String newParent = newPathObject.getParent().toString();
        String newName = newPathObject.getFileName().toString();

        return atomicRenameEntry(oldParent, oldName, newParent, newName);

    }

    public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {

        Path pathObject = Paths.get(path);
        String parent = pathObject.getParent().toString();
        String name = pathObject.getFileName().toString();

        return deleteEntry(
                parent,
                name,
                true,
                isRecursive);
                isRecursive,
                ignoreRecusiveError);
    }

    public boolean touch(String path, int mode) {

@ -84,18 +99,18 @@ public class FilerClient {
        FilerProto.Entry entry = lookupEntry(parent, name);
        if (entry == null) {
            return createEntry(
                    parent,
                    newFileEntry(name, mode, uid, gid, userName, groupNames).build()
            );
        }
        long now = System.currentTimeMillis() / 1000L;
        FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder()
                .setMtime(now)
                .setUid(uid)
                .setGid(gid)
                .setUserName(userName)
                .clearGroupName()
                .addAllGroupName(Arrays.asList(groupNames));
        return updateEntry(parent, entry.toBuilder().setAttributes(attr).build());
    }

@ -105,17 +120,17 @@ public class FilerClient {
        long now = System.currentTimeMillis() / 1000L;

        return FilerProto.Entry.newBuilder()
                .setName(name)
                .setIsDirectory(true)
                .setAttributes(FilerProto.FuseAttributes.newBuilder()
                        .setMtime(now)
                        .setCrtime(now)
                        .setUid(uid)
                        .setGid(gid)
                        .setFileMode(mode | 1 << 31)
                        .setUserName(userName)
                        .clearGroupName()
                        .addAllGroupName(Arrays.asList(groupNames)));
    }

    public FilerProto.Entry.Builder newFileEntry(String name, int mode,

@ -124,17 +139,17 @@ public class FilerClient {
        long now = System.currentTimeMillis() / 1000L;

        return FilerProto.Entry.newBuilder()
                .setName(name)
                .setIsDirectory(false)
                .setAttributes(FilerProto.FuseAttributes.newBuilder()
                        .setMtime(now)
                        .setCrtime(now)
                        .setUid(uid)
                        .setGid(gid)
                        .setFileMode(mode)
                        .setUserName(userName)
                        .clearGroupName()
                        .addAllGroupName(Arrays.asList(groupNames)));
    }

    public List<FilerProto.Entry> listEntries(String path) {

@ -159,22 +174,35 @@ public class FilerClient {
    }

    public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit) {
        return filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
                .setDirectory(path)
                .setPrefix(entryPrefix)
                .setStartFromFileName(lastEntryName)
                .setLimit(limit)
                .build()).getEntriesList();
        Iterator<FilerProto.ListEntriesResponse> iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
                .setDirectory(path)
                .setPrefix(entryPrefix)
                .setStartFromFileName(lastEntryName)
                .setLimit(limit)
                .build());
        List<FilerProto.Entry> entries = new ArrayList<>();
        while (iter.hasNext()) {
            FilerProto.ListEntriesResponse resp = iter.next();
            entries.add(fixEntryAfterReading(resp.getEntry()));
        }
        return entries;
    }

    public FilerProto.Entry lookupEntry(String directory, String entryName) {
        try {
            return filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
                    FilerProto.LookupDirectoryEntryRequest.newBuilder()
                            .setDirectory(directory)
                            .setName(entryName)
                            .build()).getEntry();
            FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
                    FilerProto.LookupDirectoryEntryRequest.newBuilder()
                            .setDirectory(directory)
                            .setName(entryName)
                            .build()).getEntry();
            if (entry == null) {
                return null;
            }
            return fixEntryAfterReading(entry);
        } catch (Exception e) {
            if (e.getMessage().indexOf("filer: no entry is found in filer store") > 0) {
                return null;
            }
            LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e);
            return null;
        }

@ -184,9 +212,9 @@ public class FilerClient {
    public boolean createEntry(String parent, FilerProto.Entry entry) {
        try {
            filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
                    .setDirectory(parent)
                    .setEntry(entry)
                    .build());
        } catch (Exception e) {
            LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
            return false;

@ -197,9 +225,9 @@ public class FilerClient {
    public boolean updateEntry(String parent, FilerProto.Entry entry) {
        try {
            filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
                    .setDirectory(parent)
                    .setEntry(entry)
                    .build());
        } catch (Exception e) {
            LOG.warn("updateEntry {}/{}: {}", parent, entry.getName(), e);
            return false;

@ -207,14 +235,15 @@ public class FilerClient {
        return true;
    }

    public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive) {
    public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive, boolean ignoreRecusiveError) {
        try {
            filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
                    .setDirectory(parent)
                    .setName(entryName)
                    .setIsDeleteData(isDeleteFileChunk)
                    .setIsRecursive(isRecursive)
                    .setIgnoreRecursiveError(ignoreRecusiveError)
                    .build());
        } catch (Exception e) {
            LOG.warn("deleteEntry {}/{}: {}", parent, entryName, e);
            return false;

@ -222,4 +251,39 @@ public class FilerClient {
        return true;
    }

    public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) {
        try {
            filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
                    .setOldDirectory(oldParent)
                    .setOldName(oldName)
                    .setNewDirectory(newParent)
                    .setNewName(newName)
                    .build());
        } catch (Exception e) {
            LOG.warn("atomicRenameEntry {}/{} => {}/{}: {}", oldParent, oldName, newParent, newName, e);
            return false;
        }
        return true;
    }

    private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) {
        if (entry.getChunksList().size() <= 0) {
            return entry;
        }
        String fileId = entry.getChunks(0).getFileId();
        if (fileId != null && fileId.length() != 0) {
            return entry;
        }
        FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
        entryBuilder.clearChunks();
        for (FilerProto.FileChunk chunk : entry.getChunksList()) {
            FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
            FilerProto.FileId fid = chunk.getFid();
            fileId = String.format("%d,%d%x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie());
            chunkBuilder.setFileId(fileId);
            entryBuilder.addChunks(chunkBuilder);
        }
        return entryBuilder.build();
    }

}

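A short usage sketch of the renamed and extended operations above. Host, port, and paths are illustrative, and a filer must be reachable for any of these calls to succeed:

FilerClient filerClient = new FilerClient("localhost", 18888);   // filer gRPC port
filerClient.mkdirs("/demo/dir", 0755, 0, 0, "root", new String[]{"root"});
filerClient.touch("/demo/dir/a.txt", 0644);
filerClient.mv("/demo/dir/a.txt", "/demo/dir/b.txt");   // delegates to atomicRenameEntry
filerClient.rm("/demo/dir", true, true);                // recursive, ignoring recursion errors
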
@ -2,22 +2,55 @@ package seaweedfs.client;

import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.netty.shaded.io.grpc.netty.NegotiationType;
import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder;
import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLException;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;

public class FilerGrpcClient {

    private static final Logger logger = Logger.getLogger(FilerGrpcClient.class.getName());
    private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class);
    static SslContext sslContext;

    static {
        try {
            sslContext = FilerSslContext.loadSslContext();
        } catch (SSLException e) {
            logger.warn("failed to load ssl context", e);
        }
    }

    private final ManagedChannel channel;
    private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub;
    private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub;
    private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub;

    private boolean cipher = false;
    private String collection = "";
    private String replication = "";

    public FilerGrpcClient(String host, int grpcPort) {
        this(ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext());
        this(host, grpcPort, sslContext);
    }

    public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) {

        this(sslContext == null ?
                ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() :
                NettyChannelBuilder.forAddress(host, grpcPort)
                        .negotiationType(NegotiationType.TLS)
                        .sslContext(sslContext));

        FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
                this.getBlockingStub().getFilerConfiguration(
                        FilerProto.GetFilerConfigurationRequest.newBuilder().build());
        cipher = filerConfigurationResponse.getCipher();
        collection = filerConfigurationResponse.getCollection();
        replication = filerConfigurationResponse.getReplication();

    }

    public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {

@ -27,6 +60,18 @@ public class FilerGrpcClient {
        futureStub = SeaweedFilerGrpc.newFutureStub(channel);
    }

    public boolean isCipher() {
        return cipher;
    }

    public String getCollection() {
        return collection;
    }

    public String getReplication() {
        return replication;
    }

    public void shutdown() throws InterruptedException {
        channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
    }

@ -42,4 +87,5 @@ public class FilerGrpcClient {
    public SeaweedFilerGrpc.SeaweedFilerFutureStub getFutureStub() {
        return futureStub;
    }

}

@ -0,0 +1,64 @@
package seaweedfs.client;

import com.google.common.base.Strings;
import com.moandjiezana.toml.Toml;
import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts;
import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder;
import io.grpc.netty.shaded.io.netty.handler.ssl.util.InsecureTrustManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLException;
import java.io.File;

public class FilerSslContext {

    private static final Logger logger = LoggerFactory.getLogger(FilerSslContext.class);

    public static SslContext loadSslContext() throws SSLException {
        String securityFileName = "security.toml";
        String home = System.getProperty("user.home");
        File f1 = new File("./" + securityFileName);
        File f2 = new File(home + "/.seaweedfs/" + securityFileName);
        File f3 = new File(home + "/etc/seaweedfs/" + securityFileName);

        File securityFile = f1.exists() ? f1 : f2.exists() ? f2 : f3.exists() ? f3 : null;

        if (securityFile == null) {
            return null;
        }

        Toml toml = new Toml().read(securityFile);
        logger.debug("reading ssl setup from {}", securityFile);

        String trustCertCollectionFilePath = toml.getString("grpc.ca");
        logger.debug("loading ca from {}", trustCertCollectionFilePath);
        String clientCertChainFilePath = toml.getString("grpc.client.cert");
        logger.debug("loading client cert from {}", clientCertChainFilePath);
        String clientPrivateKeyFilePath = toml.getString("grpc.client.key");
        logger.debug("loading client key from {}", clientPrivateKeyFilePath);

        if (Strings.isNullOrEmpty(clientCertChainFilePath) && Strings.isNullOrEmpty(clientPrivateKeyFilePath)) {
            return null;
        }

        // possibly fix the format https://netty.io/wiki/sslcontextbuilder-and-private-key.html

        return buildSslContext(trustCertCollectionFilePath, clientCertChainFilePath, clientPrivateKeyFilePath);
    }


    private static SslContext buildSslContext(String trustCertCollectionFilePath,
                                              String clientCertChainFilePath,
                                              String clientPrivateKeyFilePath) throws SSLException {
        SslContextBuilder builder = GrpcSslContexts.forClient();
        if (trustCertCollectionFilePath != null) {
            builder.trustManager(new File(trustCertCollectionFilePath));
        }
        if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) {
            builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath));
        }
        return builder.trustManager(InsecureTrustManagerFactory.INSTANCE).build();
    }
}

other/java/client/src/main/java/seaweedfs/client/Gzip.java
Normal file
37
other/java/client/src/main/java/seaweedfs/client/Gzip.java
Normal file
|
@ -0,0 +1,37 @@
|
|||
package seaweedfs.client;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.zip.GZIPInputStream;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
public class Gzip {
|
||||
public static byte[] compress(byte[] data) throws IOException {
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length);
|
||||
GZIPOutputStream gzip = new GZIPOutputStream(bos);
|
||||
gzip.write(data);
|
||||
gzip.close();
|
||||
byte[] compressed = bos.toByteArray();
|
||||
bos.close();
|
||||
return compressed;
|
||||
}
|
||||
|
||||
public static byte[] decompress(byte[] compressed) throws IOException {
|
||||
ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
|
||||
GZIPInputStream gis = new GZIPInputStream(bis);
|
||||
return readAll(gis);
|
||||
}
|
||||
|
||||
private static byte[] readAll(InputStream input) throws IOException {
|
||||
try( ByteArrayOutputStream output = new ByteArrayOutputStream()){
|
||||
byte[] buffer = new byte[4096];
|
||||
int n;
|
||||
while (-1 != (n = input.read(buffer))) {
|
||||
output.write(buffer, 0, n);
|
||||
}
|
||||
return output.toByteArray();
|
||||
}
|
||||
}
|
||||
}
|
|
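A round-trip sketch of the helper above, assuming it runs inside a method that may throw IOException:

byte[] original = "hello seaweedfs".getBytes(java.nio.charset.StandardCharsets.UTF_8);
byte[] packed = Gzip.compress(original);
byte[] unpacked = Gzip.decompress(packed);
// unpacked now equals original, i.e. java.util.Arrays.equals(original, unpacked) is true
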
@ -0,0 +1,55 @@
package seaweedfs.client;

import javax.crypto.Cipher;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.security.SecureRandom;

public class SeaweedCipher {
    // AES-GCM parameters
    public static final int AES_KEY_SIZE = 256; // in bits
    public static final int GCM_NONCE_LENGTH = 12; // in bytes
    public static final int GCM_TAG_LENGTH = 16; // in bytes

    private static SecureRandom random = new SecureRandom();

    public static byte[] genCipherKey() throws Exception {
        byte[] key = new byte[AES_KEY_SIZE / 8];
        random.nextBytes(key);
        return key;
    }

    public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception {
        return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey);
    }

    public static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception {

        final byte[] nonce = new byte[GCM_NONCE_LENGTH];
        random.nextBytes(nonce);
        GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce);
        SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");

        Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
        AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec);

        byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length);

        byte[] iv = AES_cipherInstance.getIV();
        byte[] message = new byte[GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH];
        System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH);
        System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length);

        return message;
    }

    public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception {
        final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding");
        GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH);
        SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES");
        AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params);
        byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH);
        return decryptedText;
    }

}

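A round-trip sketch of the AES-GCM helper above: encrypt prepends the 12-byte nonce and doFinal appends the 16-byte GCM tag, while decrypt reads the nonce back from the message prefix. The snippet assumes a context that may throw Exception:

byte[] key = SeaweedCipher.genCipherKey();   // random 256-bit AES key
byte[] box = SeaweedCipher.encrypt("secret".getBytes(java.nio.charset.StandardCharsets.UTF_8), key);
byte[] plain = SeaweedCipher.decrypt(box, key);
// new String(plain, java.nio.charset.StandardCharsets.UTF_8) is "secret"
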
@ -5,25 +5,25 @@ import org.apache.http.HttpHeaders;
|
|||
import org.apache.http.HttpResponse;
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.impl.client.HttpClientBuilder;
|
||||
import org.apache.http.impl.client.DefaultHttpClient;
|
||||
import org.apache.http.util.EntityUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.*;
|
||||
|
||||
public class SeaweedRead {
|
||||
|
||||
// private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
|
||||
private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class);
|
||||
|
||||
static ChunkCache chunkCache = new ChunkCache(1000);
|
||||
|
||||
// returns bytesRead
|
||||
public static long read(FilerGrpcClient filerGrpcClient, List<VisibleInterval> visibleIntervals,
|
||||
final long position, final byte[] buffer, final int bufferOffset,
|
||||
final int bufferLength) {
|
||||
final int bufferLength) throws IOException {
|
||||
|
||||
List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength);
|
||||
|
||||
|
@ -34,7 +34,7 @@ public class SeaweedRead {
|
|||
}
|
||||
|
||||
FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
|
||||
.getBlockingStub().lookupVolume(lookupRequest.build());
|
||||
.getBlockingStub().lookupVolume(lookupRequest.build());
|
||||
|
||||
Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
|
||||
|
||||
|
@@ -58,35 +58,64 @@ public class SeaweedRead {
         return readCount;
     }

-    private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) {
-        HttpClient client = HttpClientBuilder.create().build();
-        HttpGet request = new HttpGet(
-                String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
+    private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException {

-        if (!chunkView.isFullChunk) {
-            request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
-            request.setHeader(HttpHeaders.RANGE,
-                    String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size));
+        byte[] chunkData = chunkCache.getChunk(chunkView.fileId);
+
+        if (chunkData == null) {
+            chunkData = doFetchFullChunkData(chunkView, locations);
         }
+
+        int len = (int) chunkView.size;
+        LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} buffer.length:{} startOffset:{} len:{}",
+                chunkView.fileId, chunkData.length, chunkView.offset, buffer.length, startOffset, len);
+        System.arraycopy(chunkData, (int) chunkView.offset, buffer, startOffset, len);
+
+        chunkCache.setChunk(chunkView.fileId, chunkData);
+
+        return len;
+    }
+
+    private static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException {
+
+        HttpClient client = new DefaultHttpClient();
+        HttpGet request = new HttpGet(
+                String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));
+
+        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
+
+        byte[] data = null;

         try {
             HttpResponse response = client.execute(request);
             HttpEntity entity = response.getEntity();

-            int len = (int) (chunkView.logicOffset - position + chunkView.size);
-            OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len));
-            entity.writeTo(outputStream);
-            // LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len);
+            data = EntityUtils.toByteArray(entity);

-            return len;
-
-        } catch (IOException e) {
-            e.printStackTrace();
         } finally {
             if (client instanceof Closeable) {
                 Closeable t = (Closeable) client;
                 t.close();
             }
         }
-        return 0;
+
+        if (chunkView.isGzipped) {
+            data = Gzip.decompress(data);
+        }
+
+        if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
+            try {
+                data = SeaweedCipher.decrypt(data, chunkView.cipherKey);
+            } catch (Exception e) {
+                throw new IOException("fail to decrypt", e);
+            }
+        }
+
+        return data;
+
     }

-    public static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
+    protected static List<ChunkView> viewFromVisibles(List<VisibleInterval> visibleIntervals, long offset, long size) {
         List<ChunkView> views = new ArrayList<>();

         long stop = offset + size;
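The rewrite above drops ranged HTTP reads: a cache miss now fetches the whole chunk once, undoes the transport gzip, then undoes the client-side encryption, and keeps the plaintext in chunkCache so any later slice of the same chunk is a pure memory copy. Distilled to a sketch, using the same names as the code above:

    // Sketch: slice [chunkView.offset, +size) out of a cached full chunk.
    byte[] chunkData = chunkCache.getChunk(chunkView.fileId);   // null on first touch
    if (chunkData == null) {
        chunkData = doFetchFullChunkData(chunkView, locations); // one HTTP round trip
        chunkCache.setChunk(chunkView.fileId, chunkData);
    }
    System.arraycopy(chunkData, (int) chunkView.offset, buffer, startOffset, (int) chunkView.size);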
@@ -94,11 +123,13 @@ public class SeaweedRead {
             if (chunk.start <= offset && offset < chunk.stop && offset < stop) {
                 boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop;
                 views.add(new ChunkView(
-                        chunk.fileId,
-                        offset - chunk.start,
-                        Math.min(chunk.stop, stop) - offset,
-                        offset,
-                        isFullChunk
+                        chunk.fileId,
+                        offset - chunk.start,
+                        Math.min(chunk.stop, stop) - offset,
+                        offset,
+                        isFullChunk,
+                        chunk.cipherKey,
+                        chunk.isGzipped
                 ));
                 offset = Math.min(chunk.stop, stop);
             }
@@ -128,11 +159,13 @@ public class SeaweedRead {
                                           List<VisibleInterval> newVisibles,
                                           FilerProto.FileChunk chunk) {
         VisibleInterval newV = new VisibleInterval(
-                chunk.getOffset(),
-                chunk.getOffset() + chunk.getSize(),
-                chunk.getFileId(),
-                chunk.getMtime(),
-                true
+                chunk.getOffset(),
+                chunk.getOffset() + chunk.getSize(),
+                chunk.getFileId(),
+                chunk.getMtime(),
+                true,
+                chunk.getCipherKey().toByteArray(),
+                chunk.getIsGzipped()
         );

         // easy cases to speed up
@@ -148,21 +181,25 @@ public class SeaweedRead {
         for (VisibleInterval v : visibles) {
             if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) {
                 newVisibles.add(new VisibleInterval(
-                        v.start,
-                        chunk.getOffset(),
-                        v.fileId,
-                        v.modifiedTime,
-                        false
+                        v.start,
+                        chunk.getOffset(),
+                        v.fileId,
+                        v.modifiedTime,
+                        false,
+                        v.cipherKey,
+                        v.isGzipped
                 ));
             }
             long chunkStop = chunk.getOffset() + chunk.getSize();
             if (v.start < chunkStop && chunkStop < v.stop) {
                 newVisibles.add(new VisibleInterval(
-                        chunkStop,
-                        v.stop,
-                        v.fileId,
-                        v.modifiedTime,
-                        false
+                        chunkStop,
+                        v.stop,
+                        v.fileId,
+                        v.modifiedTime,
+                        false,
+                        v.cipherKey,
+                        v.isGzipped
                 ));
             }
             if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
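The three overlap tests in this loop implement a one-dimensional carve-out: the incoming chunk punches its byte range out of every older visible interval, leaving at most a left remnant and a right remnant. For example, an old interval [0, 100) hit by a new chunk covering [40, 60) survives as [0, 40) and [60, 100). The same logic in a hypothetical standalone form, on plain [start, stop) pairs:

    // Sketch only: carve newR out of oldR; returns the surviving pieces of oldR.
    static java.util.List<long[]> carve(long[] oldR, long[] newR) {
        java.util.List<long[]> kept = new java.util.ArrayList<>();
        if (oldR[0] < newR[0] && newR[0] < oldR[1]) {
            kept.add(new long[]{oldR[0], newR[0]});    // left remnant
        }
        if (oldR[0] < newR[1] && newR[1] < oldR[1]) {
            kept.add(new long[]{newR[1], oldR[1]});    // right remnant
        }
        if (newR[1] <= oldR[0] || oldR[1] <= newR[0]) {
            kept.add(oldR);                            // no overlap: old interval survives whole
        }
        return kept;
    }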
@@ -209,24 +246,30 @@ public class SeaweedRead {
         public final long modifiedTime;
         public final String fileId;
         public final boolean isFullChunk;
+        public final byte[] cipherKey;
+        public final boolean isGzipped;

-        public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) {
+        public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
             this.start = start;
             this.stop = stop;
             this.modifiedTime = modifiedTime;
             this.fileId = fileId;
             this.isFullChunk = isFullChunk;
+            this.cipherKey = cipherKey;
+            this.isGzipped = isGzipped;
         }

         @Override
         public String toString() {
             return "VisibleInterval{" +
-                    "start=" + start +
-                    ", stop=" + stop +
-                    ", modifiedTime=" + modifiedTime +
-                    ", fileId='" + fileId + '\'' +
-                    ", isFullChunk=" + isFullChunk +
-                    '}';
+                    "start=" + start +
+                    ", stop=" + stop +
+                    ", modifiedTime=" + modifiedTime +
+                    ", fileId='" + fileId + '\'' +
+                    ", isFullChunk=" + isFullChunk +
+                    ", cipherKey=" + Arrays.toString(cipherKey) +
+                    ", isGzipped=" + isGzipped +
+                    '}';
         }
     }
@@ -236,24 +279,30 @@ public class SeaweedRead {
         public final long size;
         public final long logicOffset;
         public final boolean isFullChunk;
+        public final byte[] cipherKey;
+        public final boolean isGzipped;

-        public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) {
+        public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) {
             this.fileId = fileId;
             this.offset = offset;
             this.size = size;
             this.logicOffset = logicOffset;
             this.isFullChunk = isFullChunk;
+            this.cipherKey = cipherKey;
+            this.isGzipped = isGzipped;
         }

         @Override
         public String toString() {
             return "ChunkView{" +
-                    "fileId='" + fileId + '\'' +
-                    ", offset=" + offset +
-                    ", size=" + size +
-                    ", logicOffset=" + logicOffset +
-                    ", isFullChunk=" + isFullChunk +
-                    '}';
+                    "fileId='" + fileId + '\'' +
+                    ", offset=" + offset +
+                    ", size=" + size +
+                    ", logicOffset=" + logicOffset +
+                    ", isFullChunk=" + isFullChunk +
+                    ", cipherKey=" + Arrays.toString(cipherKey) +
+                    ", isGzipped=" + isGzipped +
+                    '}';
         }
     }
@@ -1,18 +1,23 @@
 package seaweedfs.client;

+import com.google.protobuf.ByteString;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.entity.mime.HttpMultipartMode;
 import org.apache.http.entity.mime.MultipartEntityBuilder;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.client.DefaultHttpClient;

 import java.io.ByteArrayInputStream;
+import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
+import java.security.SecureRandom;

 public class SeaweedWrite {

+    private static SecureRandom random = new SecureRandom();
+
     public static void writeData(FilerProto.Entry.Builder entry,
                                  final String replication,
                                  final FilerGrpcClient filerGrpcClient,
@@ -20,53 +25,83 @@ public class SeaweedWrite {
                                  final byte[] bytes,
                                  final long bytesOffset, final long bytesLength) throws IOException {
         FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
-                FilerProto.AssignVolumeRequest.newBuilder()
-                        .setCollection("")
-                        .setReplication(replication)
-                        .setDataCenter("")
-                        .setReplication("")
-                        .setTtlSec(0)
-                        .build());
+                FilerProto.AssignVolumeRequest.newBuilder()
+                        .setCollection(filerGrpcClient.getCollection())
+                        .setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
+                        .setDataCenter("")
+                        .setTtlSec(0)
+                        .build());
         String fileId = response.getFileId();
         String url = response.getUrl();
         String auth = response.getAuth();
         String targetUrl = String.format("http://%s/%s", url, fileId);

-        String etag = multipartUpload(targetUrl, bytes, bytesOffset, bytesLength);
-
-        entry.addChunks(FilerProto.FileChunk.newBuilder()
-                .setFileId(fileId)
-                .setOffset(offset)
-                .setSize(bytesLength)
-                .setMtime(System.currentTimeMillis() / 10000L)
-                .setETag(etag)
-        );
+        ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
+        byte[] cipherKey = null;
+        if (filerGrpcClient.isCipher()) {
+            cipherKey = genCipherKey();
+            cipherKeyString = ByteString.copyFrom(cipherKey);
+        }
+
+        String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey);
+
+        synchronized (entry) {
+            entry.addChunks(FilerProto.FileChunk.newBuilder()
+                    .setFileId(fileId)
+                    .setOffset(offset)
+                    .setSize(bytesLength)
+                    .setMtime(System.currentTimeMillis() / 10000L)
+                    .setETag(etag)
+                    .setCipherKey(cipherKeyString)
+            );
+        }
+
+        // cache fileId ~ bytes
+        SeaweedRead.chunkCache.setChunk(fileId, bytes);

     }

     public static void writeMeta(final FilerGrpcClient filerGrpcClient,
                                  final String parentDirectory, final FilerProto.Entry.Builder entry) {
-        filerGrpcClient.getBlockingStub().createEntry(
-                FilerProto.CreateEntryRequest.newBuilder()
-                        .setDirectory(parentDirectory)
-                        .setEntry(entry)
-                        .build()
-        );
+        synchronized (entry){
+            filerGrpcClient.getBlockingStub().createEntry(
+                    FilerProto.CreateEntryRequest.newBuilder()
+                            .setDirectory(parentDirectory)
+                            .setEntry(entry)
+                            .build()
+            );
+        }
     }

     private static String multipartUpload(String targetUrl,
+                                          String auth,
                                           final byte[] bytes,
-                                          final long bytesOffset, final long bytesLength) throws IOException {
+                                          final long bytesOffset, final long bytesLength,
+                                          byte[] cipherKey) throws IOException {

-        CloseableHttpClient client = HttpClientBuilder.create().setUserAgent("hdfs-client").build();
+        HttpClient client = new DefaultHttpClient();

-        InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
+        InputStream inputStream = null;
+        if (cipherKey == null || cipherKey.length == 0) {
+            inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
+        } else {
+            try {
+                byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey);
+                inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length);
+            } catch (Exception e) {
+                throw new IOException("fail to encrypt data", e);
+            }
+        }

         HttpPost post = new HttpPost(targetUrl);
+        if (auth != null && auth.length() != 0) {
+            post.addHeader("Authorization", "BEARER " + auth);
+        }

         post.setEntity(MultipartEntityBuilder.create()
                 .setMode(HttpMultipartMode.BROWSER_COMPATIBLE)
                 .addBinaryBody("upload", inputStream)
                 .build());

         try {
             HttpResponse response = client.execute(post);

@@ -79,8 +114,17 @@ public class SeaweedWrite {

             return etag;
         } finally {
-            client.close();
+            if (client instanceof Closeable) {
+                Closeable t = (Closeable) client;
+                t.close();
+            }
         }

     }

+    private static byte[] genCipherKey() {
+        byte[] b = new byte[32];
+        random.nextBytes(b);
+        return b;
+    }
 }
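Together writeData and writeMeta form a two-phase write: each buffer is uploaded as a chunk (encrypted first when the filer advertises cipher mode) and appended to the entry builder, then the entry is committed in a single createEntry call. A hedged usage sketch; the fourth writeData argument is the logical file offset, matching the call site in SeaweedOutputStream further below:

    // Sketch: write one small file through the two-phase API.
    byte[] payload = "hello seaweedfs".getBytes();
    FilerProto.Entry.Builder entry = FilerProto.Entry.newBuilder().setName("hello.txt");
    // null replication falls back to filerGrpcClient.getReplication()
    SeaweedWrite.writeData(entry, null, filerGrpcClient, 0L, payload, 0, payload.length);
    SeaweedWrite.writeMeta(filerGrpcClient, "/some/dir", entry);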
@@ -2,6 +2,7 @@ syntax = "proto3";

 package filer_pb;

+option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb";
 option java_package = "seaweedfs.client";
 option java_outer_classname = "FilerProto";

@@ -12,7 +13,7 @@ service SeaweedFiler {
     rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) {
     }

-    rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) {
+    rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) {
     }

     rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {

@@ -21,9 +22,15 @@ service SeaweedFiler {
     rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) {
     }

+    rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) {
+    }
+
     rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
     }

+    rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
+    }
+
     rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
     }

@@ -36,6 +43,21 @@ service SeaweedFiler {
     rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
     }

+    rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
+    }
+
+    rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+    }
+
+    rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
+    }
+
+    rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) {
+    }
+
+    rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) {
+    }
+
 }

 //////////////////////////////////////////////////

@@ -58,7 +80,7 @@ message ListEntriesRequest {
 }

 message ListEntriesResponse {
-    repeated Entry entries = 1;
+    Entry entry = 1;
 }

 message Entry {

@@ -69,19 +91,36 @@ message Entry {
     map<string, bytes> extended = 5;
 }

+message FullEntry {
+    string dir = 1;
+    Entry entry = 2;
+}
+
 message EventNotification {
     Entry old_entry = 1;
     Entry new_entry = 2;
     bool delete_chunks = 3;
+    string new_parent_path = 4;
+    bool is_from_other_cluster = 5;
 }

 message FileChunk {
-    string file_id = 1;
+    string file_id = 1; // to be deprecated
     int64 offset = 2;
     uint64 size = 3;
     int64 mtime = 4;
     string e_tag = 5;
-    string source_file_id = 6;
+    string source_file_id = 6; // to be deprecated
+    FileId fid = 7;
+    FileId source_fid = 8;
+    bytes cipher_key = 9;
+    bool is_compressed = 10;
+}
+
+message FileId {
+    uint32 volume_id = 1;
+    uint64 file_key = 2;
+    fixed32 cookie = 3;
 }

 message FuseAttributes {

@@ -98,32 +137,58 @@ message FuseAttributes {
     string user_name = 11; // for hdfs
     repeated string group_name = 12; // for hdfs
     string symlink_target = 13;
+    bytes md5 = 14;
 }

 message CreateEntryRequest {
     string directory = 1;
     Entry entry = 2;
+    bool o_excl = 3;
+    bool is_from_other_cluster = 4;
 }

 message CreateEntryResponse {
+    string error = 1;
 }

 message UpdateEntryRequest {
     string directory = 1;
     Entry entry = 2;
+    bool is_from_other_cluster = 3;
 }
 message UpdateEntryResponse {
 }

+message AppendToEntryRequest {
+    string directory = 1;
+    string entry_name = 2;
+    repeated FileChunk chunks = 3;
+}
+message AppendToEntryResponse {
+}
+
 message DeleteEntryRequest {
     string directory = 1;
     string name = 2;
     // bool is_directory = 3;
     bool is_delete_data = 4;
     bool is_recursive = 5;
+    bool ignore_recursive_error = 6;
+    bool is_from_other_cluster = 7;
 }

 message DeleteEntryResponse {
+    string error = 1;
 }

+message AtomicRenameEntryRequest {
+    string old_directory = 1;
+    string old_name = 2;
+    string new_directory = 3;
+    string new_name = 4;
+}
+
+message AtomicRenameEntryResponse {
+}
+
 message AssignVolumeRequest {

@@ -132,6 +197,7 @@ message AssignVolumeRequest {
     string replication = 3;
     int32 ttl_sec = 4;
     string data_center = 5;
+    string parent_path = 6;
 }

 message AssignVolumeResponse {

@@ -139,6 +205,10 @@ message AssignVolumeResponse {
     string url = 2;
     string public_url = 3;
     int32 count = 4;
     string auth = 5;
+    string collection = 6;
+    string replication = 7;
+    string error = 8;
 }

 message LookupVolumeRequest {

@@ -177,3 +247,53 @@ message StatisticsResponse {
     uint64 used_size = 5;
     uint64 file_count = 6;
 }
+
+message GetFilerConfigurationRequest {
+}
+message GetFilerConfigurationResponse {
+    repeated string masters = 1;
+    string replication = 2;
+    string collection = 3;
+    uint32 max_mb = 4;
+    string dir_buckets = 5;
+    bool cipher = 7;
+}
+
+message SubscribeMetadataRequest {
+    string client_name = 1;
+    string path_prefix = 2;
+    int64 since_ns = 3;
+}
+message SubscribeMetadataResponse {
+    string directory = 1;
+    EventNotification event_notification = 2;
+    int64 ts_ns = 3;
+}
+
+message LogEntry {
+    int64 ts_ns = 1;
+    int32 partition_key_hash = 2;
+    bytes data = 3;
+}
+
+message KeepConnectedRequest {
+    string name = 1;
+    uint32 grpc_port = 2;
+    repeated string resources = 3;
+}
+message KeepConnectedResponse {
+}
+
+message LocateBrokerRequest {
+    string resource = 1;
+}
+message LocateBrokerResponse {
+    bool found = 1;
+    // if found, send the exact address
+    // if not found, send the full list of existing brokers
+    message Resource {
+        string grpc_addresses = 1;
+        int32 resource_count = 2;
+    }
+    repeated Resource resources = 2;
+}
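Because ListEntries is now server-streamed, a grpc-java blocking stub returns an Iterator of responses, one Entry per message, instead of a single response with a repeated field. A minimal consuming sketch (assuming the request carries a directory field, which is elided in the hunk above):

    // Sketch: drain the streamed listing one entry at a time.
    java.util.Iterator<FilerProto.ListEntriesResponse> it = filerGrpcClient.getBlockingStub()
            .listEntries(FilerProto.ListEntriesRequest.newBuilder().setDirectory("/").build());
    while (it.hasNext()) {
        System.out.println(it.next().getEntry().getName());
    }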
@@ -0,0 +1,42 @@
package seaweedfs.client;

import org.junit.Test;

import java.util.Base64;

import static seaweedfs.client.SeaweedCipher.decrypt;
import static seaweedfs.client.SeaweedCipher.encrypt;

public class SeaweedCipherTest {

    @Test
    public void testSameAsGoImplemnetation() throws Exception {
        byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes();

        String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";

        System.out.println("Original Text : " + plainText);

        byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
        System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));

        byte[] decryptedText = decrypt(cipherText, secretKey);
        System.out.println("DeCrypted Text : " + new String(decryptedText));
    }

    @Test
    public void testEncryptDecrypt() throws Exception {
        byte[] secretKey = SeaweedCipher.genCipherKey();

        String plainText = "Now we need to generate a 256-bit key for AES 256 GCM";

        System.out.println("Original Text : " + plainText);

        byte[] cipherText = encrypt(plainText.getBytes(), secretKey);
        System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText));

        byte[] decryptedText = decrypt(cipherText, secretKey);
        System.out.println("DeCrypted Text : " + new String(decryptedText));
    }

}
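SeaweedCipher itself is outside this view, but the tests pin its contract: AES-256-GCM with a 32-byte key, byte-compatible with the Go implementation. A rough shape of a compatible encrypt, as a sketch only; it assumes the common convention of prepending a random 12-byte nonce to the GCM output, which may differ from the real implementation:

    // Sketch: AES/GCM encryption shaped like the SeaweedCipher contract.
    static byte[] encryptSketch(byte[] clearText, byte[] key) throws Exception {
        byte[] nonce = new byte[12];
        new java.security.SecureRandom().nextBytes(nonce);
        javax.crypto.Cipher cipher = javax.crypto.Cipher.getInstance("AES/GCM/NoPadding");
        cipher.init(javax.crypto.Cipher.ENCRYPT_MODE,
                new javax.crypto.spec.SecretKeySpec(key, "AES"),
                new javax.crypto.spec.GCMParameterSpec(128, nonce));
        byte[] sealed = cipher.doFinal(clearText);
        byte[] out = new byte[nonce.length + sealed.length];
        System.arraycopy(nonce, 0, out, 0, nonce.length);
        System.arraycopy(sealed, 0, out, nonce.length, sealed.length);
        return out;   // nonce || ciphertext+tag
    }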
@@ -0,0 +1,23 @@
package seaweedfs.client;

import java.util.List;

public class SeaweedFilerTest {
    public static void main(String[] args){

        FilerClient filerClient = new FilerClient("localhost", 18888);

        List<FilerProto.Entry> entries = filerClient.listEntries("/");

        for (FilerProto.Entry entry : entries) {
            System.out.println(entry.toString());
        }

        filerClient.mkdirs("/new_folder", 0755);
        filerClient.touch("/new_folder/new_empty_file", 0755);
        filerClient.touch("/new_folder/new_empty_file2", 0755);
        filerClient.rm("/new_folder/new_empty_file", false, true);
        filerClient.rm("/new_folder", true, true);

    }
}
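Note that FilerClient.listEntries here still hands back a fully materialized List; the wrapper presumably drains the now-streaming ListEntries RPC internally, so callers written against the list-shaped API keep working.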
other/java/hdfs2/dependency-reduced-pom.xml (new file, 133 lines)
@@ -0,0 +1,133 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <parent>
    <artifactId>oss-parent</artifactId>
    <groupId>org.sonatype.oss</groupId>
    <version>9</version>
    <relativePath>../pom.xml/pom.xml</relativePath>
  </parent>
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.github.chrislusf</groupId>
  <artifactId>seaweedfs-hadoop2-client</artifactId>
  <version>${seaweedfs.client.version}</version>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <configuration>
          <source>7</source>
          <target>7</target>
        </configuration>
      </plugin>
      <plugin>
        <artifactId>maven-shade-plugin</artifactId>
        <version>3.2.1</version>
        <executions>
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
            <configuration>
              <filters>
                <filter>
                  <artifact>*:*</artifact>
                  <excludes>
                    <exclude>META-INF/*.SF</exclude>
                    <exclude>META-INF/*.DSA</exclude>
                    <exclude>META-INF/*.RSA</exclude>
                    <exclude>org/slf4j/**</exclude>
                    <exclude>META-INF/maven/org.slf4j/**</exclude>
                  </excludes>
                </filter>
              </filters>
              <transformers>
                <transformer />
              </transformers>
              <relocations>
                <relocation>
                  <pattern>com.google</pattern>
                  <shadedPattern>shaded.com.google</shadedPattern>
                </relocation>
                <relocation>
                  <pattern>io.grpc.internal</pattern>
                  <shadedPattern>shaded.io.grpc.internal</shadedPattern>
                </relocation>
                <relocation>
                  <pattern>org.apache.commons</pattern>
                  <shadedPattern>shaded.org.apache.commons</shadedPattern>
                  <excludes>
                    <exclude>org.apache.hadoop</exclude>
                    <exclude>org.apache.log4j</exclude>
                  </excludes>
                </relocation>
                <relocation>
                  <pattern>org.apache.http</pattern>
                  <shadedPattern>shaded.org.apache.http</shadedPattern>
                </relocation>
              </relocations>
            </configuration>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <artifactId>maven-gpg-plugin</artifactId>
        <version>1.5</version>
        <executions>
          <execution>
            <id>sign-artifacts</id>
            <phase>verify</phase>
            <goals>
              <goal>sign</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.sonatype.plugins</groupId>
        <artifactId>nexus-staging-maven-plugin</artifactId>
        <version>1.6.7</version>
        <extensions>true</extensions>
        <configuration>
          <serverId>ossrh</serverId>
          <nexusUrl>https://oss.sonatype.org/</nexusUrl>
          <autoReleaseAfterClose>true</autoReleaseAfterClose>
        </configuration>
      </plugin>
      <plugin>
        <artifactId>maven-source-plugin</artifactId>
        <version>2.2.1</version>
        <executions>
          <execution>
            <id>attach-sources</id>
            <goals>
              <goal>jar-no-fork</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <artifactId>maven-javadoc-plugin</artifactId>
        <version>2.9.1</version>
        <executions>
          <execution>
            <id>attach-javadocs</id>
            <goals>
              <goal>jar</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
  <distributionManagement>
    <snapshotRepository>
      <id>ossrh</id>
      <url>https://oss.sonatype.org/content/repositories/snapshots</url>
    </snapshotRepository>
  </distributionManagement>
  <properties>
    <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
    <hadoop.version>2.9.2</hadoop.version>
  </properties>
</project>
other/java/hdfs2/pom.xml (new file, 163 lines)
@@ -0,0 +1,163 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <properties>
        <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
        <hadoop.version>2.9.2</hadoop.version>
    </properties>

    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-hadoop2-client</artifactId>
    <version>${seaweedfs.client.version}</version>

    <parent>
        <groupId>org.sonatype.oss</groupId>
        <artifactId>oss-parent</artifactId>
        <version>9</version>
    </parent>

    <distributionManagement>
        <snapshotRepository>
            <id>ossrh</id>
            <url>https://oss.sonatype.org/content/repositories/snapshots</url>
        </snapshotRepository>
    </distributionManagement>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>7</source>
                    <target>7</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.2.1</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                        <exclude>org/slf4j/**</exclude>
                                        <exclude>META-INF/maven/org.slf4j/**</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
                            </transformers>
                            <relocations>
                                <relocation>
                                    <pattern>com.google</pattern>
                                    <shadedPattern>shaded.com.google</shadedPattern>
                                </relocation>
                                <relocation>
                                    <pattern>io.grpc.internal</pattern>
                                    <shadedPattern>shaded.io.grpc.internal</shadedPattern>
                                </relocation>
                                <relocation>
                                    <pattern>org.apache.commons</pattern>
                                    <shadedPattern>shaded.org.apache.commons</shadedPattern>
                                    <excludes>
                                        <exclude>org.apache.hadoop</exclude>
                                        <exclude>org.apache.log4j</exclude>
                                    </excludes>
                                </relocation>
                                <relocation>
                                    <pattern>org.apache.http</pattern>
                                    <shadedPattern>shaded.org.apache.http</shadedPattern>
                                </relocation>
                            </relocations>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-gpg-plugin</artifactId>
                <version>1.5</version>
                <executions>
                    <execution>
                        <id>sign-artifacts</id>
                        <phase>verify</phase>
                        <goals>
                            <goal>sign</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.sonatype.plugins</groupId>
                <artifactId>nexus-staging-maven-plugin</artifactId>
                <version>1.6.7</version>
                <extensions>true</extensions>
                <configuration>
                    <serverId>ossrh</serverId>
                    <nexusUrl>https://oss.sonatype.org/</nexusUrl>
                    <autoReleaseAfterClose>true</autoReleaseAfterClose>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-source-plugin</artifactId>
                <version>2.2.1</version>
                <executions>
                    <execution>
                        <id>attach-sources</id>
                        <goals>
                            <goal>jar-no-fork</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
                <version>2.9.1</version>
                <executions>
                    <execution>
                        <id>attach-javadocs</id>
                        <goals>
                            <goal>jar</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>com.github.chrislusf</groupId>
            <artifactId>seaweedfs-client</artifactId>
            <version>${seaweedfs.client.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
    </dependencies>

</project>
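With this shaded hadoop2 client on the classpath, Hadoop can be pointed at a filer by registering the FileSystem implementation. A hedged sketch from plain Java; the fs.seaweedfs.impl key follows Hadoop's fs.<scheme>.impl convention, and 8888 is assumed to be the filer's HTTP port:

    // Sketch: wiring the seaweedfs-hadoop2-client into the Hadoop FileSystem factory.
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
    org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(
            java.net.URI.create("seaweedfs://localhost:8888/"), conf);
    for (org.apache.hadoop.fs.FileStatus st : fs.listStatus(new org.apache.hadoop.fs.Path("/"))) {
        System.out.println(st.getPath());
    }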
@@ -1,14 +1,7 @@
 package seaweed.hdfs;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;

@@ -73,6 +66,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
         this.uri = uri;

         seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+
     }

     @Override

@@ -86,6 +80,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
             InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
             return new FSDataInputStream(inputStream);
         } catch (Exception ex) {
+            LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
             return null;
         }
     }

@@ -103,10 +98,36 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
             OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
             return new FSDataOutputStream(outputStream, statistics);
         } catch (Exception ex) {
+            LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
             return null;
         }
     }

+    /**
+     * {@inheritDoc}
+     * @throws FileNotFoundException if the parent directory is not present -or
+     * is not a directory.
+     */
+    @Override
+    public FSDataOutputStream createNonRecursive(Path path,
+                                                 FsPermission permission,
+                                                 EnumSet<CreateFlag> flags,
+                                                 int bufferSize,
+                                                 short replication,
+                                                 long blockSize,
+                                                 Progressable progress) throws IOException {
+        Path parent = path.getParent();
+        if (parent != null) {
+            // expect this to raise an exception if there is no parent
+            if (!getFileStatus(parent).isDirectory()) {
+                throw new FileAlreadyExistsException("Not a directory: " + parent);
+            }
+        }
+        return create(path, permission,
+                flags.contains(CreateFlag.OVERWRITE), bufferSize,
+                replication, blockSize, progress);
+    }
+
     @Override
     public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {

@@ -117,6 +138,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
             OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
             return new FSDataOutputStream(outputStream, statistics);
         } catch (Exception ex) {
+            LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
             return null;
         }
     }

@@ -206,8 +228,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

         UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
         return seaweedFileSystemStore.createDirectory(path, currentUser,
                 fsPermission == null ? FsPermission.getDirDefault() : fsPermission,
                 FsPermission.getUMask(getConf()));

     }

@@ -238,7 +260,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      */
     @Override
     public void setOwner(Path path, final String owner, final String group)
             throws IOException {
         LOG.debug("setOwner path: {}", path);
         path = qualify(path);

@@ -271,54 +293,55 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

     /**
      * Concat existing files together.
      *
      * @param trg   the path to the target destination.
      * @param psrcs the paths to the sources to use for the concatenation.
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default).
      */
     @Override
-    public void concat(final Path trg, final Path [] psrcs) throws IOException {
+    public void concat(final Path trg, final Path[] psrcs) throws IOException {
         throw new UnsupportedOperationException("Not implemented by the " +
                 getClass().getSimpleName() + " FileSystem implementation");
     }

     /**
      * Truncate the file in the indicated path to the indicated size.
      * <ul>
      * <li>Fails if path is a directory.</li>
      * <li>Fails if path does not exist.</li>
      * <li>Fails if path is not closed.</li>
      * <li>Fails if new size is greater than current size.</li>
      * </ul>
      *
      * @param f         The path to the file to be truncated
      * @param newLength The size the file is to be truncated to
      * @return <code>true</code> if the file has been truncated to the desired
      * <code>newLength</code> and is immediately available to be reused for
      * write operations such as <code>append</code>, or
      * <code>false</code> if a background process of adjusting the length of
      * the last block has been started, and clients should wait for it to
      * complete before proceeding with further file updates.
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default).
      */
     @Override
     public boolean truncate(Path f, long newLength) throws IOException {
         throw new UnsupportedOperationException("Not implemented by the " +
                 getClass().getSimpleName() + " FileSystem implementation");
     }

     @Override
     public void createSymlink(final Path target, final Path link,
                               final boolean createParent) throws AccessControlException,
             FileAlreadyExistsException, FileNotFoundException,
             ParentNotDirectoryException, UnsupportedFileSystemException,
             IOException {
         // Supporting filesystems should override this method
         throw new UnsupportedOperationException(
                 "Filesystem does not support symlinks!");
     }

     public boolean supportsSymlinks() {

@@ -327,48 +350,51 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {

     /**
      * Create a snapshot.
      *
      * @param path         The directory where snapshots will be taken.
      * @param snapshotName The name of the snapshot
      * @return the snapshot path.
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      */
     @Override
     public Path createSnapshot(Path path, String snapshotName)
             throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support createSnapshot");
     }

     /**
      * Rename a snapshot.
      *
      * @param path            The directory path where the snapshot was taken
      * @param snapshotOldName Old name of the snapshot
      * @param snapshotNewName New name of the snapshot
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void renameSnapshot(Path path, String snapshotOldName,
                                String snapshotNewName) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support renameSnapshot");
     }

     /**
      * Delete a snapshot of a directory.
      *
      * @param path         The directory that the to-be-deleted snapshot belongs to
      * @param snapshotName The name of the snapshot
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void deleteSnapshot(Path path, String snapshotName)
             throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support deleteSnapshot");
     }

     /**

@@ -377,49 +403,49 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      * ACL entries that are not specified in this call are retained without
      * changes. (Modifications are merged into the current ACL.)
      *
      * @param path    Path to modify
      * @param aclSpec List<AclEntry> describing modifications
      * @throws IOException                   if an ACL could not be modified
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
             throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support modifyAclEntries");
     }

     /**
      * Removes ACL entries from files and directories. Other ACL entries are
      * retained.
      *
      * @param path    Path to modify
      * @param aclSpec List describing entries to remove
      * @throws IOException                   if an ACL could not be modified
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void removeAclEntries(Path path, List<AclEntry> aclSpec)
             throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support removeAclEntries");
     }

     /**
      * Removes all default ACL entries from files and directories.
      *
      * @param path Path to modify
      * @throws IOException                   if an ACL could not be modified
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void removeDefaultAcl(Path path)
             throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support removeDefaultAcl");
     }

     /**

@@ -428,32 +454,32 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      * bits.
      *
      * @param path Path to modify
      * @throws IOException                   if an ACL could not be removed
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void removeAcl(Path path)
             throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support removeAcl");
     }

     /**
      * Fully replaces ACL of files and directories, discarding all existing
      * entries.
      *
      * @param path    Path to modify
      * @param aclSpec List describing modifications, which must include entries
      *                for user, group, and others for compatibility with permission bits.
      * @throws IOException                   if an ACL could not be modified
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support setAcl");
     }

     /**

@@ -461,14 +487,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      *
      * @param path Path to get
      * @return AclStatus describing the ACL of the file or directory
      * @throws IOException                   if an ACL could not be read
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public AclStatus getAclStatus(Path path) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support getAclStatus");
     }

     /**

@@ -478,19 +504,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      * <p>
      * Refer to the HDFS extended attributes user documentation for details.
      *
      * @param path  Path to modify
      * @param name  xattr name.
      * @param value xattr value.
      * @param flag  xattr set flag
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void setXAttr(Path path, String name, byte[] value,
                          EnumSet<XAttrSetFlag> flag) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support setXAttr");
     }

     /**

@@ -503,14 +529,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      * @param path Path to get extended attribute
      * @param name xattr name.
      * @return byte[] xattr value.
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public byte[] getXAttr(Path path, String name) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support getXAttr");
     }

     /**

@@ -522,14 +548,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      *
      * @param path Path to get extended attributes
      * @return Map describing the XAttrs of the file or directory
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public Map<String, byte[]> getXAttrs(Path path) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support getXAttrs");
     }

     /**

@@ -539,18 +565,18 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      * <p>
      * Refer to the HDFS extended attributes user documentation for details.
      *
      * @param path  Path to get extended attributes
      * @param names XAttr names.
      * @return Map describing the XAttrs of the file or directory
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public Map<String, byte[]> getXAttrs(Path path, List<String> names)
             throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support getXAttrs");
     }

     /**

@@ -562,14 +588,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      *
      * @param path Path to get extended attributes
      * @return List{@literal <String>} of the XAttr names of the file or directory
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public List<String> listXAttrs(Path path) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support listXAttrs");
     }

     /**

@@ -581,14 +607,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
      *
      * @param path Path to remove extended attribute
      * @param name xattr name
      * @throws IOException                   IO failure
      * @throws UnsupportedOperationException if the operation is unsupported
      *                                       (default outcome).
      */
     @Override
     public void removeXAttr(Path path, String name) throws IOException {
         throw new UnsupportedOperationException(getClass().getSimpleName()
                 + " doesn't support removeXAttr");
     }

 }
@@ -64,6 +64,16 @@ public class SeaweedFileSystemStore {
     public FileStatus[] listEntries(final Path path) {
         LOG.debug("listEntries path: {}", path);

+        FileStatus pathStatus = getFileStatus(path);
+
+        if (pathStatus == null) {
+            return new FileStatus[0];
+        }
+
+        if (!pathStatus.isDirectory()) {
+            return new FileStatus[]{pathStatus};
+        }
+
         List<FileStatus> fileStatuses = new ArrayList<FileStatus>();

         List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());

@@ -74,7 +84,9 @@ public class SeaweedFileSystemStore {

             fileStatuses.add(fileStatus);
         }
+        LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
         return fileStatuses.toArray(new FileStatus[0]);
+
     }

     public FileStatus getFileStatus(final Path path) {

@@ -106,7 +118,7 @@ public class SeaweedFileSystemStore {
             }
         }

-        return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive);
+        return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive, true);
     }

     private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) {

@@ -137,41 +149,13 @@ public class SeaweedFileSystemStore {
         if (source.isRoot()) {
             return;
         }
-        LOG.warn("rename lookupEntry source: {}", source);
+        LOG.info("rename source: {} destination:{}", source, destination);
         FilerProto.Entry entry = lookupEntry(source);
         if (entry == null) {
             LOG.warn("rename non-existing source: {}", source);
             return;
         }
-        LOG.warn("rename moveEntry source: {}", source);
-        moveEntry(source.getParent(), entry, destination);
-    }
-
-    private boolean moveEntry(Path oldParent, FilerProto.Entry entry, Path destination) {
-
-        LOG.debug("moveEntry: {}/{} => {}", oldParent, entry.getName(), destination);
-
-        FilerProto.Entry.Builder newEntry = entry.toBuilder().setName(destination.getName());
-        boolean isDirectoryCreated = filerClient.createEntry(getParentDirectory(destination), newEntry.build());
-
-        if (!isDirectoryCreated) {
-            return false;
-        }
-
-        if (entry.getIsDirectory()) {
-            Path entryPath = new Path(oldParent, entry.getName());
-            List<FilerProto.Entry> entries = filerClient.listEntries(entryPath.toUri().getPath());
-            for (FilerProto.Entry ent : entries) {
-                boolean isSucess = moveEntry(entryPath, ent, new Path(destination, ent.getName()));
-                if (!isSucess) {
-                    return false;
-                }
-            }
-        }
-
-        return filerClient.deleteEntry(
-                oldParent.toUri().getPath(), entry.getName(), false, false);
-
+        filerClient.mv(source.toUri().getPath(), destination.toUri().getPath());
     }

     public OutputStream createFile(final Path path,

@@ -199,10 +183,10 @@ public class SeaweedFileSystemStore {
             entry = FilerProto.Entry.newBuilder();
             entry.mergeFrom(existingEntry);
             entry.getAttributesBuilder().setMtime(now);
             LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
             writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
             replication = existingEntry.getAttributes().getReplication();
         }
         if (entry == null) {
             entry = FilerProto.Entry.newBuilder()

@@ -294,4 +278,5 @@ public class SeaweedFileSystemStore {
         filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());

     }
+
 }
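The rename path above now delegates to a single filerClient.mv call, which presumably rides the new AtomicRenameEntry RPC, so a directory move becomes one filer-side operation instead of the old recursive copy-then-delete with its N createEntry/deleteEntry pairs. A hedged sketch of the calling side (paths are absolute filer paths):

    // Sketch: renaming via the filer's atomic rename.
    filerClient.mv("/old_folder/report.txt", "/new_folder/report.txt");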
@@ -0,0 +1,280 @@
package seaweed.hdfs;

// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedWrite;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.util.concurrent.*;

import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;

public class SeaweedOutputStream extends OutputStream {

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class);

    private final FilerGrpcClient filerGrpcClient;
    private final Path path;
    private final int bufferSize;
    private final int maxConcurrentRequestCount;
    private final ThreadPoolExecutor threadExecutor;
    private final ExecutorCompletionService<Void> completionService;
    private FilerProto.Entry.Builder entry;
    private long position;
    private boolean closed;
    private boolean supportFlush = true;
    private volatile IOException lastError;
    private long lastFlushOffset;
    private long lastTotalAppendOffset = 0;
    private byte[] buffer;
    private int bufferIndex;
    private ConcurrentLinkedDeque<WriteOperation> writeOperations;
    private String replication = "000";

    public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
                               final long position, final int bufferSize, final String replication) {
        this.filerGrpcClient = filerGrpcClient;
        this.replication = replication;
        this.path = path;
        this.position = position;
        this.closed = false;
        this.lastError = null;
        this.lastFlushOffset = 0;
        this.bufferSize = bufferSize;
        this.buffer = new byte[bufferSize];
        this.bufferIndex = 0;
        this.writeOperations = new ConcurrentLinkedDeque<>();

        this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors();

        this.threadExecutor
                = new ThreadPoolExecutor(maxConcurrentRequestCount,
                maxConcurrentRequestCount,
                10L,
                TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        this.completionService = new ExecutorCompletionService<>(this.threadExecutor);

        this.entry = entry;

    }

    private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {
        try {
            SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
        } catch (Exception ex) {
            throw new IOException(ex);
        }
        this.lastFlushOffset = offset;
    }

    @Override
    public void write(final int byteVal) throws IOException {
        write(new byte[]{(byte) (byteVal & 0xFF)});
    }

    @Override
    public synchronized void write(final byte[] data, final int off, final int length)
            throws IOException {
        maybeThrowLastError();

        Preconditions.checkArgument(data != null, "null data");

        if (off < 0 || length < 0 || length > data.length - off) {
            throw new IndexOutOfBoundsException();
        }

        int currentOffset = off;
        int writableBytes = bufferSize - bufferIndex;
        int numberOfBytesToWrite = length;

        while (numberOfBytesToWrite > 0) {
            if (writableBytes <= numberOfBytesToWrite) {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
                bufferIndex += writableBytes;
                writeCurrentBufferToService();
                currentOffset += writableBytes;
                numberOfBytesToWrite = numberOfBytesToWrite - writableBytes;
            } else {
                System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite);
                bufferIndex += numberOfBytesToWrite;
                numberOfBytesToWrite = 0;
            }

            writableBytes = bufferSize - bufferIndex;
        }
    }
||||
|
||||
/**
|
||||
* Flushes this output stream and forces any buffered output bytes to be
|
||||
* written out. If any data remains in the payload it is committed to the
|
||||
* service. Data is queued for writing and forced out to the service
|
||||
* before the call returns.
|
||||
*/
|
||||
@Override
|
||||
public void flush() throws IOException {
|
||||
if (supportFlush) {
|
||||
flushInternalAsync();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Force all data in the output stream to be written to Azure storage.
|
||||
* Wait to return until this is complete. Close the access to the stream and
|
||||
* shutdown the upload thread pool.
|
||||
* If the blob was created, its lease will be released.
|
||||
* Any error encountered caught in threads and stored will be rethrown here
|
||||
* after cleanup.
|
||||
*/
|
||||
@Override
|
||||
public synchronized void close() throws IOException {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
|
||||
LOG.debug("close path: {}", path);
|
||||
try {
|
||||
flushInternal();
|
||||
threadExecutor.shutdown();
|
||||
} finally {
|
||||
lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
|
||||
buffer = null;
|
||||
bufferIndex = 0;
|
||||
closed = true;
|
||||
writeOperations.clear();
|
||||
if (!threadExecutor.isShutdown()) {
|
||||
threadExecutor.shutdownNow();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void writeCurrentBufferToService() throws IOException {
|
||||
if (bufferIndex == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
final byte[] bytes = buffer;
|
||||
final int bytesLength = bufferIndex;
|
||||
|
||||
buffer = new byte[bufferSize];
|
||||
bufferIndex = 0;
|
||||
final long offset = position;
|
||||
position += bytesLength;
|
||||
|
||||
if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) {
|
||||
waitForTaskToComplete();
|
||||
}
|
||||
|
||||
final Future<Void> job = completionService.submit(new Callable<Void>() {
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
// originally: client.append(path, offset, bytes, 0, bytesLength);
|
||||
SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength);
|
||||
return null;
|
||||
}
|
||||
});
|
||||
|
||||
writeOperations.add(new WriteOperation(job, offset, bytesLength));
|
||||
|
||||
// Try to shrink the queue
|
||||
shrinkWriteOperationQueue();
|
||||
}
|
||||
|
||||
private void waitForTaskToComplete() throws IOException {
|
||||
boolean completed;
|
||||
for (completed = false; completionService.poll() != null; completed = true) {
|
||||
// keep polling until there is no data
|
||||
}
|
||||
|
||||
if (!completed) {
|
||||
try {
|
||||
completionService.take();
|
||||
} catch (InterruptedException e) {
|
||||
lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e);
|
||||
throw lastError;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void maybeThrowLastError() throws IOException {
|
||||
if (lastError != null) {
|
||||
throw lastError;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to remove the completed write operations from the beginning of write
|
||||
* operation FIFO queue.
|
||||
*/
|
||||
private synchronized void shrinkWriteOperationQueue() throws IOException {
|
||||
try {
|
||||
while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) {
|
||||
writeOperations.peek().task.get();
|
||||
lastTotalAppendOffset += writeOperations.peek().length;
|
||||
writeOperations.remove();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
lastError = new IOException(e);
|
||||
throw lastError;
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void flushInternal() throws IOException {
|
||||
maybeThrowLastError();
|
||||
writeCurrentBufferToService();
|
||||
flushWrittenBytesToService();
|
||||
}
|
||||
|
||||
private synchronized void flushInternalAsync() throws IOException {
|
||||
maybeThrowLastError();
|
||||
writeCurrentBufferToService();
|
||||
flushWrittenBytesToServiceAsync();
|
||||
}
|
||||
|
||||
private synchronized void flushWrittenBytesToService() throws IOException {
|
||||
for (WriteOperation writeOperation : writeOperations) {
|
||||
try {
|
||||
writeOperation.task.get();
|
||||
} catch (Exception ex) {
|
||||
lastError = new IOException(ex);
|
||||
throw lastError;
|
||||
}
|
||||
}
|
||||
LOG.debug("flushWrittenBytesToService: {} position:{}", path, position);
|
||||
flushWrittenBytesToServiceInternal(position);
|
||||
}
|
||||
|
||||
private synchronized void flushWrittenBytesToServiceAsync() throws IOException {
|
||||
shrinkWriteOperationQueue();
|
||||
|
||||
if (this.lastTotalAppendOffset > this.lastFlushOffset) {
|
||||
this.flushWrittenBytesToServiceInternal(this.lastTotalAppendOffset);
|
||||
}
|
||||
}
|
||||
|
||||
private static class WriteOperation {
|
||||
private final Future<Void> task;
|
||||
private final long startOffset;
|
||||
private final long length;
|
||||
|
||||
WriteOperation(final Future<Void> task, final long startOffset, final long length) {
|
||||
Preconditions.checkNotNull(task, "task");
|
||||
Preconditions.checkArgument(startOffset >= 0, "startOffset");
|
||||
Preconditions.checkArgument(length >= 0, "length");
|
||||
|
||||
this.task = task;
|
||||
this.startOffset = startOffset;
|
||||
this.length = length;
|
||||
}
|
||||
}
|
||||
|
||||
}
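The write(byte[], int, int) loop above is the heart of the client-side buffering: it copies into a fixed bufferSize window and ships a full window to the filer before continuing. A minimal, self-contained sketch of the same arithmetic (illustrative names only, not part of the client API) that can be run to check the flush boundaries:

public class ChunkingSketch {
    static java.util.List<Integer> flushSizes(int bufferSize, int writeLength) {
        java.util.List<Integer> flushes = new java.util.ArrayList<>();
        int bufferIndex = 0;                        // bytes currently buffered
        int remaining = writeLength;
        while (remaining > 0) {
            int writable = bufferSize - bufferIndex;
            if (writable <= remaining) {            // buffer fills up: flush it
                flushes.add(bufferSize);
                remaining -= writable;
                bufferIndex = 0;
            } else {                                // tail fits: stays buffered
                bufferIndex += remaining;
                remaining = 0;
            }
        }
        return flushes;
    }

    public static void main(String[] args) {
        // a 10-byte buffer receiving 23 bytes produces two full flushes,
        // with 3 bytes left buffered until the next flush() or close()
        System.out.println(flushSizes(10, 23)); // prints [10, 10]
    }
}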
133
other/java/hdfs3/dependency-reduced-pom.xml
Normal file
@ -0,0 +1,133 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <parent>
    <artifactId>oss-parent</artifactId>
    <groupId>org.sonatype.oss</groupId>
    <version>9</version>
    <relativePath>../pom.xml/pom.xml</relativePath>
  </parent>
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.github.chrislusf</groupId>
  <artifactId>seaweedfs-hadoop3-client</artifactId>
  <version>${seaweedfs.client.version}</version>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <configuration>
          <source>7</source>
          <target>7</target>
        </configuration>
      </plugin>
      <plugin>
        <artifactId>maven-shade-plugin</artifactId>
        <version>3.2.1</version>
        <executions>
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
            <configuration>
              <filters>
                <filter>
                  <artifact>*:*</artifact>
                  <excludes>
                    <exclude>META-INF/*.SF</exclude>
                    <exclude>META-INF/*.DSA</exclude>
                    <exclude>META-INF/*.RSA</exclude>
                    <exclude>org/slf4j/**</exclude>
                    <exclude>META-INF/maven/org.slf4j/**</exclude>
                  </excludes>
                </filter>
              </filters>
              <transformers>
                <transformer />
              </transformers>
              <relocations>
                <relocation>
                  <pattern>com.google</pattern>
                  <shadedPattern>shaded.com.google</shadedPattern>
                </relocation>
                <relocation>
                  <pattern>io.grpc.internal</pattern>
                  <shadedPattern>shaded.io.grpc.internal</shadedPattern>
                </relocation>
                <relocation>
                  <pattern>org.apache.commons</pattern>
                  <shadedPattern>shaded.org.apache.commons</shadedPattern>
                  <excludes>
                    <exclude>org.apache.hadoop</exclude>
                    <exclude>org.apache.log4j</exclude>
                  </excludes>
                </relocation>
                <relocation>
                  <pattern>org.apache.http</pattern>
                  <shadedPattern>shaded.org.apache.http</shadedPattern>
                </relocation>
              </relocations>
            </configuration>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <artifactId>maven-gpg-plugin</artifactId>
        <version>1.5</version>
        <executions>
          <execution>
            <id>sign-artifacts</id>
            <phase>verify</phase>
            <goals>
              <goal>sign</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.sonatype.plugins</groupId>
        <artifactId>nexus-staging-maven-plugin</artifactId>
        <version>1.6.7</version>
        <extensions>true</extensions>
        <configuration>
          <serverId>ossrh</serverId>
          <nexusUrl>https://oss.sonatype.org/</nexusUrl>
          <autoReleaseAfterClose>true</autoReleaseAfterClose>
        </configuration>
      </plugin>
      <plugin>
        <artifactId>maven-source-plugin</artifactId>
        <version>2.2.1</version>
        <executions>
          <execution>
            <id>attach-sources</id>
            <goals>
              <goal>jar-no-fork</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <artifactId>maven-javadoc-plugin</artifactId>
        <version>2.9.1</version>
        <executions>
          <execution>
            <id>attach-javadocs</id>
            <goals>
              <goal>jar</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
  <distributionManagement>
    <snapshotRepository>
      <id>ossrh</id>
      <url>https://oss.sonatype.org/content/repositories/snapshots</url>
    </snapshotRepository>
  </distributionManagement>
  <properties>
    <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
    <hadoop.version>3.1.1</hadoop.version>
  </properties>
</project>
@ -5,12 +5,12 @@
<modelVersion>4.0.0</modelVersion>

<properties>
  <seaweedfs.client.version>1.0.5</seaweedfs.client.version>
  <seaweedfs.client.version>1.2.9</seaweedfs.client.version>
  <hadoop.version>3.1.1</hadoop.version>
</properties>

<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-hadoop-client</artifactId>
<artifactId>seaweedfs-hadoop3-client</artifactId>
<version>${seaweedfs.client.version}</version>

<parent>

@ -79,6 +79,10 @@
          <exclude>org.apache.log4j</exclude>
        </excludes>
      </relocation>
      <relocation>
        <pattern>org.apache.http</pattern>
        <shadedPattern>shaded.org.apache.http</shadedPattern>
      </relocation>
    </relocations>
  </configuration>
</execution>
137
other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java
Normal file
@ -0,0 +1,137 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweed.hdfs;

import java.util.concurrent.CountDownLatch;

class ReadBuffer {

    private SeaweedInputStream stream;
    private long offset;                  // offset within the file for the buffer
    private int length;                   // actual length, set after the buffer is filled
    private int requestedLength;          // requested length of the read
    private byte[] buffer;                // the buffer itself
    private int bufferindex = -1;         // index in the buffers array in the buffer manager
    private ReadBufferStatus status;      // status of the buffer
    private CountDownLatch latch = null;  // signaled when the buffer is done reading, so any client
    // waiting on this buffer gets unblocked

    // fields to help with eviction logic
    private long timeStamp = 0;  // tick at which buffer became available to read
    private boolean isFirstByteConsumed = false;
    private boolean isLastByteConsumed = false;
    private boolean isAnyByteConsumed = false;

    public SeaweedInputStream getStream() {
        return stream;
    }

    public void setStream(SeaweedInputStream stream) {
        this.stream = stream;
    }

    public long getOffset() {
        return offset;
    }

    public void setOffset(long offset) {
        this.offset = offset;
    }

    public int getLength() {
        return length;
    }

    public void setLength(int length) {
        this.length = length;
    }

    public int getRequestedLength() {
        return requestedLength;
    }

    public void setRequestedLength(int requestedLength) {
        this.requestedLength = requestedLength;
    }

    public byte[] getBuffer() {
        return buffer;
    }

    public void setBuffer(byte[] buffer) {
        this.buffer = buffer;
    }

    public int getBufferindex() {
        return bufferindex;
    }

    public void setBufferindex(int bufferindex) {
        this.bufferindex = bufferindex;
    }

    public ReadBufferStatus getStatus() {
        return status;
    }

    public void setStatus(ReadBufferStatus status) {
        this.status = status;
    }

    public CountDownLatch getLatch() {
        return latch;
    }

    public void setLatch(CountDownLatch latch) {
        this.latch = latch;
    }

    public long getTimeStamp() {
        return timeStamp;
    }

    public void setTimeStamp(long timeStamp) {
        this.timeStamp = timeStamp;
    }

    public boolean isFirstByteConsumed() {
        return isFirstByteConsumed;
    }

    public void setFirstByteConsumed(boolean isFirstByteConsumed) {
        this.isFirstByteConsumed = isFirstByteConsumed;
    }

    public boolean isLastByteConsumed() {
        return isLastByteConsumed;
    }

    public void setLastByteConsumed(boolean isLastByteConsumed) {
        this.isLastByteConsumed = isLastByteConsumed;
    }

    public boolean isAnyByteConsumed() {
        return isAnyByteConsumed;
    }

    public void setAnyByteConsumed(boolean isAnyByteConsumed) {
        this.isAnyByteConsumed = isAnyByteConsumed;
    }

}
@ -0,0 +1,394 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 *     http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package seaweed.hdfs;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collection;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Stack;
import java.util.concurrent.CountDownLatch;

/**
 * The read buffer manager for SeaweedFS read-ahead (adapted from the ABFS client).
 */
final class ReadBufferManager {
    private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class);

    private static final int NUM_BUFFERS = 16;
    private static final int BLOCK_SIZE = 4 * 1024 * 1024;
    private static final int NUM_THREADS = 8;
    private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold

    private Thread[] threads = new Thread[NUM_THREADS];
    private byte[][] buffers;    // array of byte[] buffers, to hold the data that is read
    private Stack<Integer> freeList = new Stack<>();   // indices in buffers[] array that are available

    private Queue<ReadBuffer> readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet
    private LinkedList<ReadBuffer> inProgressList = new LinkedList<>(); // requests being processed by worker threads
    private LinkedList<ReadBuffer> completedReadList = new LinkedList<>(); // buffers available for reading
    private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block

    static {
        BUFFER_MANAGER = new ReadBufferManager();
        BUFFER_MANAGER.init();
    }

    static ReadBufferManager getBufferManager() {
        return BUFFER_MANAGER;
    }

    private void init() {
        buffers = new byte[NUM_BUFFERS][];
        for (int i = 0; i < NUM_BUFFERS; i++) {
            buffers[i] = new byte[BLOCK_SIZE];  // same buffers are reused. The byte array never goes back to GC
            freeList.add(i);
        }
        for (int i = 0; i < NUM_THREADS; i++) {
            Thread t = new Thread(new ReadBufferWorker(i));
            t.setDaemon(true);
            threads[i] = t;
            t.setName("SeaweedFS-prefetch-" + i);
            t.start();
        }
        ReadBufferWorker.UNLEASH_WORKERS.countDown();
    }

    // hide instance constructor
    private ReadBufferManager() {
    }

    /*
     *
     *  SeaweedInputStream-facing methods
     *
     */

    /**
     * {@link SeaweedInputStream} calls this method to queue read-aheads.
     *
     * @param stream          The {@link SeaweedInputStream} for which to do the read-ahead
     * @param requestedOffset The offset in the file which should be read
     * @param requestedLength The length to read
     */
    void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Start Queueing readAhead for {} offset {} length {}",
                stream.getPath(), requestedOffset, requestedLength);
        }
        ReadBuffer buffer;
        synchronized (this) {
            if (isAlreadyQueued(stream, requestedOffset)) {
                return; // already queued, do not queue again
            }
            if (freeList.isEmpty() && !tryEvict()) {
                return; // no buffers available, cannot queue anything
            }

            buffer = new ReadBuffer();
            buffer.setStream(stream);
            buffer.setOffset(requestedOffset);
            buffer.setLength(0);
            buffer.setRequestedLength(requestedLength);
            buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE);
            buffer.setLatch(new CountDownLatch(1));

            Integer bufferIndex = freeList.pop();  // will return a value, since we have checked size > 0 already

            buffer.setBuffer(buffers[bufferIndex]);
            buffer.setBufferindex(bufferIndex);
            readAheadQueue.add(buffer);
            notifyAll();
        }
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}",
                stream.getPath(), requestedOffset, buffer.getBufferindex());
        }
    }

    /**
     * {@link SeaweedInputStream} calls this method to read any bytes already available in a buffer (thereby saving a
     * remote read). This returns the bytes if the data already exists in a buffer. If there is a buffer that is reading
     * the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead
     * but not picked up by a worker thread yet, then it cancels that read-ahead and reports a cache miss. This is because,
     * depending on worker thread availability, the read-ahead may take a while - the calling thread can do its own
     * read to get the data faster (compared to the read waiting in queue for an indeterminate amount of time).
     *
     * @param stream   the file to read bytes for
     * @param position the offset in the file to do a read for
     * @param length   the length to read
     * @param buffer   the buffer to read data into. Note that the buffer will be written into from offset 0.
     * @return the number of bytes read
     */
    int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) {
        // not synchronized, so have to be careful with locking
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("getBlock for file {} position {} thread {}",
                stream.getPath(), position, Thread.currentThread().getName());
        }

        waitForProcess(stream, position);

        int bytesRead = 0;
        synchronized (this) {
            bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer);
        }
        if (bytesRead > 0) {
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("Done read from Cache for {} position {} length {}",
                    stream.getPath(), position, bytesRead);
            }
            return bytesRead;
        }

        // otherwise, just say we got nothing - calling thread can do its own read
        return 0;
    }

    /*
     *
     *  Internal methods
     *
     */

    private void waitForProcess(final SeaweedInputStream stream, final long position) {
        ReadBuffer readBuf;
        synchronized (this) {
            clearFromReadAheadQueue(stream, position);
            readBuf = getFromList(inProgressList, stream, position);
        }
        if (readBuf != null) {         // if in in-progress queue, then block for it
            try {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}",
                        stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex());
                }
                readBuf.getLatch().await();  // blocking wait on the caller stream's thread
                // Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread
                // is done processing it (in doneReading). There, the latch is set after removing the buffer from
                // inProgressList. So this latch is safe to be outside the synchronized block.
                // Putting it in synchronized would result in a deadlock, since this thread would be holding the lock
                // while waiting, so no one will be able to change any state. If this becomes more complex in the future,
                // then the latch can be removed and replaced with wait/notify whenever inProgressList is touched.
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("latch done for file {} buffer idx {} length {}",
                    stream.getPath(), readBuf.getBufferindex(), readBuf.getLength());
            }
        }
    }

    /**
     * If any buffer in the completed list can be reclaimed then reclaim it and return the buffer to the free list.
     * The objective is to find just one buffer - there is no advantage to evicting more than one.
     *
     * @return whether the eviction succeeded - i.e., were we able to free up one buffer
     */
    private synchronized boolean tryEvict() {
        ReadBuffer nodeToEvict = null;
        if (completedReadList.size() <= 0) {
            return false;  // there are no evict-able buffers
        }

        // first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed)
        for (ReadBuffer buf : completedReadList) {
            if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) {
                nodeToEvict = buf;
                break;
            }
        }
        if (nodeToEvict != null) {
            return evict(nodeToEvict);
        }

        // next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see)
        for (ReadBuffer buf : completedReadList) {
            if (buf.isAnyByteConsumed()) {
                nodeToEvict = buf;
                break;
            }
        }

        if (nodeToEvict != null) {
            return evict(nodeToEvict);
        }

        // next, try any old nodes that have not been consumed
        long earliestBirthday = Long.MAX_VALUE;
        for (ReadBuffer buf : completedReadList) {
            if (buf.getTimeStamp() < earliestBirthday) {
                nodeToEvict = buf;
                earliestBirthday = buf.getTimeStamp();
            }
        }
        if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) {
            return evict(nodeToEvict);
        }

        // nothing can be evicted
        return false;
    }

    private boolean evict(final ReadBuffer buf) {
        freeList.push(buf.getBufferindex());
        completedReadList.remove(buf);
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}",
                buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength());
        }
        return true;
    }

    private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) {
        // returns true if any part of the buffer is already queued
        return (isInList(readAheadQueue, stream, requestedOffset)
            || isInList(inProgressList, stream, requestedOffset)
            || isInList(completedReadList, stream, requestedOffset));
    }

    private boolean isInList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
        return (getFromList(list, stream, requestedOffset) != null);
    }

    private ReadBuffer getFromList(final Collection<ReadBuffer> list, final SeaweedInputStream stream, final long requestedOffset) {
        for (ReadBuffer buffer : list) {
            if (buffer.getStream() == stream) {
                if (buffer.getStatus() == ReadBufferStatus.AVAILABLE
                    && requestedOffset >= buffer.getOffset()
                    && requestedOffset < buffer.getOffset() + buffer.getLength()) {
                    return buffer;
                } else if (requestedOffset >= buffer.getOffset()
                    && requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) {
                    return buffer;
                }
            }
        }
        return null;
    }

    private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) {
        ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset);
        if (buffer != null) {
            readAheadQueue.remove(buffer);
            notifyAll();   // lock is held in calling method
            freeList.push(buffer.getBufferindex());
        }
    }

    private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length,
                                           final byte[] buffer) {
        ReadBuffer buf = getFromList(completedReadList, stream, position);
        if (buf == null || position >= buf.getOffset() + buf.getLength()) {
            return 0;
        }
        int cursor = (int) (position - buf.getOffset());
        int availableLengthInBuffer = buf.getLength() - cursor;
        int lengthToCopy = Math.min(length, availableLengthInBuffer);
        System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy);
        if (cursor == 0) {
            buf.setFirstByteConsumed(true);
        }
        if (cursor + lengthToCopy == buf.getLength()) {
            buf.setLastByteConsumed(true);
        }
        buf.setAnyByteConsumed(true);
        return lengthToCopy;
    }

    /*
     *
     *  ReadBufferWorker-thread-facing methods
     *
     */

    /**
     * ReadBufferWorker thread calls this to get the next buffer that it should work on.
     *
     * @return {@link ReadBuffer}
     * @throws InterruptedException if thread is interrupted
     */
    ReadBuffer getNextBlockToRead() throws InterruptedException {
        ReadBuffer buffer = null;
        synchronized (this) {
            // buffer = readAheadQueue.take();  // blocking method
            while (readAheadQueue.size() == 0) {
                wait();
            }
            buffer = readAheadQueue.remove();
            notifyAll();
            if (buffer == null) {
                return null;            // should never happen
            }
            buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS);
            inProgressList.add(buffer);
        }
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("ReadBufferWorker picked file {} for offset {}",
                buffer.getStream().getPath(), buffer.getOffset());
        }
        return buffer;
    }

    /**
     * ReadBufferWorker thread calls this method to post completion.
     *
     * @param buffer            the buffer whose read was completed
     * @param result            the {@link ReadBufferStatus} after the read operation in the worker thread
     * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read
     */
    void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}",
                buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead);
        }
        synchronized (this) {
            inProgressList.remove(buffer);
            if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) {
                buffer.setStatus(ReadBufferStatus.AVAILABLE);
                buffer.setTimeStamp(currentTimeMillis());
                buffer.setLength(bytesActuallyRead);
                completedReadList.add(buffer);
            } else {
                freeList.push(buffer.getBufferindex());
                // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC
            }
        }
        // outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results
        buffer.getLatch().countDown(); // wake up waiting threads (if any)
    }

    /**
     * Similar to System.currentTimeMillis, except implemented with System.nanoTime().
     * System.currentTimeMillis can go backwards when the system clock is changed (e.g., with NTP time synchronization),
     * making it unsuitable for measuring time intervals. nanoTime is strictly monotonically increasing per CPU core.
     * Note: it is not monotonic across sockets, and even within a CPU, it is only the
     * more recent parts which share a clock across all cores.
     *
     * @return current time in milliseconds
     */
    private long currentTimeMillis() {
        return System.nanoTime() / 1000 / 1000;
    }
}
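The copy in getBlockFromCompletedQueue above is plain cursor arithmetic: the requested position is mapped to an offset inside the cached block, and the first/last-byte flags that later drive tryEvict fall out of where the copy starts and ends. A standalone sketch of that arithmetic (illustrative values, outside the real classes):

public class CursorSketch {
    public static void main(String[] args) {
        long bufOffset = 4096;      // file offset where the cached block starts
        int bufLength = 1024;       // bytes actually read into the block
        long position = 4100;       // read request offset
        int requestLength = 2000;   // read request size

        int cursor = (int) (position - bufOffset);              // 4
        int available = bufLength - cursor;                     // 1020
        int lengthToCopy = Math.min(requestLength, available);  // 1020

        boolean firstByteConsumed = (cursor == 0);                       // false
        boolean lastByteConsumed = (cursor + lengthToCopy == bufLength); // true

        System.out.printf("copy %d bytes from cursor %d; first=%b last=%b%n",
            lengthToCopy, cursor, firstByteConsumed, lastByteConsumed);
    }
}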
@ -0,0 +1,29 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweed.hdfs;

/**
 * The ReadBufferStatus for the SeaweedFS read-ahead buffers.
 */
public enum ReadBufferStatus {
    NOT_AVAILABLE,        // buffers sitting in the readAheadQueue have this status
    READING_IN_PROGRESS,  // reading is in progress on this buffer. Buffer should be in inProgressList
    AVAILABLE,            // data is available in the buffer. It should be in completedList
    READ_FAILED           // read completed, but failed.
}
@ -0,0 +1,70 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package seaweed.hdfs;

import java.util.concurrent.CountDownLatch;

class ReadBufferWorker implements Runnable {

    protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1);
    private int id;

    ReadBufferWorker(final int id) {
        this.id = id;
    }

    /**
     * return the ID of ReadBufferWorker.
     */
    public int getId() {
        return this.id;
    }

    /**
     * Waits until a buffer becomes available in ReadAheadQueue.
     * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager.
     * Rinse and repeat. Forever.
     */
    public void run() {
        try {
            UNLEASH_WORKERS.await();
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
        ReadBufferManager bufferManager = ReadBufferManager.getBufferManager();
        ReadBuffer buffer;
        while (true) {
            try {
                buffer = bufferManager.getNextBlockToRead();  // blocks, until a buffer is available for this thread
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                return;
            }
            if (buffer != null) {
                try {
                    // do the actual read, from the file.
                    int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength());
                    bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead);  // post result back to ReadBufferManager
                } catch (Exception ex) {
                    bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0);
                }
            }
        }
    }
}
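The worker above is a startup gate (UNLEASH_WORKERS) in front of a wait/notify hand-off with the manager. A minimal, generic sketch of the same gate-plus-queue shape, in plain Java with no SeaweedFS types:

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CountDownLatch;

public class WorkerGateSketch {
    static final CountDownLatch GATE = new CountDownLatch(1);
    static final Queue<String> QUEUE = new ArrayDeque<>();
    static final Object LOCK = new Object();

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try {
                GATE.await();                   // workers start only after init completes
                synchronized (LOCK) {
                    while (QUEUE.isEmpty()) {
                        LOCK.wait();            // hand-off, like getNextBlockToRead()
                    }
                    System.out.println("picked up: " + QUEUE.remove());
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.setDaemon(true);
        worker.start();

        synchronized (LOCK) {                   // enqueue work, then wake a worker
            QUEUE.add("read-ahead request");
            LOCK.notifyAll();
        }
        GATE.countDown();                       // open the gate, like UNLEASH_WORKERS
        worker.join(1000);
    }
}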
@ -0,0 +1,620 @@
package seaweed.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;

public class SeaweedFileSystem extends FileSystem {

    public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
    public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
    public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";

    private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
    private static int BUFFER_SIZE = 16 * 1024 * 1024;

    private URI uri;
    private Path workingDirectory = new Path("/");
    private SeaweedFileSystemStore seaweedFileSystemStore;

    public URI getUri() {
        return uri;
    }

    public String getScheme() {
        return "seaweedfs";
    }

    @Override
    public void initialize(URI uri, Configuration conf) throws IOException {
        super.initialize(uri, conf);

        // get host information from uri (overrides info in conf)
        String host = uri.getHost();
        host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
        if (host == null) {
            throw new IOException("Invalid host specified");
        }
        conf.set(FS_SEAWEED_FILER_HOST, host);

        // get port information from uri (overrides info in conf)
        int port = uri.getPort();
        port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
        conf.setInt(FS_SEAWEED_FILER_PORT, port);

        conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE);

        setConf(conf);
        this.uri = uri;

        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
    }
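initialize() lets the seaweedfs:// URI authority override the fs.seaweed.filer.host and fs.seaweed.filer.port configuration keys, falling back to localhost:8888. A quick sketch of that resolution order with java.net.URI (standalone, not part of the class):

import java.net.URI;

public class FilerAddressSketch {
    public static void main(String[] args) {
        for (String s : new String[]{
                "seaweedfs://filer1:8888/buckets",
                "seaweedfs:///no-authority/path"}) {
            URI uri = URI.create(s);
            String host = (uri.getHost() == null) ? "localhost" : uri.getHost(); // conf fallback
            int port = (uri.getPort() == -1) ? 8888 : uri.getPort();             // FS_SEAWEED_DEFAULT_PORT
            System.out.println(host + ":" + port); // filer1:8888, then localhost:8888
        }
    }
}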
    @Override
    public FSDataInputStream open(Path path, int bufferSize) throws IOException {

        LOG.debug("open path: {} bufferSize:{}", path, bufferSize);

        path = qualify(path);

        try {
            InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
            return new FSDataInputStream(inputStream);
        } catch (Exception ex) {
            LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
            return null;
        }
    }

    @Override
    public FSDataOutputStream create(Path path, FsPermission permission, final boolean overwrite, final int bufferSize,
                                     final short replication, final long blockSize, final Progressable progress) throws IOException {

        LOG.debug("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize);

        path = qualify(path);

        try {
            String replicaPlacement = String.format("%03d", replication - 1);
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
            return new FSDataOutputStream(outputStream, statistics);
        } catch (Exception ex) {
            LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
            return null;
        }
    }

    /**
     * {@inheritDoc}
     * @throws FileNotFoundException if the parent directory is not present or
     * is not a directory.
     */
    @Override
    public FSDataOutputStream createNonRecursive(Path path,
                                                 FsPermission permission,
                                                 EnumSet<CreateFlag> flags,
                                                 int bufferSize,
                                                 short replication,
                                                 long blockSize,
                                                 Progressable progress) throws IOException {
        Path parent = path.getParent();
        if (parent != null) {
            // expect this to raise an exception if there is no parent
            if (!getFileStatus(parent).isDirectory()) {
                throw new FileAlreadyExistsException("Not a directory: " + parent);
            }
        }
        return create(path, permission,
            flags.contains(CreateFlag.OVERWRITE), bufferSize,
            replication, blockSize, progress);
    }

    @Override
    public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {

        LOG.debug("append path: {} bufferSize:{}", path, bufferSize);

        path = qualify(path);
        try {
            OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
            return new FSDataOutputStream(outputStream, statistics);
        } catch (Exception ex) {
            LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
            return null;
        }
    }

    @Override
    public boolean rename(Path src, Path dst) {

        LOG.debug("rename path: {} => {}", src, dst);

        if (src.isRoot()) {
            return false;
        }

        if (src.equals(dst)) {
            return true;
        }
        FileStatus dstFileStatus = getFileStatus(dst);

        String sourceFileName = src.getName();
        Path adjustedDst = dst;

        if (dstFileStatus != null) {
            if (!dstFileStatus.isDirectory()) {
                return false;
            }
            adjustedDst = new Path(dst, sourceFileName);
        }

        Path qualifiedSrcPath = qualify(src);
        Path qualifiedDstPath = qualify(adjustedDst);

        seaweedFileSystemStore.rename(qualifiedSrcPath, qualifiedDstPath);
        return true;
    }

    @Override
    public boolean delete(Path path, boolean recursive) {

        LOG.debug("delete path: {} recursive:{}", path, recursive);

        path = qualify(path);

        FileStatus fileStatus = getFileStatus(path);

        if (fileStatus == null) {
            return true;
        }

        return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive);
    }

    @Override
    public FileStatus[] listStatus(Path path) throws IOException {

        LOG.debug("listStatus path: {}", path);

        path = qualify(path);

        return seaweedFileSystemStore.listEntries(path);
    }

    @Override
    public Path getWorkingDirectory() {
        return workingDirectory;
    }

    @Override
    public void setWorkingDirectory(Path path) {
        if (path.isAbsolute()) {
            workingDirectory = path;
        } else {
            workingDirectory = new Path(workingDirectory, path);
        }
    }

    @Override
    public boolean mkdirs(Path path, FsPermission fsPermission) throws IOException {

        LOG.debug("mkdirs path: {}", path);

        path = qualify(path);

        FileStatus fileStatus = getFileStatus(path);

        if (fileStatus == null) {

            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            return seaweedFileSystemStore.createDirectory(path, currentUser,
                fsPermission == null ? FsPermission.getDirDefault() : fsPermission,
                FsPermission.getUMask(getConf()));

        }

        if (fileStatus.isDirectory()) {
            return true;
        } else {
            throw new FileAlreadyExistsException("Path is a file: " + path);
        }
    }

    @Override
    public FileStatus getFileStatus(Path path) {

        LOG.debug("getFileStatus path: {}", path);

        path = qualify(path);

        return seaweedFileSystemStore.getFileStatus(path);
    }

    /**
     * Set owner of a path (i.e. a file or a directory).
     * The parameters owner and group cannot both be null.
     *
     * @param path  The path
     * @param owner If it is null, the original username remains unchanged.
     * @param group If it is null, the original groupname remains unchanged.
     */
    @Override
    public void setOwner(Path path, final String owner, final String group)
        throws IOException {
        LOG.debug("setOwner path: {}", path);
        path = qualify(path);

        seaweedFileSystemStore.setOwner(path, owner, group);
    }

    /**
     * Set permission of a path.
     *
     * @param path       The path
     * @param permission Access permission
     */
    @Override
    public void setPermission(Path path, final FsPermission permission) throws IOException {
        LOG.debug("setPermission path: {}", path);

        if (permission == null) {
            throw new IllegalArgumentException("The permission can't be null");
        }

        path = qualify(path);

        seaweedFileSystemStore.setPermission(path, permission);
    }

    Path qualify(Path path) {
        return path.makeQualified(uri, workingDirectory);
    }

    /**
     * Concat existing files together.
     *
     * @param trg   the path to the target destination.
     * @param psrcs the paths to the sources to use for the concatenation.
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default).
     */
    @Override
    public void concat(final Path trg, final Path[] psrcs) throws IOException {
        throw new UnsupportedOperationException("Not implemented by the " +
            getClass().getSimpleName() + " FileSystem implementation");
    }

    /**
     * Truncate the file in the indicated path to the indicated size.
     * <ul>
     * <li>Fails if path is a directory.</li>
     * <li>Fails if path does not exist.</li>
     * <li>Fails if path is not closed.</li>
     * <li>Fails if new size is greater than current size.</li>
     * </ul>
     *
     * @param f         The path to the file to be truncated
     * @param newLength The size the file is to be truncated to
     * @return <code>true</code> if the file has been truncated to the desired
     * <code>newLength</code> and is immediately available to be reused for
     * write operations such as <code>append</code>, or
     * <code>false</code> if a background process of adjusting the length of
     * the last block has been started, and clients should wait for it to
     * complete before proceeding with further file updates.
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default).
     */
    @Override
    public boolean truncate(Path f, long newLength) throws IOException {
        throw new UnsupportedOperationException("Not implemented by the " +
            getClass().getSimpleName() + " FileSystem implementation");
    }

    @Override
    public void createSymlink(final Path target, final Path link,
                              final boolean createParent) throws AccessControlException,
        FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException,
        IOException {
        // Supporting filesystems should override this method
        throw new UnsupportedOperationException(
            "Filesystem does not support symlinks!");
    }

    public boolean supportsSymlinks() {
        return false;
    }

    /**
     * Create a snapshot.
     *
     * @param path         The directory where snapshots will be taken.
     * @param snapshotName The name of the snapshot
     * @return the snapshot path.
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     */
    @Override
    public Path createSnapshot(Path path, String snapshotName)
        throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support createSnapshot");
    }

    /**
     * Rename a snapshot.
     *
     * @param path            The directory path where the snapshot was taken
     * @param snapshotOldName Old name of the snapshot
     * @param snapshotNewName New name of the snapshot
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void renameSnapshot(Path path, String snapshotOldName,
                               String snapshotNewName) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support renameSnapshot");
    }

    /**
     * Delete a snapshot of a directory.
     *
     * @param path         The directory that the to-be-deleted snapshot belongs to
     * @param snapshotName The name of the snapshot
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void deleteSnapshot(Path path, String snapshotName)
        throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support deleteSnapshot");
    }

    /**
     * Modifies ACL entries of files and directories. This method can add new ACL
     * entries or modify the permissions on existing ACL entries. All existing
     * ACL entries that are not specified in this call are retained without
     * changes. (Modifications are merged into the current ACL.)
     *
     * @param path    Path to modify
     * @param aclSpec List<AclEntry> describing modifications
     * @throws IOException                   if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
        throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support modifyAclEntries");
    }

    /**
     * Removes ACL entries from files and directories. Other ACL entries are
     * retained.
     *
     * @param path    Path to modify
     * @param aclSpec List describing entries to remove
     * @throws IOException                   if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void removeAclEntries(Path path, List<AclEntry> aclSpec)
        throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support removeAclEntries");
    }

    /**
     * Removes all default ACL entries from files and directories.
     *
     * @param path Path to modify
     * @throws IOException                   if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void removeDefaultAcl(Path path)
        throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support removeDefaultAcl");
    }

    /**
     * Removes all but the base ACL entries of files and directories. The entries
     * for user, group, and others are retained for compatibility with permission
     * bits.
     *
     * @param path Path to modify
     * @throws IOException                   if an ACL could not be removed
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void removeAcl(Path path)
        throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support removeAcl");
    }

    /**
     * Fully replaces ACL of files and directories, discarding all existing
     * entries.
     *
     * @param path    Path to modify
     * @param aclSpec List describing modifications, which must include entries
     *                for user, group, and others for compatibility with permission bits.
     * @throws IOException                   if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support setAcl");
    }

    /**
     * Gets the ACL of a file or directory.
     *
     * @param path Path to get
     * @return AclStatus describing the ACL of the file or directory
     * @throws IOException                   if an ACL could not be read
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public AclStatus getAclStatus(Path path) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support getAclStatus");
    }

    /**
     * Set an xattr of a file or directory.
     * The name must be prefixed with the namespace followed by ".". For example,
     * "user.attr".
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path  Path to modify
     * @param name  xattr name.
     * @param value xattr value.
     * @param flag  xattr set flag
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void setXAttr(Path path, String name, byte[] value,
                         EnumSet<XAttrSetFlag> flag) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support setXAttr");
    }

    /**
     * Get an xattr name and value for a file or directory.
     * The name must be prefixed with the namespace followed by ".". For example,
     * "user.attr".
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path Path to get extended attribute
     * @param name xattr name.
     * @return byte[] xattr value.
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public byte[] getXAttr(Path path, String name) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support getXAttr");
    }

    /**
     * Get all of the xattr name/value pairs for a file or directory.
     * Only those xattrs which the logged-in user has permissions to view
     * are returned.
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path Path to get extended attributes
     * @return Map describing the XAttrs of the file or directory
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support getXAttrs");
    }

    /**
     * Get all of the xattrs name/value pairs for a file or directory.
     * Only those xattrs which the logged-in user has permissions to view
     * are returned.
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path  Path to get extended attributes
     * @param names XAttr names.
     * @return Map describing the XAttrs of the file or directory
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
        throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support getXAttrs");
    }

    /**
     * Get all of the xattr names for a file or directory.
     * Only those xattr names which the logged-in user has permissions to view
     * are returned.
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path Path to get extended attributes
     * @return List{@literal <String>} of the XAttr names of the file or directory
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public List<String> listXAttrs(Path path) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support listXAttrs");
    }

    /**
     * Remove an xattr of a file or directory.
     * The name must be prefixed with the namespace followed by ".". For example,
     * "user.attr".
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path Path to remove extended attribute
     * @param name xattr name
     * @throws IOException                   IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     *                                       (default outcome).
     */
    @Override
    public void removeXAttr(Path path, String name) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
            + " doesn't support removeXAttr");
    }

}
|
|
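All of the xattr overrides above share one behavior: they fail fast with UnsupportedOperationException instead of silently ignoring the call. A minimal, hypothetical probe (not part of this commit; the `fs` variable and the path are assumptions) showing what a caller sees:

```java
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrProbe {
  // Returns false when the FileSystem (e.g. the SeaweedFS client above)
  // does not implement extended attributes.
  public static boolean supportsXAttrs(FileSystem fs, Path p) {
    try {
      fs.setXAttr(p, "user.probe", new byte[0], EnumSet.of(XAttrSetFlag.CREATE));
      return true;
    } catch (UnsupportedOperationException e) {
      // The overrides above land here: "... doesn't support setXAttr".
      return false;
    } catch (IOException e) {
      // xattrs are supported, but this particular call failed.
      return true;
    }
  }
}
```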
@ -0,0 +1,282 @@
package seaweed.hdfs;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SeaweedFileSystemStore {

  private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class);

  private FilerGrpcClient filerGrpcClient;
  private FilerClient filerClient;

  public SeaweedFileSystemStore(String host, int port) {
    int grpcPort = 10000 + port;
    filerGrpcClient = new FilerGrpcClient(host, grpcPort);
    filerClient = new FilerClient(filerGrpcClient);
  }

  public static String getParentDirectory(Path path) {
    return path.isRoot() ? "/" : path.getParent().toUri().getPath();
  }

  static int permissionToMode(FsPermission permission, boolean isDirectory) {
    int p = permission.toShort();
    if (isDirectory) {
      p = p | 1 << 31;
    }
    return p;
  }

  public boolean createDirectory(final Path path, UserGroupInformation currentUser,
                                 final FsPermission permission, final FsPermission umask) {

    LOG.debug("createDirectory path: {} permission: {} umask: {}",
        path,
        permission,
        umask);

    return filerClient.mkdirs(
        path.toUri().getPath(),
        permissionToMode(permission, true),
        currentUser.getUserName(),
        currentUser.getGroupNames()
    );
  }

  public FileStatus[] listEntries(final Path path) {
    LOG.debug("listEntries path: {}", path);

    FileStatus pathStatus = getFileStatus(path);

    if (pathStatus == null) {
      return new FileStatus[0];
    }

    if (!pathStatus.isDirectory()) {
      return new FileStatus[]{pathStatus};
    }

    List<FileStatus> fileStatuses = new ArrayList<FileStatus>();

    List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());

    for (FilerProto.Entry entry : entries) {

      FileStatus fileStatus = doGetFileStatus(new Path(path, entry.getName()), entry);

      fileStatuses.add(fileStatus);
    }
    LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size());
    return fileStatuses.toArray(new FileStatus[0]);

  }

  public FileStatus getFileStatus(final Path path) {

    FilerProto.Entry entry = lookupEntry(path);
    if (entry == null) {
      return null;
    }
    LOG.debug("doGetFileStatus path:{} entry:{}", path, entry);

    FileStatus fileStatus = doGetFileStatus(path, entry);
    return fileStatus;
  }

  public boolean deleteEntries(final Path path, boolean isDirectory, boolean recursive) {
    LOG.debug("deleteEntries path: {} isDirectory {} recursive: {}",
        path,
        String.valueOf(isDirectory),
        String.valueOf(recursive));

    if (path.isRoot()) {
      return true;
    }

    if (recursive && isDirectory) {
      List<FilerProto.Entry> entries = filerClient.listEntries(path.toUri().getPath());
      for (FilerProto.Entry entry : entries) {
        deleteEntries(new Path(path, entry.getName()), entry.getIsDirectory(), true);
      }
    }

    return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive, true);
  }

  private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) {
    FilerProto.FuseAttributes attributes = entry.getAttributes();
    long length = SeaweedRead.totalSize(entry.getChunksList());
    boolean isDir = entry.getIsDirectory();
    int block_replication = 1;
    int blocksize = 512;
    long modification_time = attributes.getMtime() * 1000; // milliseconds
    long access_time = 0;
    FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode());
    String owner = attributes.getUserName();
    String group = attributes.getGroupNameCount() > 0 ? attributes.getGroupName(0) : "";
    return new FileStatus(length, isDir, block_replication, blocksize,
        modification_time, access_time, permission, owner, group, null, path);
  }

  private FilerProto.Entry lookupEntry(Path path) {

    return filerClient.lookupEntry(getParentDirectory(path), path.getName());

  }

  public void rename(Path source, Path destination) {

    LOG.debug("rename source: {} destination:{}", source, destination);

    if (source.isRoot()) {
      return;
    }
    LOG.info("rename source: {} destination:{}", source, destination);
    FilerProto.Entry entry = lookupEntry(source);
    if (entry == null) {
      LOG.warn("rename non-existing source: {}", source);
      return;
    }
    filerClient.mv(source.toUri().getPath(), destination.toUri().getPath());
  }

  public OutputStream createFile(final Path path,
                                 final boolean overwrite,
                                 FsPermission permission,
                                 int bufferSize,
                                 String replication) throws IOException {

    permission = permission == null ? FsPermission.getFileDefault() : permission;

    LOG.debug("createFile path: {} overwrite: {} permission: {}",
        path,
        overwrite,
        permission.toString());

    UserGroupInformation userGroupInformation = UserGroupInformation.getCurrentUser();
    long now = System.currentTimeMillis() / 1000L;

    FilerProto.Entry.Builder entry = null;
    long writePosition = 0;
    if (!overwrite) {
      FilerProto.Entry existingEntry = lookupEntry(path);
      LOG.debug("createFile merged entry path:{} existingEntry:{}", path, existingEntry);
      if (existingEntry != null) {
        entry = FilerProto.Entry.newBuilder();
        entry.mergeFrom(existingEntry);
        entry.getAttributesBuilder().setMtime(now);
        LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
        writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
        replication = existingEntry.getAttributes().getReplication();
      }
    }
    if (entry == null) {
      entry = FilerProto.Entry.newBuilder()
          .setName(path.getName())
          .setIsDirectory(false)
          .setAttributes(FilerProto.FuseAttributes.newBuilder()
              .setFileMode(permissionToMode(permission, false))
              .setReplication(replication)
              .setCrtime(now)
              .setMtime(now)
              .setUserName(userGroupInformation.getUserName())
              .clearGroupName()
              .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames()))
          );
    }

    return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication);

  }

  public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics,
                                     int bufferSize) throws IOException {

    LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize);

    int readAheadQueueDepth = 2;
    FilerProto.Entry entry = lookupEntry(path);

    if (entry == null) {
      throw new FileNotFoundException("read non-exist file " + path);
    }

    return new SeaweedInputStream(filerGrpcClient,
        statistics,
        path.toUri().getPath(),
        entry,
        bufferSize,
        readAheadQueueDepth);
  }

  public void setOwner(Path path, String owner, String group) {

    LOG.debug("setOwner path:{} owner:{} group:{}", path, owner, group);

    FilerProto.Entry entry = lookupEntry(path);
    if (entry == null) {
      LOG.debug("setOwner path:{} entry:{}", path, entry);
      return;
    }

    FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
    FilerProto.FuseAttributes.Builder attributesBuilder = entry.getAttributes().toBuilder();

    if (owner != null) {
      attributesBuilder.setUserName(owner);
    }
    if (group != null) {
      attributesBuilder.clearGroupName();
      attributesBuilder.addGroupName(group);
    }

    entryBuilder.setAttributes(attributesBuilder);

    LOG.debug("setOwner path:{} entry:{}", path, entryBuilder);

    filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());

  }

  public void setPermission(Path path, FsPermission permission) {

    LOG.debug("setPermission path:{} permission:{}", path, permission);

    FilerProto.Entry entry = lookupEntry(path);
    if (entry == null) {
      LOG.debug("setPermission path:{} entry:{}", path, entry);
      return;
    }

    FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
    FilerProto.FuseAttributes.Builder attributesBuilder = entry.getAttributes().toBuilder();

    attributesBuilder.setFileMode(permissionToMode(permission, entry.getIsDirectory()));

    entryBuilder.setAttributes(attributesBuilder);

    LOG.debug("setPermission path:{} entry:{}", path, entryBuilder);

    filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());

  }

}
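A sketch of how a caller might drive the store directly (host, port, and paths here are hypothetical, not from this commit). Note the constructor's convention above: the filer's gRPC port is assumed to be the HTTP port plus 10000.

```java
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;

public class StoreExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical filer at localhost:8888; gRPC lands on 10000 + 8888 = 18888.
    SeaweedFileSystemStore store = new SeaweedFileSystemStore("localhost", 8888);

    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    Path dir = new Path("/buckets/demo");

    // permissionToMode() ORs in bit 31 for directories, so the filer can
    // tell directories and plain files apart. The umask argument is only
    // logged by createDirectory(), not applied.
    store.createDirectory(dir, user, FsPermission.getDirDefault(), FsPermission.getDirDefault());

    for (FileStatus status : store.listEntries(dir)) {
      System.out.println(status.getPath() + " len=" + status.getLen());
    }
  }
}
```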
@ -0,0 +1,371 @@
package seaweed.hdfs;

// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead;

import java.io.EOFException;
import java.io.IOException;
import java.util.List;

public class SeaweedInputStream extends FSInputStream {

  private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class);

  private final FilerGrpcClient filerGrpcClient;
  private final Statistics statistics;
  private final String path;
  private final FilerProto.Entry entry;
  private final List<SeaweedRead.VisibleInterval> visibleIntervalList;
  private final long contentLength;
  private final int bufferSize; // default buffer size
  private final int readAheadQueueDepth; // initialized in constructor
  private final boolean readAheadEnabled; // whether enable readAhead;

  private byte[] buffer = null; // will be initialized on first use

  private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server
  private long fCursorAfterLastRead = -1;
  private int bCursor = 0; // cursor of read within buffer - offset of next byte to be returned from buffer
  private int limit = 0; // offset of next byte to be read into buffer from service (i.e., upper marker+1
  // of valid bytes in buffer)
  private boolean closed = false;

  public SeaweedInputStream(
      final FilerGrpcClient filerGrpcClient,
      final Statistics statistics,
      final String path,
      final FilerProto.Entry entry,
      final int bufferSize,
      final int readAheadQueueDepth) {
    this.filerGrpcClient = filerGrpcClient;
    this.statistics = statistics;
    this.path = path;
    this.entry = entry;
    this.contentLength = SeaweedRead.totalSize(entry.getChunksList());
    this.bufferSize = bufferSize;
    this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? readAheadQueueDepth : Runtime.getRuntime().availableProcessors();
    this.readAheadEnabled = true;

    this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList());

    LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList);

  }

  public String getPath() {
    return path;
  }

  @Override
  public int read() throws IOException {
    byte[] b = new byte[1];
    int numberOfBytesRead = read(b, 0, 1);
    if (numberOfBytesRead < 0) {
      return -1;
    } else {
      return (b[0] & 0xFF);
    }
  }

  @Override
  public synchronized int read(final byte[] b, final int off, final int len) throws IOException {
    int currentOff = off;
    int currentLen = len;
    int lastReadBytes;
    int totalReadBytes = 0;
    do {
      lastReadBytes = readOneBlock(b, currentOff, currentLen);
      if (lastReadBytes > 0) {
        currentOff += lastReadBytes;
        currentLen -= lastReadBytes;
        totalReadBytes += lastReadBytes;
      }
      if (currentLen <= 0 || currentLen > b.length - currentOff) {
        break;
      }
    } while (lastReadBytes > 0);
    return totalReadBytes > 0 ? totalReadBytes : lastReadBytes;
  }

  private int readOneBlock(final byte[] b, final int off, final int len) throws IOException {
    if (closed) {
      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
    }

    Preconditions.checkNotNull(b);

    if (len == 0) {
      return 0;
    }

    if (this.available() == 0) {
      return -1;
    }

    if (off < 0 || len < 0 || len > b.length - off) {
      throw new IndexOutOfBoundsException();
    }

    //If buffer is empty, then fill the buffer.
    if (bCursor == limit) {
      //If EOF, then return -1
      if (fCursor >= contentLength) {
        return -1;
      }

      long bytesRead = 0;
      //reset buffer to initial state - i.e., throw away existing data
      bCursor = 0;
      limit = 0;
      if (buffer == null) {
        buffer = new byte[bufferSize];
      }

      // Enable readAhead when reading sequentially
      if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) {
        bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false);
      } else {
        bytesRead = readInternal(fCursor, buffer, 0, b.length, true);
      }

      if (bytesRead == -1) {
        return -1;
      }

      limit += bytesRead;
      fCursor += bytesRead;
      fCursorAfterLastRead = fCursor;
    }

    //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer)
    //(bytes returned may be less than requested)
    int bytesRemaining = limit - bCursor;
    int bytesToRead = Math.min(len, bytesRemaining);
    System.arraycopy(buffer, bCursor, b, off, bytesToRead);
    bCursor += bytesToRead;
    if (statistics != null) {
      statistics.incrementBytesRead(bytesToRead);
    }
    return bytesToRead;
  }


  private int readInternal(final long position, final byte[] b, final int offset, final int length,
                           final boolean bypassReadAhead) throws IOException {
    if (readAheadEnabled && !bypassReadAhead) {
      // try reading from read-ahead
      if (offset != 0) {
        throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets");
      }
      int receivedBytes;

      // queue read-aheads
      int numReadAheads = this.readAheadQueueDepth;
      long nextSize;
      long nextOffset = position;
      while (numReadAheads > 0 && nextOffset < contentLength) {
        nextSize = Math.min((long) bufferSize, contentLength - nextOffset);
        ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize);
        nextOffset = nextOffset + nextSize;
        numReadAheads--;
      }

      // try reading from buffers first
      receivedBytes = ReadBufferManager.getBufferManager().getBlock(this, position, length, b);
      if (receivedBytes > 0) {
        return receivedBytes;
      }

      // got nothing from read-ahead, do our own read now
      receivedBytes = readRemote(position, b, offset, length);
      return receivedBytes;
    } else {
      return readRemote(position, b, offset, length);
    }
  }

  int readRemote(long position, byte[] b, int offset, int length) throws IOException {
    if (position < 0) {
      throw new IllegalArgumentException("attempting to read from negative offset");
    }
    if (position >= contentLength) {
      return -1; // Hadoop prefers -1 to EOFException
    }
    if (b == null) {
      throw new IllegalArgumentException("null byte array passed in to read() method");
    }
    if (offset >= b.length) {
      throw new IllegalArgumentException("offset greater than length of array");
    }
    if (length < 0) {
      throw new IllegalArgumentException("requested read length is less than zero");
    }
    if (length > (b.length - offset)) {
      throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer");
    }

    long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length);
    if (bytesRead > Integer.MAX_VALUE) {
      throw new IOException("Unexpected Content-Length");
    }
    return (int) bytesRead;
  }

  /**
   * Seek to given position in stream.
   *
   * @param n position to seek to
   * @throws IOException if there is an error
   * @throws EOFException if attempting to seek past end of file
   */
  @Override
  public synchronized void seek(long n) throws IOException {
    if (closed) {
      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
    }
    if (n < 0) {
      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
    }
    if (n > contentLength) {
      throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
    }

    if (n >= fCursor - limit && n <= fCursor) { // within buffer
      bCursor = (int) (n - (fCursor - limit));
      return;
    }

    // next read will read from here
    fCursor = n;

    //invalidate buffer
    limit = 0;
    bCursor = 0;
  }

  @Override
  public synchronized long skip(long n) throws IOException {
    if (closed) {
      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
    }
    long currentPos = getPos();
    if (currentPos == contentLength) {
      if (n > 0) {
        throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
      }
    }
    long newPos = currentPos + n;
    if (newPos < 0) {
      newPos = 0;
      n = newPos - currentPos;
    }
    if (newPos > contentLength) {
      newPos = contentLength;
      n = newPos - currentPos;
    }
    seek(newPos);
    return n;
  }

  /**
   * Return the size of the remaining available bytes
   * if the size is less than or equal to {@link Integer#MAX_VALUE},
   * otherwise, return {@link Integer#MAX_VALUE}.
   * <p>
   * This is to match the behavior of DFSInputStream.available(),
   * which some clients may rely on (HBase write-ahead log reading in
   * particular).
   */
  @Override
  public synchronized int available() throws IOException {
    if (closed) {
      throw new IOException(
          FSExceptionMessages.STREAM_IS_CLOSED);
    }
    final long remaining = this.contentLength - this.getPos();
    return remaining <= Integer.MAX_VALUE
        ? (int) remaining : Integer.MAX_VALUE;
  }

  /**
   * Returns the length of the file that this stream refers to. Note that the length returned is the length
   * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
   * they won't be reflected in the returned length.
   *
   * @return length of the file.
   * @throws IOException if the stream is closed
   */
  public long length() throws IOException {
    if (closed) {
      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
    }
    return contentLength;
  }

  /**
   * Return the current offset from the start of the file
   *
   * @throws IOException throws {@link IOException} if there is an error
   */
  @Override
  public synchronized long getPos() throws IOException {
    if (closed) {
      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
    }
    return fCursor - limit + bCursor;
  }

  /**
   * Seeks a different copy of the data. Returns true if
   * found a new source, false otherwise.
   *
   * @throws IOException throws {@link IOException} if there is an error
   */
  @Override
  public boolean seekToNewSource(long l) throws IOException {
    return false;
  }

  @Override
  public synchronized void close() throws IOException {
    closed = true;
    buffer = null; // de-reference the buffer so it can be GC'ed sooner
  }

  /**
   * Not supported by this stream. Throws {@link UnsupportedOperationException}
   *
   * @param readlimit ignored
   */
  @Override
  public synchronized void mark(int readlimit) {
    throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
  }

  /**
   * Not supported by this stream. Throws {@link UnsupportedOperationException}
   */
  @Override
  public synchronized void reset() throws IOException {
    throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
  }

  /**
   * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false.
   *
   * @return always {@code false}
   */
  @Override
  public boolean markSupported() {
    return false;
  }
}
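The position bookkeeping above reduces to three counters: fCursor is the file offset just past the buffered region, limit is how many valid bytes the buffer holds, and bCursor is the read position inside the buffer. A worked sketch with made-up numbers (not from the source) showing why getPos() is fCursor - limit + bCursor:

```java
public class CursorArithmetic {
  public static void main(String[] args) {
    // Suppose one 4 KiB buffer fill just completed at the start of the file.
    long fCursor = 4096; // next byte to fetch from the remote server
    int limit = 4096;    // valid bytes currently held in the buffer
    int bCursor = 0;     // next byte to hand back to the caller

    // The caller consumes 100 bytes out of the buffer.
    bCursor += 100;

    // getPos() reconstructs the logical stream position:
    long pos = fCursor - limit + bCursor;
    System.out.println(pos); // prints 100

    // A seek() to any offset in [fCursor - limit, fCursor] only moves
    // bCursor, so no remote re-read is needed; that is the same shortcut
    // taken by seek() in the class above.
  }
}
```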
@ -78,9 +78,6 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
  }

  private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException {

    LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry);

    try {
      SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry);
    } catch (Exception ex) {

49 snap/README.md Normal file
@ -0,0 +1,49 @@
Hi

This PR adds support for building a snap package of seaweedfs. Snaps are cross distro Linux software packages. One snap can be installed on Ubuntu, on all supported LTS and non-LTS releases from 14.04 onward. Additionally they can be installed on Debian, Manjaro, Fedora, OpenSUSE and others. Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store.

If accepted, you can use snapcraft locally, a CI system such as travis or circle-ci, or our free build system (build.snapcraft.io) to create snaps and upload to the store (snapcraft.io/store).

To test this PR locally, I used an Ubuntu 16.04 VM, with the following steps.

```
snap install snapcraft --classic
git clone https://github.com/popey/seaweedfs
cd seaweedfs
git checkout add-snapcraft
snapcraft
```

This generated a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:

snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous

(the --dangerous is necessary because we’re installing an app which hasn’t gone through the snap store review process)

Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an ‘alias’ so users can use the ‘weed’ command rather than the namespaced ‘seaweedfs.weed’.

- Run the command
- Create sample config. Snaps are securely confined, so their home directory is in a different place:
  mkdir ~/snap/seaweedfs/current/.seaweedfs
  seaweedfs.weed scaffold > ~/snap/seaweedfs/current/.seaweedfs/filer.toml
- Run a server:
  seaweedfs.weed server
- Run a benchmark:
  seaweedfs.weed benchmark

Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/

If landed, you will need to:

- Register an account in the snap store https://snapcraft.io/account
- Register the ‘seaweedfs’ name in the store:
  - snapcraft login
  - snapcraft register seaweedfs
- Upload a built snap to the store:
  - snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge
- Test installing on a clean Ubuntu 16.04 machine:
  - snap install seaweedfs --edge

The store supports multiple risk levels as “channels”, with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally, beta and candidate channels can also be used if needed.

Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there).
53 snap/snapcraft.yaml Normal file
@ -0,0 +1,53 @@
# Name of snap as registered in the store
name: seaweedfs
# Automatically derive snap version from git tags
version: git
# Short human readable name as seen in 'snap find $SNAPNAME'
summary: SeaweedFS
# Longer multi-line description found in 'snap info $SNAPNAME'
description: |
  SeaweedFS is a simple and highly scalable distributed file system,
  to store and serve billions of files fast!
  SeaweedFS implements an object store with O(1) disk seek,
  transparent cloud integration, and an optional Filer with POSIX interface,
  supporting S3 API, Rack-Aware Erasure Coding for warm storage,
  FUSE mount, Hadoop compatible, WebDAV.

# Grade is stable for snaps expected to land in the stable channel
grade: stable
# Uses the strict confinement model and uses interfaces to open up access to
# resources on the target host
confinement: strict

# List of parts which comprise the snap
parts:
  # The main part which defines how to build the application in the snap
  seaweedfs:
    # This part needs a newer version of golang, so we use a separate part
    # which defines how to get a newer golang during the build
    after: [go]
    # The go plugin knows how to build go applications into a snap
    plugin: go
    # Snapcraft will look in this location for the source of the application
    source: .
    go-importpath: github.com/chrislusf/seaweedfs
  go:
    # Defines the version of golang which will be bootstrapped into the snap
    source-tag: go1.14

# Apps exposes the binaries inside the snap to the host system once installed
apps:
  # We expose the weed command.
  # This differs from the snap name, so it will be namespaced as seaweedfs.weed
  # An alias can be added to expose this as 'weed' if requested in the snapcraft forum
  weed:
    # The path to the binary inside the snap, relative to the $SNAP home
    command: bin/weed
    # Plugs connect the snap to resources on the host system. We enable network connectivity
    # We also add home and removable-media (latter not autoconnected by default)
    # so users can access files in their home or on removable disks
    plugs:
      - network
      - network-bind
      - home
      - removable-media
@ -8,7 +8,9 @@ import (
    "strconv"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/backend"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

var (

@ -46,9 +48,10 @@ func main() {
    if err != nil {
        glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
    }
    defer datFile.Close()
    datBackend := backend.NewDiskFile(datFile)
    defer datBackend.Close()

    superBlock, err := storage.ReadSuperBlock(datFile)
    superBlock, err := super_block.ReadSuperBlock(datBackend)

    if err != nil {
        glog.Fatalf("cannot parse existing super block: %v", err)

@ -60,7 +63,7 @@ func main() {
    hasChange := false

    if *targetReplica != "" {
        replica, err := storage.NewReplicaPlacementFromString(*targetReplica)
        replica, err := super_block.NewReplicaPlacementFromString(*targetReplica)

        if err != nil {
            glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)

@ -73,7 +76,7 @@ func main() {
    }

    if *targetTTL != "" {
        ttl, err := storage.ReadTTL(*targetTTL)
        ttl, err := needle.ReadTTL(*targetTTL)

        if err != nil {
            glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
42 unmaintained/check_disk_size/check_disk_size.go Normal file
@ -0,0 +1,42 @@
package main

import (
    "flag"
    "fmt"
    "runtime"
    "syscall"
)

var (
    dir = flag.String("dir", ".", "the directory which uses a disk")
)

func main() {
    flag.Parse()

    fillInDiskStatus(*dir)

    fmt.Printf("OS: %v\n", runtime.GOOS)
    fmt.Printf("Arch: %v\n", runtime.GOARCH)

}

func fillInDiskStatus(dir string) {
    fs := syscall.Statfs_t{}
    err := syscall.Statfs(dir, &fs)
    if err != nil {
        fmt.Printf("failed to statfs on %s: %v\n", dir, err)
        return
    }
    fmt.Printf("statfs: %+v\n", fs)
    fmt.Println()

    total := fs.Blocks * uint64(fs.Bsize)
    free := fs.Bfree * uint64(fs.Bsize)
    fmt.Printf("Total: %d blocks x %d block size = %d bytes\n", fs.Blocks, uint64(fs.Bsize), total)
    fmt.Printf("Free : %d blocks x %d block size = %d bytes\n", fs.Bfree, uint64(fs.Bsize), free)
    fmt.Printf("Used : %d blocks x %d block size = %d bytes\n", fs.Blocks-fs.Bfree, uint64(fs.Bsize), total-free)
    fmt.Printf("Free Percentage : %.2f%%\n", float32((float64(free)/float64(total))*100))
    fmt.Printf("Used Percentage : %.2f%%\n", float32((float64(total-free)/float64(total))*100))
    return
}
39 unmaintained/compact_leveldb/compact_leveldb.go Normal file
@ -0,0 +1,39 @@
package main

import (
    "flag"
    "log"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/errors"
    "github.com/syndtr/goleveldb/leveldb/opt"
    "github.com/syndtr/goleveldb/leveldb/util"
)

var (
    dir = flag.String("dir", ".", "data directory to store leveldb files")
)

func main() {

    flag.Parse()

    opts := &opt.Options{
        BlockCacheCapacity:            32 * 1024 * 1024, // default value is 8MiB
        WriteBuffer:                   16 * 1024 * 1024, // default value is 4MiB
        CompactionTableSizeMultiplier: 10,
        OpenFilesCacheCapacity:        -1,
    }

    db, err := leveldb.OpenFile(*dir, opts)
    if errors.IsCorrupted(err) {
        db, err = leveldb.RecoverFile(*dir, opts)
    }
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    if err := db.CompactRange(util.Range{}); err != nil {
        log.Fatal(err)
    }
}
@ -9,7 +9,9 @@ import (
    "strconv"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/backend"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/super_block"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
    "github.com/chrislusf/seaweedfs/weed/util"
)

@ -43,11 +45,13 @@ func main() {
        glog.Fatalf("Read Volume Index %v", err)
    }
    defer indexFile.Close()
    datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDONLY, 0644)
    datFileName := path.Join(*fixVolumePath, fileName+".dat")
    datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644)
    if err != nil {
        glog.Fatalf("Read Volume Data %v", err)
    }
    defer datFile.Close()
    datBackend := backend.NewDiskFile(datFile)
    defer datBackend.Close()

    newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
    if err != nil {

@ -55,21 +59,21 @@ func main() {
    }
    defer newDatFile.Close()

    superBlock, err := storage.ReadSuperBlock(datFile)
    superBlock, err := super_block.ReadSuperBlock(datBackend)
    if err != nil {
        glog.Fatalf("Read Volume Data superblock %v", err)
    }
    newDatFile.Write(superBlock.Bytes())

    iterateEntries(datFile, indexFile, func(n *storage.Needle, offset int64) {
    iterateEntries(datBackend, indexFile, func(n *needle.Needle, offset int64) {
        fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize)
        _, s, _, e := n.Append(newDatFile, superBlock.Version())
        _, s, _, e := n.Append(datBackend, superBlock.Version)
        fmt.Printf("size %d error %v\n", s, e)
    })

}

func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needle, offset int64)) {
func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) {
    // start to read index file
    var readerOffset int64
    bytes := make([]byte, 16)

@ -77,14 +81,14 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
    readerOffset += int64(count)

    // start to read dat file
    superBlock, err := storage.ReadSuperBlock(datFile)
    superBlock, err := super_block.ReadSuperBlock(datBackend)
    if err != nil {
        fmt.Printf("cannot read dat file super block: %v", err)
        return
    }
    offset := int64(superBlock.BlockSize())
    version := superBlock.Version()
    n, rest, err := storage.ReadNeedleHeader(datFile, version, offset)
    version := superBlock.Version
    n, _, rest, err := needle.ReadNeedleHeader(datBackend, version, offset)
    if err != nil {
        fmt.Printf("cannot read needle header: %v", err)
        return

@ -106,7 +110,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl

    fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex)

    rest = storage.NeedleBodyLength(sizeFromIndex, version)
    rest = needle.NeedleBodyLength(sizeFromIndex, version)

    func() {
        defer func() {

@ -114,7 +118,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
                fmt.Println("Recovered in f", r)
            }
        }()
        if err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleEntrySize), rest); err != nil {
        if _, err = n.ReadNeedleBody(datBackend, version, offset+int64(types.NeedleHeaderSize), rest); err != nil {
            fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err)
        }
    }()

@ -124,9 +128,9 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
    }
    visitNeedle(n, offset)

    offset += types.NeedleEntrySize + rest
    offset += types.NeedleHeaderSize + rest
    //fmt.Printf("==> new entry offset %d\n", offset)
    if n, rest, err = storage.ReadNeedleHeader(datFile, version, offset); err != nil {
    if n, _, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
        if err == io.EOF {
            return
        }
155 unmaintained/load_test/load_test_leveldb/load_test_leveldb.go Normal file
@ -0,0 +1,155 @@
package main

import (
    "crypto/md5"
    "flag"
    "fmt"
    "io"
    "log"
    "math/rand"
    "os"
    "sync"
    "time"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

var (
    dir     = flag.String("dir", "./t", "directory to store level db files")
    useHash = flag.Bool("isHash", false, "hash the path as the key")
    dbCount = flag.Int("dbCount", 1, "the number of leveldb")
)

func main() {

    flag.Parse()

    totalTenants := 300
    totalYears := 3

    opts := &opt.Options{
        BlockCacheCapacity:            32 * 1024 * 1024, // default value is 8MiB
        WriteBuffer:                   16 * 1024 * 1024, // default value is 4MiB
        CompactionTableSizeMultiplier: 4,
    }

    var dbs []*leveldb.DB
    var chans []chan string
    for d := 0; d < *dbCount; d++ {
        dbFolder := fmt.Sprintf("%s/%02d", *dir, d)
        os.MkdirAll(dbFolder, 0755)
        db, err := leveldb.OpenFile(dbFolder, opts)
        if err != nil {
            log.Printf("filer store open dir %s: %v", *dir, err)
            return
        }
        dbs = append(dbs, db)
        chans = append(chans, make(chan string, 1024))
    }

    var wg sync.WaitGroup
    for d := 0; d < *dbCount; d++ {
        wg.Add(1)
        go func(d int) {
            defer wg.Done()

            ch := chans[d]
            db := dbs[d]

            for p := range ch {
                if *useHash {
                    insertAsHash(db, p)
                } else {
                    insertAsFullPath(db, p)
                }
            }
        }(d)
    }

    counter := int64(0)
    lastResetTime := time.Now()

    r := rand.New(rand.NewSource(35))

    for y := 0; y < totalYears; y++ {
        for m := 0; m < 12; m++ {
            for d := 0; d < 31; d++ {
                for h := 0; h < 24; h++ {
                    for min := 0; min < 60; min++ {
                        for i := 0; i < totalTenants; i++ {
                            p := fmt.Sprintf("tenent%03d/%4d/%02d/%02d/%02d/%02d", i, 2015+y, 1+m, 1+d, h, min)

                            x := r.Intn(*dbCount)

                            chans[x] <- p

                            counter++
                        }

                        t := time.Now()
                        if lastResetTime.Add(time.Second).Before(t) {
                            p := fmt.Sprintf("%4d/%02d/%02d/%02d/%02d", 2015+y, 1+m, 1+d, h, min)
                            fmt.Printf("%s = %4d put/sec\n", p, counter)
                            counter = 0
                            lastResetTime = t
                        }
                    }
                }
            }
        }
    }

    for d := 0; d < *dbCount; d++ {
        close(chans[d])
    }

    wg.Wait()

}

func insertAsFullPath(db *leveldb.DB, p string) {
    _, getErr := db.Get([]byte(p), nil)
    if getErr == leveldb.ErrNotFound {
        putErr := db.Put([]byte(p), []byte(p), nil)
        if putErr != nil {
            log.Printf("failed to put %s", p)
        }
    }
}

func insertAsHash(db *leveldb.DB, p string) {
    key := fmt.Sprintf("%d:%s", hashToLong(p), p)
    _, getErr := db.Get([]byte(key), nil)
    if getErr == leveldb.ErrNotFound {
        putErr := db.Put([]byte(key), []byte(p), nil)
        if putErr != nil {
            log.Printf("failed to put %s", p)
        }
    }
}

func hashToLong(dir string) (v int64) {
    h := md5.New()
    io.WriteString(h, dir)

    b := h.Sum(nil)

    v += int64(b[0])
    v <<= 8
    v += int64(b[1])
    v <<= 8
    v += int64(b[2])
    v <<= 8
    v += int64(b[3])
    v <<= 8
    v += int64(b[4])
    v <<= 8
    v += int64(b[5])
    v <<= 8
    v += int64(b[6])
    v <<= 8
    v += int64(b[7])

    return
}
95 unmaintained/remove_duplicate_fids/remove_duplicate_fids.go Normal file
@ -0,0 +1,95 @@
package main

import (
    "flag"
    "fmt"
    "os"
    "path/filepath"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/backend"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

var (
    volumePath       = flag.String("dir", "/tmp", "data directory to store files")
    volumeCollection = flag.String("collection", "", "the volume collection name")
    volumeId         = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
)

func Checksum(n *needle.Needle) string {
    return fmt.Sprintf("%s%x", n.Id, n.Cookie)
}

type VolumeFileScanner4SeeDat struct {
    version needle.Version
    block   super_block.SuperBlock

    dir        string
    hashes     map[string]bool
    dat        *os.File
    datBackend backend.BackendStorageFile
}

func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
    scanner.version = superBlock.Version
    scanner.block = superBlock
    return nil

}
func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
    return true
}

func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {

    if scanner.datBackend == nil {
        newFileName := filepath.Join(*volumePath, "dat_fixed")
        newDatFile, err := os.Create(newFileName)
        if err != nil {
            glog.Fatalf("Write New Volume Data %v", err)
        }
        scanner.datBackend = backend.NewDiskFile(newDatFile)
        scanner.datBackend.WriteAt(scanner.block.Bytes(), 0)
    }

    checksum := Checksum(n)

    if scanner.hashes[checksum] {
        glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
        return nil
    }
    scanner.hashes[checksum] = true

    _, s, _, e := n.Append(scanner.datBackend, scanner.version)
    fmt.Printf("size %d error %v\n", s, e)

    return nil
}

func main() {
    flag.Parse()

    vid := needle.VolumeId(*volumeId)

    outpath, _ := filepath.Abs(filepath.Dir(os.Args[0]))

    scanner := &VolumeFileScanner4SeeDat{
        dir:    filepath.Join(outpath, "out"),
        hashes: map[string]bool{},
    }

    if _, err := os.Stat(scanner.dir); err != nil {
        if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
            glog.Fatalf("could not create output dir : %s", err)
        }
    }

    err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
    if err != nil {
        glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
    }

}
@ -1,45 +1,73 @@
package main

import (
    "bytes"
    "flag"
    "fmt"
    "log"
    "math/rand"
    "time"

    "google.golang.org/grpc"

    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/util"
)

var (
    master = flag.String("master", "127.0.0.1:9333", "the master server")
    repeat = flag.Int("n", 5, "repeat how many times")
    master           = flag.String("master", "127.0.0.1:9333", "the master server")
    repeat           = flag.Int("n", 5, "repeat how many times")
    garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold")
    replication      = flag.String("replication", "", "replication 000, 001, 002, etc")
)

func main() {
    flag.Parse()

    util.LoadConfiguration("security", false)
    grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

    genFile(grpcDialOption, 0)

    go func() {
        for {
            println("vacuum threshold", *garbageThreshold)
            _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
            if err != nil {
                log.Fatalf("vacuum: %v", err)
            }
            time.Sleep(time.Second)
        }
    }()

    for i := 0; i < *repeat; i++ {
        assignResult, err := operation.Assign(*master, &operation.VolumeAssignRequest{Count: 1})
        if err != nil {
            log.Fatalf("assign: %v", err)
        }
        // create 2 files, and delete one of them

        data := make([]byte, 1024)
        rand.Read(data)
        reader := bytes.NewReader(data)
        assignResult, targetUrl := genFile(grpcDialOption, i)

        targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)

        _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, "")
        if err != nil {
            log.Fatalf("upload: %v", err)
        }

        util.Delete(targetUrl, "")

        util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master))
        util.Delete(targetUrl, string(assignResult.Auth))

    }

}

func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
    assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{
        Count:       1,
        Replication: *replication,
    })
    if err != nil {
        log.Fatalf("assign: %v", err)
    }

    data := make([]byte, 1024)
    rand.Read(data)

    targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)

    _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth)
    if err != nil {
        log.Fatalf("upload: %v", err)
    }
    return assignResult, targetUrl
}
@ -2,8 +2,13 @@ package main
import (
    "flag"
    "github.com/chrislusf/seaweedfs/weed/util"
    "time"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/super_block"
)

var (

@ -13,32 +18,33 @@ var (
)

type VolumeFileScanner4SeeDat struct {
    version storage.Version
    version needle.Version
}

func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error {
    scanner.version = superBlock.Version()
func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
    scanner.version = superBlock.Version
    return nil

}
func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
    return false
    return true
}

func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *storage.Needle, offset int64) error {
    glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie)
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
    t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
    glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
        *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
    return nil
}

func main() {
    flag.Parse()

    vid := storage.VolumeId(*volumeId)
    vid := needle.VolumeId(*volumeId)

    scanner := &VolumeFileScanner4SeeDat{}
    err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
    if err != nil {
        glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
    }

}
83 unmaintained/see_dat/see_dat_gzip.go Normal file
@ -0,0 +1,83 @@
package main

import (
    "bytes"
    "compress/gzip"
    "crypto/md5"
    "flag"
    "io"
    "io/ioutil"
    "net/http"
    "time"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/super_block"
    "github.com/chrislusf/seaweedfs/weed/util"
)

type VolumeFileScanner4SeeDat struct {
    version needle.Version
}

func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
    scanner.version = superBlock.Version
    return nil
}

func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
    return true
}

var (
    files     = int64(0)
    filebytes = int64(0)
    diffbytes = int64(0)
)

func Compresssion(data []byte) float64 {
    if len(data) <= 128 {
        return 100.0
    }
    compressed, _ := util.GzipData(data[0:128])
    return float64(len(compressed)*10) / 1280.0
}

func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
    t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
    glog.V(0).Info("----------------------------------------------------------------------------------")
    glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v hasmime[%t] mime[%s] (len: %d)",
        *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.HasMime(), string(n.Mime), len(n.Mime))
    r, err := gzip.NewReader(bytes.NewReader(n.Data))
    if err == nil {
        buf := bytes.Buffer{}
        h := md5.New()
        c, _ := io.Copy(&buf, r)
        d := buf.Bytes()
        io.Copy(h, bytes.NewReader(d))
        diff := (int64(n.DataSize) - int64(c))
        diffbytes += diff
        glog.V(0).Infof("was gzip! stored_size: %d orig_size: %d diff: %d(%d) mime:%s compression-of-128: %.2f md5: %x", n.DataSize, c, diff, diffbytes, http.DetectContentType(d), Compresssion(d), h.Sum(nil))
    } else {
        glog.V(0).Infof("no gzip!")
    }
    return nil
}

var (
    _                = ioutil.ReadAll
    volumePath       = flag.String("dir", "/tmp", "data directory to store files")
    volumeCollection = flag.String("collection", "", "the volume collection name")
    volumeId         = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
)

func main() {
    flag.Parse()
    vid := needle.VolumeId(*volumeId)
    glog.V(0).Info("Starting")
    scanner := &VolumeFileScanner4SeeDat{}
    err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
    if err != nil {
        glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
    }
}
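Compresssion() (spelled with three s's in the source) is a cheap compressibility probe: it gzips only the first 128 bytes and reports len(compressed)*10/1280, which is just the compressed-to-original size ratio, so a value near 1.0 means roughly incompressible and 0.5 means the sample halves under gzip. A hedged Java sketch of the same heuristic (using java.util.zip.Deflater, so the fixed header overhead differs slightly from Go's gzip):

```java
import java.util.zip.Deflater;

public class CompressionProbe {
  // Mirrors Compresssion() above: compress the first 128 bytes and report
  // compressed/original size. A result near 1.0 suggests incompressible data.
  static double probe(byte[] data) {
    if (data.length <= 128) {
      return 100.0; // sample too small to judge, as in the Go code
    }
    Deflater deflater = new Deflater();
    deflater.setInput(data, 0, 128);
    deflater.finish();
    byte[] out = new byte[512]; // deflate of 128 bytes always fits here
    int n = 0;
    while (!deflater.finished()) {
      n += deflater.deflate(out, n, out.length - n);
    }
    deflater.end();
    return (n * 10) / 1280.0;
  }
}
```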
@ -3,12 +3,13 @@ package main
import (
    "flag"
    "fmt"
    "github.com/chrislusf/seaweedfs/weed/util"
    "os"
    "path"
    "strconv"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/idx"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
)

@ -35,8 +36,8 @@ func main() {
    }
    defer indexFile.Close()

    storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
        fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size)
    idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
        fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size)))
        return nil
    })
75 unmaintained/see_log_entry/see_log_entry.go Normal file
@ -0,0 +1,75 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/filer2"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
var (
|
||||
logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to open %s: %v", *logdataFile, err)
|
||||
}
|
||||
defer dst.Close()
|
||||
|
||||
err = walkLogEntryFile(dst)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to visit %s: %v", *logdataFile, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func walkLogEntryFile(dst *os.File) error {
|
||||
|
||||
sizeBuf := make([]byte, 4)
|
||||
|
||||
for {
|
||||
if n, err := dst.Read(sizeBuf); n != 4 {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
size := util.BytesToUint32(sizeBuf)
|
||||
|
||||
data := make([]byte, int(size))
|
||||
|
||||
if n, err := dst.Read(data); n != len(data) {
|
||||
return err
|
||||
}
|
||||
|
||||
logEntry := &filer_pb.LogEntry{}
|
||||
err := proto.Unmarshal(data, logEntry)
|
||||
if err != nil {
|
||||
log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
event := &filer_pb.SubscribeMetadataResponse{}
|
||||
err = proto.Unmarshal(logEntry.Data, event)
|
||||
if err != nil {
|
||||
log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("event: %+v\n", event)
|
||||
|
||||
}
|
||||
|
||||
}
|
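Note on the reader above: os.File.Read may legitimately return fewer than len(data) bytes in one call, and when that happens mid-entry the loop returns a nil error and silently stops. A more defensive variant would use io.ReadFull, which retries until the buffer is full and reports a truncated entry as io.ErrUnexpectedEOF — a minimal sketch, using the same imports as the file above:

	if _, err := io.ReadFull(dst, data); err != nil {
		return err // io.ErrUnexpectedEOF on a truncated entry
	}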
68
unmaintained/see_meta/see_meta.go
Normal file

@@ -0,0 +1,68 @@
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var (
	metaFile = flag.String("meta", "", "meta file generated via fs.meta.save")
)

func main() {
	flag.Parse()

	dst, err := os.OpenFile(*metaFile, os.O_RDONLY, 0644)
	if err != nil {
		log.Fatalf("failed to open %s: %v", *metaFile, err)
	}
	defer dst.Close()

	err = walkMetaFile(dst)
	if err != nil {
		log.Fatalf("failed to visit %s: %v", *metaFile, err)
	}

}

func walkMetaFile(dst *os.File) error {

	sizeBuf := make([]byte, 4)

	for {
		if n, err := dst.Read(sizeBuf); n != 4 {
			if err == io.EOF {
				return nil
			}
			return err
		}

		size := util.BytesToUint32(sizeBuf)

		data := make([]byte, int(size))

		if n, err := dst.Read(data); n != len(data) {
			return err
		}

		fullEntry := &filer_pb.FullEntry{}
		if err := proto.Unmarshal(data, fullEntry); err != nil {
			return err
		}

		fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
		for i, chunk := range fullEntry.Entry.Chunks {
			fmt.Fprintf(os.Stdout, "  chunk %d %v\n", i+1, chunk.String())
		}

	}

}
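Both see_log_entry and see_meta assume the same on-disk framing: each record is a 4-byte length prefix followed by a marshaled protobuf message, with the prefix decoded via util.BytesToUint32. For orientation, here is a writer-side sketch of that framing — the big-endian byte order and the writeFramed helper name are assumptions for illustration, not part of this commit:

	package framing

	import (
		"encoding/binary"
		"io"

		"github.com/golang/protobuf/proto"
	)

	// writeFramed appends one length-prefixed protobuf record to w,
	// mirroring what walkMetaFile/walkLogEntryFile read back.
	func writeFramed(w io.Writer, msg proto.Message) error {
		data, err := proto.Marshal(msg)
		if err != nil {
			return err
		}
		sizeBuf := make([]byte, 4)
		binary.BigEndian.PutUint32(sizeBuf, uint32(len(data))) // assumed to match util.BytesToUint32
		if _, err := w.Write(sizeBuf); err != nil {
			return err
		}
		_, err = w.Write(data)
		return err
	}

In practice the input for see_meta comes from the filer side: run fs.meta.save in weed shell, then point -meta at the generated file.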
@@ -0,0 +1,136 @@
package main

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"mime/multipart"
	"net/http"
	"os"
	"strings"
	"sync"
	"time"
)

var (
	size        = flag.Int("size", 1024, "file size")
	concurrency = flag.Int("c", 4, "concurrent number of uploads")
	times       = flag.Int("n", 1024, "repeated number of times")
	fileCount   = flag.Int("fileCount", 1, "number of files to write")
	destination = flag.String("to", "http://localhost:8888/", "destination directory on filer")

	statsChan = make(chan stat, 8)
)

type stat struct {
	size int64
}

func main() {

	flag.Parse()

	data := make([]byte, *size)
	println("data len", len(data))

	var wg sync.WaitGroup
	for x := 0; x < *concurrency; x++ {
		wg.Add(1)

		go func(x int) {
			defer wg.Done()

			client := &http.Client{Transport: &http.Transport{
				MaxConnsPerHost:     1024,
				MaxIdleConnsPerHost: 1024,
			}}
			r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))

			for t := 0; t < *times; t++ {
				for f := 0; f < *fileCount; f++ {
					fn := r.Intn(*fileCount)
					if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil {
						statsChan <- stat{
							size: size,
						}
					} else {
						log.Fatalf("client %d upload %d times: %v", x, t, err)
					}
				}
			}
		}(x)
	}

	go func() {
		ticker := time.NewTicker(1000 * time.Millisecond)

		var lastTime time.Time
		var counter, size int64
		for {
			select {
			case stat := <-statsChan:
				size += stat.size
				counter++
			case x := <-ticker.C:
				if !lastTime.IsZero() {
					elapsed := x.Sub(lastTime).Seconds()
					fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n",
						float64(counter)/elapsed,
						float64(size/1024/1024)/elapsed)
				}
				lastTime = x
				size = 0
				counter = 0
			}
		}
	}()

	wg.Wait()

}

func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) {

	if !strings.HasSuffix(destination, "/") {
		destination = destination + "/"
	}

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", filename)
	if err != nil {
		return 0, fmt.Errorf("fail to create form %v: %v", filename, err)
	}

	part.Write(data)

	err = writer.Close()
	if err != nil {
		return 0, fmt.Errorf("fail to write part %v: %v", filename, err)
	}

	uri := destination + filename

	request, err := http.NewRequest("POST", uri, body)
	request.Header.Set("Content-Type", writer.FormDataContentType())
	// request.Close = true // cannot be used here: it disables connection reuse, which also hurts filer->volume traffic.

	resp, err := client.Do(request)
	if err != nil {
		return 0, fmt.Errorf("http POST %s: %v", uri, err)
	} else {
		body := &bytes.Buffer{}
		_, err := body.ReadFrom(resp.Body)
		if err != nil {
			return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
		}
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}

	return int64(len(data)), nil
}
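A small numeric caveat in the stats ticker above: float64(size/1024/1024)/elapsed performs the integer division first, so an interval that transfers less than 1 MB prints as 0.00 MB/s, and larger intervals are rounded down to whole megabytes. Converting before dividing keeps the fraction — a one-line sketch:

	fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n",
		float64(counter)/elapsed,
		float64(size)/1024/1024/elapsed)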
@@ -0,0 +1,150 @@
package main

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"mime/multipart"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"
)

var (
	dir         = flag.String("dir", ".", "upload files under this directory")
	concurrency = flag.Int("c", 1, "concurrent number of uploads")
	times       = flag.Int("n", 1, "repeated number of times")
	destination = flag.String("to", "http://localhost:8888/", "destination directory on filer")

	statsChan = make(chan stat, 8)
)

type stat struct {
	size int64
}

func main() {

	flag.Parse()

	var fileNames []string

	files, err := ioutil.ReadDir(*dir)
	if err != nil {
		log.Fatalf("fail to read dir %v: %v", *dir, err)
	}

	for _, file := range files {
		if file.IsDir() {
			continue
		}
		fileNames = append(fileNames, filepath.Join(*dir, file.Name()))
	}

	var wg sync.WaitGroup
	for x := 0; x < *concurrency; x++ {
		wg.Add(1)

		client := &http.Client{}

		go func() {
			defer wg.Done()
			rand.Shuffle(len(fileNames), func(i, j int) {
				fileNames[i], fileNames[j] = fileNames[j], fileNames[i]
			})
			for t := 0; t < *times; t++ {
				for _, filename := range fileNames {
					if size, err := uploadFileToFiler(client, filename, *destination); err == nil {
						statsChan <- stat{
							size: size,
						}
					}
				}
			}
		}()
	}

	go func() {
		ticker := time.NewTicker(500 * time.Millisecond)

		var lastTime time.Time
		var counter, size int64
		for {
			select {
			case stat := <-statsChan:
				size += stat.size
				counter++
			case x := <-ticker.C:
				if !lastTime.IsZero() {
					elapsed := x.Sub(lastTime).Seconds()
					fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n",
						float64(counter)/elapsed,
						float64(size/1024/1024)/elapsed)
				}
				lastTime = x
				size = 0
				counter = 0
			}
		}
	}()

	wg.Wait()

}

func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) {
	file, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	fi, err := file.Stat()

	if !strings.HasSuffix(destination, "/") {
		destination = destination + "/"
	}

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", file.Name())
	if err != nil {
		return 0, fmt.Errorf("fail to create form %v: %v", file.Name(), err)
	}
	_, err = io.Copy(part, file)
	if err != nil {
		return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err)
	}

	err = writer.Close()
	if err != nil {
		return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err)
	}

	uri := destination + file.Name()

	request, err := http.NewRequest("POST", uri, body)
	request.Header.Set("Content-Type", writer.FormDataContentType())

	resp, err := client.Do(request)
	if err != nil {
		return 0, fmt.Errorf("http POST %s: %v", uri, err)
	} else {
		body := &bytes.Buffer{}
		_, err := body.ReadFrom(resp.Body)
		if err != nil {
			return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
		}
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}

	return fi.Size(), nil
}
69
unmaintained/volume_tailer/volume_tailer.go
Normal file

@@ -0,0 +1,69 @@
package main

import (
	"flag"
	"log"
	"time"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	util2 "github.com/chrislusf/seaweedfs/weed/util"
	"golang.org/x/tools/godoc/util"
)

var (
	master         = flag.String("master", "localhost:9333", "master server host and port")
	volumeId       = flag.Int("volumeId", -1, "a volume id")
	rewindDuration = flag.Duration("rewind", -1, "rewind back in time. -1 means from the first entry. 0 means from now.")
	timeoutSeconds = flag.Int("timeoutSeconds", 0, "disconnect if no activity after these seconds")
	showTextFile   = flag.Bool("showTextFile", false, "display textual file content")
)

func main() {
	flag.Parse()

	util2.LoadConfiguration("security", false)
	grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client")

	vid := needle.VolumeId(*volumeId)

	var sinceTimeNs int64
	if *rewindDuration == 0 {
		sinceTimeNs = time.Now().UnixNano()
	} else if *rewindDuration == -1 {
		sinceTimeNs = 0
	} else if *rewindDuration > 0 {
		sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
	}

	err := operation.TailVolume(*master, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
		if n.Size == 0 {
			println("-", n.String())
			return nil
		} else {
			println("+", n.String())
		}

		if *showTextFile {

			data := n.Data
			if n.IsCompressed() {
				if data, err = util2.DecompressData(data); err != nil {
					return err
				}
			}
			if util.IsText(data) {
				println(string(data))
			}

			println("-", n.String(), "compressed", n.IsCompressed(), "original size", len(data))
		}
		return nil
	})

	if err != nil {
		log.Printf("Error VolumeTailSender volume %d: %v", vid, err)
	}

}
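Putting the rewind flag in terms of the sinceTimeNs mapping above: -rewind=-1 tails from the first entry in the volume, -rewind=0 streams only new writes, and a positive duration starts that far in the past. A typical invocation (the volume id is just an example) might be:

	volume_tailer -master=localhost:9333 -volumeId=7 -rewind=24h -showTextFile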
98
util/gostd
Executable file

@@ -0,0 +1,98 @@
#!/usr/bin/env bash

############################  GLOBAL VARIABLES
regex=' '
branch="master"
max_length=150

REGEX_SUFFIX_GO=".+\.go$"

############################  FUNCTIONS
msg() {
    printf '%b' "$1" >&2
}

die() {
    msg "\33[31m[✘]\33[0m ${1}${2}"
    exit 1
}

succ() {
    msg "\33[34m[√]\33[0m ${1}${2}"
}

gostd() {
    local branch=$1
    local reg4exclude=$2
    local max_length=$3

    for file in `git diff $branch --name-only`
    do
        if ! [[ $file =~ $REGEX_SUFFIX_GO ]] || [[ $file =~ $reg4exclude ]]; then
            continue
        fi

        error=`go fmt $file 2>&1`
        if ! [ $? -eq 0 ]; then
            die "go fmt $file:" "$error"
        fi

        succ "$file\n"

        grep -n -E --color=always ".{$max_length}" $file | awk '{ printf ("%4s %s\n", "", $0) }'
    done
}

get_options() {
    while getopts "b:e:hl:" opts
    do
        case $opts in
            b)
                branch=$OPTARG
                ;;
            e)
                regex=$OPTARG
                ;;
            h)
                usage
                exit 0
                ;;
            l)
                max_length=$OPTARG
                ;;
            \?)
                usage
                exit 1
                ;;
        esac
    done
}

usage () {
    cat << _EOC_
Usage:
    gostd [options]

Options:
    -b <branch/commit>  Specify the git diff branch or commit.
                        (default: master)
    -e <regex>          Regex for excluding file or directory.
    -h                  Print this usage.
    -l <length>         Show files that exceed the limit line length.
                        (default: 150)

Examples:
    gostd
    gostd -b master -l 100
    gostd -b 59d532a -e weed/pb -l 100
_EOC_
}

main() {
    get_options "$@"

    gostd "$branch" "$regex" "$max_length"
}

############################  MAIN()
main "$@"
@@ -3,6 +3,11 @@ package command

 import (
 	"fmt"
 
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/util"
+
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 )
|
|||
)
|
||||
|
||||
type BackupOptions struct {
|
||||
master *string
|
||||
collection *string
|
||||
dir *string
|
||||
volumeId *int
|
||||
master *string
|
||||
collection *string
|
||||
dir *string
|
||||
volumeId *int
|
||||
ttl *string
|
||||
replication *string
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@@ -24,32 +31,45 @@ func init() {
 	s.collection = cmdBackup.Flag.String("collection", "", "collection name")
 	s.dir = cmdBackup.Flag.String("dir", ".", "directory to store volume data files")
 	s.volumeId = cmdBackup.Flag.Int("volumeId", -1, "a volume id. The volume .dat and .idx files should already exist in the dir.")
+	s.ttl = cmdBackup.Flag.String("ttl", "", `backup volume's time to live, format:
+				3m: 3 minutes
+				4h: 4 hours
+				5d: 5 days
+				6w: 6 weeks
+				7M: 7 months
+				8y: 8 years
+				default is the same with origin`)
+	s.replication = cmdBackup.Flag.String("replication", "", "backup volume's replication, default is the same with origin")
 }
 
 var cmdBackup = &Command{
 	UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333",
 	Short:     "incrementally backup a volume to local folder",
 	Long: `Incrementally backup volume data.
 
 	It is expected that you use this inside a script, to loop through
 	all possible volume ids that need to be backed up to a local folder.
 
 	The volume id does not need to exist locally or even remotely.
 	This will help to backup future new volumes.
 
 	Usually backing up is just copying the .dat (and .idx) files.
 	But it's tricky to incrementally copy the differences.
 
 	The complexity comes when there are multiple addition, deletion and compaction.
-	This tool will handle them correctly and efficiently, avoiding unnecessary data transporation.
+	This tool will handle them correctly and efficiently, avoiding unnecessary data transportation.
 	`,
 }
 
 func runBackup(cmd *Command, args []string) bool {
+
+	util.LoadConfiguration("security", false)
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+
 	if *s.volumeId == -1 {
 		return false
 	}
-	vid := storage.VolumeId(*s.volumeId)
+	vid := needle.VolumeId(*s.volumeId)
 
 	// find volume location, replication, ttl info
 	lookup, err := operation.Lookup(*s.master, vid.String())
@@ -59,29 +79,73 @@ func runBackup(cmd *Command, args []string) bool {
 	}
 	volumeServer := lookup.Locations[0].Url
 
-	stats, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid))
+	stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
 	if err != nil {
 		fmt.Printf("Error get volume %d status: %v\n", vid, err)
 		return true
 	}
-	ttl, err := storage.ReadTTL(stats.Ttl)
-	if err != nil {
-		fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
-		return true
+	var ttl *needle.TTL
+	if *s.ttl != "" {
+		ttl, err = needle.ReadTTL(*s.ttl)
+		if err != nil {
+			fmt.Printf("Error generate volume %d ttl %s: %v\n", vid, *s.ttl, err)
+			return true
+		}
+	} else {
+		ttl, err = needle.ReadTTL(stats.Ttl)
+		if err != nil {
+			fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
+			return true
+		}
 	}
-	replication, err := storage.NewReplicaPlacementFromString(stats.Replication)
-	if err != nil {
-		fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err)
-		return true
+	var replication *super_block.ReplicaPlacement
+	if *s.replication != "" {
+		replication, err = super_block.NewReplicaPlacementFromString(*s.replication)
+		if err != nil {
+			fmt.Printf("Error generate volume %d replication %s : %v\n", vid, *s.replication, err)
+			return true
+		}
+	} else {
+		replication, err = super_block.NewReplicaPlacementFromString(stats.Replication)
+		if err != nil {
+			fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err)
+			return true
+		}
 	}
-	v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0)
+	v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
 	if err != nil {
 		fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
 		return true
 	}
 
-	if err := v.Synchronize(volumeServer); err != nil {
+	if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
+		if err = v.Compact2(30*1024*1024*1024, 0); err != nil {
+			fmt.Printf("Compact Volume before synchronizing %v\n", err)
+			return true
+		}
+		if err = v.CommitCompact(); err != nil {
+			fmt.Printf("Commit Compact before synchronizing %v\n", err)
+			return true
+		}
+		v.SuperBlock.CompactionRevision = uint16(stats.CompactRevision)
+		v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0)
+	}
+
+	datSize, _, _ := v.FileStat()
+
+	if datSize > stats.TailOffset {
+		// remove the old data
+		v.Destroy()
+		// recreate an empty volume
+		v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0)
+		if err != nil {
+			fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
+			return true
+		}
+	}
+	defer v.Close()
+
+	if err := v.IncrementalBackup(volumeServer, grpcDialOption); err != nil {
 		fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
 		return true
 	}
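For reference, the -ttl strings accepted above round-trip through needle.ReadTTL, the same call this diff uses for both the flag value and the remote volume status. A minimal sketch — the printed normalization is an assumption about how needle.TTL.String() behaves, not something this commit shows:

	package main

	import (
		"fmt"
		"log"

		"github.com/chrislusf/seaweedfs/weed/storage/needle"
	)

	func main() {
		// "3m" = 3 minutes, "4h" = 4 hours, "5d" = 5 days, per the -ttl format above.
		ttl, err := needle.ReadTTL("3m")
		if err != nil {
			log.Fatalf("parse ttl: %v", err)
		}
		fmt.Println(ttl.String())
	}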
@@ -2,7 +2,6 @@ package command
 
 import (
 	"bufio"
-	"context"
 	"fmt"
 	"io"
 	"math"

@@ -15,6 +14,8 @@ import (
 	"sync"
 	"time"
 
+	"google.golang.org/grpc"
+
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/security"
@@ -33,15 +34,18 @@ type BenchmarkOptions struct {
 	read           *bool
 	sequentialRead *bool
 	collection     *string
+	replication    *string
 	cpuprofile     *string
 	maxCpu         *int
-	secretKey      *string
+	grpcDialOption grpc.DialOption
+	masterClient   *wdclient.MasterClient
+	fsync          *bool
 }
 
 var (
-	b            BenchmarkOptions
-	sharedBytes  []byte
-	masterClient *wdclient.MasterClient
+	b           BenchmarkOptions
+	sharedBytes []byte
+	isSecure    bool
 )
 
 func init() {
@@ -57,14 +61,15 @@ func init() {
 	b.read = cmdBenchmark.Flag.Bool("read", true, "enable read")
 	b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
 	b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
+	b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
 	b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
 	b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
-	b.secretKey = cmdBenchmark.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
+	b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write")
 	sharedBytes = make([]byte, 1024)
 }
 
 var cmdBenchmark = &Command{
-	UsageLine: "benchmark -server=localhost:9333 -c=10 -n=100000",
+	UsageLine: "benchmark -master=localhost:9333 -c=10 -n=100000",
 	Short:     "benchmark on writing millions of files and read out",
 	Long: `benchmark on an empty SeaweedFS file system.
@@ -102,7 +107,11 @@ var (
 )
 
 func runBenchmark(cmd *Command, args []string) bool {
-	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
+
+	util.LoadConfiguration("security", false)
+	b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
 	if *b.maxCpu < 1 {
 		*b.maxCpu = runtime.NumCPU()
 	}
@@ -116,9 +125,9 @@ func runBenchmark(cmd *Command, args []string) bool {
 		defer pprof.StopCPUProfile()
 	}
 
-	masterClient = wdclient.NewMasterClient(context.Background(), "benchmark", strings.Split(*b.masters, ","))
-	go masterClient.KeepConnectedToMaster()
-	masterClient.WaitUntilConnected()
+	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, strings.Split(*b.masters, ","))
+	go b.masterClient.KeepConnectedToMaster()
+	b.masterClient.WaitUntilConnected()
 
 	if *b.write {
 		benchWrite()
@@ -188,7 +197,6 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
 	defer wait.Done()
 	delayedDeleteChan := make(chan *delayedFile, 100)
 	var waitForDeletions sync.WaitGroup
-	secret := security.Secret(*b.secretKey)
 
 	for i := 0; i < 7; i++ {
 		waitForDeletions.Add(1)
@@ -198,8 +206,11 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
 			if df.enterTime.After(time.Now()) {
 				time.Sleep(df.enterTime.Sub(time.Now()))
 			}
-			if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid,
-				security.GenJwt(secret, df.fp.Fid)); e == nil {
+			var jwtAuthorization security.EncodedJwt
+			if isSecure {
+				jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), df.fp.Fid)
+			}
+			if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
 				s.completed++
 			} else {
 				s.failed++
@@ -214,17 +225,22 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
 		start := time.Now()
 		fileSize := int64(*b.fileSize + random.Intn(64))
 		fp := &operation.FilePart{
-			Reader:   &FakeReader{id: uint64(id), size: fileSize},
+			Reader:   &FakeReader{id: uint64(id), size: fileSize, random: random},
 			FileSize: fileSize,
 			MimeType: "image/bench", // prevent gzip benchmark content
+			Fsync:    *b.fsync,
 		}
 		ar := &operation.VolumeAssignRequest{
-			Count:      1,
-			Collection: *b.collection,
+			Count:       1,
+			Collection:  *b.collection,
+			Replication: *b.replication,
 		}
-		if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil {
+		if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
 			fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
-			if _, err := fp.Upload(0, masterClient.GetMaster(), secret); err == nil {
+			if !isSecure && assignResult.Auth != "" {
+				isSecure = true
+			}
+			if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil {
 				if random.Intn(100) < *b.deletePercentage {
 					s.total++
 					delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@@ -264,19 +280,24 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 			fmt.Printf("reading file %s\n", fid)
 		}
 		start := time.Now()
-		url, err := masterClient.LookupFileId(fid)
+		var bytesRead int
+		var err error
+		url, err := b.masterClient.LookupFileId(fid)
 		if err != nil {
 			s.failed++
 			println("!!!! ", fid, " location not found!!!!!")
 			continue
 		}
-		if bytesRead, err := util.Get(url); err == nil {
+		var bytes []byte
+		bytes, err = util.Get(url)
+		bytesRead = len(bytes)
+		if err == nil {
 			s.completed++
-			s.transferred += int64(len(bytesRead))
+			s.transferred += int64(bytesRead)
 			readStats.addSample(time.Now().Sub(start))
 		} else {
 			s.failed++
-			fmt.Printf("Failed to read %s error:%v\n", url, err)
+			fmt.Printf("Failed to read %s error:%v\n", fid, err)
 		}
 	}
 }
@@ -338,7 +359,7 @@ func readFileIds(fileName string, fileIdLineChan chan string) {
 }
 
 const (
-	benchResolution = 10000 //0.1 microsecond
+	benchResolution = 10000 // 0.1 microsecond
 	benchBucket     = 1000000000 / benchResolution
 )
@@ -461,7 +482,7 @@ func (s *stats) printStats() {
 	fmt.Printf("\nConnection Times (ms)\n")
 	fmt.Printf("              min      avg        max      std\n")
 	fmt.Printf("Total:        %2.1f      %3.1f       %3.1f      %3.1f\n", float32(min)/10, float32(avg)/10, float32(max)/10, std/10)
-	//printing percentiles
+	// printing percentiles
 	fmt.Printf("\nPercentage of the requests served within a certain time (ms)\n")
 	percentiles := make([]int, len(percentages))
 	for i := 0; i < len(percentages); i++ {
@@ -495,8 +516,9 @@ func (s *stats) printStats() {
 
 // a fake reader to generate content to upload
 type FakeReader struct {
-	id   uint64 // an id number
-	size int64  // max bytes
+	id     uint64 // an id number
+	size   int64  // max bytes
+	random *rand.Rand
 }
 
 func (l *FakeReader) Read(p []byte) (n int, err error) {

@@ -512,6 +534,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) {
 		for i := 0; i < 8; i++ {
 			p[i] = byte(l.id >> uint(i*8))
 		}
+		l.random.Read(p[8:])
 	}
 	l.size -= int64(n)
 	return
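The new random field is what keeps the write benchmark honest: previously every payload byte beyond the 8-byte id was zero-filled, which compresses almost perfectly, so any transparent compression on the write path could inflate apparent throughput. Filling the remainder of each buffer from rand.Rand makes the content incompressible, serving the same purpose as the "image/bench" MimeType hint above.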
@@ -12,21 +12,23 @@ var Commands = []*Command{
 	cmdBackup,
 	cmdCompact,
 	cmdCopy,
-	cmdFix,
-	cmdFilerExport,
-	cmdFilerReplicate,
-	cmdServer,
-	cmdMaster,
-	cmdFiler,
-	cmdS3,
-	cmdUpload,
 	cmdDownload,
+	cmdExport,
+	cmdFiler,
+	cmdFilerReplicate,
+	cmdFix,
+	cmdMaster,
+	cmdMount,
+	cmdS3,
+	cmdMsgBroker,
 	cmdScaffold,
+	cmdServer,
 	cmdShell,
+	cmdWatch,
+	cmdUpload,
 	cmdVersion,
 	cmdVolume,
-	cmdExport,
-	cmdMount,
 	cmdWebDav,
 }
 
 type Command struct {
@@ -3,6 +3,7 @@ package command
 
 import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
 func init() {

@@ -16,6 +17,9 @@ var cmdCompact = &Command{
 	The compacted .dat file is stored as .cpd file.
 	The compacted .idx file is stored as .cpx file.
 
+	For method=0, it compacts based on the .dat file, works if .idx file is corrupted.
+	For method=1, it compacts based on the .idx file, works if deletion happened but not written to .dat files.
+
 	`,
 }
@@ -35,18 +39,18 @@ func runCompact(cmd *Command, args []string) bool {
 
 	preallocate := *compactVolumePreallocate * (1 << 20)
 
-	vid := storage.VolumeId(*compactVolumeId)
+	vid := needle.VolumeId(*compactVolumeId)
 	v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid,
-		storage.NeedleMapInMemory, nil, nil, preallocate)
+		storage.NeedleMapInMemory, nil, nil, preallocate, 0)
 	if err != nil {
 		glog.Fatalf("Load Volume [ERROR] %s\n", err)
 	}
 	if *compactMethod == 0 {
-		if err = v.Compact(preallocate); err != nil {
+		if err = v.Compact(preallocate, 0); err != nil {
 			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	} else {
-		if err = v.Compact2(); err != nil {
+		if err = v.Compact2(preallocate, 0); err != nil {
 			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	}
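Reading the two branches together: method=0 (v.Compact) rebuilds the volume by scanning the needle data in the .dat file, so it still works when the .idx file is corrupt; method=1 (v.Compact2) walks the .idx file instead, so deletions recorded in the index but never applied to the .dat file are honored. Either way the output lands in the .cpd/.cpx files named in the help text, to be swapped in by a separate commit step (compare CommitCompact in the backup change above).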
Some files were not shown because too many files have changed in this diff.