keep update from original (#1)

Commit: d19bbee98d
Author: bingoohuang, 2019-07-16 11:13:23 +08:00 (committed by GitHub)
293 changed files with 19032 additions and 4932 deletions

.github/ISSUE_TEMPLATE/bug_report.md

@@ -1,11 +1,19 @@
 ---
 name: Bug report
 about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
 ---
 
 Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs
+
+example of a good issue report:
+https://github.com/chrislusf/seaweedfs/issues/1005
+example of a bad issue report:
+https://github.com/chrislusf/seaweedfs/issues/1008
 
 **Describe the bug**
 A clear and concise description of what the bug is.

.gitignore

@@ -80,3 +80,6 @@ build
 target
 *.class
 other/java/hdfs/dependency-reduced-pom.xml
+
+# binary file
+weed/weed

.travis.yml

@@ -1,18 +1,21 @@
 sudo: false
 language: go
 go:
 - 1.10.x
 - 1.11.x
-- tip
+- 1.12.x
+# - tip
 
 before_install:
 - export PATH=/home/travis/gopath/bin:$PATH
 
 install:
-- go get ./weed/...
+- export CGO_ENABLED="0"
+- go env
+- go get -u ./weed/...
 
 script:
 - go test ./weed/...
 
 before_deploy:
 - make release
@@ -22,23 +25,26 @@ deploy:
   api_key:
     secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI=
   file:
   - build/linux_arm.tar.gz
   - build/linux_arm64.tar.gz
   - build/linux_386.tar.gz
   - build/linux_amd64.tar.gz
+  - build/linux_amd64_large_disk.tar.gz
   - build/darwin_amd64.tar.gz
+  - build/darwin_amd64_large_disk.tar.gz
   - build/windows_386.zip
   - build/windows_amd64.zip
+  - build/windows_amd64_large_disk.zip
   - build/freebsd_arm.tar.gz
   - build/freebsd_amd64.tar.gz
   - build/freebsd_386.tar.gz
   - build/netbsd_arm.tar.gz
   - build/netbsd_amd64.tar.gz
   - build/netbsd_386.tar.gz
   - build/openbsd_arm.tar.gz
   - build/openbsd_amd64.tar.gz
   - build/openbsd_386.tar.gz
   on:
     tags: true
     repo: chrislusf/seaweedfs
-    go: tip
+    go: 1.12.x

Makefile

@@ -12,6 +12,9 @@ build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -stat
 tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
 zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)
+build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR)
+tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
+zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)
 
 all: build
@@ -32,9 +35,21 @@ linux: deps
 	mkdir -p linux
 	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)
 
-release: deps windows_build darwin_build linux_build bsd_build
+release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build
 
 ##### LINUX BUILDS #####
+5_byte_linux_build:
+	$(call build_large,linux,amd64,)
+	$(call tar_large,linux,amd64)
+
+5_byte_darwin_build:
+	$(call build_large,darwin,amd64,)
+	$(call tar_large,darwin,amd64)
+
+5_byte_windows_build:
+	$(call build_large,windows,amd64,.exe)
+	$(call zip_large,windows,amd64,.exe)
+
 linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz
 
 build/linux_386.tar.gz: $(sources)
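
The new `build_large`/`tar_large`/`zip_large` helpers compile with the `5BytesOffset` build tag to produce the `*_large_disk` artifacts that `.travis.yml` now publishes. A rough sketch of what the fifth offset byte buys, assuming the 8-byte needle alignment described in the SeaweedFS documentation (an assumption, not stated in this diff):

```java
// Back-of-the-envelope arithmetic behind the 5BytesOffset build tag.
// The 8-byte needle alignment is an assumption from SeaweedFS docs.
public class OffsetMath {
    public static void main(String[] args) {
        final long alignment = 8;                        // assumed needle padding, in bytes
        final long fourByteMax = (1L << 32) * alignment; // 2^32 slots * 8 B = 32 GiB
        final long fiveByteMax = (1L << 40) * alignment; // 2^40 slots * 8 B = 8 TiB
        System.out.println("4-byte offsets: " + (fourByteMax >> 30) + " GiB per volume");
        System.out.println("5-byte offsets: " + (fiveByteMax >> 40) + " TiB per volume");
    }
}
```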

README.md

@@ -10,18 +10,16 @@
 <h2 align="center">Supporting SeaweedFS</h2>
 
 SeaweedFS is an independent Apache-licensed open source project with its ongoing development made
 possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md).
 If you'd like to grow SeaweedFS even stronger, please consider joining our
 <a href="https://www.patreon.com/seaweedfs">sponsors on Patreon</a>.
 
-Platinum ($2500/month), Gold ($500/month): put your company logo on the SeaweedFS github page
-Generous Backer($50/month), Backer($10/month): put your name on the SeaweedFS backer page.
 Your support will be really appreciated by me and other supporters!
 
 <h3 align="center"><a href="https://www.patreon.com/seaweedfs">Sponsor SeaweedFS via Patreon</a></h3>
 
+<!--
 <h4 align="center">Platinum</h4>
 <p align="center">
@@ -45,6 +43,8 @@ Your support will be really appreciated by me and other supporters!
 </tbody>
 </table>
+-->
 
 ---
@@ -52,9 +52,29 @@ Your support will be really appreciated by me and other supporters!
 - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTc4MmVlYmFlNjBmZTgzZmJlYmI1MDE1YzkyNWYyZjkwZDFiM2RlMDdjNjVlNjdjYzc4NGFhZGIyYzEyMzJkYTA)
 - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs)
 - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki)
+- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction)
+
+Table of Contents
+=================
+
+* [Introduction](#introduction)
+* [Features](#features)
+* [Additional Features](#additional-features)
+* [Filer Features](#filer-features)
+* [Example Usage](#example-usage)
+* [Architecture](#architecture)
+* [Compared to Other File Systems](#compared-to-other-file-systems)
+* [Compared to HDFS](#compared-to-hdfs)
+* [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph)
+* [Compared to GlusterFS](#compared-to-glusterfs)
+* [Compared to Ceph](#compared-to-ceph)
+* [Dev Plan](#dev-plan)
+* [Installation Guide](#installation-guide)
+* [Disk Related Topics](#disk-related-topics)
+* [Benchmark](#Benchmark)
+* [License](#license)
 
-## Introduction
+## Introduction ##
 
 SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:
@@ -65,41 +85,57 @@ SeaweedFS started as an Object Store to handle small files efficiently. Instead
 There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases.
 
-SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf).
+SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebooks Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf)
 
 SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Cassandra/LevelDB.
 
+[Back to TOC](#table-of-contents)
+
-## Additional Features
-* Can choose no replication or different replication levels, rack and data center aware
-* Automatic master servers failover - no single point of failure (SPOF)
-* Automatic Gzip compression depending on file mime type
-* Automatic compaction to reclaim disk space after deletion or update
-* Servers in the same cluster can have different disk spaces, file systems, OS etc.
-* Adding/Removing servers does **not** cause any data re-balancing
-* Optionally fix the orientation for jpeg pictures
-* Support Etag, Accept-Range, Last-Modified, etc.
-* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance.
+## Features ##
+
+[Back to TOC](#table-of-contents)
+
+## Additional Features ##
+* Can choose no replication or different replication levels, rack and data center aware.
+* Automatic master servers failover - no single point of failure (SPOF).
+* Automatic Gzip compression depending on file mime type.
+* Automatic compaction to reclaim disk space after deletion or update.
+* Servers in the same cluster can have different disk spaces, file systems, OS etc.
+* Adding/Removing servers does **not** cause any data re-balancing.
+* Optionally fix the orientation for jpeg pictures.
+* Support ETag, Accept-Range, Last-Modified, etc.
+* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance.
+* Support rebalancing the writable and readonly volumes.
+
+[Back to TOC](#table-of-contents)
 
-## Filer Features
+## Filer Features ##
 * [filer server][Filer] provide "normal" directories and files via http.
 * [mount filer][Mount] to read and write files directly as a local directory via FUSE.
 * [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling.
+* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability.
 * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
 * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
+* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
 
 [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files
 [Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount
 [AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API
 [BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud
 [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System
+[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV
+[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage
+
+[Back to TOC](#table-of-contents)
 
-## Example Usage
+## Example Usage ##
 
 By default, the master node runs on port 9333, and the volume nodes run on port 8080.
 Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example.
 
 SeaweedFS uses HTTP REST operations to read, write, and delete. The responses are in JSON or JSONP format.
 
 ### Start Master Server ###
 
 ```
 > ./weed master
@@ -125,7 +161,7 @@ Second, to store the file content, send a HTTP multi-part POST request to `url +
 ```
 > curl -F file=@/home/chris/myphoto.jpg http://127.0.0.1:8080/3,01637037d6
-{"size": 43234}
+{"name":"myphoto.jpg","size":43234,"eTag":"1cc0118e"}
 ```
 
 To update, send another POST request with updated file content.
@@ -135,6 +171,7 @@ For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL:
 ```
 > curl -X DELETE http://127.0.0.1:8080/3,01637037d6
 ```
 
 ### Save File Id ###
 
 Now, you can save the `fid`, 3,01637037d6 in this case, to a database field.
@@ -157,7 +194,7 @@ First look up the volume server's URLs by the file's volumeId:
 ```
 > curl http://localhost:9333/dir/lookup?volumeId=3
-{"locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]}
+{"volumeId":"3","locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]}
 ```
 
 Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read.
@@ -213,7 +250,7 @@ More details about replication can be found [on the wiki][Replication].
 
 You can also set the default replication strategy when starting the master server.
 
-### Allocate File Key on specific data center ###
+### Allocate File Key on Specific Data Center ###
 
 Volume servers can be started with a specific data center name:
@@ -239,6 +276,8 @@ When requesting a file key, an optional "dataCenter" parameter can limit the ass
 [feat-3]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#upload-large-files
 [feat-4]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space
 
+[Back to TOC](#table-of-contents)
+
 ## Architecture ##
 
 Usually distributed file systems split each file into chunks, a central master keeps a mapping of filenames, chunk indices to chunk handles, and also which chunks each chunk server has.
@@ -279,12 +318,16 @@ Each individual file size is limited to the volume size.
 
 All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does.
 
+[Back to TOC](#table-of-contents)
+
 ## Compared to Other File Systems ##
 
 Most other distributed file systems seem more complicated than necessary.
 
 SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications.
 
+[Back to TOC](#table-of-contents)
+
 ### Compared to HDFS ###
 
 HDFS uses the chunk approach for each file, and is ideal for storing large files.
@@ -293,6 +336,7 @@ SeaweedFS is ideal for serving relatively smaller files quickly and concurrently
 
 SeaweedFS can also store extra large files by splitting them into manageable data chunks, and store the file ids of the data chunks into a meta chunk. This is managed by "weed upload/download" tool, and the weed master or volume servers are agnostic about it.
 
+[Back to TOC](#table-of-contents)
 
 ### Compared to GlusterFS, Ceph ###
@@ -310,17 +354,21 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa
 | GlusterFS | hashing | | FUSE, NFS | | |
 | Ceph | hashing + rules | | FUSE | Yes | |
 
+[Back to TOC](#table-of-contents)
+
 ### Compared to GlusterFS ###
 
 GlusterFS stores files, both directories and content, in configurable volumes called "bricks".
 
 GlusterFS hashes the path and filename into ids, and assigned to virtual volumes, and then mapped to "bricks".
 
+[Back to TOC](#table-of-contents)
+
 ### Compared to Ceph ###
 
 Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)
 
 SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.
 
 Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews.
@@ -336,16 +384,26 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Cassa
 | Volume | OSD | optimized for small files |
 | Filer | Ceph FS | linearly scalable, Customizable, O(1) or O(logN) |
 
+[Back to TOC](#table-of-contents)
+
-## Dev plan ##
+## Dev Plan ##
 
 More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc.
 
 Other key features include: Erasure Encoding, JWT security.
 
 This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)!
 
+BTW, We suggest run the code style check script `util/gostd` before you push your branch to remote, it will make SeaweedFS easy to review, maintain and develop:
+
+```
+$ ./util/gostd
+```
+
+[Back to TOC](#table-of-contents)
+
-## Installation guide for users who are not familiar with golang
+## Installation Guide ##
+
+> Installation guide for users who are not familiar with golang
 
 Step 1: install go on your machine and setup the environment by following the instructions at:
@@ -366,23 +424,27 @@ go get github.com/chrislusf/seaweedfs/weed
 
 Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory
 
 Step 4: after you modify your code locally, you could start a local build by calling `go install` under
 
 ```
 $GOPATH/src/github.com/chrislusf/seaweedfs/weed
 ```
 
+[Back to TOC](#table-of-contents)
+
-## Disk Related topics ##
+## Disk Related Topics ##
 
 ### Hard Drive Performance ###
 
 When testing read performance on SeaweedFS, it basically becomes a performance test of your hard drive's random read speed. Hard drives usually get 100MB/s~200MB/s.
 
-### Solid State Disk
+### Solid State Disk ###
 
 To modify or delete small files, SSD must delete a whole block at a time, and move content in existing blocks to a new block. SSD is fast when brand new, but will get fragmented over time and you have to garbage collect, compacting blocks. SeaweedFS is friendly to SSD since it is append-only. Deletion and compaction are done on volume level in the background, not slowing reading and not causing fragmentation.
 
+[Back to TOC](#table-of-contents)
+
-## Benchmark
+## Benchmark ##
 
 My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CPU: 1 Intel Core i7 2.6GHz.
@@ -435,8 +497,9 @@ Percentage of the requests served within a certain time (ms)
  100%   20.7 ms
 ```
 
+[Back to TOC](#table-of-contents)
+
-## License
+## License ##
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -450,7 +513,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 
+[Back to TOC](#table-of-contents)
+
 ## Stargazers over time ##
 
 [![Stargazers over time](https://starcharts.herokuapp.com/chrislusf/seaweedfs.svg)](https://starcharts.herokuapp.com/chrislusf/seaweedfs)
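
The assign/upload/lookup cycle in the README examples above is plain HTTP, so any client can drive it. Below is a minimal sketch using the JDK's built-in `java.net.http` client against a local master on 9333, as in the README; the regex-based JSON field extraction and the hand-rolled multipart body are shortcuts to keep the sketch dependency-free, not production advice.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class WeedRestExample {

    // Crude string-field extraction so the sketch needs no JSON library.
    static String field(String json, String name) {
        Matcher m = Pattern.compile("\"" + name + "\"\\s*:\\s*\"([^\"]+)\"").matcher(json);
        return m.find() ? m.group(1) : null;
    }

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // 1. Ask the master for a file id plus a volume server to write to.
        String assign = client.send(
                HttpRequest.newBuilder(URI.create("http://localhost:9333/dir/assign")).build(),
                HttpResponse.BodyHandlers.ofString()).body();
        String fid = field(assign, "fid");
        String volumeUrl = field(assign, "url");

        // 2. POST the bytes as multipart/form-data to url + "/" + fid.
        byte[] data = Files.readAllBytes(Path.of("myphoto.jpg"));
        String boundary = "weedBoundary";
        byte[] head = ("--" + boundary + "\r\n"
                + "Content-Disposition: form-data; name=\"file\"; filename=\"myphoto.jpg\"\r\n"
                + "Content-Type: application/octet-stream\r\n\r\n").getBytes(StandardCharsets.UTF_8);
        byte[] tail = ("\r\n--" + boundary + "--\r\n").getBytes(StandardCharsets.UTF_8);
        byte[] body = new byte[head.length + data.length + tail.length];
        System.arraycopy(head, 0, body, 0, head.length);
        System.arraycopy(data, 0, body, head.length, data.length);
        System.arraycopy(tail, 0, body, head.length + data.length, tail.length);
        HttpRequest upload = HttpRequest.newBuilder(URI.create("http://" + volumeUrl + "/" + fid))
                .header("Content-Type", "multipart/form-data; boundary=" + boundary)
                .POST(HttpRequest.BodyPublishers.ofByteArray(body))
                .build();
        System.out.println(client.send(upload, HttpResponse.BodyHandlers.ofString()).body());

        // 3. On later reads, find the volume again by its id (the part before the comma).
        String volumeId = fid.split(",")[0];
        String lookup = client.send(
                HttpRequest.newBuilder(URI.create("http://localhost:9333/dir/lookup?volumeId=" + volumeId)).build(),
                HttpResponse.BodyHandlers.ofString()).body();
        System.out.println(lookup);
    }
}
```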

docker/Dockerfile

@@ -1,9 +1,20 @@
 FROM frolvlad/alpine-glibc
 
+# Supercronic install settings
+ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \
+    SUPERCRONIC=supercronic-linux-amd64 \
+    SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea
+
+# Install SeaweedFS and Supercronic ( for cron job mode )
 # Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format"
 RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
   wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \
   tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \
+  curl -fsSLO "$SUPERCRONIC_URL" && \
+  echo "${SUPERCRONIC_SHA1SUM}  ${SUPERCRONIC}" | sha1sum -c - && \
+  chmod +x "$SUPERCRONIC" && \
+  mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \
+  ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \
   apk del build-dependencies && \
   rm -rf /tmp/*
@@ -22,6 +33,8 @@ EXPOSE 9333
 # s3 server http port
 EXPOSE 8333
 
+RUN mkdir -p /data/filerldb2
+
 VOLUME /data
 
 COPY filer.toml /etc/seaweedfs/filer.toml

docker/Dockerfile.go_build

@@ -16,6 +16,8 @@ EXPOSE 9333
 # s3 server http port
 EXPOSE 8333
 
+RUN mkdir -p /data/filerldb2
+
 VOLUME /data
 
 RUN mkdir -p /etc/seaweedfs

docker/entrypoint.sh

@@ -29,11 +29,10 @@ case "$1" in
 	;;
 
   'filer')
-	ARGS="-ip `hostname -i` "
+	ARGS=""
 	if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
 		ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
 	fi
-	mkdir -p /data/filerdb
 	exec /usr/bin/weed $@ $ARGS
 	;;
 
@@ -45,6 +44,16 @@ case "$1" in
 	exec /usr/bin/weed $@ $ARGS
 	;;
 
+  'cronjob')
+	MASTER=${WEED_MASTER-localhost:9333}
+	FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *}
+	echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab
+	BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *}
+	echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab
+	echo "Running Crontab:"
+	cat /crontab
+	exec supercronic /crontab
+	;;
+
   *)
 	exec /usr/bin/weed $@
 	;;

docker/filer.toml

@@ -1,3 +1,3 @@
-[leveldb]
+[leveldb2]
 enabled = true
-dir = "/data/filerdb"
+dir = "/data/filerldb2"

docker/seaweedfs-compose.yml

@@ -26,6 +26,16 @@ services:
     depends_on:
       - master
      - volume
+  cronjob:
+    image: chrislusf/seaweedfs # use a remote image
+    command: 'cronjob'
+    environment:
+      # Run re-replication every 2 minutes
+      CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *'
+      WEED_MASTER: master:9333 # Default: localhost:9333
+    depends_on:
+      - master
+      - volume
   s3:
     image: chrislusf/seaweedfs # use a remote image
     ports:

other/java/client/pom.xml

@@ -4,7 +4,7 @@
 
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.0.5</version>
+  <version>1.1.0</version>
 
   <parent>
     <groupId>org.sonatype.oss</groupId>

other/java/client/src/main/java/seaweedfs/client/FilerClient.java

@@ -51,12 +51,26 @@ public class FilerClient {
     }
     return createEntry(
             parent,
             newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build()
     );
 }
 
+public boolean mv(String oldPath, String newPath) {
+
+    Path oldPathObject = Paths.get(oldPath);
+    String oldParent = oldPathObject.getParent().toString();
+    String oldName = oldPathObject.getFileName().toString();
+
+    Path newPathObject = Paths.get(newPath);
+    String newParent = newPathObject.getParent().toString();
+    String newName = newPathObject.getFileName().toString();
+
+    return atomicRenameEntry(oldParent, oldName, newParent, newName);
+
+}
+
 public boolean rm(String path, boolean isRecursive) {
 
     Path pathObject = Paths.get(path);
@@ -64,10 +78,10 @@ public class FilerClient {
     String name = pathObject.getFileName().toString();
 
     return deleteEntry(
             parent,
             name,
             true,
             isRecursive);
 }
 
 public boolean touch(String path, int mode) {
@@ -84,18 +98,18 @@ public class FilerClient {
     FilerProto.Entry entry = lookupEntry(parent, name);
     if (entry == null) {
         return createEntry(
                 parent,
                 newFileEntry(name, mode, uid, gid, userName, groupNames).build()
         );
     }
     long now = System.currentTimeMillis() / 1000L;
     FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder()
             .setMtime(now)
             .setUid(uid)
             .setGid(gid)
             .setUserName(userName)
             .clearGroupName()
             .addAllGroupName(Arrays.asList(groupNames));
     return updateEntry(parent, entry.toBuilder().setAttributes(attr).build());
 }
@@ -105,17 +119,17 @@ public class FilerClient {
     long now = System.currentTimeMillis() / 1000L;
     return FilerProto.Entry.newBuilder()
             .setName(name)
             .setIsDirectory(true)
             .setAttributes(FilerProto.FuseAttributes.newBuilder()
                     .setMtime(now)
                     .setCrtime(now)
                     .setUid(uid)
                     .setGid(gid)
                     .setFileMode(mode | 1 << 31)
                     .setUserName(userName)
                     .clearGroupName()
                     .addAllGroupName(Arrays.asList(groupNames)));
 }
 
 public FilerProto.Entry.Builder newFileEntry(String name, int mode,
@@ -124,17 +138,17 @@ public class FilerClient {
     long now = System.currentTimeMillis() / 1000L;
     return FilerProto.Entry.newBuilder()
             .setName(name)
             .setIsDirectory(false)
             .setAttributes(FilerProto.FuseAttributes.newBuilder()
                     .setMtime(now)
                     .setCrtime(now)
                     .setUid(uid)
                     .setGid(gid)
                     .setFileMode(mode)
                     .setUserName(userName)
                     .clearGroupName()
                     .addAllGroupName(Arrays.asList(groupNames)));
 }
 
 public List<FilerProto.Entry> listEntries(String path) {
@@ -159,21 +173,27 @@ public class FilerClient {
 }
 
 public List<FilerProto.Entry> listEntries(String path, String entryPrefix, String lastEntryName, int limit) {
-    return filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
+    List<FilerProto.Entry> entries = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder()
             .setDirectory(path)
             .setPrefix(entryPrefix)
             .setStartFromFileName(lastEntryName)
             .setLimit(limit)
             .build()).getEntriesList();
+    List<FilerProto.Entry> fixedEntries = new ArrayList<>(entries.size());
+    for (FilerProto.Entry entry : entries) {
+        fixedEntries.add(fixEntryAfterReading(entry));
+    }
+    return fixedEntries;
 }
 
 public FilerProto.Entry lookupEntry(String directory, String entryName) {
     try {
-        return filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
+        FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry(
                 FilerProto.LookupDirectoryEntryRequest.newBuilder()
                         .setDirectory(directory)
                         .setName(entryName)
                         .build()).getEntry();
+        return fixEntryAfterReading(entry);
     } catch (Exception e) {
         LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e);
         return null;
@@ -184,9 +204,9 @@ public class FilerClient {
 public boolean createEntry(String parent, FilerProto.Entry entry) {
     try {
         filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
                 .setDirectory(parent)
                 .setEntry(entry)
                 .build());
     } catch (Exception e) {
         LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
         return false;
@@ -197,9 +217,9 @@ public class FilerClient {
 public boolean updateEntry(String parent, FilerProto.Entry entry) {
     try {
         filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder()
                 .setDirectory(parent)
                 .setEntry(entry)
                 .build());
     } catch (Exception e) {
         LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e);
         return false;
@@ -210,11 +230,11 @@ public class FilerClient {
 public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive) {
     try {
         filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder()
                 .setDirectory(parent)
                 .setName(entryName)
                 .setIsDeleteData(isDeleteFileChunk)
                 .setIsRecursive(isRecursive)
                 .build());
     } catch (Exception e) {
         LOG.warn("deleteEntry {}/{}: {}", parent, entryName, e);
         return false;
@@ -222,4 +242,39 @@ public class FilerClient {
     return true;
 }
 
+public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) {
+    try {
+        filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder()
+                .setOldDirectory(oldParent)
+                .setOldName(oldName)
+                .setNewDirectory(newParent)
+                .setNewName(newName)
+                .build());
+    } catch (Exception e) {
+        LOG.warn("atomicRenameEntry {}/{} => {}/{}: {}", oldParent, oldName, newParent, newName, e);
+        return false;
+    }
+    return true;
+}
+
+private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) {
+    if (entry.getChunksList().size() <= 0) {
+        return entry;
+    }
+    String fileId = entry.getChunks(0).getFileId();
+    if (fileId != null && fileId.length() != 0) {
+        return entry;
+    }
+    FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
+    entryBuilder.clearChunks();
+    for (FilerProto.FileChunk chunk : entry.getChunksList()) {
+        FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
+        FilerProto.FileId fid = chunk.getFid();
+        fileId = String.format("%d,%d%x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie());
+        chunkBuilder.setFileId(fileId);
+        entryBuilder.addChunks(chunkBuilder);
+    }
+    return entryBuilder.build();
+}
+
 }
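
The new `mv` resolves parent/name pairs with `java.nio` and delegates to a single `AtomicRenameEntry` RPC, so a move is all-or-nothing on the filer rather than a copy-then-delete. A short usage sketch; the filer endpoint and paths are placeholders, and the two-argument `FilerClient` constructor is assumed to match the existing plaintext client:

```java
import seaweedfs.client.FilerClient;

public class RenameExample {
    public static void main(String[] args) {
        // Endpoint and paths are illustrative placeholders.
        FilerClient filerClient = new FilerClient("localhost", 18888);

        filerClient.touch("/photos/2019/cat.jpg", 0755); // existing FilerClient API

        // One AtomicRenameEntry RPC: the entry either moves or stays put.
        boolean moved = filerClient.mv("/photos/2019/cat.jpg", "/archive/cat.jpg");
        System.out.println("moved: " + moved);
    }
}
```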

other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java

@@ -2,7 +2,14 @@ package seaweedfs.client;
 
 import io.grpc.ManagedChannel;
 import io.grpc.ManagedChannelBuilder;
+import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts;
+import io.grpc.netty.shaded.io.grpc.netty.NegotiationType;
+import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder;
 
+import javax.net.ssl.SSLException;
+import java.io.File;
 import java.util.concurrent.TimeUnit;
 import java.util.logging.Logger;
 
@@ -20,6 +27,16 @@ public class FilerGrpcClient {
     this(ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext());
 }
 
+public FilerGrpcClient(String host, int grpcPort,
+                       String caFilePath,
+                       String clientCertFilePath,
+                       String clientPrivateKeyFilePath) throws SSLException {
+    this(NettyChannelBuilder.forAddress(host, grpcPort)
+            .negotiationType(NegotiationType.TLS)
+            .sslContext(buildSslContext(caFilePath,clientCertFilePath,clientPrivateKeyFilePath)));
+}
+
 public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
     channel = channelBuilder.build();
     blockingStub = SeaweedFilerGrpc.newBlockingStub(channel);
@@ -42,4 +59,18 @@ public class FilerGrpcClient {
 public SeaweedFilerGrpc.SeaweedFilerFutureStub getFutureStub() {
     return futureStub;
 }
 
+private static SslContext buildSslContext(String trustCertCollectionFilePath,
+                                          String clientCertChainFilePath,
+                                          String clientPrivateKeyFilePath) throws SSLException {
+    SslContextBuilder builder = GrpcSslContexts.forClient();
+    if (trustCertCollectionFilePath != null) {
+        builder.trustManager(new File(trustCertCollectionFilePath));
+    }
+    if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) {
+        builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath));
+    }
+    return builder.build();
+}
+
 }
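
With the new constructor, the same client can reach a filer whose gRPC port requires TLS, optionally with mutual authentication. A hedged construction sketch; the host and certificate paths are placeholders, not values from this commit:

```java
import javax.net.ssl.SSLException;
import seaweedfs.client.FilerGrpcClient;

public class SecureClientExample {
    public static FilerGrpcClient connect() throws SSLException {
        return new FilerGrpcClient(
                "filer.example.com", 18888,         // assumed filer gRPC endpoint
                "/etc/seaweedfs/certs/ca.crt",      // trust roots for the server certificate
                "/etc/seaweedfs/certs/client.crt",  // client cert chain (enables mTLS)
                "/etc/seaweedfs/certs/client.key"); // client private key
    }
}
```

Note that `buildSslContext` only installs what it is given: a null CA path keeps the default trust manager, and omitting the cert/key pair yields server-only TLS without client authentication.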

other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java

@@ -20,25 +20,26 @@ public class SeaweedWrite {
                              final byte[] bytes,
                              final long bytesOffset, final long bytesLength) throws IOException {
     FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
             FilerProto.AssignVolumeRequest.newBuilder()
                     .setCollection("")
                     .setReplication(replication)
                     .setDataCenter("")
-                    .setReplication("")
                     .setTtlSec(0)
                     .build());
     String fileId = response.getFileId();
     String url = response.getUrl();
+    String auth = response.getAuth();
     String targetUrl = String.format("http://%s/%s", url, fileId);
 
-    String etag = multipartUpload(targetUrl, bytes, bytesOffset, bytesLength);
+    String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength);
 
     entry.addChunks(FilerProto.FileChunk.newBuilder()
             .setFileId(fileId)
             .setOffset(offset)
             .setSize(bytesLength)
             .setMtime(System.currentTimeMillis() / 10000L)
             .setETag(etag)
     );
 }
@@ -46,14 +47,15 @@ public class SeaweedWrite {
 public static void writeMeta(final FilerGrpcClient filerGrpcClient,
                              final String parentDirectory, final FilerProto.Entry.Builder entry) {
     filerGrpcClient.getBlockingStub().createEntry(
             FilerProto.CreateEntryRequest.newBuilder()
                     .setDirectory(parentDirectory)
                     .setEntry(entry)
                     .build()
     );
 }
 
 private static String multipartUpload(String targetUrl,
+                                      String auth,
                                       final byte[] bytes,
                                       final long bytesOffset, final long bytesLength) throws IOException {
@@ -62,11 +64,14 @@ public class SeaweedWrite {
     InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength);
 
     HttpPost post = new HttpPost(targetUrl);
+    if (auth != null && auth.length() != 0) {
+        post.addHeader("Authorization", "BEARER " + auth);
+    }
     post.setEntity(MultipartEntityBuilder.create()
             .setMode(HttpMultipartMode.BROWSER_COMPATIBLE)
             .addBinaryBody("upload", inputStream)
             .build());
 
     try {
         HttpResponse response = client.execute(post);

other/java/client/src/main/proto/filer.proto

@@ -24,6 +24,9 @@ service SeaweedFiler {
     rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
     }
 
+    rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
+    }
+
     rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
     }
 
@@ -36,6 +39,9 @@ service SeaweedFiler {
     rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
     }
 
+    rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
+    }
+
 }
 
 //////////////////////////////////////////////////
@@ -69,19 +75,33 @@ message Entry {
     map<string, bytes> extended = 5;
 }
 
+message FullEntry {
+    string dir = 1;
+    Entry entry = 2;
+}
+
 message EventNotification {
     Entry old_entry = 1;
     Entry new_entry = 2;
     bool delete_chunks = 3;
+    string new_parent_path = 4;
 }
 
 message FileChunk {
-    string file_id = 1;
+    string file_id = 1; // to be deprecated
     int64 offset = 2;
     uint64 size = 3;
     int64 mtime = 4;
     string e_tag = 5;
-    string source_file_id = 6;
+    string source_file_id = 6; // to be deprecated
+    FileId fid = 7;
+    FileId source_fid = 8;
+}
+
+message FileId {
+    uint32 volume_id = 1;
+    uint64 file_key = 2;
+    fixed32 cookie = 3;
 }
 
 message FuseAttributes {
@@ -126,6 +146,16 @@ message DeleteEntryRequest {
 message DeleteEntryResponse {
 }
 
+message AtomicRenameEntryRequest {
+    string old_directory = 1;
+    string old_name = 2;
+    string new_directory = 3;
+    string new_name = 4;
+}
+
+message AtomicRenameEntryResponse {
+}
+
 message AssignVolumeRequest {
     int32 count = 1;
     string collection = 2;
@@ -139,6 +169,7 @@ message AssignVolumeResponse {
     string url = 2;
     string public_url = 3;
     int32 count = 4;
+    string auth = 5;
 }
 
 message LookupVolumeRequest {
@@ -177,3 +208,12 @@ message StatisticsResponse {
     uint64 used_size = 5;
     uint64 file_count = 6;
 }
+
+message GetFilerConfigurationRequest {
+}
+
+message GetFilerConfigurationResponse {
+    repeated string masters = 1;
+    string replication = 2;
+    string collection = 3;
+    uint32 max_mb = 4;
+}
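
The new `GetFilerConfiguration` RPC lets a client discover the filer's masters and write defaults before issuing writes. A hedged sketch against the generated blocking stub; the stub method and getter names assume standard protobuf/gRPC Java codegen for this file:

```java
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;

public class FilerConfigExample {
    // Getter and stub method names assume standard protobuf/gRPC Java codegen.
    public static void print(FilerGrpcClient client) {
        FilerProto.GetFilerConfigurationResponse config =
                client.getBlockingStub().getFilerConfiguration(
                        FilerProto.GetFilerConfigurationRequest.newBuilder().build());
        System.out.println("masters:     " + config.getMastersList());
        System.out.println("replication: " + config.getReplication());
        System.out.println("collection:  " + config.getCollection());
        System.out.println("max MB:      " + config.getMaxMb());
    }
}
```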

other/java/hdfs/pom.xml

@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
 
     <properties>
-        <seaweedfs.client.version>1.0.5</seaweedfs.client.version>
+        <seaweedfs.client.version>1.1.0</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>

other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java

@@ -34,6 +34,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
     public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+    public static final String FS_SEAWEED_GRPC_CA = "fs.seaweed.ca";
+    public static final String FS_SEAWEED_GRPC_CLIENT_KEY = "fs.seaweed.client.key";
+    public static final String FS_SEAWEED_GRPC_CLIENT_CERT = "fs.seaweed.client.cert";
 
     private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
     private static int BUFFER_SIZE = 16 * 1024 * 1024;
@@ -72,7 +75,17 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     setConf(conf);
     this.uri = uri;
 
-    seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+    if (conf.get(FS_SEAWEED_GRPC_CA) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CA).length() != 0
+            && conf.get(FS_SEAWEED_GRPC_CLIENT_CERT) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_CERT).length() != 0
+            && conf.get(FS_SEAWEED_GRPC_CLIENT_KEY) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_KEY).length() != 0) {
+        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port,
+                conf.get(FS_SEAWEED_GRPC_CA),
+                conf.get(FS_SEAWEED_GRPC_CLIENT_CERT),
+                conf.get(FS_SEAWEED_GRPC_CLIENT_KEY));
+    } else {
+        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+    }
 }
 
 @Override
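
The three new keys mirror the TLS constructor added to `FilerGrpcClient`; `initialize` takes the secure path only when all of them are set and non-blank. A hedged wiring sketch; the host, certificate paths, and the `seaweedfs://` URI scheme are assumptions for illustration:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SecureSeaweedHadoopExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.seaweed.filer.host", "filer.example.com"); // placeholder host
        conf.set("fs.seaweed.filer.port", "8888");
        // All three of the keys below must be non-blank, otherwise the
        // plaintext SeaweedFileSystemStore constructor is used instead.
        conf.set("fs.seaweed.ca", "/etc/seaweedfs/certs/ca.crt");
        conf.set("fs.seaweed.client.cert", "/etc/seaweedfs/certs/client.crt");
        conf.set("fs.seaweed.client.key", "/etc/seaweedfs/certs/client.key");

        FileSystem fs = FileSystem.get(new URI("seaweedfs://filer.example.com:8888/"), conf);
        System.out.println(fs.exists(new Path("/")));
    }
}
```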
@@ -206,8 +219,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
 
     return seaweedFileSystemStore.createDirectory(path, currentUser,
             fsPermission == null ? FsPermission.getDirDefault() : fsPermission,
             FsPermission.getUMask(getConf()));
 }
 
@@ -238,7 +251,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
  */
 @Override
 public void setOwner(Path path, final String owner, final String group)
         throws IOException {
     LOG.debug("setOwner path: {}", path);
     path = qualify(path);
@@ -271,54 +284,55 @@
 /**
  * Concat existing files together.
  *
  * @param trg   the path to the target destination.
  * @param psrcs the paths to the sources to use for the concatenation.
  * @throws IOException                   IO failure
  * @throws UnsupportedOperationException if the operation is unsupported
  *                                       (default).
  */
 @Override
 public void concat(final Path trg, final Path[] psrcs) throws IOException {
     throw new UnsupportedOperationException("Not implemented by the " +
             getClass().getSimpleName() + " FileSystem implementation");
 }
 
 /**
  * Truncate the file in the indicated path to the indicated size.
  * <ul>
  * <li>Fails if path is a directory.</li>
  * <li>Fails if path does not exist.</li>
  * <li>Fails if path is not closed.</li>
  * <li>Fails if new size is greater than current size.</li>
  * </ul>
  *
  * @param f         The path to the file to be truncated
  * @param newLength The size the file is to be truncated to
  * @return <code>true</code> if the file has been truncated to the desired
  * <code>newLength</code> and is immediately available to be reused for
  * write operations such as <code>append</code>, or
  * <code>false</code> if a background process of adjusting the length of
  * the last block has been started, and clients should wait for it to
  * complete before proceeding with further file updates.
  * @throws IOException                   IO failure
  * @throws UnsupportedOperationException if the operation is unsupported
  *                                       (default).
  */
 @Override
 public boolean truncate(Path f, long newLength) throws IOException {
     throw new UnsupportedOperationException("Not implemented by the " +
             getClass().getSimpleName() + " FileSystem implementation");
 }
 
 @Override
 public void createSymlink(final Path target, final Path link,
                           final boolean createParent) throws AccessControlException,
         FileAlreadyExistsException, FileNotFoundException,
         ParentNotDirectoryException, UnsupportedFileSystemException,
         IOException {
     // Supporting filesystems should override this method
     throw new UnsupportedOperationException(
             "Filesystem does not support symlinks!");
 }
 
 public boolean supportsSymlinks() {
@@ -327,48 +341,51 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
 /**
  * Create a snapshot.
  *
  * @param path         The directory where snapshots will be taken.
  * @param snapshotName The name of the snapshot
  * @return the snapshot path.
  * @throws IOException                   IO failure
  * @throws UnsupportedOperationException if the operation is unsupported
  */
 @Override
 public Path createSnapshot(Path path, String snapshotName)
         throws IOException {
     throw new UnsupportedOperationException(getClass().getSimpleName()
             + " doesn't support createSnapshot");
 }
 
 /**
  * Rename a snapshot.
  *
  * @param path            The directory path where the snapshot was taken
  * @param snapshotOldName Old name of the snapshot
  * @param snapshotNewName New name of the snapshot
  * @throws IOException                   IO failure
  * @throws UnsupportedOperationException if the operation is unsupported
  *                                       (default outcome).
  */
 @Override
 public void renameSnapshot(Path path, String snapshotOldName,
                            String snapshotNewName) throws IOException {
     throw new UnsupportedOperationException(getClass().getSimpleName()
             + " doesn't support renameSnapshot");
 }
 
 /**
  * Delete a snapshot of a directory.
  *
  * @param path         The directory that the to-be-deleted snapshot belongs to
  * @param snapshotName The name of the snapshot
  * @throws IOException                   IO failure
  * @throws UnsupportedOperationException if the operation is unsupported
  *                                       (default outcome).
  */
 @Override
 public void deleteSnapshot(Path path, String snapshotName)
         throws IOException {
     throw new UnsupportedOperationException(getClass().getSimpleName()
             + " doesn't support deleteSnapshot");
 }
 
 /**
@ -377,49 +394,49 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* ACL entries that are not specified in this call are retained without * ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.) * changes. (Modifications are merged into the current ACL.)
* *
* @param path Path to modify * @param path Path to modify
* @param aclSpec List&lt;AclEntry&gt; describing modifications * @param aclSpec List&lt;AclEntry&gt; describing modifications
* @throws IOException if an ACL could not be modified * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec) public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException { throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support modifyAclEntries"); + " doesn't support modifyAclEntries");
} }
/** /**
* Removes ACL entries from files and directories. Other ACL entries are * Removes ACL entries from files and directories. Other ACL entries are
* retained. * retained.
* *
* @param path Path to modify * @param path Path to modify
* @param aclSpec List describing entries to remove * @param aclSpec List describing entries to remove
* @throws IOException if an ACL could not be modified * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec) public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException { throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAclEntries"); + " doesn't support removeAclEntries");
} }
/** /**
* Removes all default ACL entries from files and directories. * Removes all default ACL entries from files and directories.
* *
* @param path Path to modify * @param path Path to modify
* @throws IOException if an ACL could not be modified * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public void removeDefaultAcl(Path path) public void removeDefaultAcl(Path path)
throws IOException { throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeDefaultAcl"); + " doesn't support removeDefaultAcl");
} }
/** /**
@ -428,32 +445,32 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* bits. * bits.
* *
* @param path Path to modify * @param path Path to modify
* @throws IOException if an ACL could not be removed * @throws IOException if an ACL could not be removed
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public void removeAcl(Path path) public void removeAcl(Path path)
throws IOException { throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAcl"); + " doesn't support removeAcl");
} }
/** /**
* Fully replaces ACL of files and directories, discarding all existing * Fully replaces ACL of files and directories, discarding all existing
* entries. * entries.
* *
* @param path Path to modify * @param path Path to modify
* @param aclSpec List describing modifications, which must include entries * @param aclSpec List describing modifications, which must include entries
* for user, group, and others for compatibility with permission bits. * for user, group, and others for compatibility with permission bits.
* @throws IOException if an ACL could not be modified * @throws IOException if an ACL could not be modified
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException { public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setAcl"); + " doesn't support setAcl");
} }
/** /**
@ -461,14 +478,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* *
* @param path Path to get * @param path Path to get
* @return AclStatus describing the ACL of the file or directory * @return AclStatus describing the ACL of the file or directory
* @throws IOException if an ACL could not be read * @throws IOException if an ACL could not be read
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public AclStatus getAclStatus(Path path) throws IOException { public AclStatus getAclStatus(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getAclStatus"); + " doesn't support getAclStatus");
} }
/** /**
@ -478,19 +495,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* <p> * <p>
* Refer to the HDFS extended attributes user documentation for details. * Refer to the HDFS extended attributes user documentation for details.
* *
* @param path Path to modify * @param path Path to modify
* @param name xattr name. * @param name xattr name.
* @param value xattr value. * @param value xattr value.
* @param flag xattr set flag * @param flag xattr set flag
* @throws IOException IO failure * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public void setXAttr(Path path, String name, byte[] value, public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException { EnumSet<XAttrSetFlag> flag) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setXAttr"); + " doesn't support setXAttr");
} }
/** /**
@ -503,14 +520,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* @param path Path to get extended attribute * @param path Path to get extended attribute
* @param name xattr name. * @param name xattr name.
* @return byte[] xattr value. * @return byte[] xattr value.
* @throws IOException IO failure * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public byte[] getXAttr(Path path, String name) throws IOException { public byte[] getXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttr"); + " doesn't support getXAttr");
} }
/** /**
@ -522,14 +539,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* *
* @param path Path to get extended attributes * @param path Path to get extended attributes
* @return Map describing the XAttrs of the file or directory * @return Map describing the XAttrs of the file or directory
* @throws IOException IO failure * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException { public Map<String, byte[]> getXAttrs(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttrs"); + " doesn't support getXAttrs");
} }
/** /**
@ -539,18 +556,18 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* <p> * <p>
* Refer to the HDFS extended attributes user documentation for details. * Refer to the HDFS extended attributes user documentation for details.
* *
* @param path Path to get extended attributes * @param path Path to get extended attributes
* @param names XAttr names. * @param names XAttr names.
* @return Map describing the XAttrs of the file or directory * @return Map describing the XAttrs of the file or directory
* @throws IOException IO failure * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public Map<String, byte[]> getXAttrs(Path path, List<String> names) public Map<String, byte[]> getXAttrs(Path path, List<String> names)
throws IOException { throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttrs"); + " doesn't support getXAttrs");
} }
/** /**
@ -562,14 +579,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* *
* @param path Path to get extended attributes * @param path Path to get extended attributes
* @return List{@literal <String>} of the XAttr names of the file or directory * @return List{@literal <String>} of the XAttr names of the file or directory
* @throws IOException IO failure * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public List<String> listXAttrs(Path path) throws IOException { public List<String> listXAttrs(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support listXAttrs"); + " doesn't support listXAttrs");
} }
/** /**
@ -581,14 +598,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
* *
* @param path Path to remove extended attribute * @param path Path to remove extended attribute
* @param name xattr name * @param name xattr name
* @throws IOException IO failure * @throws IOException IO failure
* @throws UnsupportedOperationException if the operation is unsupported * @throws UnsupportedOperationException if the operation is unsupported
* (default outcome). * (default outcome).
*/ */
@Override @Override
public void removeXAttr(Path path, String name) throws IOException { public void removeXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName() throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeXAttr"); + " doesn't support removeXAttr");
} }
} }


@ -12,6 +12,7 @@ import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto; import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead; import seaweedfs.client.SeaweedRead;
import javax.net.ssl.SSLException;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
@ -33,6 +34,13 @@ public class SeaweedFileSystemStore {
filerClient = new FilerClient(filerGrpcClient); filerClient = new FilerClient(filerGrpcClient);
} }
public SeaweedFileSystemStore(String host, int port,
String caFile, String clientCertFile, String clientKeyFile) throws SSLException {
int grpcPort = 10000 + port;
filerGrpcClient = new FilerGrpcClient(host, grpcPort, caFile, clientCertFile, clientKeyFile);
filerClient = new FilerClient(filerGrpcClient);
}
public static String getParentDirectory(Path path) { public static String getParentDirectory(Path path) {
return path.isRoot() ? "/" : path.getParent().toUri().getPath(); return path.isRoot() ? "/" : path.getParent().toUri().getPath();
} }
@ -143,35 +151,7 @@ public class SeaweedFileSystemStore {
LOG.warn("rename non-existing source: {}", source); LOG.warn("rename non-existing source: {}", source);
return; return;
} }
LOG.warn("rename moveEntry source: {}", source); filerClient.mv(source.toUri().getPath(), destination.toUri().getPath());
moveEntry(source.getParent(), entry, destination);
}
private boolean moveEntry(Path oldParent, FilerProto.Entry entry, Path destination) {
LOG.debug("moveEntry: {}/{} => {}", oldParent, entry.getName(), destination);
FilerProto.Entry.Builder newEntry = entry.toBuilder().setName(destination.getName());
boolean isDirectoryCreated = filerClient.createEntry(getParentDirectory(destination), newEntry.build());
if (!isDirectoryCreated) {
return false;
}
if (entry.getIsDirectory()) {
Path entryPath = new Path(oldParent, entry.getName());
List<FilerProto.Entry> entries = filerClient.listEntries(entryPath.toUri().getPath());
for (FilerProto.Entry ent : entries) {
boolean isSucess = moveEntry(entryPath, ent, new Path(destination, ent.getName()));
if (!isSucess) {
return false;
}
}
}
return filerClient.deleteEntry(
oldParent.toUri().getPath(), entry.getName(), false, false);
} }
public OutputStream createFile(final Path path, public OutputStream createFile(final Path path,


@ -9,6 +9,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
) )
var ( var (
@ -73,7 +74,7 @@ func main() {
} }
if *targetTTL != "" { if *targetTTL != "" {
ttl, err := storage.ReadTTL(*targetTTL) ttl, err := needle.ReadTTL(*targetTTL)
if err != nil { if err != nil {
glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err) glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)


@ -0,0 +1,35 @@
package main
import (
"flag"
"log"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
dir = flag.String("dir", ".", "data directory to store leveldb files")
)
func main() {
flag.Parse()
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10,
OpenFilesCacheCapacity: -1,
}
db, err := leveldb.OpenFile(*dir, opts)
if err != nil {
log.Fatal(err)
}
defer db.Close()
if err := db.CompactRange(util.Range{}); err != nil {
log.Fatal(err)
}
}
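A note on the call above: passing a zero-value util.Range to CompactRange compacts the entire keyspace. goleveldb also accepts explicit Start/Limit bounds when only part of the keyspace needs compacting; a minimal sketch, with a hypothetical shard directory and key prefix:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// hypothetical shard directory
	db, err := leveldb.OpenFile("./t/00", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// compact only keys in ["tenant001/", "tenant002/")
	if err := db.CompactRange(util.Range{
		Start: []byte("tenant001/"),
		Limit: []byte("tenant002/"),
	}); err != nil {
		log.Fatal(err)
	}
}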


@ -10,6 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -61,7 +62,7 @@ func main() {
} }
newDatFile.Write(superBlock.Bytes()) newDatFile.Write(superBlock.Bytes())
iterateEntries(datFile, indexFile, func(n *storage.Needle, offset int64) { iterateEntries(datFile, indexFile, func(n *needle.Needle, offset int64) {
fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize) fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize)
_, s, _, e := n.Append(newDatFile, superBlock.Version()) _, s, _, e := n.Append(newDatFile, superBlock.Version())
fmt.Printf("size %d error %v\n", s, e) fmt.Printf("size %d error %v\n", s, e)
@ -69,7 +70,7 @@ func main() {
} }
func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needle, offset int64)) { func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) {
// start to read index file // start to read index file
var readerOffset int64 var readerOffset int64
bytes := make([]byte, 16) bytes := make([]byte, 16)
@ -84,7 +85,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
} }
offset := int64(superBlock.BlockSize()) offset := int64(superBlock.BlockSize())
version := superBlock.Version() version := superBlock.Version()
n, rest, err := storage.ReadNeedleHeader(datFile, version, offset) n, _, rest, err := needle.ReadNeedleHeader(datFile, version, offset)
if err != nil { if err != nil {
fmt.Printf("cannot read needle header: %v", err) fmt.Printf("cannot read needle header: %v", err)
return return
@ -106,7 +107,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex) fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex)
rest = storage.NeedleBodyLength(sizeFromIndex, version) rest = needle.NeedleBodyLength(sizeFromIndex, version)
func() { func() {
defer func() { defer func() {
@ -114,7 +115,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
fmt.Println("Recovered in f", r) fmt.Println("Recovered in f", r)
} }
}() }()
if err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleEntrySize), rest); err != nil { if _, err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleHeaderSize), rest); err != nil {
fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err) fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err)
} }
}() }()
@ -124,9 +125,9 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
} }
visitNeedle(n, offset) visitNeedle(n, offset)
offset += types.NeedleEntrySize + rest offset += types.NeedleHeaderSize + rest
//fmt.Printf("==> new entry offset %d\n", offset) //fmt.Printf("==> new entry offset %d\n", offset)
if n, rest, err = storage.ReadNeedleHeader(datFile, version, offset); err != nil { if n, _, rest, err = needle.ReadNeedleHeader(datFile, version, offset); err != nil {
if err == io.EOF { if err == io.EOF {
return return
} }
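The walk above advances by types.NeedleHeaderSize plus the body length returned with each header. A minimal decoding sketch for one header, assuming a 16-byte layout of 4-byte cookie, 8-byte needle id, and 4-byte size, big-endian (this layout is an assumption, not taken from this diff):

package main

import (
	"encoding/binary"
	"fmt"
)

// decode one 16-byte needle header: cookie, id, size
func decodeNeedleHeader(h [16]byte) (cookie uint32, id uint64, size uint32) {
	cookie = binary.BigEndian.Uint32(h[0:4])
	id = binary.BigEndian.Uint64(h[4:12])
	size = binary.BigEndian.Uint32(h[12:16])
	return
}

func main() {
	var h [16]byte
	binary.BigEndian.PutUint32(h[0:4], 0xdeadbeef)
	binary.BigEndian.PutUint64(h[4:12], 42)
	binary.BigEndian.PutUint32(h[12:16], 100)
	c, id, sz := decodeNeedleHeader(h)
	fmt.Printf("cookie:%x id:%d size:%d\n", c, id, sz)
}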


@ -0,0 +1,156 @@
package main
import (
"crypto/md5"
"flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"sync"
"time"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
var (
dir = flag.String("dir", "./t", "directory to store level db files")
useHash = flag.Bool("isHash", false, "hash the path as the key")
dbCount = flag.Int("dbCount", 1, "the number of leveldb")
)
func main() {
flag.Parse()
totalTenants := 300
totalYears := 3
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
}
var dbs []*leveldb.DB
var chans []chan string
for d := 0; d < *dbCount; d++ {
dbFolder := fmt.Sprintf("%s/%02d", *dir, d)
os.MkdirAll(dbFolder, 0755)
db, err := leveldb.OpenFile(dbFolder, opts)
if err != nil {
log.Printf("filer store open dir %s: %v", *dir, err)
return
}
dbs = append(dbs, db)
chans = append(chans, make(chan string, 1024))
}
var wg sync.WaitGroup
for d := 0; d < *dbCount; d++ {
wg.Add(1)
go func(d int) {
defer wg.Done()
ch := chans[d]
db := dbs[d]
for p := range ch {
if *useHash {
insertAsHash(db, p)
} else {
insertAsFullPath(db, p)
}
}
}(d)
}
counter := int64(0)
lastResetTime := time.Now()
r := rand.New(rand.NewSource(35))
for y := 0; y < totalYears; y++ {
for m := 0; m < 12; m++ {
for d := 0; d < 31; d++ {
for h := 0; h < 24; h++ {
for min := 0; min < 60; min++ {
for i := 0; i < totalTenants; i++ {
p := fmt.Sprintf("tenent%03d/%4d/%02d/%02d/%02d/%02d", i, 2015+y, 1+m, 1+d, h, min)
x := r.Intn(*dbCount)
chans[x] <- p
counter++
}
t := time.Now()
if lastResetTime.Add(time.Second).Before(t) {
p := fmt.Sprintf("%4d/%02d/%02d/%02d/%02d", 2015+y, 1+m, 1+d, h, min)
fmt.Printf("%s = %4d put/sec\n", p, counter)
counter = 0
lastResetTime = t
}
}
}
}
}
}
for d := 0; d < *dbCount; d++ {
close(chans[d])
}
wg.Wait()
}
func insertAsFullPath(db *leveldb.DB, p string) {
_, getErr := db.Get([]byte(p), nil)
if getErr == leveldb.ErrNotFound {
putErr := db.Put([]byte(p), []byte(p), nil)
if putErr != nil {
log.Printf("failed to put %s", p)
}
}
}
func insertAsHash(db *leveldb.DB, p string) {
key := fmt.Sprintf("%d:%s", hashToLong(p), p)
_, getErr := db.Get([]byte(key), nil)
if getErr == leveldb.ErrNotFound {
putErr := db.Put([]byte(key), []byte(p), nil)
if putErr != nil {
log.Printf("failed to put %s", p)
}
}
}
func hashToLong(dir string) (v int64) {
h := md5.New()
io.WriteString(h, dir)
b := h.Sum(nil)
v += int64(b[0])
v <<= 8
v += int64(b[1])
v <<= 8
v += int64(b[2])
v <<= 8
v += int64(b[3])
v <<= 8
v += int64(b[4])
v <<= 8
v += int64(b[5])
v <<= 8
v += int64(b[6])
v <<= 8
v += int64(b[7])
return
}
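For reference, hashToLong above is byte-for-byte equivalent to reading the first eight md5 digest bytes as a big-endian integer; a compact sketch of the same folding:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// same result as hashToLong: fold the first 8 digest bytes,
// most significant byte first, into an int64
func hashToLongCompact(dir string) int64 {
	sum := md5.Sum([]byte(dir))
	return int64(binary.BigEndian.Uint64(sum[:8]))
}

func main() {
	fmt.Println(hashToLongCompact("tenant001/2015/01/01/00/00"))
}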


@ -7,6 +7,9 @@ import (
"log" "log"
"math/rand" "math/rand"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -19,8 +22,11 @@ var (
func main() { func main() {
flag.Parse() flag.Parse()
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
for i := 0; i < *repeat; i++ { for i := 0; i < *repeat; i++ {
assignResult, err := operation.Assign(*master, &operation.VolumeAssignRequest{Count: 1}) assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1})
if err != nil { if err != nil {
log.Fatalf("assign: %v", err) log.Fatalf("assign: %v", err)
} }
@ -31,12 +37,12 @@ func main() {
targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
_, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, "") _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth)
if err != nil { if err != nil {
log.Fatalf("upload: %v", err) log.Fatalf("upload: %v", err)
} }
util.Delete(targetUrl, "") util.Delete(targetUrl, string(assignResult.Auth))
util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master))


@ -2,8 +2,12 @@ package main
import ( import (
"flag" "flag"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"time"
) )
var ( var (
@ -13,7 +17,7 @@ var (
) )
type VolumeFileScanner4SeeDat struct { type VolumeFileScanner4SeeDat struct {
version storage.Version version needle.Version
} }
func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error { func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error {
@ -22,18 +26,19 @@ func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.Supe
} }
func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
return false return true
} }
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *storage.Needle, offset int64) error { func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64) error {
glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie) t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x appendedAt %v", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie, t)
return nil return nil
} }
func main() { func main() {
flag.Parse() flag.Parse()
vid := storage.VolumeId(*volumeId) vid := needle.VolumeId(*volumeId)
scanner := &VolumeFileScanner4SeeDat{} scanner := &VolumeFileScanner4SeeDat{}
err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner) err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)


@ -8,7 +8,7 @@ import (
"strconv" "strconv"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
) )
@ -35,7 +35,7 @@ func main() {
} }
defer indexFile.Close() defer indexFile.Close()
storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size) fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size)
return nil return nil
}) })
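For context on what idx.WalkIndexFile iterates over: each .idx record is a fixed 16-byte entry. A minimal decoding sketch, assuming the layout of 8-byte needle id, 4-byte offset stored in 8-byte padding units, and 4-byte size, big-endian (an assumed layout, not spelled out in this diff):

package main

import (
	"encoding/binary"
	"fmt"
)

// decode one 16-byte index entry: needle id, offset, size
func decodeIdxEntry(entry [16]byte) (key uint64, byteOffset int64, size uint32) {
	key = binary.BigEndian.Uint64(entry[0:8])
	offsetUnits := binary.BigEndian.Uint32(entry[8:12])
	size = binary.BigEndian.Uint32(entry[12:16])
	byteOffset = int64(offsetUnits) * 8 // offsets are stored in 8-byte padding units
	return
}

func main() {
	var e [16]byte
	binary.BigEndian.PutUint64(e[0:8], 42)
	binary.BigEndian.PutUint32(e[8:12], 1) // one padding unit => byte offset 8
	binary.BigEndian.PutUint32(e[12:16], 100)
	k, off, sz := decodeIdxEntry(e)
	fmt.Printf("key:%d offset:%d size:%d\n", k, off, sz)
}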


@ -0,0 +1,68 @@
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
)
var (
metaFile = flag.String("meta", "", "meta file generated via fs.meta.save")
)
func main() {
flag.Parse()
dst, err := os.OpenFile(*metaFile, os.O_RDONLY, 0644)
if err != nil {
log.Fatalf("failed to open %s: %v", *metaFile, err)
}
defer dst.Close()
err = walkMetaFile(dst)
if err != nil {
log.Fatalf("failed to visit %s: %v", *metaFile, err)
}
}
func walkMetaFile(dst *os.File) error {
sizeBuf := make([]byte, 4)
for {
if _, err := io.ReadFull(dst, sizeBuf); err != nil {
if err == io.EOF {
return nil
}
return err
}
size := util.BytesToUint32(sizeBuf)
data := make([]byte, int(size))
if _, err := io.ReadFull(dst, data); err != nil {
return err
}
fullEntry := &filer_pb.FullEntry{}
if err := proto.Unmarshal(data, fullEntry); err != nil {
return err
}
fmt.Fprintf(os.Stdout, "file %s %v\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
for i, chunk := range fullEntry.Entry.Chunks {
fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String())
}
}
}
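walkMetaFile above decodes a stream of length-prefixed records: a 4-byte size, then a marshaled filer_pb.FullEntry of that many bytes. A minimal sketch of the matching writer, assuming util.BytesToUint32 reads big-endian (an assumption; check the util package before relying on it):

package main

import (
	"encoding/binary"
	"io"
	"os"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/golang/protobuf/proto"
)

// writeMetaEntry frames one record the way walkMetaFile expects:
// a 4-byte big-endian size, then the marshaled FullEntry bytes.
func writeMetaEntry(w io.Writer, fullEntry *filer_pb.FullEntry) error {
	data, err := proto.Marshal(fullEntry)
	if err != nil {
		return err
	}
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(data))) // assumed mirror of util.BytesToUint32
	if _, err := w.Write(sizeBuf); err != nil {
		return err
	}
	_, err = w.Write(data)
	return err
}

func main() {
	_ = writeMetaEntry(os.Stdout, &filer_pb.FullEntry{Dir: "/"})
}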


@ -0,0 +1,136 @@
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"mime/multipart"
"net/http"
"os"
"strings"
"sync"
"time"
)
var (
size = flag.Int("size", 1024, "file size")
concurrency = flag.Int("c", 4, "concurrent number of uploads")
times = flag.Int("n", 1024, "repeated number of times")
fileCount = flag.Int("fileCount", 1, "number of files to write")
destination = flag.String("to", "http://localhost:8888/", "destination directory on filer")
statsChan = make(chan stat, 8)
)
type stat struct {
size int64
}
func main() {
flag.Parse()
data := make([]byte, *size)
println("data len", len(data))
var wg sync.WaitGroup
for x := 0; x < *concurrency; x++ {
wg.Add(1)
go func(x int) {
defer wg.Done()
client := &http.Client{Transport: &http.Transport{
MaxConnsPerHost: 1024,
MaxIdleConnsPerHost: 1024,
}}
r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))
for t := 0; t < *times; t++ {
for f := 0; f < *fileCount; f++ {
fn := r.Intn(*fileCount)
if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil {
statsChan <- stat{
size: size,
}
} else {
log.Fatalf("client %d upload %d times: %v", x, t, err)
}
}
}
}(x)
}
go func() {
ticker := time.NewTicker(1000 * time.Millisecond)
var lastTime time.Time
var counter, size int64
for {
select {
case stat := <-statsChan:
size += stat.size
counter++
case x := <-ticker.C:
if !lastTime.IsZero() {
elapsed := x.Sub(lastTime).Seconds()
fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n",
float64(counter)/elapsed,
float64(size)/1024/1024/elapsed)
}
lastTime = x
size = 0
counter = 0
}
}
}()
wg.Wait()
}
func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) {
if !strings.HasSuffix(destination, "/") {
destination = destination + "/"
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile("file", filename)
if err != nil {
return 0, fmt.Errorf("fail to create form %v: %v", filename, err)
}
part.Write(data)
err = writer.Close()
if err != nil {
return 0, fmt.Errorf("fail to write part %v: %v", filename, err)
}
uri := destination + filename
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return 0, fmt.Errorf("create request %s: %v", uri, err)
}
request.Header.Set("Content-Type", writer.FormDataContentType())
// request.Close = true // do not set this: it disables HTTP connection reuse, which also hurts filer->volume traffic.
resp, err := client.Do(request)
if err != nil {
return 0, fmt.Errorf("http POST %s: %v", uri, err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
}
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
return int64(len(data)), nil
}


@ -0,0 +1,150 @@
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
var (
dir = flag.String("dir", ".", "upload files under this directory")
concurrency = flag.Int("c", 1, "concurrent number of uploads")
times = flag.Int("n", 1, "repeated number of times")
destination = flag.String("to", "http://localhost:8888/", "destination directory on filer")
statsChan = make(chan stat, 8)
)
type stat struct {
size int64
}
func main() {
flag.Parse()
var fileNames []string
files, err := ioutil.ReadDir(*dir)
if err != nil {
log.Fatalf("fail to read dir %v: %v", *dir, err)
}
for _, file := range files {
if file.IsDir() {
continue
}
fileNames = append(fileNames, filepath.Join(*dir, file.Name()))
}
var wg sync.WaitGroup
for x := 0; x < *concurrency; x++ {
wg.Add(1)
client := &http.Client{}
go func() {
defer wg.Done()
rand.Shuffle(len(fileNames), func(i, j int) {
fileNames[i], fileNames[j] = fileNames[j], fileNames[i]
})
for t := 0; t < *times; t++ {
for _, filename := range fileNames {
if size, err := uploadFileToFiler(client, filename, *destination); err == nil {
statsChan <- stat{
size: size,
}
}
}
}
}()
}
go func() {
ticker := time.NewTicker(500 * time.Millisecond)
var lastTime time.Time
var counter, size int64
for {
select {
case stat := <-statsChan:
size += stat.size
counter++
case x := <-ticker.C:
if !lastTime.IsZero() {
elapsed := x.Sub(lastTime).Seconds()
fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n",
float64(counter)/elapsed,
float64(size)/1024/1024/elapsed)
}
lastTime = x
size = 0
counter = 0
}
}
}()
wg.Wait()
}
func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) {
file, err := os.Open(filename)
if err != nil {
panic(err)
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
return 0, fmt.Errorf("stat %v: %v", filename, err)
}
if !strings.HasSuffix(destination, "/") {
destination = destination + "/"
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile("file", file.Name())
if err != nil {
return 0, fmt.Errorf("fail to create form %v: %v", file.Name(), err)
}
_, err = io.Copy(part, file)
if err != nil {
return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err)
}
err = writer.Close()
if err != nil {
return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err)
}
uri := destination + file.Name()
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return 0, fmt.Errorf("create request %s: %v", uri, err)
}
request.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := client.Do(request)
if err != nil {
return 0, fmt.Errorf("http POST %s: %v", uri, err)
} else {
body := &bytes.Buffer{}
_, err := body.ReadFrom(resp.Body)
if err != nil {
return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
}
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
return fi.Size(), nil
}


@ -0,0 +1,70 @@
package main
import (
"flag"
"log"
"time"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
util2 "github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
"golang.org/x/tools/godoc/util"
)
var (
master = flag.String("master", "localhost:9333", "master server host and port")
volumeId = flag.Int("volumeId", -1, "a volume id")
rewindDuration = flag.Duration("rewind", -1, "rewind back in time. -1 means from the first entry. 0 means from now.")
timeoutSeconds = flag.Int("timeoutSeconds", 0, "disconnect if no activity after these seconds")
showTextFile = flag.Bool("showTextFile", false, "display textual file content")
)
func main() {
flag.Parse()
util2.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
vid := needle.VolumeId(*volumeId)
var sinceTimeNs int64
if *rewindDuration == 0 {
sinceTimeNs = time.Now().UnixNano()
} else if *rewindDuration == -1 {
sinceTimeNs = 0
} else if *rewindDuration > 0 {
sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
}
err := operation.TailVolume(*master, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
if n.Size == 0 {
println("-", n.String())
return nil
} else {
println("+", n.String())
}
if *showTextFile {
data := n.Data
if n.IsGzipped() {
if data, err = util2.UnGzipData(data); err != nil {
return err
}
}
if util.IsText(data) {
println(string(data))
}
println("-", n.String(), "compressed", n.IsGzipped(), "original size", len(data))
}
return nil
})
if err != nil {
log.Printf("Error VolumeTailSender volume %d: %v", vid, err)
}
}

util/gostd Executable file

@ -0,0 +1,98 @@
#!/usr/bin/env bash
############################ GLOBAL VARIABLES
regex=' '
branch="master"
max_length=150
REGEX_SUFFIX_GO=".+\.go$"
############################ FUNCTIONS
msg() {
printf '%b' "$1" >&2
}
die() {
msg "\33[31m[✘]\33[0m ${1}${2}"
exit 1
}
succ() {
msg "\33[34m[√]\33[0m ${1}${2}"
}
gostd() {
local branch=$1
local reg4exclude=$2
local max_length=$3
for file in `git diff $branch --name-only`
do
if ! [[ $file =~ $REGEX_SUFFIX_GO ]] || [[ $file =~ $reg4exclude ]]; then
continue
fi
error=`go fmt $file 2>&1`
if ! [ $? -eq 0 ]; then
die "go fmt $file:" "$error"
fi
succ "$file\n"
grep -n -E --color=always ".{$max_length}" $file | awk '{ printf ("%4s %s\n", "", $0) }'
done
}
get_options() {
while getopts "b:e:hl:" opts
do
case $opts in
b)
branch=$OPTARG
;;
e)
regex=$OPTARG
;;
h)
usage
exit 0
;;
l)
max_length=$OPTARG
;;
\?)
usage
exit 1
;;
esac
done
}
usage () {
cat << _EOC_
Usage:
gostd [options]
Options:
-b <branch/commit> Specify the git diff branch or commit.
(default: master)
-e <regex> Regex for excluding file or directory.
-h Print this usage.
-l <length> Highlight lines that exceed the length limit.
(default: 150)
Examples:
gostd
gostd -b master -l 100
gostd -b 59d532a -e weed/pb -l 100
_EOC_
}
main() {
get_options "$@"
gostd "$branch" "$regex" "$max_length"
}
############################ MAIN()
main "$@"


@ -3,6 +3,11 @@ package command
import ( import (
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
) )
@ -30,26 +35,30 @@ var cmdBackup = &Command{
UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333", UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333",
Short: "incrementally backup a volume to local folder", Short: "incrementally backup a volume to local folder",
Long: `Incrementally backup volume data. Long: `Incrementally backup volume data.
It is expected that you use this inside a script, to loop through It is expected that you use this inside a script, to loop through
all possible volume ids that need to be backed up to a local folder. all possible volume ids that need to be backed up to a local folder.
The volume id does not need to exist locally or even remotely. The volume id does not need to exist locally or even remotely.
This will help to backup future new volumes. This will help to backup future new volumes.
Usually backing up is just copying the .dat (and .idx) files. Usually backing up is just copying the .dat (and .idx) files.
But it's tricky to incrementally copy the differences. But it's tricky to incrementally copy the differences.
The complexity comes when there are multiple additions, deletions and compactions. The complexity comes when there are multiple additions, deletions and compactions.
This tool will handle them correctly and efficiently, avoiding unnecessary data transporation. This tool will handle them correctly and efficiently, avoiding unnecessary data transportation.
`, `,
} }
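The tricky cases called out in the description reduce to two checks before pulling the remote tail, as the implementation below shows. A standalone sketch of that decision order, with plain integers standing in for the volume and sync-status types (the names here are illustrative, not the actual API):

package main

import "fmt"

// decideBackupStep mirrors the incremental-backup logic: first reconcile
// compaction, then reconcile length, then copy only the missing tail.
func decideBackupStep(localCompactRev, remoteCompactRev uint16, localDatSize, remoteTailOffset uint64) string {
	if localCompactRev < remoteCompactRev {
		return "compact the local copy and adopt the remote compaction revision"
	}
	if localDatSize > remoteTailOffset {
		return "local file is longer than the remote tail: destroy and recreate an empty volume"
	}
	return "append the remote tail starting from the local size"
}

func main() {
	fmt.Println(decideBackupStep(1, 2, 100, 200))
}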
func runBackup(cmd *Command, args []string) bool { func runBackup(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
if *s.volumeId == -1 { if *s.volumeId == -1 {
return false return false
} }
vid := storage.VolumeId(*s.volumeId) vid := needle.VolumeId(*s.volumeId)
// find volume location, replication, ttl info // find volume location, replication, ttl info
lookup, err := operation.Lookup(*s.master, vid.String()) lookup, err := operation.Lookup(*s.master, vid.String())
@ -59,12 +68,12 @@ func runBackup(cmd *Command, args []string) bool {
} }
volumeServer := lookup.Locations[0].Url volumeServer := lookup.Locations[0].Url
stats, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid)) stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
if err != nil { if err != nil {
fmt.Printf("Error get volume %d status: %v\n", vid, err) fmt.Printf("Error get volume %d status: %v\n", vid, err)
return true return true
} }
ttl, err := storage.ReadTTL(stats.Ttl) ttl, err := needle.ReadTTL(stats.Ttl)
if err != nil { if err != nil {
fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err) fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err)
return true return true
@ -81,7 +90,34 @@ func runBackup(cmd *Command, args []string) bool {
return true return true
} }
if err := v.Synchronize(volumeServer); err != nil { if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
if err = v.Compact(0, 0); err != nil {
fmt.Printf("Compact Volume before synchronizing %v\n", err)
return true
}
if err = v.CommitCompact(); err != nil {
fmt.Printf("Commit Compact before synchronizing %v\n", err)
return true
}
v.SuperBlock.CompactionRevision = uint16(stats.CompactRevision)
v.DataFile().WriteAt(v.SuperBlock.Bytes(), 0)
}
datSize, _, _ := v.FileStat()
if datSize > stats.TailOffset {
// remove the old data
v.Destroy()
// recreate an empty volume
v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0)
if err != nil {
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
return true
}
}
defer v.Close()
if err := v.IncrementalBackup(volumeServer, grpcDialOption); err != nil {
fmt.Printf("Error synchronizing volume %d: %v\n", vid, err) fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
return true return true
} }


@ -15,6 +15,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/spf13/viper"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/security"
@ -33,15 +36,17 @@ type BenchmarkOptions struct {
read *bool read *bool
sequentialRead *bool sequentialRead *bool
collection *string collection *string
replication *string
cpuprofile *string cpuprofile *string
maxCpu *int maxCpu *int
secretKey *string grpcDialOption grpc.DialOption
masterClient *wdclient.MasterClient
} }
var ( var (
b BenchmarkOptions b BenchmarkOptions
sharedBytes []byte sharedBytes []byte
masterClient *wdclient.MasterClient isSecure bool
) )
func init() { func init() {
@ -57,9 +62,9 @@ func init() {
b.read = cmdBenchmark.Flag.Bool("read", true, "enable read") b.read = cmdBenchmark.Flag.Bool("read", true, "enable read")
b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file") b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection") b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type")
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
b.secretKey = cmdBenchmark.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
sharedBytes = make([]byte, 1024) sharedBytes = make([]byte, 1024)
} }
@ -102,6 +107,10 @@ var (
) )
func runBenchmark(cmd *Command, args []string) bool { func runBenchmark(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
if *b.maxCpu < 1 { if *b.maxCpu < 1 {
*b.maxCpu = runtime.NumCPU() *b.maxCpu = runtime.NumCPU()
@ -116,9 +125,9 @@ func runBenchmark(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
masterClient = wdclient.NewMasterClient(context.Background(), "benchmark", strings.Split(*b.masters, ",")) b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ","))
go masterClient.KeepConnectedToMaster() go b.masterClient.KeepConnectedToMaster()
masterClient.WaitUntilConnected() b.masterClient.WaitUntilConnected()
if *b.write { if *b.write {
benchWrite() benchWrite()
@ -188,7 +197,6 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
defer wait.Done() defer wait.Done()
delayedDeleteChan := make(chan *delayedFile, 100) delayedDeleteChan := make(chan *delayedFile, 100)
var waitForDeletions sync.WaitGroup var waitForDeletions sync.WaitGroup
secret := security.Secret(*b.secretKey)
for i := 0; i < 7; i++ { for i := 0; i < 7; i++ {
waitForDeletions.Add(1) waitForDeletions.Add(1)
@ -198,8 +206,11 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
if df.enterTime.After(time.Now()) { if df.enterTime.After(time.Now()) {
time.Sleep(df.enterTime.Sub(time.Now())) time.Sleep(df.enterTime.Sub(time.Now()))
} }
if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid, var jwtAuthorization security.EncodedJwt
security.GenJwt(secret, df.fp.Fid)); e == nil { if isSecure {
jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), df.fp.Fid)
}
if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil {
s.completed++ s.completed++
} else { } else {
s.failed++ s.failed++
@ -219,12 +230,16 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
MimeType: "image/bench", // prevent gzip benchmark content MimeType: "image/bench", // prevent gzip benchmark content
} }
ar := &operation.VolumeAssignRequest{ ar := &operation.VolumeAssignRequest{
Count: 1, Count: 1,
Collection: *b.collection, Collection: *b.collection,
Replication: *b.replication,
} }
if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil { if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
if _, err := fp.Upload(0, masterClient.GetMaster(), secret); err == nil { if !isSecure && assignResult.Auth != "" {
isSecure = true
}
if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil {
if random.Intn(100) < *b.deletePercentage { if random.Intn(100) < *b.deletePercentage {
s.total++ s.total++
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
@ -264,7 +279,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
fmt.Printf("reading file %s\n", fid) fmt.Printf("reading file %s\n", fid)
} }
start := time.Now() start := time.Now()
url, err := masterClient.LookupFileId(fid) url, err := b.masterClient.LookupFileId(fid)
if err != nil { if err != nil {
s.failed++ s.failed++
println("!!!! ", fid, " location not found!!!!!") println("!!!! ", fid, " location not found!!!!!")


@ -13,7 +13,6 @@ var Commands = []*Command{
cmdCompact, cmdCompact,
cmdCopy, cmdCopy,
cmdFix, cmdFix,
cmdFilerExport,
cmdFilerReplicate, cmdFilerReplicate,
cmdServer, cmdServer,
cmdMaster, cmdMaster,
@ -27,6 +26,7 @@ var Commands = []*Command{
cmdVolume, cmdVolume,
cmdExport, cmdExport,
cmdMount, cmdMount,
cmdWebDav,
} }
type Command struct { type Command struct {


@ -3,6 +3,7 @@ package command
import ( import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
) )
func init() { func init() {
@ -35,14 +36,14 @@ func runCompact(cmd *Command, args []string) bool {
preallocate := *compactVolumePreallocate * (1 << 20) preallocate := *compactVolumePreallocate * (1 << 20)
vid := storage.VolumeId(*compactVolumeId) vid := needle.VolumeId(*compactVolumeId)
v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid, v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid,
storage.NeedleMapInMemory, nil, nil, preallocate) storage.NeedleMapInMemory, nil, nil, preallocate)
if err != nil { if err != nil {
glog.Fatalf("Load Volume [ERROR] %s\n", err) glog.Fatalf("Load Volume [ERROR] %s\n", err)
} }
if *compactMethod == 0 { if *compactMethod == 0 {
if err = v.Compact(preallocate); err != nil { if err = v.Compact(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err) glog.Fatalf("Compact Volume [ERROR] %s\n", err)
} }
} else { } else {


@ -12,10 +12,12 @@ import (
"text/template" "text/template"
"time" "time"
"io"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
) )
const ( const (
@ -66,10 +68,10 @@ var (
localLocation, _ = time.LoadLocation("Local") localLocation, _ = time.LoadLocation("Local")
) )
func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Version, deleted bool) { func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool) {
key := storage.NewFileIdFromNeedle(vid, n).String() key := needle.NewFileIdFromNeedle(vid, n).String()
size := n.DataSize size := n.DataSize
if version == storage.Version1 { if version == needle.Version1 {
size = n.Size size = n.Size
} }
fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n", fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n",
@ -85,10 +87,10 @@ func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Versio
} }
type VolumeFileScanner4Export struct { type VolumeFileScanner4Export struct {
version storage.Version version needle.Version
counter int counter int
needleMap *storage.NeedleMap needleMap *storage.NeedleMap
vid storage.VolumeId vid needle.VolumeId
} }
func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock storage.SuperBlock) error { func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock storage.SuperBlock) error {
@ -100,14 +102,14 @@ func (scanner *VolumeFileScanner4Export) ReadNeedleBody() bool {
return true return true
} }
func (scanner *VolumeFileScanner4Export) VisitNeedle(n *storage.Needle, offset int64) error { func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset int64) error {
needleMap := scanner.needleMap needleMap := scanner.needleMap
vid := scanner.vid vid := scanner.vid
nv, ok := needleMap.Get(n.Id) nv, ok := needleMap.Get(n.Id)
glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v", glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv) n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv)
if ok && nv.Size > 0 && int64(nv.Offset)*types.NeedlePaddingSize == offset { if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) { if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix) n.LastModified, newerThanUnix)
@ -189,7 +191,7 @@ func runExport(cmd *Command, args []string) bool {
if *export.collection != "" { if *export.collection != "" {
fileName = *export.collection + "_" + fileName fileName = *export.collection + "_" + fileName
} }
vid := storage.VolumeId(*export.volumeId) vid := needle.VolumeId(*export.volumeId)
indexFile, err := os.OpenFile(path.Join(*export.dir, fileName+".idx"), os.O_RDONLY, 0644) indexFile, err := os.OpenFile(path.Join(*export.dir, fileName+".idx"), os.O_RDONLY, 0644)
if err != nil { if err != nil {
glog.Fatalf("Create Volume Index [ERROR] %s\n", err) glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
@ -225,8 +227,8 @@ type nameParams struct {
Ext string Ext string
} }
func writeFile(vid storage.VolumeId, n *storage.Needle) (err error) { func writeFile(vid needle.VolumeId, n *needle.Needle) (err error) {
key := storage.NewFileIdFromNeedle(vid, n).String() key := needle.NewFileIdFromNeedle(vid, n).String()
fileNameTemplateBuffer.Reset() fileNameTemplateBuffer.Reset()
if err = fileNameTemplate.Execute(fileNameTemplateBuffer, if err = fileNameTemplate.Execute(fileNameTemplateBuffer,
nameParams{ nameParams{


@ -6,6 +6,9 @@ import (
"strings" "strings"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/server"
@ -21,17 +24,16 @@ type FilerOptions struct {
masters *string masters *string
ip *string ip *string
port *int port *int
grpcPort *int
publicPort *int publicPort *int
collection *string collection *string
defaultReplicaPlacement *string defaultReplicaPlacement *string
redirectOnRead *bool redirectOnRead *bool
disableDirListing *bool disableDirListing *bool
maxMB *int maxMB *int
secretKey *string
dirListingLimit *int dirListingLimit *int
dataCenter *string dataCenter *string
enableNotification *bool enableNotification *bool
disableHttp *bool
// default leveldb directory, used in "weed server" mode // default leveldb directory, used in "weed server" mode
defaultLevelDbDirectory *string defaultLevelDbDirectory *string
@ -43,15 +45,14 @@ func init() {
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection") f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address") f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address")
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
f.grpcPort = cmdFiler.Flag.Int("port.grpc", 0, "filer grpc server listen port, default to http port + 10000") f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
f.publicPort = cmdFiler.Flag.Int("port.public", 0, "port opened to public")
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified") f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit")
f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size")
f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center")
f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
} }
var cmdFiler = &Command{ var cmdFiler = &Command{
@ -70,13 +71,15 @@ var cmdFiler = &Command{
The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
The example filer.toml configuration file can be generated by "weed scaffold filer" The example filer.toml configuration file can be generated by "weed scaffold -config=filer"
`, `,
} }
func runFiler(cmd *Command, args []string) bool { func runFiler(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
f.startFiler() f.startFiler()
return true return true
@ -91,22 +94,23 @@ func (fo *FilerOptions) startFiler() {
publicVolumeMux = http.NewServeMux() publicVolumeMux = http.NewServeMux()
} }
defaultLevelDbDirectory := "./filerdb" defaultLevelDbDirectory := "./filerldb2"
if fo.defaultLevelDbDirectory != nil { if fo.defaultLevelDbDirectory != nil {
defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerdb" defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerldb2"
} }
fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{ fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
Masters: strings.Split(*f.masters, ","), Masters: strings.Split(*fo.masters, ","),
Collection: *fo.collection, Collection: *fo.collection,
DefaultReplication: *fo.defaultReplicaPlacement, DefaultReplication: *fo.defaultReplicaPlacement,
RedirectOnRead: *fo.redirectOnRead, RedirectOnRead: *fo.redirectOnRead,
DisableDirListing: *fo.disableDirListing, DisableDirListing: *fo.disableDirListing,
MaxMB: *fo.maxMB, MaxMB: *fo.maxMB,
SecretKey: *fo.secretKey,
DirListingLimit: *fo.dirListingLimit, DirListingLimit: *fo.dirListingLimit,
DataCenter: *fo.dataCenter, DataCenter: *fo.dataCenter,
DefaultLevelDbDir: defaultLevelDbDirectory, DefaultLevelDbDir: defaultLevelDbDirectory,
DisableHttp: *fo.disableHttp,
Port: *fo.port,
}) })
if nfs_err != nil { if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err) glog.Fatalf("Filer startup error: %v", nfs_err)
@ -128,7 +132,7 @@ func (fo *FilerOptions) startFiler() {
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port) glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port)
filerListener, e := util.NewListener( filerListener, e := util.NewListener(
":"+strconv.Itoa(*fo.port), *fo.ip+":"+strconv.Itoa(*fo.port),
time.Duration(10)*time.Second, time.Duration(10)*time.Second,
) )
if e != nil { if e != nil {
@ -136,15 +140,12 @@ func (fo *FilerOptions) startFiler() {
} }
// starting grpc server // starting grpc server
grpcPort := *fo.grpcPort grpcPort := *fo.port + 10000
if grpcPort == 0 {
grpcPort = *fo.port + 10000
}
grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0) grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0)
if err != nil { if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
} }
grpcS := util.NewGrpcServer() grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer"))
filer_pb.RegisterSeaweedFilerServer(grpcS, fs) filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
reflection.Register(grpcS) reflection.Register(grpcS)
go grpcS.Serve(grpcL) go grpcS.Serve(grpcL)
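
Editor's note: the filer hunk above replaces the plain util.NewGrpcServer() with one that takes TLS credentials from the [grpc] section of security.toml, and drops the separate -port.grpc flag in favor of the fixed http-port+10000 convention. A minimal sketch of that TLS wiring using stock grpc/credentials calls instead of SeaweedFS's security.LoadServerTLS helper (certificate paths are hypothetical):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// newGrpcServer returns a TLS-secured server when a cert/key pair is
// configured, and a plaintext one otherwise -- the same opt-in shape as
// util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer")).
func newGrpcServer(certFile, keyFile string) (*grpc.Server, error) {
	if certFile == "" || keyFile == "" {
		return grpc.NewServer(), nil // no TLS configured in security.toml
	}
	creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return grpc.NewServer(grpc.Creds(creds)), nil
}

func main() {
	grpcS, err := newGrpcServer("filer.crt", "filer.key") // hypothetical paths
	if err != nil {
		log.Fatalf("load TLS: %v", err)
	}
	grpcL, err := net.Listen("tcp", ":18888") // filer http port 8888 + 10000
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	log.Fatal(grpcS.Serve(grpcL))
}

Falling back to a plaintext server when no certificate is present keeps the secured path opt-in, which matches security.toml being an optional file.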

View file

@ -1,52 +1,56 @@
package command package command
import ( import (
"context"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"net/http"
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"sync"
"time"
"context"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"io" "github.com/chrislusf/seaweedfs/weed/wdclient"
"net/http" "github.com/spf13/viper"
"strconv" "google.golang.org/grpc"
"time"
) )
var ( var (
copy CopyOptions copy CopyOptions
waitGroup sync.WaitGroup
) )
type CopyOptions struct { type CopyOptions struct {
filerGrpcPort *int include *string
master *string replication *string
include *string collection *string
replication *string ttl *string
collection *string maxMB *int
ttl *string masterClient *wdclient.MasterClient
maxMB *int concurrency *int
secretKey *string compressionLevel *int
grpcDialOption grpc.DialOption
secret security.Secret masters []string
} }
func init() { func init() {
cmdCopy.Run = runCopy // break init cycle cmdCopy.Run = runCopy // break init cycle
cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information") cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information")
copy.master = cmdCopy.Flag.String("master", "localhost:9333", "SeaweedFS master location")
copy.include = cmdCopy.Flag.String("include", "", "patterns of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir") copy.include = cmdCopy.Flag.String("include", "", "patterns of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
copy.replication = cmdCopy.Flag.String("replication", "", "replication type") copy.replication = cmdCopy.Flag.String("replication", "", "replication type")
copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name") copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name")
copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
copy.maxMB = cmdCopy.Flag.Int("maxMB", 0, "split files larger than the limit") copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit")
copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000") copy.concurrency = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines")
copy.secretKey = cmdCopy.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9")
} }
var cmdCopy = &Command{ var cmdCopy = &Command{
@ -66,7 +70,9 @@ var cmdCopy = &Command{
} }
func runCopy(cmd *Command, args []string) bool { func runCopy(cmd *Command, args []string) bool {
copy.secret = security.Secret(*copy.secretKey)
util.LoadConfiguration("security", false)
if len(args) <= 1 { if len(args) <= 1 {
return false return false
} }
@ -96,67 +102,170 @@ func runCopy(cmd *Command, args []string) bool {
} }
filerGrpcPort := filerPort + 10000 filerGrpcPort := filerPort + 10000
if *copy.filerGrpcPort != 0 {
filerGrpcPort = uint64(*copy.filerGrpcPort)
}
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
for _, fileOrDir := range fileOrDirs { ctx := context.Background()
if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, urlPath) {
return false masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress)
} if err != nil {
fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
return false
} }
if *copy.collection == "" {
*copy.collection = collection
}
if *copy.replication == "" {
*copy.replication = replication
}
if *copy.maxMB == 0 {
*copy.maxMB = int(maxMB)
}
copy.masters = masters
copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters)
go copy.masterClient.KeepConnectedToMaster()
copy.masterClient.WaitUntilConnected()
if *cmdCopy.IsDebug {
util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof")
}
fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrency)
go func() {
defer close(fileCopyTaskChan)
for _, fileOrDir := range fileOrDirs {
if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil {
fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err)
break
}
}
}()
for i := 0; i < *copy.concurrency; i++ {
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
worker := FileCopyWorker{
options: &copy,
filerHost: filerUrl.Host,
filerGrpcAddress: filerGrpcAddress,
}
if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil {
fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
return
}
}()
}
waitGroup.Wait()
return true return true
} }
func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path string) bool { func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) {
f, err := os.Open(fileOrDir) err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
if err != nil { resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err) if err != nil {
return false return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
} }
defer f.Close() masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb
return nil
})
return
}
fi, err := f.Stat() func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan FileCopyTask) error {
fi, err := os.Stat(fileOrDir)
if err != nil { if err != nil {
fmt.Printf("Failed to get stat for file %s: %v\n", fileOrDir, err) fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err)
return false return nil
} }
mode := fi.Mode() mode := fi.Mode()
if mode.IsDir() { if mode.IsDir() {
files, _ := ioutil.ReadDir(fileOrDir) files, _ := ioutil.ReadDir(fileOrDir)
for _, subFileOrDir := range files { for _, subFileOrDir := range files {
if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, path+fi.Name()+"/") { if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
return false return err
} }
} }
return true return nil
} }
uid, gid := util.GetFileUidGid(fi)
fileCopyTaskChan <- FileCopyTask{
sourceLocation: fileOrDir,
destinationUrlPath: destPath,
fileSize: fi.Size(),
fileMode: fi.Mode(),
uid: uid,
gid: gid,
}
return nil
}
type FileCopyWorker struct {
options *CopyOptions
filerHost string
filerGrpcAddress string
}
func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error {
for task := range fileCopyTaskChan {
if err := worker.doEachCopy(ctx, task); err != nil {
return err
}
}
return nil
}
type FileCopyTask struct {
sourceLocation string
destinationUrlPath string
fileSize int64
fileMode os.FileMode
uid uint32
gid uint32
}
func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error {
f, err := os.Open(task.sourceLocation)
if err != nil {
fmt.Printf("Failed to open file %s: %v\n", task.sourceLocation, err)
if _, ok := err.(*os.PathError); ok {
fmt.Printf("skipping %s\n", task.sourceLocation)
return nil
}
return err
}
defer f.Close()
// this is a regular file // this is a regular file
if *copy.include != "" { if *worker.options.include != "" {
if ok, _ := filepath.Match(*copy.include, filepath.Base(fileOrDir)); !ok { if ok, _ := filepath.Match(*worker.options.include, filepath.Base(task.sourceLocation)); !ok {
return true return nil
} }
} }
// find the chunk count // find the chunk count
chunkSize := int64(*copy.maxMB * 1024 * 1024) chunkSize := int64(*worker.options.maxMB * 1024 * 1024)
chunkCount := 1 chunkCount := 1
if chunkSize > 0 && fi.Size() > chunkSize { if chunkSize > 0 && task.fileSize > chunkSize {
chunkCount = int(fi.Size()/chunkSize) + 1 chunkCount = int(task.fileSize/chunkSize) + 1
} }
if chunkCount == 1 { if chunkCount == 1 {
return uploadFileAsOne(filerAddress, filerGrpcAddress, path, f, fi) return worker.uploadFileAsOne(ctx, task, f)
} }
return uploadFileInChunks(filerAddress, filerGrpcAddress, path, f, fi, chunkCount, chunkSize) return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize)
} }
func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo) bool { func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error {
// upload the file content // upload the file content
fileName := filepath.Base(f.Name()) fileName := filepath.Base(f.Name())
@ -164,29 +273,27 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f
var chunks []*filer_pb.FileChunk var chunks []*filer_pb.FileChunk
if fi.Size() > 0 { if task.fileSize > 0 {
// assign a volume // assign a volume
assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{ assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
Count: 1, Count: 1,
Replication: *copy.replication, Replication: *worker.options.replication,
Collection: *copy.collection, Collection: *worker.options.collection,
Ttl: *copy.ttl, Ttl: *worker.options.ttl,
}) })
if err != nil { if err != nil {
fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
} }
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
uploadResult, err := operation.Upload(targetUrl, fileName, f, false, mimeType, nil, "") uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel)
if err != nil { if err != nil {
fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return false
} }
if uploadResult.Error != "" { if uploadResult.Error != "" {
fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
return false
} }
fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
@ -198,43 +305,42 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f
ETag: uploadResult.ETag, ETag: uploadResult.ETag,
}) })
fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
} }
if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{ request := &filer_pb.CreateEntryRequest{
Directory: urlFolder, Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{ Entry: &filer_pb.Entry{
Name: fileName, Name: fileName,
Attributes: &filer_pb.FuseAttributes{ Attributes: &filer_pb.FuseAttributes{
Crtime: time.Now().Unix(), Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(), Mtime: time.Now().Unix(),
Gid: uint32(os.Getgid()), Gid: task.gid,
Uid: uint32(os.Getuid()), Uid: task.uid,
FileSize: uint64(fi.Size()), FileSize: uint64(task.fileSize),
FileMode: uint32(fi.Mode()), FileMode: uint32(task.fileMode),
Mime: mimeType, Mime: mimeType,
Replication: *copy.replication, Replication: *worker.options.replication,
Collection: *copy.collection, Collection: *worker.options.collection,
TtlSec: int32(util.ParseInt(*copy.ttl, 0)), TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
}, },
Chunks: chunks, Chunks: chunks,
}, },
} }
if _, err := client.CreateEntry(context.Background(), request); err != nil { if _, err := client.CreateEntry(ctx, request); err != nil {
return fmt.Errorf("update fh: %v", err) return fmt.Errorf("update fh: %v", err)
} }
return nil return nil
}); err != nil { }); err != nil {
fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
return false
} }
return true return nil
} }
func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error {
fileName := filepath.Base(f.Name()) fileName := filepath.Base(f.Name())
mimeType := detectMimeType(f) mimeType := detectMimeType(f)
@ -244,14 +350,14 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string,
for i := int64(0); i < int64(chunkCount); i++ { for i := int64(0); i < int64(chunkCount); i++ {
// assign a volume // assign a volume
assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{ assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{
Count: 1, Count: 1,
Replication: *copy.replication, Replication: *worker.options.replication,
Collection: *copy.collection, Collection: *worker.options.collection,
Ttl: *copy.ttl, Ttl: *worker.options.ttl,
}) })
if err != nil { if err != nil {
fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
} }
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
@ -259,14 +365,12 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string,
uploadResult, err := operation.Upload(targetUrl, uploadResult, err := operation.Upload(targetUrl,
fileName+"-"+strconv.FormatInt(i+1, 10), fileName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(f, chunkSize), io.LimitReader(f, chunkSize),
false, "application/octet-stream", nil, "") false, "application/octet-stream", nil, assignResult.Auth)
if err != nil { if err != nil {
fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return false
} }
if uploadResult.Error != "" { if uploadResult.Error != "" {
fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
return false
} }
chunks = append(chunks, &filer_pb.FileChunk{ chunks = append(chunks, &filer_pb.FileChunk{
FileId: assignResult.Fid, FileId: assignResult.Fid,
@ -278,39 +382,38 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string,
fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
} }
if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{ request := &filer_pb.CreateEntryRequest{
Directory: urlFolder, Directory: task.destinationUrlPath,
Entry: &filer_pb.Entry{ Entry: &filer_pb.Entry{
Name: fileName, Name: fileName,
Attributes: &filer_pb.FuseAttributes{ Attributes: &filer_pb.FuseAttributes{
Crtime: time.Now().Unix(), Crtime: time.Now().Unix(),
Mtime: time.Now().Unix(), Mtime: time.Now().Unix(),
Gid: uint32(os.Getgid()), Gid: task.gid,
Uid: uint32(os.Getuid()), Uid: task.uid,
FileSize: uint64(fi.Size()), FileSize: uint64(task.fileSize),
FileMode: uint32(fi.Mode()), FileMode: uint32(task.fileMode),
Mime: mimeType, Mime: mimeType,
Replication: *copy.replication, Replication: *worker.options.replication,
Collection: *copy.collection, Collection: *worker.options.collection,
TtlSec: int32(util.ParseInt(*copy.ttl, 0)), TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)),
}, },
Chunks: chunks, Chunks: chunks,
}, },
} }
if _, err := client.CreateEntry(context.Background(), request); err != nil { if _, err := client.CreateEntry(ctx, request); err != nil {
return fmt.Errorf("update fh: %v", err) return fmt.Errorf("update fh: %v", err)
} }
return nil return nil
}); err != nil { }); err != nil {
fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
return false
} }
fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName)
return true return nil
} }
func detectMimeType(f *os.File) string { func detectMimeType(f *os.File) string {
@ -329,15 +432,11 @@ func detectMimeType(f *os.File) string {
return mimeType return mimeType
} }
func withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error { func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
grpcConnection, err := util.GrpcDial(filerAddress) return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error {
if err != nil { client := filer_pb.NewSeaweedFilerClient(clientConn)
return fmt.Errorf("fail to dial %s: %v", filerAddress, err) return fn(client)
} }, filerAddress, grpcDialOption)
defer grpcConnection.Close()
client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client)
} }
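
Editor's note: the filer_copy rewrite above turns a recursive, sequential doEachCopy into a producer/consumer pipeline: genFileCopyTask walks the tree and feeds fileCopyTaskChan, and -c worker goroutines drain it under a shared WaitGroup. A stripped-down sketch of that shape, with a print standing in for the real chunked upload:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

type fileCopyTask struct {
	sourceLocation     string
	destinationUrlPath string
}

func main() {
	concurrency := 8 // mirrors the new -c flag's default
	tasks := make(chan fileCopyTask, concurrency)

	// producer: one goroutine walks the tree and feeds the channel
	go func() {
		defer close(tasks) // closing the channel lets the workers drain and exit
		filepath.Walk(".", func(p string, info os.FileInfo, err error) error {
			if err == nil && !info.IsDir() {
				tasks <- fileCopyTask{sourceLocation: p, destinationUrlPath: "/"}
			}
			return nil
		})
	}()

	// consumers: the workers copy concurrently under one WaitGroup
	var waitGroup sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		waitGroup.Add(1)
		go func() {
			defer waitGroup.Done()
			for task := range tasks {
				fmt.Printf("copy %s => %s\n", task.sourceLocation, task.destinationUrlPath)
			}
		}()
	}
	waitGroup.Wait()
}

Buffering the channel at the worker count keeps the walker slightly ahead of the uploads without holding the whole file list in memory.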

View file

@ -1,187 +0,0 @@
package command
import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/spf13/viper"
)
func init() {
cmdFilerExport.Run = runFilerExport // break init cycle
}
var cmdFilerExport = &Command{
UsageLine: "filer.export -sourceStore=mysql -targetStore=cassandra",
Short: "export meta data in filer store",
Long: `Iterate the file tree and export all metadata out
Both source and target store:
* should be a store name already specified in filer.toml
* do not need to be enabled state
If target store is empty, only the directory tree will be listed.
If target store is "notification", the list of entries will be sent to notification.
This is usually used to bootstrap filer replication to a remote system.
`,
}
var (
// filerExportOutputFile = cmdFilerExport.Flag.String("output", "", "the output file. If empty, only list out the directory tree")
filerExportSourceStore = cmdFilerExport.Flag.String("sourceStore", "", "the source store name in filer.toml, default to currently enabled store")
filerExportTargetStore = cmdFilerExport.Flag.String("targetStore", "", "the target store name in filer.toml, or \"notification\" to export all files to message queue")
dir = cmdFilerExport.Flag.String("dir", "/", "only process files under this directory")
dirListLimit = cmdFilerExport.Flag.Int("dirListLimit", 100000, "limit directory list size")
dryRun = cmdFilerExport.Flag.Bool("dryRun", false, "not actually moving data")
verboseFilerExport = cmdFilerExport.Flag.Bool("v", false, "verbose entry details")
)
type statistics struct {
directoryCount int
fileCount int
}
func runFilerExport(cmd *Command, args []string) bool {
weed_server.LoadConfiguration("filer", true)
config := viper.GetViper()
var sourceStore, targetStore filer2.FilerStore
for _, store := range filer2.Stores {
if store.GetName() == *filerExportSourceStore || *filerExportSourceStore == "" && config.GetBool(store.GetName()+".enabled") {
viperSub := config.Sub(store.GetName())
if err := store.Initialize(viperSub); err != nil {
glog.Fatalf("Failed to initialize source store for %s: %+v",
store.GetName(), err)
} else {
sourceStore = store
}
break
}
}
for _, store := range filer2.Stores {
if store.GetName() == *filerExportTargetStore {
viperSub := config.Sub(store.GetName())
if err := store.Initialize(viperSub); err != nil {
glog.Fatalf("Failed to initialize target store for %s: %+v",
store.GetName(), err)
} else {
targetStore = store
}
break
}
}
if sourceStore == nil {
glog.Errorf("Failed to find source store %s", *filerExportSourceStore)
println("existing data sources are:")
for _, store := range filer2.Stores {
println(" " + store.GetName())
}
return false
}
if targetStore == nil && *filerExportTargetStore != "" && *filerExportTargetStore != "notification" {
glog.Errorf("Failed to find target store %s", *filerExportTargetStore)
println("existing data sources are:")
for _, store := range filer2.Stores {
println(" " + store.GetName())
}
return false
}
stat := statistics{}
var fn func(level int, entry *filer2.Entry) error
if *filerExportTargetStore == "notification" {
weed_server.LoadConfiguration("notification", false)
v := viper.GetViper()
notification.LoadConfiguration(v.Sub("notification"))
fn = func(level int, entry *filer2.Entry) error {
printout(level, entry)
if *dryRun {
return nil
}
return notification.Queue.SendMessage(
string(entry.FullPath),
&filer_pb.EventNotification{
NewEntry: entry.ToProtoEntry(),
},
)
}
} else if targetStore == nil {
fn = printout
} else {
fn = func(level int, entry *filer2.Entry) error {
printout(level, entry)
if *dryRun {
return nil
}
return targetStore.InsertEntry(entry)
}
}
doTraverse(&stat, sourceStore, filer2.FullPath(*dir), 0, fn)
glog.Infof("processed %d directories, %d files", stat.directoryCount, stat.fileCount)
return true
}
func doTraverse(stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) {
limit := *dirListLimit
lastEntryName := ""
for {
entries, err := filerStore.ListDirectoryEntries(parentPath, lastEntryName, false, limit)
if err != nil {
break
}
for _, entry := range entries {
if fnErr := fn(level, entry); fnErr != nil {
glog.Errorf("failed to process entry: %s", entry.FullPath)
}
if entry.IsDirectory() {
stat.directoryCount++
doTraverse(stat, filerStore, entry.FullPath, level+1, fn)
} else {
stat.fileCount++
}
}
if len(entries) < limit {
break
}
}
}
func printout(level int, entry *filer2.Entry) error {
for i := 0; i < level; i++ {
if i == level-1 {
print("+-")
} else {
print("| ")
}
}
print(entry.FullPath.Name())
if *verboseFilerExport {
for _, chunk := range entry.Chunks {
print("[")
print(chunk.FileId)
print(",")
print(chunk.Offset)
print(",")
print(chunk.Size)
print(")")
}
}
println()
return nil
}
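
Editor's note: the deleted filer.export command paged through each directory with a (lastEntryName, limit) cursor; note that the removed doTraverse never advanced lastEntryName, so a directory holding exactly limit entries would appear to re-read its first page forever. A self-contained sketch of the cursor pattern with the advance in place (the in-memory list stands in for a filer store):

package main

import "fmt"

// list simulates a paged store query: return up to limit names after startFrom.
func list(all []string, startFrom string, limit int) (page []string) {
	started := startFrom == ""
	for _, name := range all {
		if started {
			page = append(page, name)
			if len(page) == limit {
				break
			}
		} else if name == startFrom {
			started = true
		}
	}
	return
}

func main() {
	all := []string{"a", "b", "c", "d", "e"}
	lastEntryName, limit := "", 2
	for {
		entries := list(all, lastEntryName, limit)
		for _, e := range entries {
			fmt.Println(e)
			lastEntryName = e // advance the cursor; the deleted code left this at ""
		}
		if len(entries) < limit {
			break // a short page means the listing is exhausted
		}
	}
}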

View file

@ -1,6 +1,7 @@
package command package command
import ( import (
"context"
"strings" "strings"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -12,7 +13,7 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink"
"github.com/chrislusf/seaweedfs/weed/replication/sub" "github.com/chrislusf/seaweedfs/weed/replication/sub"
"github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
@ -28,15 +29,16 @@ var cmdFilerReplicate = &Command{
filer.replicate listens on filer notifications. If any file is updated, it will fetch the updated content, filer.replicate listens on filer notifications. If any file is updated, it will fetch the updated content,
and write to the other destination. and write to the other destination.
Run "weed scaffold -config replication" to generate a replication.toml file and customize the parameters. Run "weed scaffold -config=replication" to generate a replication.toml file and customize the parameters.
`, `,
} }
func runFilerReplicate(cmd *Command, args []string) bool { func runFilerReplicate(cmd *Command, args []string) bool {
weed_server.LoadConfiguration("replication", true) util.LoadConfiguration("security", false)
weed_server.LoadConfiguration("notification", true) util.LoadConfiguration("replication", true)
util.LoadConfiguration("notification", true)
config := viper.GetViper() config := viper.GetViper()
var notificationInput sub.NotificationInput var notificationInput sub.NotificationInput
@ -115,7 +117,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
} else { } else {
glog.V(1).Infof("modify: %s", key) glog.V(1).Infof("modify: %s", key)
} }
if err = replicator.Replicate(key, m); err != nil { if err = replicator.Replicate(context.Background(), key, m); err != nil {
glog.Errorf("replicate %s: %+v", key, err) glog.Errorf("replicate %s: %+v", key, err)
} else { } else {
glog.V(1).Infof("replicated %s", key) glog.V(1).Infof("replicated %s", key)
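
Editor's note: runFilerReplicate now loads three layered config files through util.LoadConfiguration, with only security.toml optional. A sketch of what such a loader plausibly looks like with plain viper calls; the helper below is an assumption for illustration, not SeaweedFS's exact code, and the search paths mirror the ones documented elsewhere in this commit:

package main

import (
	"log"

	"github.com/spf13/viper"
)

// loadConfiguration reads <name>.toml from the usual directories and merges
// it into the global viper state; only required files abort on failure.
func loadConfiguration(name string, required bool) {
	viper.SetConfigName(name)
	viper.AddConfigPath(".")
	viper.AddConfigPath("$HOME/.seaweedfs")
	viper.AddConfigPath("/etc/seaweedfs")
	if err := viper.MergeInConfig(); err != nil && required {
		log.Fatalf("reading %s.toml: %v", name, err)
	}
}

func main() {
	loadConfiguration("security", false)    // optional: TLS stays opt-in
	loadConfiguration("replication", true)  // required by filer.replicate
	loadConfiguration("notification", true) // required by filer.replicate
}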

View file

@ -7,6 +7,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
) )
@ -29,7 +30,7 @@ var (
) )
type VolumeFileScanner4Fix struct { type VolumeFileScanner4Fix struct {
version storage.Version version needle.Version
nm *storage.NeedleMap nm *storage.NeedleMap
} }
@ -42,14 +43,14 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
return false return false
} }
func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *storage.Needle, offset int64) error { func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64) error {
glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped())
if n.Size > 0 { if n.Size > 0 && n.Size != types.TombstoneFileSize {
pe := scanner.nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size) pe := scanner.nm.Put(n.Id, types.ToOffset(offset), n.Size)
glog.V(2).Infof("saved %d with error %v", n.Size, pe) glog.V(2).Infof("saved %d with error %v", n.Size, pe)
} else { } else {
glog.V(2).Infof("skipping deleted file ...") glog.V(2).Infof("skipping deleted file ...")
return scanner.nm.Delete(n.Id, types.Offset(offset/types.NeedlePaddingSize)) return scanner.nm.Delete(n.Id, types.ToOffset(offset))
} }
return nil return nil
} }
@ -74,7 +75,7 @@ func runFix(cmd *Command, args []string) bool {
nm := storage.NewBtreeNeedleMap(indexFile) nm := storage.NewBtreeNeedleMap(indexFile)
defer nm.Close() defer nm.Close()
vid := storage.VolumeId(*fixVolumeId) vid := needle.VolumeId(*fixVolumeId)
scanner := &VolumeFileScanner4Fix{ scanner := &VolumeFileScanner4Fix{
nm: nm, nm: nm,
} }
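
Editor's note: the fix command's scanner now treats types.TombstoneFileSize like a deletion when rebuilding the needle map, and converts raw offsets with types.ToOffset instead of dividing by NeedlePaddingSize inline. A toy version of the rebuild loop, with a locally defined sentinel standing in for the real types.TombstoneFileSize:

package main

import "fmt"

// TombstoneFileSize marks a deleted needle in the data file; the fix above
// treats it like size 0 so deleted entries drop out of the rebuilt index.
const TombstoneFileSize = 0xFFFFFFFF // hypothetical sentinel value

type record struct {
	id     uint64
	size   uint32
	offset int64
}

func main() {
	index := map[uint64]int64{}
	scanned := []record{{1, 120, 0}, {2, TombstoneFileSize, 128}, {1, TombstoneFileSize, 256}}
	for _, r := range scanned {
		if r.size > 0 && r.size != TombstoneFileSize {
			index[r.id] = r.offset // live entry: (re)index its latest offset
		} else {
			delete(index, r.id) // zero or tombstone size: remove from the index
		}
	}
	fmt.Println(index) // map[] -- both needles ended up deleted
}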

View file

@ -6,76 +6,98 @@ import (
"runtime" "runtime"
"strconv" "strconv"
"strings" "strings"
"time"
"github.com/chrislusf/raft/protobuf"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/spf13/viper"
"google.golang.org/grpc/reflection" "google.golang.org/grpc/reflection"
) )
var (
m MasterOptions
)
type MasterOptions struct {
port *int
ip *string
ipBind *string
metaFolder *string
peers *string
volumeSizeLimitMB *uint
volumePreallocate *bool
pulseSeconds *int
defaultReplication *string
garbageThreshold *float64
whiteList *string
disableHttp *bool
metricsAddress *string
metricsIntervalSec *int
}
func init() { func init() {
cmdMaster.Run = runMaster // break init cycle cmdMaster.Run = runMaster // break init cycle
m.port = cmdMaster.Flag.Int("port", 9333, "http listen port")
m.ip = cmdMaster.Flag.String("ip", "localhost", "master <ip>|<server> address")
m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094")
m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.")
m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address")
m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
} }
var cmdMaster = &Command{ var cmdMaster = &Command{
UsageLine: "master -port=9333", UsageLine: "master -port=9333",
Short: "start a master server", Short: "start a master server",
Long: `start a master server to provide volume=>location mapping service Long: `start a master server to provide volume=>location mapping service and sequence number of file ids
and sequence number of file ids
The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order.
The example security.toml configuration file can be generated by "weed scaffold -config=security"
`, `,
} }
var ( var (
mport = cmdMaster.Flag.Int("port", 9333, "http listen port") masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file")
mGrpcPort = cmdMaster.Flag.Int("port.grpc", 0, "grpc server listen port, default to http port + 10000") masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file")
masterIp = cmdMaster.Flag.String("ip", "localhost", "master <ip>|<server> address")
masterBindIp = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
masterPeers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094")
volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.")
mpulse = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
defaultReplicaPlacement = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.")
// mTimeout = cmdMaster.Flag.Int("idleTimeout", 30, "connection idle seconds")
mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
masterSecureKey = cmdMaster.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file")
masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file")
masterWhiteList []string masterWhiteList []string
) )
func runMaster(cmd *Command, args []string) bool { func runMaster(cmd *Command, args []string) bool {
if *mMaxCpu < 1 {
*mMaxCpu = runtime.NumCPU() util.LoadConfiguration("security", false)
} util.LoadConfiguration("master", false)
runtime.GOMAXPROCS(*mMaxCpu)
runtime.GOMAXPROCS(runtime.NumCPU())
util.SetupProfiling(*masterCpuProfile, *masterMemProfile) util.SetupProfiling(*masterCpuProfile, *masterMemProfile)
if err := util.TestFolderWritable(*metaFolder); err != nil { if err := util.TestFolderWritable(*m.metaFolder); err != nil {
glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *metaFolder, err) glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
} }
if *masterWhiteListOption != "" { if *m.whiteList != "" {
masterWhiteList = strings.Split(*masterWhiteListOption, ",") masterWhiteList = strings.Split(*m.whiteList, ",")
} }
if *volumeSizeLimitMB > 30*1000 { if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
glog.Fatalf("volumeSizeLimitMB should be smaller than 30000") glog.Fatalf("volumeSizeLimitMB should be smaller than 30000")
} }
r := mux.NewRouter() r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, *mport, *metaFolder, ms := weed_server.NewMasterServer(r, m.toMasterOption(masterWhiteList))
*volumeSizeLimitMB, *volumePreallocate,
*mpulse, *defaultReplicaPlacement, *garbageThreshold,
masterWhiteList, *masterSecureKey,
)
listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport) listeningAddress := *m.ipBind + ":" + strconv.Itoa(*m.port)
glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", listeningAddress) glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", listeningAddress)
@ -85,28 +107,29 @@ func runMaster(cmd *Command, args []string) bool {
} }
go func() { go func() {
time.Sleep(100 * time.Millisecond) // start raftServer
myMasterAddress, peers := checkPeers(*masterIp, *mport, *masterPeers) myMasterAddress, peers := checkPeers(*m.ip, *m.port, *m.peers)
raftServer := weed_server.NewRaftServer(r, peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"),
ms.SetRaftServer(raftServer) peers, myMasterAddress, *m.metaFolder, ms.Topo, *m.pulseSeconds)
}() if raftServer == nil {
glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *m.metaFolder)
go func() {
// starting grpc server
grpcPort := *mGrpcPort
if grpcPort == 0 {
grpcPort = *mport + 10000
} }
grpcL, err := util.NewListener(*masterBindIp+":"+strconv.Itoa(grpcPort), 0) ms.SetRaftServer(raftServer)
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
// starting grpc server
grpcPort := *m.port + 10000
grpcL, err := util.NewListener(*m.ipBind+":"+strconv.Itoa(grpcPort), 0)
if err != nil { if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
} }
// Create your protocol servers. // Create your protocol servers.
grpcS := util.NewGrpcServer() grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master"))
master_pb.RegisterSeaweedServer(grpcS, ms) master_pb.RegisterSeaweedServer(grpcS, ms)
protobuf.RegisterRaftServer(grpcS, raftServer)
reflection.Register(grpcS) reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterBindIp, grpcPort) glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *m.ipBind, grpcPort)
grpcS.Serve(grpcL) grpcS.Serve(grpcL)
}() }()
@ -142,3 +165,19 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
} }
return return
} }
func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
return &weed_server.MasterOption{
Port: *m.port,
MetaFolder: *m.metaFolder,
VolumeSizeLimitMB: *m.volumeSizeLimitMB,
VolumePreallocate: *m.volumePreallocate,
PulseSeconds: *m.pulseSeconds,
DefaultReplicaPlacement: *m.defaultReplication,
GarbageThreshold: *m.garbageThreshold,
WhiteList: whiteList,
DisableHttp: *m.disableHttp,
MetricsAddress: *m.metricsAddress,
MetricsIntervalSec: *m.metricsIntervalSec,
}
}
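
Editor's note: master.go replaces a dozen package-level flag variables with a MasterOptions struct plus a toMasterOption conversion, so weed_server.NewMasterServer receives one value object instead of scattered globals. The same pattern in miniature, using only the standard flag package:

package main

import (
	"flag"
	"fmt"
)

// masterOptions holds flag pointers registered at init time.
type masterOptions struct {
	port       *int
	metaFolder *string
}

// masterOption is the plain value struct the server actually receives.
type masterOption struct {
	Port       int
	MetaFolder string
}

func (m *masterOptions) toMasterOption() *masterOption {
	return &masterOption{Port: *m.port, MetaFolder: *m.metaFolder}
}

func main() {
	var m masterOptions
	m.port = flag.Int("port", 9333, "http listen port")
	m.metaFolder = flag.String("mdir", "/tmp", "data directory to store meta data")
	flag.Parse() // flags resolve here, then dereference once into the option struct
	fmt.Printf("%+v\n", *m.toMasterOption())
}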

View file

@ -8,7 +8,6 @@ import (
type MountOptions struct { type MountOptions struct {
filer *string filer *string
filerGrpcPort *int
filerMountRootPath *string filerMountRootPath *string
dir *string dir *string
dirListingLimit *int dirListingLimit *int
@ -17,6 +16,7 @@ type MountOptions struct {
ttlSec *int ttlSec *int
chunkSizeLimitMB *int chunkSizeLimitMB *int
dataCenter *string dataCenter *string
allowOthers *bool
} }
var ( var (
@ -28,7 +28,6 @@ var (
func init() { func init() {
cmdMount.Run = runMount // break init cycle cmdMount.Run = runMount // break init cycle
mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location") mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location")
mountOptions.filerGrpcPort = cmdMount.Flag.Int("filer.grpc.port", 0, "filer grpc server listen port, default to http port + 10000")
mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server") mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server")
mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory") mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size") mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size")
@ -37,6 +36,7 @@ func init() {
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files") mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file")
mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file")
} }
@ -59,7 +59,7 @@ var cmdMount = &Command{
`, `,
} }
func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress string, err error) { func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
hostnameAndPort := strings.Split(filer, ":") hostnameAndPort := strings.Split(filer, ":")
if len(hostnameAndPort) != 2 { if len(hostnameAndPort) != 2 {
return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort) return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort)
@ -71,9 +71,6 @@ func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress
} }
filerGrpcPort := int(filerPort) + 10000 filerGrpcPort := int(filerPort) + 10000
if optionalGrpcPort != 0 {
filerGrpcPort = optionalGrpcPort
}
return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
} }
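
Editor's note: with -filer.grpc.port removed, parseFilerGrpcAddress always derives the gRPC endpoint as the filer's HTTP port plus 10000. A compact sketch of that derivation:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// filerGrpcAddress maps "host:httpPort" to "host:httpPort+10000",
// the fixed convention this commit standardizes on.
func filerGrpcAddress(filer string) (string, error) {
	parts := strings.Split(filer, ":")
	if len(parts) != 2 {
		return "", fmt.Errorf("filer should have hostname:port format, got %q", filer)
	}
	port, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", fmt.Errorf("bad filer port in %q: %v", filer, err)
	}
	return fmt.Sprintf("%s:%d", parts[0], port+10000), nil
}

func main() {
	addr, _ := filerGrpcAddress("localhost:8888")
	fmt.Println(addr) // localhost:18888
}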

View file

@ -6,11 +6,16 @@ import (
"fmt" "fmt"
"os" "os"
"os/user" "os/user"
"path"
"runtime" "runtime"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/jacobsa/daemonize"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
@ -19,40 +24,67 @@ import (
) )
func runMount(cmd *Command, args []string) bool { func runMount(cmd *Command, args []string) bool {
util.SetupProfiling(*mountCpuProfile, *mountMemProfile)
return RunMount(
*mountOptions.filer,
*mountOptions.filerMountRootPath,
*mountOptions.dir,
*mountOptions.collection,
*mountOptions.replication,
*mountOptions.dataCenter,
*mountOptions.chunkSizeLimitMB,
*mountOptions.allowOthers,
*mountOptions.ttlSec,
*mountOptions.dirListingLimit,
)
}
func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int,
allowOthers bool, ttlSec int, dirListingLimit int) bool {
util.LoadConfiguration("security", false)
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
if *mountOptions.dir == "" { if dir == "" {
fmt.Printf("Please specify the mount directory via \"-dir\"") fmt.Printf("Please specify the mount directory via \"-dir\"")
return false return false
} }
if *mountOptions.chunkSizeLimitMB <= 0 { if chunkSizeLimitMB <= 0 {
fmt.Printf("Please specify a reasonable buffer size.") fmt.Printf("Please specify a reasonable buffer size.")
return false return false
} }
fuse.Unmount(*mountOptions.dir) fuse.Unmount(dir)
uid, gid := uint32(0), uint32(0)
// detect mount folder mode // detect mount folder mode
mountMode := os.ModeDir | 0755 mountMode := os.ModeDir | 0755
if fileInfo, err := os.Stat(*mountOptions.dir); err == nil { fileInfo, err := os.Stat(dir)
if err == nil {
mountMode = os.ModeDir | fileInfo.Mode() mountMode = os.ModeDir | fileInfo.Mode()
uid, gid = util.GetFileUidGid(fileInfo)
fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode())
} }
// detect current user if uid == 0 {
uid, gid := uint32(0), uint32(0) if u, err := user.Current(); err == nil {
if u, err := user.Current(); err == nil { if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil {
if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil { uid = uint32(parsedId)
uid = uint32(parsedId) }
} if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil {
if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil { gid = uint32(parsedId)
gid = uint32(parsedId) }
fmt.Printf("current uid=%d gid=%d\n", uid, gid)
} }
} }
util.SetupProfiling(*mountCpuProfile, *mountMemProfile) mountName := path.Base(dir)
c, err := fuse.Mount( options := []fuse.MountOption{
*mountOptions.dir, fuse.VolumeName(mountName),
fuse.VolumeName("SeaweedFS"),
fuse.FSName("SeaweedFS"), fuse.FSName("SeaweedFS"),
fuse.Subtype("SeaweedFS"), fuse.Subtype("SeaweedFS"),
fuse.NoAppleDouble(), fuse.NoAppleDouble(),
@ -61,56 +93,69 @@ func runMount(cmd *Command, args []string) bool {
fuse.AutoXattr(), fuse.AutoXattr(),
fuse.ExclCreate(), fuse.ExclCreate(),
fuse.DaemonTimeout("3600"), fuse.DaemonTimeout("3600"),
fuse.AllowOther(),
fuse.AllowSUID(), fuse.AllowSUID(),
fuse.DefaultPermissions(), fuse.DefaultPermissions(),
fuse.MaxReadahead(1024*128), fuse.MaxReadahead(1024 * 128),
fuse.AsyncRead(), fuse.AsyncRead(),
fuse.WritebackCache(), fuse.WritebackCache(),
) fuse.AllowNonEmptyMount(),
}
if allowOthers {
options = append(options, fuse.AllowOther())
}
c, err := fuse.Mount(dir, options...)
if err != nil { if err != nil {
glog.Fatal(err) glog.Fatal(err)
daemonize.SignalOutcome(err)
return false return false
} }
util.OnInterrupt(func() { util.OnInterrupt(func() {
fuse.Unmount(*mountOptions.dir) fuse.Unmount(dir)
c.Close() c.Close()
}) })
filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer, *mountOptions.filerGrpcPort) filerGrpcAddress, err := parseFilerGrpcAddress(filer)
if err != nil { if err != nil {
glog.Fatal(err) glog.Fatal(err)
daemonize.SignalOutcome(err)
return false return false
} }
mountRoot := *mountOptions.filerMountRootPath mountRoot := filerMountRootPath
if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") { if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") {
mountRoot = mountRoot[0 : len(mountRoot)-1] mountRoot = mountRoot[0 : len(mountRoot)-1]
} }
daemonize.SignalOutcome(nil)
err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
FilerGrpcAddress: filerGrpcAddress, FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"),
FilerMountRootPath: mountRoot, FilerMountRootPath: mountRoot,
Collection: *mountOptions.collection, Collection: collection,
Replication: *mountOptions.replication, Replication: replication,
TtlSec: int32(*mountOptions.ttlSec), TtlSec: int32(ttlSec),
ChunkSizeLimit: int64(*mountOptions.chunkSizeLimitMB) * 1024 * 1024, ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
DataCenter: *mountOptions.dataCenter, DataCenter: dataCenter,
DirListingLimit: *mountOptions.dirListingLimit, DirListingLimit: dirListingLimit,
EntryCacheTtl: 3 * time.Second, EntryCacheTtl: 3 * time.Second,
MountUid: uid, MountUid: uid,
MountGid: gid, MountGid: gid,
MountMode: mountMode, MountMode: mountMode,
MountCtime: fileInfo.ModTime(),
MountMtime: time.Now(),
})) }))
if err != nil { if err != nil {
fuse.Unmount(*mountOptions.dir) fuse.Unmount(dir)
} }
// check if the mount process has an error to report // check if the mount process has an error to report
<-c.Ready <-c.Ready
if err := c.MountError; err != nil { if err := c.MountError; err != nil {
glog.Fatal(err) glog.Fatal(err)
daemonize.SignalOutcome(err)
} }
return true return true
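
Editor's note: RunMount now assembles the mount options in a []fuse.MountOption slice so that AllowOther is appended only when -allowOthers is set, rather than being hard-coded into the fuse.Mount call. The shape of that conditional-options pattern, with strings standing in for the real fuse options:

package main

import "fmt"

// mount stands in for fuse.Mount(dir, options...), which takes a variadic
// option list in the real code.
func mount(dir string, options ...string) {
	fmt.Println("mount", dir, options)
}

func main() {
	allowOthers := true // mirrors the new -allowOthers flag (default true)
	options := []string{"FSName=SeaweedFS", "AllowNonEmptyMount"}
	if allowOthers {
		options = append(options, "AllowOther") // appended only when permitted
	}
	mount("/mnt/weed", options...)
}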

View file

@ -4,7 +4,11 @@ import (
"net/http" "net/http"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/spf13/viper"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api" "github.com/chrislusf/seaweedfs/weed/s3api"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
@ -12,12 +16,11 @@ import (
) )
var ( var (
s3options S3Options s3StandaloneOptions S3Options
) )
type S3Options struct { type S3Options struct {
filer *string filer *string
filerGrpcPort *int
filerBucketsPath *string filerBucketsPath *string
port *int port *int
domainName *string domainName *string
@ -27,13 +30,12 @@ type S3Options struct {
func init() { func init() {
cmdS3.Run = runS3 // break init cycle cmdS3.Run = runS3 // break init cycle
s3options.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address")
s3options.filerGrpcPort = cmdS3.Flag.Int("filer.grpcPort", 0, "filer server grpc port, default to filer http port plus 10000") s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3options.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port")
s3options.port = cmdS3.Flag.Int("port", 8333, "s3options server http listen port") s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}")
s3options.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3options.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
s3options.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
} }
var cmdS3 = &Command{ var cmdS3 = &Command{
@ -46,7 +48,15 @@ var cmdS3 = &Command{
func runS3(cmd *Command, args []string) bool { func runS3(cmd *Command, args []string) bool {
filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort) util.LoadConfiguration("security", false)
return s3StandaloneOptions.startS3Server()
}
func (s3opt *S3Options) startS3Server() bool {
filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer)
if err != nil { if err != nil {
glog.Fatal(err) glog.Fatal(err)
return false return false
@ -55,10 +65,11 @@ func runS3(cmd *Command, args []string) bool {
router := mux.NewRouter().SkipClean(true) router := mux.NewRouter().SkipClean(true)
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
Filer: *s3options.filer, Filer: *s3opt.filer,
FilerGrpcAddress: filerGrpcAddress, FilerGrpcAddress: filerGrpcAddress,
DomainName: *s3options.domainName, DomainName: *s3opt.domainName,
BucketsPath: *s3options.filerBucketsPath, BucketsPath: *s3opt.filerBucketsPath,
GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"),
}) })
if s3ApiServer_err != nil { if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
@ -66,22 +77,22 @@ func runS3(cmd *Command, args []string) bool {
httpS := &http.Server{Handler: router} httpS := &http.Server{Handler: router}
listenAddress := fmt.Sprintf(":%d", *s3options.port) listenAddress := fmt.Sprintf(":%d", *s3opt.port)
s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil { if err != nil {
glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err) glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
} }
if *s3options.tlsPrivateKey != "" { if *s3opt.tlsPrivateKey != "" {
if err = httpS.ServeTLS(s3ApiListener, *s3options.tlsCertificate, *s3options.tlsPrivateKey); err != nil { glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3opt.port)
if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err) glog.Fatalf("S3 API Server Fail to serve: %v", err)
} }
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3options.port)
} else { } else {
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3opt.port)
if err = httpS.Serve(s3ApiListener); err != nil { if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err) glog.Fatalf("S3 API Server Fail to serve: %v", err)
} }
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3options.port)
} }
return true return true
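
Editor's note: the S3 gateway keeps a single listener and picks ServeTLS or Serve depending on whether -key.file was given, now logging the scheme before serving instead of after. A self-contained sketch of the same switch (paths and port are placeholders):

package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	tlsCert, tlsKey := "", "" // set both to switch the gateway to HTTPS
	httpS := &http.Server{Handler: http.NewServeMux()}

	s3ApiListener, err := net.Listen("tcp", ":8333")
	if err != nil {
		log.Fatalf("S3 API Server listener error: %v", err)
	}

	if tlsKey != "" {
		log.Println("serving S3 API at https port 8333")
		log.Fatal(httpS.ServeTLS(s3ApiListener, tlsCert, tlsKey))
	} else {
		log.Println("serving S3 API at http port 8333")
		log.Fatal(httpS.Serve(s3ApiListener))
	}
}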

View file

@ -10,7 +10,7 @@ func init() {
} }
var cmdScaffold = &Command{ var cmdScaffold = &Command{
UsageLine: "scaffold [filer]", UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
Short: "generate basic configuration files", Short: "generate basic configuration files",
Long: `Generate filer.toml with all possible configurations for you to customize. Long: `Generate filer.toml with all possible configurations for you to customize.
@ -19,7 +19,7 @@ var cmdScaffold = &Command{
var ( var (
outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory") outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication] the configuration file to generate") config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
) )
func runScaffold(cmd *Command, args []string) bool { func runScaffold(cmd *Command, args []string) bool {
@ -32,6 +32,10 @@ func runScaffold(cmd *Command, args []string) bool {
content = NOTIFICATION_TOML_EXAMPLE content = NOTIFICATION_TOML_EXAMPLE
case "replication": case "replication":
content = REPLICATION_TOML_EXAMPLE content = REPLICATION_TOML_EXAMPLE
case "security":
content = SECURITY_TOML_EXAMPLE
case "master":
content = MASTER_TOML_EXAMPLE
} }
if content == "" { if content == "" {
println("need a valid -config option") println("need a valid -config option")
@ -61,6 +65,12 @@ enabled = false
[leveldb] [leveldb]
# local on disk, mostly for simple single-machine setup, fairly scalable # local on disk, mostly for simple single-machine setup, fairly scalable
enabled = false
dir = "." # directory to store level db files
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true enabled = true
dir = "." # directory to store level db files dir = "." # directory to store level db files
@ -70,12 +80,13 @@ dir = "." # directory to store level db files
[mysql] [mysql]
# CREATE TABLE IF NOT EXISTS filemeta ( # CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', # dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) COMMENT 'directory or file name', # name VARCHAR(1000) COMMENT 'directory or file name',
# directory VARCHAR(4096) COMMENT 'full path to parent directory', # directory TEXT COMMENT 'full path to parent directory',
# meta BLOB, # meta LONGBLOB,
# PRIMARY KEY (dirhash, name) # PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8; # ) DEFAULT CHARSET=utf8;
enabled = false enabled = false
hostname = "localhost" hostname = "localhost"
port = 3306 port = 3306
@ -88,8 +99,8 @@ connection_max_open = 100
[postgres] [postgres]
# CREATE TABLE IF NOT EXISTS filemeta ( # CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT, # dirhash BIGINT,
# name VARCHAR(1000), # name VARCHAR(65535),
# directory VARCHAR(4096), # directory VARCHAR(65535),
# meta bytea, # meta bytea,
# PRIMARY KEY (dirhash, name) # PRIMARY KEY (dirhash, name)
# ); # );
@ -132,6 +143,7 @@ addresses = [
"localhost:30005", "localhost:30005",
"localhost:30006", "localhost:30006",
] ]
password = ""
` `
@ -178,6 +190,17 @@ google_application_credentials = "/path/to/x.json" # path to json credential fil
project_id = "" # an existing project id project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# With this topic URL, the publisher dials the RabbitMQ server at the address
# in the environment variable RABBIT_SERVER_URL and opens the exchange "myexchange".
# The exchange must already exist, created by some other means such as
# the RabbitMQ management plugin.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
` `
REPLICATION_TOML_EXAMPLE = ` REPLICATION_TOML_EXAMPLE = `
@ -239,5 +262,79 @@ b2_master_application_key = ""
bucket = "mybucket" # an existing bucket bucket = "mybucket" # an existing bucket
directory = "/" # destination directory directory = "/" # destination directory
`
SECURITY_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
# ./security.toml
# $HOME/.seaweedfs/security.toml
# /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer
# the jwt signing key is read by master and volume server.
# a jwt expires after 10 seconds by default.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds
# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds
# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""
[grpc.volume]
cert = ""
key = ""
[grpc.master]
cert = ""
key = ""
[grpc.filer]
cert = ""
key = ""
# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""
# volume server https options
# Note: work in progress!
# this does not yet work with other clients, e.g., "weed filer|mount".
[https.client]
enabled = true
[https.volume]
cert = ""
key = ""
`
MASTER_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
# ./master.toml
# $HOME/.seaweedfs/master.toml
# /etc/seaweedfs/master.toml
# this file is read by master
[master.maintenance]
# these scripts are run periodically, and are the same as the commands run from 'weed shell'
scripts = """
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.balance -force
"""
sleep_minutes = 17 # sleep minutes between each script execution
` `
) )
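
The `[grpc]` section of the scaffolded security.toml above declares mutual TLS for all gRPC traffic. A sketch of what a loader like `security.LoadServerTLS` has to build from those ca/cert/key paths; the file names below are placeholders, not SeaweedFS defaults, and the real implementation may differ:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// Builds a grpc.ServerOption carrying mutual-TLS credentials from PEM files,
// mirroring what the ca/cert/key entries in security.toml are for.
func mutualTLSOption(caFile, certFile, keyFile string) (grpc.ServerOption, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	caPEM, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no CA certificates found in %s", caFile)
	}
	return grpc.Creds(credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		ClientAuth:   tls.RequireAndVerifyClientCert, // "all grpc tls authentications are mutual"
		ClientCAs:    pool,
	})), nil
}

func main() {
	opt, err := mutualTLSOption("ca.pem", "master.crt", "master.key")
	if err != nil {
		log.Fatal(err)
	}
	lis, err := net.Listen("tcp", ":19333")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(grpc.NewServer(opt).Serve(lis))
}
```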
@ -1,6 +1,7 @@
package command package command
import ( import (
"fmt"
"net/http" "net/http"
"os" "os"
"runtime" "runtime"
@ -10,6 +11,10 @@ import (
"sync" "sync"
"time" "time"
"github.com/chrislusf/raft/protobuf"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/server"
@ -25,7 +30,9 @@ type ServerOptions struct {
var ( var (
serverOptions ServerOptions serverOptions ServerOptions
masterOptions MasterOptions
filerOptions FilerOptions filerOptions FilerOptions
s3Options S3Options
) )
func init() { func init() {
@ -34,51 +41,52 @@ func init() {
var cmdServer = &Command{
	UsageLine: "server -port=8080 -dir=/tmp -volume.max=5 -ip=server_name",
-	Short: "start a server, including volume server, and automatically elect a master server",
+	Short: "start a master server, a volume server, and optionally a filer and an S3 gateway",
	Long: `start both a volume server to provide storage spaces
and a master server to provide volume=>location mapping service and sequence number of file ids

This is provided as a convenient way to start both volume server and master server.
-The servers are exactly the same as starting them separately.
-
-So other volume servers can use this embedded master server also.
-
-Optionally, one filer server can be started.
-Logically, filer servers should not be in a cluster.
-They run with meta data on disk, not shared. So each filer server is different.
+The servers act exactly the same as starting them separately.
+So other volume servers can connect to this master server also.
+
+Optionally, a filer server can be started.
+Also optionally, an S3 gateway can be started.

`,
}
var ( var (
serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name") serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name")
serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
serverMaxCpu = cmdServer.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds")
serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name")
serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name")
serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
serverSecureKey = cmdServer.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...")
serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
masterGrpcPort = cmdServer.Flag.Int("master.port.grpc", 0, "master grpc server listen port, default to http port + 10000") isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway")
masterMetaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
masterVolumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
masterVolumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.")
masterDefaultReplicaPlacement = cmdServer.Flag.String("master.defaultReplicaPlacement", "000", "Default replication type if not specified.")
volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...")
pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
serverWhiteList []string serverWhiteList []string
) )
func init() { func init() {
serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file") serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file")
masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list")
masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
masterOptions.volumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.")
masterOptions.defaultReplication = cmdServer.Flag.String("master.defaultReplication", "000", "Default replication type if not specified.")
masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address")
masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection") filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
filerOptions.grpcPort = cmdServer.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to http port + 10000")
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.") filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
@ -88,15 +96,25 @@ func init() {
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
} }
func runServer(cmd *Command, args []string) bool { func runServer(cmd *Command, args []string) bool {
filerOptions.secretKey = serverSecureKey
util.LoadConfiguration("security", false)
util.LoadConfiguration("master", false)
if *serverOptions.cpuprofile != "" { if *serverOptions.cpuprofile != "" {
f, err := os.Create(*serverOptions.cpuprofile) f, err := os.Create(*serverOptions.cpuprofile)
if err != nil { if err != nil {
@ -110,41 +128,53 @@ func runServer(cmd *Command, args []string) bool {
*isStartingFiler = true *isStartingFiler = true
} }
master := *serverIp + ":" + strconv.Itoa(*masterPort) if *isStartingS3 {
filerOptions.ip = serverIp *isStartingFiler = true
}
master := *serverIp + ":" + strconv.Itoa(*masterOptions.port)
masterOptions.ip = serverIp
masterOptions.ipBind = serverBindIp
filerOptions.masters = &master
filerOptions.ip = serverBindIp
serverOptions.v.ip = serverIp serverOptions.v.ip = serverIp
serverOptions.v.bindIp = serverBindIp serverOptions.v.bindIp = serverBindIp
serverOptions.v.masters = &master serverOptions.v.masters = &master
serverOptions.v.idleConnectionTimeout = serverTimeout serverOptions.v.idleConnectionTimeout = serverTimeout
serverOptions.v.maxCpu = serverMaxCpu
serverOptions.v.dataCenter = serverDataCenter serverOptions.v.dataCenter = serverDataCenter
serverOptions.v.rack = serverRack serverOptions.v.rack = serverRack
serverOptions.v.pulseSeconds = pulseSeconds serverOptions.v.pulseSeconds = pulseSeconds
masterOptions.pulseSeconds = pulseSeconds
masterOptions.whiteList = serverWhiteListOption
filerOptions.dataCenter = serverDataCenter filerOptions.dataCenter = serverDataCenter
filerOptions.disableHttp = serverDisableHttp
masterOptions.disableHttp = serverDisableHttp
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
s3Options.filer = &filerAddress
if *filerOptions.defaultReplicaPlacement == "" { if *filerOptions.defaultReplicaPlacement == "" {
*filerOptions.defaultReplicaPlacement = *masterDefaultReplicaPlacement *filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication
} }
if *serverMaxCpu < 1 { runtime.GOMAXPROCS(runtime.NumCPU())
*serverMaxCpu = runtime.NumCPU()
}
runtime.GOMAXPROCS(*serverMaxCpu)
folders := strings.Split(*volumeDataFolders, ",") folders := strings.Split(*volumeDataFolders, ",")
if *masterVolumeSizeLimitMB > 30*1000 { if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000") glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
} }
if *masterMetaFolder == "" { if *masterOptions.metaFolder == "" {
*masterMetaFolder = folders[0] *masterOptions.metaFolder = folders[0]
} }
if err := util.TestFolderWritable(*masterMetaFolder); err != nil { if err := util.TestFolderWritable(*masterOptions.metaFolder); err != nil {
glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterMetaFolder, err) glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
} }
filerOptions.defaultLevelDbDirectory = masterMetaFolder filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder
if *serverWhiteListOption != "" { if *serverWhiteListOption != "" {
serverWhiteList = strings.Split(*serverWhiteListOption, ",") serverWhiteList = strings.Split(*serverWhiteListOption, ",")
@ -159,55 +189,55 @@ func runServer(cmd *Command, args []string) bool {
}() }()
} }
var raftWaitForMaster sync.WaitGroup if *isStartingS3 {
go func() {
time.Sleep(2 * time.Second)
s3Options.startS3Server()
}()
}
var volumeWait sync.WaitGroup var volumeWait sync.WaitGroup
raftWaitForMaster.Add(1)
volumeWait.Add(1) volumeWait.Add(1)
go func() { go func() {
r := mux.NewRouter() r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder, ms := weed_server.NewMasterServer(r, masterOptions.toMasterOption(serverWhiteList))
*masterVolumeSizeLimitMB, *masterVolumePreallocate,
*pulseSeconds, *masterDefaultReplicaPlacement, *serverGarbageThreshold,
serverWhiteList, *serverSecureKey,
)
glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterPort) glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterOptions.port)
masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterPort), 0) masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterOptions.port), 0)
if e != nil { if e != nil {
glog.Fatalf("Master startup error: %v", e) glog.Fatalf("Master startup error: %v", e)
} }
go func() { go func() {
// start raftServer
myMasterAddress, peers := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)
raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"),
peers, myMasterAddress, *masterOptions.metaFolder, ms.Topo, *masterOptions.pulseSeconds)
ms.SetRaftServer(raftServer)
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
// starting grpc server // starting grpc server
grpcPort := *masterGrpcPort grpcPort := *masterOptions.port + 10000
if grpcPort == 0 { grpcL, err := util.NewListener(*serverBindIp+":"+strconv.Itoa(grpcPort), 0)
grpcPort = *masterPort + 10000
}
grpcL, err := util.NewListener(*serverIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil { if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
} }
// Create your protocol servers. // Create your protocol servers.
grpcS := util.NewGrpcServer() glog.V(1).Infof("grpc config %+v", viper.Sub("grpc"))
grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master"))
master_pb.RegisterSeaweedServer(grpcS, ms) master_pb.RegisterSeaweedServer(grpcS, ms)
protobuf.RegisterRaftServer(grpcS, raftServer)
reflection.Register(grpcS) reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *serverIp, grpcPort) glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *serverIp, grpcPort)
grpcS.Serve(grpcL) grpcS.Serve(grpcL)
}() }()
go func() { volumeWait.Done()
raftWaitForMaster.Wait()
time.Sleep(100 * time.Millisecond)
myAddress, peers := checkPeers(*serverIp, *masterPort, *serverPeers)
raftServer := weed_server.NewRaftServer(r, peers, myAddress, *masterMetaFolder, ms.Topo, *pulseSeconds)
ms.SetRaftServer(raftServer)
volumeWait.Done()
}()
raftWaitForMaster.Done()
// start http server // start http server
httpS := &http.Server{Handler: r} httpS := &http.Server{Handler: r}
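
runServer now begins with `util.LoadConfiguration("security", false)` and `util.LoadConfiguration("master", false)`. A minimal sketch of what such a loader can look like with viper, assuming the search paths listed in the scaffolded templates (the real helper also fatals when the file is required but missing):

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// Locates and reads <name>.toml along the search paths that the scaffolded
// templates document. Returns whether a file was found and parsed.
func loadConfiguration(name string) bool {
	viper.SetConfigName(name)
	viper.SetConfigType("toml")
	viper.AddConfigPath(".")                // ./security.toml
	viper.AddConfigPath("$HOME/.seaweedfs") // $HOME/.seaweedfs/security.toml
	viper.AddConfigPath("/etc/seaweedfs")   // /etc/seaweedfs/security.toml
	return viper.ReadInConfig() == nil
}

func main() {
	if !loadConfiguration("security") {
		fmt.Println("no security.toml found; TLS and JWT stay disabled")
		return
	}
	// the keys mirror the template above
	fmt.Println("jwt signing key set:", viper.GetString("jwt.signing.key") != "")
	fmt.Println("grpc ca:", viper.GetString("grpc.ca"))
}
```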
@ -1,21 +1,25 @@
package command package command
import ( import (
"bufio" "github.com/chrislusf/seaweedfs/weed/security"
"fmt" "github.com/chrislusf/seaweedfs/weed/shell"
"os" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
)
"github.com/chrislusf/seaweedfs/weed/glog" var (
shellOptions shell.ShellOptions
) )
func init() { func init() {
cmdShell.Run = runShell // break init cycle cmdShell.Run = runShell // break init cycle
shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
} }
var cmdShell = &Command{
	UsageLine: "shell",
-	Short: "run interactive commands, now just echo",
+	Short: "run interactive administrative commands",
-	Long: `run interactive commands.
+	Long: `run interactive administrative commands.

`,
}
@ -23,39 +27,16 @@ var cmdShell = &Command{
var () var ()
func runShell(command *Command, args []string) bool { func runShell(command *Command, args []string) bool {
r := bufio.NewReader(os.Stdin)
o := bufio.NewWriter(os.Stdout)
e := bufio.NewWriter(os.Stderr)
prompt := func() {
var err error
if _, err = o.WriteString("> "); err != nil {
glog.V(0).Infoln("error writing to stdout:", err)
}
if err = o.Flush(); err != nil {
glog.V(0).Infoln("error flushing stdout:", err)
}
}
readLine := func() string {
ret, err := r.ReadString('\n')
if err != nil {
fmt.Fprint(e, err)
os.Exit(1)
}
return ret
}
execCmd := func(cmd string) int {
if cmd != "" {
if _, err := o.WriteString(cmd); err != nil {
glog.V(0).Infoln("error writing to stdout:", err)
}
}
return 0
}
cmd := "" util.LoadConfiguration("security", false)
for { shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
prompt()
cmd = readLine() shellOptions.FilerHost = "localhost"
execCmd(cmd) shellOptions.FilerPort = 8888
} shellOptions.Directory = "/"
shell.RunShell(shellOptions)
return true
} }
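
The old echo-only loop is replaced by `shell.RunShell(shellOptions)`. For contrast, a toy sketch of the read/dispatch loop an interactive administrative shell needs; the command table here is illustrative, not the real shell's:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Not the real shell.RunShell: just the shape of a prompt/read/dispatch loop,
// versus the echo-only loop this commit deletes.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	for fmt.Print("> "); scanner.Scan(); fmt.Print("> ") {
		args := strings.Fields(scanner.Text())
		if len(args) == 0 {
			continue
		}
		switch args[0] {
		case "exit", "quit":
			return
		case "volume.balance", "ec.encode": // names borrowed from master.toml above
			fmt.Println("would run", args[0], "against the master")
		default:
			fmt.Println("unknown command:", args[0])
		}
	}
}
```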
@ -6,8 +6,11 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/operation"
) )
var ( var (
@ -23,7 +26,6 @@ type UploadOptions struct {
dataCenter *string dataCenter *string
ttl *string ttl *string
maxMB *int maxMB *int
secretKey *string
} }
func init() { func init() {
@ -36,8 +38,7 @@ func init() {
upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name") upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit") upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit")
upload.secretKey = cmdUpload.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
} }
var cmdUpload = &Command{ var cmdUpload = &Command{
@ -53,14 +54,17 @@ var cmdUpload = &Command{
All files under the folder and subfolders will be uploaded, each with its own file key.
Optional parameter "-include" allows you to specify the file name patterns.

-If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separatedly.
+If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separately.
The list of file ids of those chunks would be stored in an additional chunk, and this additional chunk's file id would be returned.
`,
} }
func runUpload(cmd *Command, args []string) bool { func runUpload(cmd *Command, args []string) bool {
secret := security.Secret(*upload.secretKey)
util.LoadConfiguration("security", false)
grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
if len(args) == 0 { if len(args) == 0 {
if *upload.dir == "" { if *upload.dir == "" {
return false return false
@ -77,9 +81,9 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil { if e != nil {
return e return e
} }
results, e := operation.SubmitFiles(*upload.master, parts, results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
*upload.replication, *upload.collection, *upload.dataCenter, *upload.replication, *upload.collection, *upload.dataCenter,
*upload.ttl, *upload.maxMB, secret) *upload.ttl, *upload.maxMB)
bytes, _ := json.Marshal(results) bytes, _ := json.Marshal(results)
fmt.Println(string(bytes)) fmt.Println(string(bytes))
if e != nil { if e != nil {
@ -96,9 +100,9 @@ func runUpload(cmd *Command, args []string) bool {
if e != nil { if e != nil {
fmt.Println(e.Error()) fmt.Println(e.Error())
} }
results, _ := operation.SubmitFiles(*upload.master, parts, results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
*upload.replication, *upload.collection, *upload.dataCenter, *upload.replication, *upload.collection, *upload.dataCenter,
*upload.ttl, *upload.maxMB, secret) *upload.ttl, *upload.maxMB)
bytes, _ := json.Marshal(results) bytes, _ := json.Marshal(results)
fmt.Println(string(bytes)) fmt.Println(string(bytes))
} }
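
With `-maxMB` now defaulting to 32, large uploads are split by default. The chunk-count arithmetic implied by the help text, as a standalone sketch (the real splitter in weed/operation also writes a manifest chunk listing the chunk file ids):

```go
package main

import "fmt"

// A file is cut into ceil(size/chunkSize) pieces, where chunkSize is
// maxMB megabytes. maxMB <= 0 means no splitting.
func chunkCount(fileSize int64, maxMB int) int64 {
	if maxMB <= 0 {
		return 1
	}
	chunkSize := int64(maxMB) * 1024 * 1024
	return (fileSize + chunkSize - 1) / chunkSize
}

func main() {
	fmt.Println(chunkCount(100*1024*1024, 32)) // 4 chunks for a 100MB file
}
```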
@ -9,6 +9,9 @@ import (
"strings" "strings"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/spf13/viper"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/server"
@ -32,7 +35,6 @@ type VolumeServerOptions struct {
masters *string masters *string
pulseSeconds *int pulseSeconds *int
idleConnectionTimeout *int idleConnectionTimeout *int
maxCpu *int
dataCenter *string dataCenter *string
rack *string rack *string
whiteList []string whiteList []string
@ -41,6 +43,7 @@ type VolumeServerOptions struct {
readRedirect *bool readRedirect *bool
cpuProfile *string cpuProfile *string
memProfile *string memProfile *string
compactionMBPerSecond *int
} }
func init() { func init() {
@ -53,14 +56,14 @@ func init() {
v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers") v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds") v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
v.maxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name") v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name")
v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name") v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name")
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.") v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
} }
var cmdVolume = &Command{ var cmdVolume = &Command{
@ -78,10 +81,10 @@ var (
) )
func runVolume(cmd *Command, args []string) bool { func runVolume(cmd *Command, args []string) bool {
if *v.maxCpu < 1 {
*v.maxCpu = runtime.NumCPU() util.LoadConfiguration("security", false)
}
runtime.GOMAXPROCS(*v.maxCpu) runtime.GOMAXPROCS(runtime.NumCPU())
util.SetupProfiling(*v.cpuProfile, *v.memProfile) util.SetupProfiling(*v.cpuProfile, *v.memProfile)
v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption) v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption)
@ -137,10 +140,10 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
switch *v.indexType { switch *v.indexType {
case "leveldb": case "leveldb":
volumeNeedleMapKind = storage.NeedleMapLevelDb volumeNeedleMapKind = storage.NeedleMapLevelDb
case "boltdb": case "leveldbMedium":
volumeNeedleMapKind = storage.NeedleMapBoltDb volumeNeedleMapKind = storage.NeedleMapLevelDbMedium
case "btree": case "leveldbLarge":
volumeNeedleMapKind = storage.NeedleMapBtree volumeNeedleMapKind = storage.NeedleMapLevelDbLarge
} }
masters := *v.masters masters := *v.masters
@ -152,6 +155,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack, strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack,
v.whiteList, v.whiteList,
*v.fixJpgOrientation, *v.readRedirect, *v.fixJpgOrientation, *v.readRedirect,
*v.compactionMBPerSecond,
) )
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
@ -185,13 +189,20 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if err != nil { if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
} }
grpcS := util.NewGrpcServer() grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume"))
volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer) volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer)
reflection.Register(grpcS) reflection.Register(grpcS)
go grpcS.Serve(grpcL) go grpcS.Serve(grpcL)
if e := http.Serve(listener, volumeMux); e != nil { if viper.GetString("https.volume.key") != "" {
glog.Fatalf("Volume server fail to serve: %v", e) if e := http.ServeTLS(listener, volumeMux,
viper.GetString("https.volume.cert"), viper.GetString("https.volume.key")); e != nil {
glog.Fatalf("Volume server fail to serve: %v", e)
}
} else {
if e := http.Serve(listener, volumeMux); e != nil {
glog.Fatalf("Volume server fail to serve: %v", e)
}
} }
} }
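
The new `-compactionMBps` flag caps background compaction/copy speed. One way such a byte-rate throttle can be built, as a sketch only; the actual limiter in SeaweedFS may use a different strategy:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"time"
)

// Wraps a writer and sleeps whenever the bytes written run ahead of the
// configured rate.
type throttledWriter struct {
	w           io.Writer
	bytesPerSec float64
	written     float64
	start       time.Time
}

func (t *throttledWriter) Write(p []byte) (int, error) {
	n, err := t.w.Write(p)
	t.written += float64(n)
	// seconds ahead of schedule = seconds the budget says this much data
	// should take, minus seconds actually elapsed
	if ahead := t.written/t.bytesPerSec - time.Since(t.start).Seconds(); ahead > 0 {
		time.Sleep(time.Duration(ahead * float64(time.Second)))
	}
	return n, err
}

func main() {
	tw := &throttledWriter{w: ioutil.Discard, bytesPerSec: 2 << 20, start: time.Now()}
	begin := time.Now()
	io.Copy(tw, strings.NewReader(strings.Repeat("x", 4<<20)))
	fmt.Printf("4MB at 2MB/s took %.1fs\n", time.Since(begin).Seconds()) // ~2s
}
```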
weed/command/webdav.go (new file, 109 lines)
@ -0,0 +1,109 @@
package command
import (
"fmt"
"net/http"
"os/user"
"strconv"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/server"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/spf13/viper"
)
var (
webDavStandaloneOptions WebDavOption
)
type WebDavOption struct {
filer *string
port *int
collection *string
tlsPrivateKey *string
tlsCertificate *string
}
func init() {
cmdWebDav.Run = runWebDav // break init cycle
webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address")
webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port")
webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
}
var cmdWebDav = &Command{
UsageLine: "webdav -port=7333 -filer=<ip:port>",
Short: "<unstable> start a webdav server that is backed by a filer",
Long: `start a webdav server that is backed by a filer.
`,
}
func runWebDav(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.VERSION, *webDavStandaloneOptions.port)
return webDavStandaloneOptions.startWebDav()
}
func (wo *WebDavOption) startWebDav() bool {
filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer)
if err != nil {
glog.Fatal(err)
return false
}
// detect current user
uid, gid := uint32(0), uint32(0)
if u, err := user.Current(); err == nil {
if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil {
uid = uint32(parsedId)
}
if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil {
gid = uint32(parsedId)
}
}
ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
Filer: *wo.filer,
FilerGrpcAddress: filerGrpcAddress,
GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"),
Collection: *wo.collection,
Uid: uid,
Gid: gid,
})
if webdavServer_err != nil {
glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
}
httpS := &http.Server{Handler: ws.Handler}
listenAddress := fmt.Sprintf(":%d", *wo.port)
webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
}
if *wo.tlsPrivateKey != "" {
glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.VERSION, *wo.port)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.VERSION, *wo.port)
if err = httpS.Serve(webDavListener); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
}
return true
}
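
Since WebDAV is layered on HTTP, the new server can be smoke-tested with a plain HTTP client. A hypothetical check against the default port 7333 from the flag above; the path and payload are made up:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// PUT a small file through the webdav server, then report the status.
func main() {
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:7333/test.txt", strings.NewReader("hello seaweedfs\n"))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("PUT status:", resp.Status)
}
```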
@ -0,0 +1,84 @@
Mount SeaweedFS via FUSE
# Mount by fstab
```
$ # on linux
$ sudo apt-get install fuse
$ sudo echo 'user_allow_other' >> /etc/fuse.conf
$ sudo mv weedfuse /sbin/mount.weedfuse
$ # on Mac
$ sudo mv weedfuse /sbin/mount_weedfuse
```
On both OS X and Linux, you can add one of the following entries to your /etc/fstab file:
```
# mount the whole SeaweedFS
localhost:8888/ /home/some/mount/folder weedfuse
# mount the SeaweedFS sub folder
localhost:8888/sub/dir /home/some/mount/folder weedfuse
# mount the SeaweedFS sub folder with some options
localhost:8888/sub/dir /home/some/mount/folder weedfuse user
```
To verify that it works, try this command:
```
$ sudo mount -av
...
/home/some/mount/folder : successfully mounted
```
If you see `successfully mounted`, try to access the mounted folder and verify everything works.
To debug, run the mount in the foreground:
```
$ weedfuse -foreground localhost:8888/ /home/some/mount/folder
```
To unmount the folder:
```
$ sudo umount /home/some/mount/folder
```
<!-- not working yet!
# Mount by autofs
AutoFS can mount a folder if accessed.
```
# install autofs
$ sudo apt-get install autofs
```
Here is an example on how to mount a folder for all users under `/home` directory.
Assuming there exists corresponding folders under `/home` on both local and SeaweedFS.
Edit `/etc/auto.master` and `/etc/auto.weedfuse` file with these content
```
$ cat /etc/auto.master
/home /etc/auto.weedfuse
$ cat /etc/auto.weedfuse
# map /home/<user> to localhost:8888/home/<user>
* -fstype=weedfuse,rw,allow_other,foreground :localhost\:8888/home/&
```
-->
@ -0,0 +1,109 @@
package main
import (
"flag"
"fmt"
"os"
"strings"
"github.com/chrislusf/seaweedfs/weed/command"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/jacobsa/daemonize"
"github.com/kardianos/osext"
)
var (
fuseCommand = flag.NewFlagSet("weedfuse", flag.ContinueOnError)
options = fuseCommand.String("o", "", "comma separated options rw,uid=xxx,gid=xxx")
isForeground = fuseCommand.Bool("foreground", false, "run in the foreground instead of daemonizing")
)
func main() {
err := fuseCommand.Parse(os.Args[1:])
if err != nil {
glog.Fatal(err)
}
fmt.Printf("options: %v\n", *options)
// seems this value is always empty, need to parse it differently
optionsString := *options
prev := ""
for i, arg := range os.Args {
fmt.Printf("args[%d]: %v\n", i, arg)
if prev == "-o" {
optionsString = arg
}
prev = arg
}
device := fuseCommand.Arg(0)
mountPoint := fuseCommand.Arg(1)
fmt.Printf("source: %v\n", device)
fmt.Printf("target: %v\n", mountPoint)
nouser := true
for _, option := range strings.Split(optionsString, ",") {
fmt.Printf("option: %v\n", option)
switch option {
case "user":
nouser = false
}
}
maybeSetupPath()
if !*isForeground {
startAsDaemon()
return
}
parts := strings.SplitN(device, "/", 2)
filer, filerPath := parts[0], parts[1]
command.RunMount(
filer, "/"+filerPath, mountPoint, "", "000", "",
4, !nouser, 0, 1000000)
}
func maybeSetupPath() {
// sudo mount -av may not include PATH in some linux, e.g., Ubuntu
hasPathEnv := false
for _, e := range os.Environ() {
if strings.HasPrefix(e, "PATH=") {
hasPathEnv = true
}
fmt.Println(e)
}
if !hasPathEnv {
os.Setenv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin")
}
}
func startAsDaemon() {
// adapted from gcsfuse
// Find the executable.
var path string
path, err := osext.Executable()
if err != nil {
glog.Fatalf("osext.Executable: %v", err)
}
// Set up arguments. Be sure to use foreground mode.
args := append([]string{"-foreground"}, os.Args[1:]...)
// Pass along PATH so that the daemon can find fusermount on Linux.
env := []string{
fmt.Sprintf("PATH=%s", os.Getenv("PATH")),
}
err = daemonize.Run(path, args, env, os.Stdout)
if err != nil {
glog.Fatalf("daemonize.Run: %v", err)
}
}
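
As the comment in `main` notes, the `-o` flag value arrives oddly from mount(8) and has to be rescanned from os.Args. A sketch of turning the raw comma-separated option string into a map once it is recovered; the keys shown are examples from the README above:

```go
package main

import (
	"fmt"
	"strings"
)

// mount(8) passes options as a single "-o a,b=c" string; split it into a map
// so individual flags like "user" can be looked up.
func parseMountOptions(s string) map[string]string {
	opts := make(map[string]string)
	for _, kv := range strings.Split(s, ",") {
		if kv == "" {
			continue
		}
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) == 2 {
			opts[parts[0]] = parts[1]
		} else {
			opts[parts[0]] = ""
		}
	}
	return opts
}

func main() {
	fmt.Println(parseMountOptions("rw,allow_other,uid=1000"))
}
```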
@ -1,6 +1,7 @@
package abstract_sql package abstract_sql
import ( import (
"context"
"database/sql" "database/sql"
"fmt" "fmt"
@ -18,7 +19,44 @@ type AbstractSqlStore struct {
SqlListInclusive string SqlListInclusive string
} }
func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { type TxOrDB interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
}
func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) {
tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{
Isolation: sql.LevelReadCommitted,
ReadOnly: false,
})
if err != nil {
return ctx, err
}
return context.WithValue(ctx, "tx", tx), nil
}
func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error {
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
return tx.Commit()
}
return nil
}
func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
return tx.Rollback()
}
return nil
}
func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB {
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
return tx
}
return store.DB
}
func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName() dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks() meta, err := entry.EncodeAttributesAndChunks()
@ -26,7 +64,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
return fmt.Errorf("encode %s: %s", entry.FullPath, err) return fmt.Errorf("encode %s: %s", entry.FullPath, err)
} }
res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta) res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta)
if err != nil { if err != nil {
return fmt.Errorf("insert %s: %s", entry.FullPath, err) return fmt.Errorf("insert %s: %s", entry.FullPath, err)
} }
@ -38,7 +76,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil return nil
} }
func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName() dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks() meta, err := entry.EncodeAttributesAndChunks()
@ -46,7 +84,7 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
return fmt.Errorf("encode %s: %s", entry.FullPath, err) return fmt.Errorf("encode %s: %s", entry.FullPath, err)
} }
res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir) res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir)
if err != nil { if err != nil {
return fmt.Errorf("update %s: %s", entry.FullPath, err) return fmt.Errorf("update %s: %s", entry.FullPath, err)
} }
@ -58,10 +96,10 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) {
return nil return nil
} }
func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) { func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) {
dir, name := fullpath.DirAndName() dir, name := fullpath.DirAndName()
row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir) row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir)
var data []byte var data []byte
if err := row.Scan(&data); err != nil { if err := row.Scan(&data); err != nil {
return nil, filer2.ErrNotFound return nil, filer2.ErrNotFound
@ -77,11 +115,11 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr
return entry, nil return entry, nil
} }
func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error { func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
dir, name := fullpath.DirAndName() dir, name := fullpath.DirAndName()
res, err := store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir) res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir)
if err != nil { if err != nil {
return fmt.Errorf("delete %s: %s", fullpath, err) return fmt.Errorf("delete %s: %s", fullpath, err)
} }
@ -94,14 +132,14 @@ func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error {
return nil return nil
} }
func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
sqlText := store.SqlListExclusive sqlText := store.SqlListExclusive
if inclusive { if inclusive {
sqlText = store.SqlListInclusive sqlText = store.SqlListInclusive
} }
rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit)
if err != nil { if err != nil {
return nil, fmt.Errorf("list %s : %v", fullpath, err) return nil, fmt.Errorf("list %s : %v", fullpath, err)
} }
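
The new Begin/Commit/Rollback methods thread a *sql.Tx through the context so that every store method, via getTxOrDB, transparently joins an open transaction. A standalone demo of the same pattern; sqlite is only a stand-in driver, and the typed context key avoids the collisions a bare "tx" string key can invite:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // driver choice is illustrative
)

type txKey struct{}

type store struct{ db *sql.DB }

// Mirrors AbstractSqlStore.BeginTransaction: stash the *sql.Tx in the context.
func (s *store) begin(ctx context.Context) (context.Context, error) {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return ctx, err
	}
	return context.WithValue(ctx, txKey{}, tx), nil
}

// Mirrors getTxOrDB: statements join the transaction when one is riding along.
func (s *store) exec(ctx context.Context, q string, args ...interface{}) (sql.Result, error) {
	if tx, ok := ctx.Value(txKey{}).(*sql.Tx); ok {
		return tx.ExecContext(ctx, q, args...)
	}
	return s.db.ExecContext(ctx, q, args...)
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	db.SetMaxOpenConns(1) // one in-memory database, one connection
	s := &store{db: db}
	ctx := context.Background()
	s.exec(ctx, "CREATE TABLE filemeta (name TEXT)")

	ctx, _ = s.begin(ctx)
	s.exec(ctx, "INSERT INTO filemeta VALUES (?)", "a.txt")
	ctx.Value(txKey{}).(*sql.Tx).Rollback() // roll back: the insert vanishes

	var n int
	db.QueryRow("SELECT COUNT(*) FROM filemeta").Scan(&n)
	fmt.Println("rows after rollback:", n) // 0
}
```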
@ -1,6 +1,7 @@
package cassandra package cassandra
import ( import (
"context"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -39,7 +40,17 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er
return return
} }
func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) { func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) {
return ctx, nil
}
func (store *CassandraStore) CommitTransaction(ctx context.Context) error {
return nil
}
func (store *CassandraStore) RollbackTransaction(ctx context.Context) error {
return nil
}
func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.FullPath.DirAndName() dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks() meta, err := entry.EncodeAttributesAndChunks()
@ -56,12 +67,12 @@ func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil return nil
} }
func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) { func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
return store.InsertEntry(entry) return store.InsertEntry(ctx, entry)
} }
func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName() dir, name := fullpath.DirAndName()
var data []byte var data []byte
@ -74,7 +85,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
} }
if len(data) == 0 { if len(data) == 0 {
return nil, fmt.Errorf("not found: %s", fullpath) return nil, filer2.ErrNotFound
} }
entry = &filer2.Entry{ entry = &filer2.Entry{
@ -88,7 +99,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.
return entry, nil return entry, nil
} }
func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error {
dir, name := fullpath.DirAndName() dir, name := fullpath.DirAndName()
@ -101,7 +112,7 @@ func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error {
return nil return nil
} }
func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) { limit int) (entries []*filer2.Entry, err error) {
cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?"
@ -52,9 +52,20 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry {
return nil return nil
} }
return &filer_pb.Entry{ return &filer_pb.Entry{
Name: string(entry.FullPath), Name: entry.FullPath.Name(),
IsDirectory: entry.IsDirectory(), IsDirectory: entry.IsDirectory(),
Attributes: EntryAttributeToPb(entry), Attributes: EntryAttributeToPb(entry),
Chunks: entry.Chunks, Chunks: entry.Chunks,
} }
} }
func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
if entry == nil {
return nil
}
dir, _ := entry.FullPath.DirAndName()
return &filer_pb.FullEntry{
Dir: dir,
Entry: entry.ToProtoEntry(),
}
}
@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/gogo/protobuf/proto" "github.com/golang/protobuf/proto"
) )
func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) {
@ -40,7 +40,7 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
fileIds[interval.fileId] = true fileIds[interval.fileId] = true
} }
for _, chunk := range chunks { for _, chunk := range chunks {
if found := fileIds[chunk.FileId]; found { if _, found := fileIds[chunk.GetFileIdString()]; found {
compacted = append(compacted, chunk) compacted = append(compacted, chunk)
} else { } else {
garbage = append(garbage, chunk) garbage = append(garbage, chunk)
@ -50,15 +50,15 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file
return return
} }
func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) { func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
fileIds := make(map[string]bool) fileIds := make(map[string]bool)
for _, interval := range newChunks { for _, interval := range bs {
fileIds[interval.FileId] = true fileIds[interval.GetFileIdString()] = true
} }
for _, chunk := range oldChunks { for _, chunk := range as {
if found := fileIds[chunk.FileId]; !found { if _, found := fileIds[chunk.GetFileIdString()]; !found {
unused = append(unused, chunk) delta = append(delta, chunk)
} }
} }
@ -123,7 +123,7 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.
newV := newVisibleInterval( newV := newVisibleInterval(
chunk.Offset, chunk.Offset,
chunk.Offset+int64(chunk.Size), chunk.Offset+int64(chunk.Size),
chunk.FileId, chunk.GetFileIdString(),
chunk.Mtime, chunk.Mtime,
true, true,
) )
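
`FindUnusedFileChunks` is renamed to `MinusChunks`, which reads as what it is: the set difference as − bs, keyed by chunk file id. Restated over plain strings, since the real version walks []*filer_pb.FileChunk; the file ids below are made up:

```go
package main

import "fmt"

// Every chunk id in as that does not appear in bs.
func minusChunks(as, bs []string) (delta []string) {
	inBs := make(map[string]bool)
	for _, b := range bs {
		inBs[b] = true
	}
	for _, a := range as {
		if !inBs[a] {
			delta = append(delta, a)
		}
	}
	return
}

func main() {
	before := []string{"3,01637037d6", "4,02a7b9f1c2"}
	after := []string{"4,02a7b9f1c2", "5,0391cd88aa"}
	// chunks referenced before but not after: garbage that can be deleted
	fmt.Println(minusChunks(before, after)) // [3,01637037d6]
}
```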
@ -3,6 +3,7 @@ package filer2
import ( import (
"context" "context"
"fmt" "fmt"
"google.golang.org/grpc"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@ -20,17 +21,19 @@ var (
) )
type Filer struct { type Filer struct {
store FilerStore store *FilerStoreWrapper
directoryCache *ccache.Cache directoryCache *ccache.Cache
MasterClient *wdclient.MasterClient MasterClient *wdclient.MasterClient
fileIdDeletionChan chan string fileIdDeletionChan chan string
GrpcDialOption grpc.DialOption
} }
func NewFiler(masters []string) *Filer { func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer {
f := &Filer{ f := &Filer{
directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
MasterClient: wdclient.NewMasterClient(context.Background(), "filer", masters), MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters),
fileIdDeletionChan: make(chan string, 4096), fileIdDeletionChan: make(chan string, 4096),
GrpcDialOption: grpcDialOption,
} }
go f.loopProcessingDeletion() go f.loopProcessingDeletion()
@ -39,7 +42,7 @@ func NewFiler(masters []string) *Filer {
} }
func (f *Filer) SetStore(store FilerStore) { func (f *Filer) SetStore(store FilerStore) {
f.store = store f.store = NewFilerStoreWrapper(store)
} }
func (f *Filer) DisableDirectoryCache() { func (f *Filer) DisableDirectoryCache() {
@ -54,7 +57,19 @@ func (fs *Filer) KeepConnectedToMaster() {
fs.MasterClient.KeepConnectedToMaster() fs.MasterClient.KeepConnectedToMaster()
} }
func (f *Filer) CreateEntry(entry *Entry) error { func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
return f.store.BeginTransaction(ctx)
}
func (f *Filer) CommitTransaction(ctx context.Context) error {
return f.store.CommitTransaction(ctx)
}
func (f *Filer) RollbackTransaction(ctx context.Context) error {
return f.store.RollbackTransaction(ctx)
}
func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error {
if string(entry.FullPath) == "/" { if string(entry.FullPath) == "/" {
return nil return nil
@ -67,7 +82,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
var lastDirectoryEntry *Entry var lastDirectoryEntry *Entry
for i := 1; i < len(dirParts); i++ { for i := 1; i < len(dirParts); i++ {
dirPath := "/" + filepath.Join(dirParts[:i]...) dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...))
// fmt.Printf("%d directory: %+v\n", i, dirPath) // fmt.Printf("%d directory: %+v\n", i, dirPath)
// first check local cache // first check local cache
@ -76,7 +91,7 @@ func (f *Filer) CreateEntry(entry *Entry) error {
// not found, check the store directly // not found, check the store directly
if dirEntry == nil { if dirEntry == nil {
glog.V(4).Infof("find uncached directory: %s", dirPath) glog.V(4).Infof("find uncached directory: %s", dirPath)
dirEntry, _ = f.FindEntry(FullPath(dirPath)) dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath))
} else { } else {
glog.V(4).Infof("found cached directory: %s", dirPath) glog.V(4).Infof("found cached directory: %s", dirPath)
} }
@ -99,9 +114,9 @@ func (f *Filer) CreateEntry(entry *Entry) error {
} }
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.store.InsertEntry(dirEntry) mkdirErr := f.store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil { if mkdirErr != nil {
if _, err := f.FindEntry(FullPath(dirPath)); err == ErrNotFound { if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound {
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
} }
} else { } else {
@ -134,14 +149,16 @@ func (f *Filer) CreateEntry(entry *Entry) error {
} }
*/ */
oldEntry, _ := f.FindEntry(entry.FullPath) oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
if oldEntry == nil { if oldEntry == nil {
if err := f.store.InsertEntry(entry); err != nil { if err := f.store.InsertEntry(ctx, entry); err != nil {
glog.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
} }
} else { } else {
if err := f.UpdateEntry(oldEntry, entry); err != nil { if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
glog.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
} }
} }
@ -153,19 +170,21 @@ func (f *Filer) CreateEntry(entry *Entry) error {
return nil return nil
} }
func (f *Filer) UpdateEntry(oldEntry, entry *Entry) (err error) { func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
if oldEntry != nil { if oldEntry != nil {
if oldEntry.IsDirectory() && !entry.IsDirectory() { if oldEntry.IsDirectory() && !entry.IsDirectory() {
glog.Errorf("existing %s is a directory", entry.FullPath)
return fmt.Errorf("existing %s is a directory", entry.FullPath) return fmt.Errorf("existing %s is a directory", entry.FullPath)
} }
if !oldEntry.IsDirectory() && entry.IsDirectory() { if !oldEntry.IsDirectory() && entry.IsDirectory() {
glog.Errorf("existing %s is a file", entry.FullPath)
return fmt.Errorf("existing %s is a file", entry.FullPath) return fmt.Errorf("existing %s is a file", entry.FullPath)
} }
} }
return f.store.UpdateEntry(entry) return f.store.UpdateEntry(ctx, entry)
} }
func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) {
now := time.Now() now := time.Now()
@ -181,11 +200,11 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) {
}, },
}, nil }, nil
} }
return f.store.FindEntry(p) return f.store.FindEntry(ctx, p)
} }
func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) { func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) {
entry, err := f.FindEntry(p) entry, err := f.FindEntry(ctx, p)
if err != nil { if err != nil {
return err return err
} }
@ -198,37 +217,41 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDelet
lastFileName := "" lastFileName := ""
includeLastFile := false includeLastFile := false
for limit > 0 { for limit > 0 {
entries, err := f.ListDirectoryEntries(p, lastFileName, includeLastFile, 1024) entries, err := f.ListDirectoryEntries(ctx, p, lastFileName, includeLastFile, 1024)
if err != nil { if err != nil {
glog.Errorf("list folder %s: %v", p, err)
return fmt.Errorf("list folder %s: %v", p, err) return fmt.Errorf("list folder %s: %v", p, err)
} }
if len(entries) == 0 { if len(entries) == 0 {
break break
} else { }
if isRecursive {
for _, sub := range entries { if isRecursive {
lastFileName = sub.Name() for _, sub := range entries {
f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks) lastFileName = sub.Name()
limit-- err = f.DeleteEntryMetaAndData(ctx, sub.FullPath, isRecursive, shouldDeleteChunks)
if limit <= 0 { if err != nil {
break return err
}
} }
} else { limit--
if len(entries) > 0 { if limit <= 0 {
return fmt.Errorf("folder %s is not empty", p) break
} }
} }
f.cacheDelDirectory(string(p)) }
if len(entries) < 1024 {
break if len(entries) < 1024 {
} break
} }
} }
f.cacheDelDirectory(string(p))
} }
if shouldDeleteChunks { if shouldDeleteChunks {
f.DeleteChunks(entry.Chunks) f.DeleteChunks(p, entry.Chunks)
} }
if p == "/" { if p == "/" {
@ -238,17 +261,22 @@ func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDelet
f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks)
return f.store.DeleteEntry(p) return f.store.DeleteEntry(ctx, p)
} }
func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) {
if strings.HasSuffix(string(p), "/") && len(p) > 1 { if strings.HasSuffix(string(p), "/") && len(p) > 1 {
p = p[0 : len(p)-1] p = p[0 : len(p)-1]
} }
return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit) return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit)
} }
func (f *Filer) cacheDelDirectory(dirpath string) { func (f *Filer) cacheDelDirectory(dirpath string) {
if dirpath == "/" {
return
}
if f.directoryCache == nil { if f.directoryCache == nil {
return return
} }
@ -257,6 +285,7 @@ func (f *Filer) cacheDelDirectory(dirpath string) {
} }
func (f *Filer) cacheGetDirectory(dirpath string) *Entry { func (f *Filer) cacheGetDirectory(dirpath string) *Entry {
if f.directoryCache == nil { if f.directoryCache == nil {
return nil return nil
} }
@ -0,0 +1,163 @@
package filer2
import (
"context"
"fmt"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
func VolumeId(fileId string) string {
lastCommaIndex := strings.LastIndex(fileId, ",")
if lastCommaIndex > 0 {
return fileId[:lastCommaIndex]
}
return fileId
}
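`VolumeId` keeps everything before the last comma of a file id, so `3,01637037d6` maps to volume `3`, and an id without a comma passes through unchanged. A standalone illustration (the sample ids are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// volumeId mirrors filer2.VolumeId: take the part before the last comma.
func volumeId(fileId string) string {
	if i := strings.LastIndex(fileId, ","); i > 0 {
		return fileId[:i]
	}
	return fileId
}

func main() {
	fmt.Println(volumeId("3,01637037d6")) // "3"
	fmt.Println(volumeId("3"))            // "3" (no comma: returned as-is)
}
```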
type FilerClient interface {
WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error
}
func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) {
var vids []string
for _, chunkView := range chunkViews {
vids = append(vids, VolumeId(chunkView.FileId))
}
vid2Locations := make(map[string]*filer_pb.Locations)
err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
if err != nil {
return err
}
vid2Locations = resp.LocationsMap
return nil
})
if err != nil {
return 0, fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
}
var wg sync.WaitGroup
var mu sync.Mutex // guards err and totalRead, which the chunk goroutines write concurrently
for _, chunkView := range chunkViews {
wg.Add(1)
go func(chunkView *ChunkView) {
defer wg.Done()
glog.V(4).Infof("read fh reading chunk: %+v", chunkView)
locations := vid2Locations[VolumeId(chunkView.FileId)]
if locations == nil || len(locations.Locations) == 0 {
glog.V(0).Infof("failed to locate %s", chunkView.FileId)
mu.Lock()
err = fmt.Errorf("failed to locate %s", chunkView.FileId)
mu.Unlock()
return
}
n, readErr := util.ReadUrl(
fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
chunkView.Offset,
int(chunkView.Size),
buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)],
!chunkView.IsFullChunk)
if readErr != nil {
glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, readErr)
mu.Lock()
err = fmt.Errorf("failed to read http://%s/%s: %v",
locations.Locations[0].Url, chunkView.FileId, readErr)
mu.Unlock()
return
}
glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView)
mu.Lock()
totalRead += n
mu.Unlock()
}(chunkView)
}
wg.Wait()
return
}
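The function fans out one goroutine per chunk view and writes each ranged read into a disjoint slice of `buff`, so the caller only has to size the buffer to the read window. A hedged sketch of that wiring (it assumes `ViewFromChunks` with the signature used by `StreamContent` further below, and any `filer2.FilerClient` implementation):

```go
package main

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// readRange reads size bytes at offset out of an entry's chunks,
// fanning out one HTTP read per chunk view via ReadIntoBuffer.
func readRange(ctx context.Context, client filer2.FilerClient, path string,
	chunks []*filer_pb.FileChunk, offset int64, size int) ([]byte, error) {
	buff := make([]byte, size)
	chunkViews := filer2.ViewFromChunks(chunks, offset, size)
	if _, err := filer2.ReadIntoBuffer(ctx, client, path, buff, chunkViews, offset); err != nil {
		return nil, err
	}
	return buff, nil
}
```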
func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) {
dir, name := FullPath(fullFilePath).DirAndName()
err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
Name: name,
}
glog.V(3).Infof("read %s request: %v", fullFilePath, request)
resp, err := client.LookupDirectoryEntry(ctx, request)
if err != nil {
if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
return nil
}
glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err)
return err
}
if resp.Entry != nil {
entry = resp.Entry
}
return nil
})
return
}
func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath string, fn func(entry *filer_pb.Entry)) (err error) {
err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
paginationLimit := 1024
lastEntryName := ""
for {
request := &filer_pb.ListEntriesRequest{
Directory: fullDirPath,
StartFromFileName: lastEntryName,
Limit: uint32(paginationLimit),
}
glog.V(3).Infof("read directory: %v", request)
resp, err := client.ListEntries(ctx, request)
if err != nil {
return fmt.Errorf("list %s: %v", fullDirPath, err)
}
for _, entry := range resp.Entries {
fn(entry)
lastEntryName = entry.Name
}
if len(resp.Entries) < paginationLimit {
break
}
}
return nil
})
return
}
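`ReadDirAllEntries` hides the pagination: it pulls 1024 entries per `ListEntries` call and resumes from the last name seen until a short page signals the end. A caller therefore only supplies a callback; for example, collecting all names in a directory (a sketch, for any `filer2.FilerClient`):

```go
package main

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// listNames gathers every entry name in dir via the paginating helper.
func listNames(ctx context.Context, client filer2.FilerClient, dir string) ([]string, error) {
	var names []string
	err := filer2.ReadDirAllEntries(ctx, client, dir, func(entry *filer_pb.Entry) {
		names = append(names, entry.Name)
	})
	return names, err
}
```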
@ -38,25 +38,28 @@ func (f *Filer) loopProcessingDeletion() {
fileIds = append(fileIds, fid) fileIds = append(fileIds, fid)
if len(fileIds) >= 4096 { if len(fileIds) >= 4096 {
glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
fileIds = fileIds[:0] fileIds = fileIds[:0]
} }
case <-ticker.C: case <-ticker.C:
if len(fileIds) > 0 { if len(fileIds) > 0 {
glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
fileIds = fileIds[:0] fileIds = fileIds[:0]
} }
} }
} }
} }
func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { func (f *Filer) DeleteChunks(fullpath FullPath, chunks []*filer_pb.FileChunk) {
for _, chunk := range chunks { for _, chunk := range chunks {
f.fileIdDeletionChan <- chunk.FileId glog.V(3).Infof("deleting %s chunk %s", fullpath, chunk.String())
f.fileIdDeletionChan <- chunk.GetFileIdString()
} }
} }
// DeleteFileByFileId direct delete by file id.
// Only used when the fileId is not being managed by snapshots.
func (f *Filer) DeleteFileByFileId(fileId string) { func (f *Filer) DeleteFileByFileId(fileId string) {
f.fileIdDeletionChan <- fileId f.fileIdDeletionChan <- fileId
} }
@ -67,22 +70,19 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
return return
} }
if newEntry == nil { if newEntry == nil {
f.DeleteChunks(oldEntry.Chunks) f.DeleteChunks(oldEntry.FullPath, oldEntry.Chunks)
return // nothing to diff against; avoid dereferencing the nil newEntry below
} }
var toDelete []*filer_pb.FileChunk var toDelete []*filer_pb.FileChunk
newChunkIds := make(map[string]bool)
for _, newChunk := range newEntry.Chunks {
newChunkIds[newChunk.GetFileIdString()] = true
}
for _, oldChunk := range oldEntry.Chunks { for _, oldChunk := range oldEntry.Chunks {
found := false if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
for _, newChunk := range newEntry.Chunks {
if oldChunk.FileId == newChunk.FileId {
found = true
break
}
}
if !found {
toDelete = append(toDelete, oldChunk) toDelete = append(toDelete, oldChunk)
} }
} }
f.DeleteChunks(toDelete) f.DeleteChunks(oldEntry.FullPath, toDelete)
} }
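The rewritten comparison swaps the old nested loop for a map-based set difference keyed on `GetFileIdString()`, turning an O(m·n) scan into O(m+n) and only queueing chunks that actually left the entry. The same idiom in isolation (the sample ids are made up):

```go
package main

import "fmt"

// staleIDs returns ids present in oldIDs but absent from newIDs:
// a set difference via a lookup map, as in deleteChunksIfNotNew.
func staleIDs(oldIDs, newIDs []string) (stale []string) {
	keep := make(map[string]bool, len(newIDs))
	for _, id := range newIDs {
		keep[id] = true
	}
	for _, id := range oldIDs {
		if !keep[id] {
			stale = append(stale, id)
		}
	}
	return
}

func main() {
	fmt.Println(staleIDs([]string{"3,01", "4,02"}, []string{"4,02"})) // [3,01]
}
```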
@ -20,12 +20,18 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool)
glog.V(3).Infof("notifying entry update %v", key) glog.V(3).Infof("notifying entry update %v", key)
newParentPath := ""
if newEntry != nil {
newParentPath, _ = newEntry.FullPath.DirAndName()
}
notification.Queue.SendMessage( notification.Queue.SendMessage(
key, key,
&filer_pb.EventNotification{ &filer_pb.EventNotification{
OldEntry: oldEntry.ToProtoEntry(), OldEntry: oldEntry.ToProtoEntry(),
NewEntry: newEntry.ToProtoEntry(), NewEntry: newEntry.ToProtoEntry(),
DeleteChunks: deleteChunks, DeleteChunks: deleteChunks,
NewParentPath: newParentPath,
}, },
) )
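With `NewParentPath` on the wire, a queue consumer can distinguish a move from an in-place update without a second lookup. A hedged sketch of such a consumer-side dispatch (the function and its labels are hypothetical; only the message fields come from `filer_pb.EventNotification`):

```go
package main

import "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"

// classify is a hypothetical consumer helper; oldParentPath is the
// directory the notification was keyed on.
func classify(oldParentPath string, ev *filer_pb.EventNotification) string {
	switch {
	case ev.OldEntry == nil && ev.NewEntry != nil:
		return "create"
	case ev.OldEntry != nil && ev.NewEntry == nil:
		return "delete"
	case ev.NewParentPath != "" && ev.NewParentPath != oldParentPath:
		return "move" // the entry changed parent directories
	default:
		return "update"
	}
}
```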
@ -1,7 +1,12 @@
package filer2 package filer2
import ( import (
"context"
"errors" "errors"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -10,12 +15,110 @@ type FilerStore interface {
GetName() string GetName() string
// Initialize initializes the file store // Initialize initializes the file store
Initialize(configuration util.Configuration) error Initialize(configuration util.Configuration) error
InsertEntry(*Entry) error InsertEntry(context.Context, *Entry) error
UpdateEntry(*Entry) (err error) UpdateEntry(context.Context, *Entry) (err error)
// err == filer2.ErrNotFound if not found // err == filer2.ErrNotFound if not found
FindEntry(FullPath) (entry *Entry, err error) FindEntry(context.Context, FullPath) (entry *Entry, err error)
DeleteEntry(FullPath) (err error) DeleteEntry(context.Context, FullPath) (err error)
ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error)
BeginTransaction(ctx context.Context) (context.Context, error)
CommitTransaction(ctx context.Context) error
RollbackTransaction(ctx context.Context) error
} }
var ErrNotFound = errors.New("filer: no entry is found in filer store") var ErrNotFound = errors.New("filer: no entry is found in filer store")
type FilerStoreWrapper struct {
actualStore FilerStore
}
func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
return &FilerStoreWrapper{
actualStore: store,
}
}
func (fsw *FilerStoreWrapper) GetName() string {
return fsw.actualStore.GetName()
}
func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error {
return fsw.actualStore.Initialize(configuration)
}
func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc()
start := time.Now()
defer func() {
stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
}()
filer_pb.BeforeEntrySerialization(entry.Chunks)
return fsw.actualStore.InsertEntry(ctx, entry)
}
func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc()
start := time.Now()
defer func() {
stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
}()
filer_pb.BeforeEntrySerialization(entry.Chunks)
return fsw.actualStore.UpdateEntry(ctx, entry)
}
func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry *Entry, err error) {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc()
start := time.Now()
defer func() {
stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds())
}()
entry, err = fsw.actualStore.FindEntry(ctx, fp)
if err != nil {
return nil, err
}
filer_pb.AfterEntryDeserialization(entry.Chunks)
return
}
func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err error) {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc()
start := time.Now()
defer func() {
stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds())
}()
return fsw.actualStore.DeleteEntry(ctx, fp)
}
func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) {
stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc()
start := time.Now()
defer func() {
stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds())
}()
entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
if err != nil {
return nil, err
}
for _, entry := range entries {
filer_pb.AfterEntryDeserialization(entry.Chunks)
}
return entries, err
}
func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) {
return fsw.actualStore.BeginTransaction(ctx)
}
func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error {
return fsw.actualStore.CommitTransaction(ctx)
}
func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error {
return fsw.actualStore.RollbackTransaction(ctx)
}
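Every wrapper method repeats the same preamble: bump a per-store counter, then record latency on the way out. Factored out, it is just a timed decorator; a generic sketch of the idiom, assuming the `stats` counters are Prometheus vectors (which their `WithLabelValues(...).Inc()`/`Observe` shape suggests):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// timed mirrors the wrapper preamble: count the call, then observe
// its latency when the wrapped operation returns.
func timed(counter prometheus.Counter, latency prometheus.Observer, op func() error) error {
	counter.Inc()
	start := time.Now()
	defer func() {
		latency.Observe(time.Since(start).Seconds())
	}()
	return op()
}
```

Because `SetStore` (earlier in this diff) always wraps the backend in `NewFilerStoreWrapper`, every store gains these metrics plus the `BeforeEntrySerialization`/`AfterEntryDeserialization` hooks without any backend changes.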
@ -8,10 +8,7 @@ import (
type FullPath string type FullPath string
func NewFullPath(dir, name string) FullPath { func NewFullPath(dir, name string) FullPath {
if strings.HasSuffix(dir, "/") { return FullPath(dir).Child(name)
return FullPath(dir + name)
}
return FullPath(dir + "/" + name)
} }
func (fp FullPath) DirAndName() (string, string) { func (fp FullPath) DirAndName() (string, string) {
@ -29,3 +26,11 @@ func (fp FullPath) Name() string {
_, name := filepath.Split(string(fp)) _, name := filepath.Split(string(fp))
return name return name
} }
func (fp FullPath) Child(name string) FullPath {
dir := string(fp)
if strings.HasSuffix(dir, "/") {
return FullPath(dir + name)
}
return FullPath(dir + "/" + name)
}
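`NewFullPath` is now a thin alias for `Child`, and the trailing-slash check keeps the root from producing a double slash. The behavior, following directly from the code above:

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer2"
)

func main() {
	fmt.Println(filer2.NewFullPath("/", "file1.jpg"))           // /file1.jpg (no "//")
	fmt.Println(filer2.NewFullPath("/home/chris", "file1.jpg")) // /home/chris/file1.jpg
	fmt.Println(filer2.FullPath("/home/chris").Child("docs"))   // /home/chris/docs
}
```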
@ -2,12 +2,14 @@ package leveldb
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
weed_util "github.com/chrislusf/seaweedfs/weed/util" weed_util "github.com/chrislusf/seaweedfs/weed/util"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util" leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
) )
@ -38,14 +40,30 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
} }
if store.db, err = leveldb.OpenFile(dir, nil); err != nil { opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10,
}
if store.db, err = leveldb.OpenFile(dir, opts); err != nil {
glog.Infof("filer store open dir %s: %v", dir, err) glog.Infof("filer store open dir %s: %v", dir, err)
return return
} }
return return
} }
func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error) {
return ctx, nil
}
func (store *LevelDBStore) CommitTransaction(ctx context.Context) error {
return nil
}
func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error {
return nil
}
func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
key := genKey(entry.DirAndName()) key := genKey(entry.DirAndName())
value, err := entry.EncodeAttributesAndChunks() value, err := entry.EncodeAttributesAndChunks()
@ -64,12 +82,12 @@ func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil return nil
} }
func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) { func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
return store.InsertEntry(entry) return store.InsertEntry(ctx, entry)
} }
func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
key := genKey(fullpath.DirAndName()) key := genKey(fullpath.DirAndName())
data, err := store.db.Get(key, nil) data, err := store.db.Get(key, nil)
@ -94,7 +112,7 @@ func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.En
return entry, nil return entry, nil
} }
func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
key := genKey(fullpath.DirAndName()) key := genKey(fullpath.DirAndName())
err = store.db.Delete(key, nil) err = store.db.Delete(key, nil)
@ -105,7 +123,7 @@ func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) {
return nil return nil
} }
func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) { limit int) (entries []*filer2.Entry, err error) {
directoryPrefix := genDirectoryKeyPrefix(fullpath, "") directoryPrefix := genDirectoryKeyPrefix(fullpath, "")
@ -1,6 +1,7 @@
package leveldb package leveldb
import ( import (
"context"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil" "io/ioutil"
"os" "os"
@ -8,7 +9,7 @@ import (
) )
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler(nil) filer := filer2.NewFiler(nil, nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDBStore{} store := &LevelDBStore{}
@ -18,6 +19,8 @@ func TestCreateAndFind(t *testing.T) {
fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
ctx := context.Background()
entry1 := &filer2.Entry{ entry1 := &filer2.Entry{
FullPath: fullpath, FullPath: fullpath,
Attr: filer2.Attr{ Attr: filer2.Attr{
@ -27,12 +30,12 @@ func TestCreateAndFind(t *testing.T) {
}, },
} }
if err := filer.CreateEntry(entry1); err != nil { if err := filer.CreateEntry(ctx, entry1); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err) t.Errorf("create entry %v: %v", entry1.FullPath, err)
return return
} }
entry, err := filer.FindEntry(fullpath) entry, err := filer.FindEntry(ctx, fullpath)
if err != nil { if err != nil {
t.Errorf("find entry: %v", err) t.Errorf("find entry: %v", err)
@ -45,14 +48,14 @@ func TestCreateAndFind(t *testing.T) {
} }
// checking one upper directory // checking one upper directory
entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100) entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 { if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries)) t.Errorf("list entries count: %v", len(entries))
return return
} }
// checking one upper directory // checking one upper directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
if len(entries) != 1 { if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries)) t.Errorf("list entries count: %v", len(entries))
return return
@ -61,7 +64,7 @@ func TestCreateAndFind(t *testing.T) {
} }
func TestEmptyRoot(t *testing.T) { func TestEmptyRoot(t *testing.T) {
filer := filer2.NewFiler(nil) filer := filer2.NewFiler(nil, nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDBStore{} store := &LevelDBStore{}
@ -69,8 +72,10 @@ func TestEmptyRoot(t *testing.T) {
filer.SetStore(store) filer.SetStore(store)
filer.DisableDirectoryCache() filer.DisableDirectoryCache()
ctx := context.Background()
// checking one upper directory // checking one upper directory
entries, err := filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
if err != nil { if err != nil {
t.Errorf("list entries: %v", err) t.Errorf("list entries: %v", err)
return return
@ -0,0 +1,208 @@
package leveldb
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"os"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
)
func init() {
filer2.Stores = append(filer2.Stores, &LevelDB2Store{})
}
type LevelDB2Store struct {
dbs []*leveldb.DB
dbCount int
}
func (store *LevelDB2Store) GetName() string {
return "leveldb2"
}
func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) {
dir := configuration.GetString("dir")
return store.initialize(dir, 8)
}
func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
glog.Infof("filer store leveldb2 dir: %s", dir)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
}
for d := 0; d < dbCount; d++ {
dbFolder := fmt.Sprintf("%s/%02d", dir, d)
os.MkdirAll(dbFolder, 0755)
db, dbErr := leveldb.OpenFile(dbFolder, opts)
if dbErr != nil {
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
return dbErr
}
store.dbs = append(store.dbs, db)
}
store.dbCount = dbCount
return
}
func (store *LevelDB2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
return ctx, nil
}
func (store *LevelDB2Store) CommitTransaction(ctx context.Context) error {
return nil
}
func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error {
return nil
}
func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
dir, name := entry.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
err = store.dbs[partitionId].Put(key, value, nil)
if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}
// println("saved", entry.FullPath, "chunks", len(entry.Chunks))
return nil
}
func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
return store.InsertEntry(ctx, entry)
}
func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
dir, name := fullpath.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
data, err := store.dbs[partitionId].Get(key, nil)
if err == leveldb.ErrNotFound {
return nil, filer2.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", entry.FullPath, err)
}
entry = &filer2.Entry{
FullPath: fullpath,
}
err = entry.DecodeAttributesAndChunks(data)
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
// println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
return entry, nil
}
func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
dir, name := fullpath.DirAndName()
key, partitionId := genKey(dir, name, store.dbCount)
err = store.dbs[partitionId].Delete(key, nil)
if err != nil {
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
}
func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) {
directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount)
lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount)
iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil)
for iter.Next() {
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
break
}
fileName := getNameFromKey(key)
if fileName == "" {
continue
}
if fileName == startFileName && !inclusive {
continue
}
limit--
if limit < 0 {
break
}
entry := &filer2.Entry{
FullPath: filer2.NewFullPath(string(fullpath), fileName),
}
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
}
iter.Release()
return entries, err
}
func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) {
key, partitionId = hashToBytes(dirPath, dbCount)
key = append(key, []byte(fileName)...)
return key, partitionId
}
func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) {
keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount)
if len(startFileName) > 0 {
keyPrefix = append(keyPrefix, []byte(startFileName)...)
}
return keyPrefix, partitionId
}
func getNameFromKey(key []byte) string {
return string(key[md5.Size:])
}
// hash directory, and use last byte for partitioning
func hashToBytes(dir string, dbCount int) ([]byte, int) {
h := md5.New()
io.WriteString(h, dir)
b := h.Sum(nil)
x := b[len(b)-1]
return b, int(x) % dbCount
}
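Hashing the directory rather than the full path pins every entry of a directory to one shard, so `ListDirectoryEntries` can iterate a single LevelDB instead of merging across all eight. The partition is just the last md5 byte modulo `dbCount`; a standalone illustration (directory names arbitrary):

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

// shardOf mirrors hashToBytes: md5 the directory, use the last byte
// modulo dbCount as the partition index.
func shardOf(dir string, dbCount int) int {
	h := md5.New()
	io.WriteString(h, dir)
	sum := h.Sum(nil)
	return int(sum[len(sum)-1]) % dbCount
}

func main() {
	// Entries of one directory always land together, so a listing
	// touches exactly one of the default 8 LevelDB instances.
	fmt.Println(shardOf("/home/chris/this/is/one", 8))
	fmt.Println(shardOf("/home/chris/this/is", 8))
}
```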
@ -0,0 +1,88 @@
package leveldb
import (
"context"
"github.com/chrislusf/seaweedfs/weed/filer2"
"io/ioutil"
"os"
"testing"
)
func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler(nil, nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}
store.initialize(dir, 2)
filer.SetStore(store)
filer.DisableDirectoryCache()
fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
ctx := context.Background()
entry1 := &filer2.Entry{
FullPath: fullpath,
Attr: filer2.Attr{
Mode: 0440,
Uid: 1234,
Gid: 5678,
},
}
if err := filer.CreateEntry(ctx, entry1); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err)
return
}
entry, err := filer.FindEntry(ctx, fullpath)
if err != nil {
t.Errorf("find entry: %v", err)
return
}
if entry.FullPath != entry1.FullPath {
t.Errorf("find wrong entry: %v", entry.FullPath)
return
}
// checking one upper directory
entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
// checking one upper directory
entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries))
return
}
}
func TestEmptyRoot(t *testing.T) {
filer := filer2.NewFiler(nil, nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir)
store := &LevelDB2Store{}
store.initialize(dir, 2)
filer.SetStore(store)
filer.DisableDirectoryCache()
ctx := context.Background()
// checking one upper directory
entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
if err != nil {
t.Errorf("list entries: %v", err)
return
}
if len(entries) != 0 {
t.Errorf("list entries count: %v", len(entries))
return
}
}
@ -1,11 +1,13 @@
package memdb package memdb
import ( import (
"context"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/google/btree" "github.com/google/btree"
"strings" "strings"
"sync"
) )
func init() { func init() {
@ -13,7 +15,8 @@ func init() {
} }
type MemDbStore struct { type MemDbStore struct {
tree *btree.BTree tree *btree.BTree
treeLock sync.Mutex
} }
type entryItem struct { type entryItem struct {
@ -33,21 +36,35 @@ func (store *MemDbStore) Initialize(configuration util.Configuration) (err error
return nil return nil
} }
func (store *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) { func (store *MemDbStore) BeginTransaction(ctx context.Context) (context.Context, error) {
// println("inserting", entry.FullPath) return ctx, nil
store.tree.ReplaceOrInsert(entryItem{entry}) }
func (store *MemDbStore) CommitTransaction(ctx context.Context) error {
return nil
}
func (store *MemDbStore) RollbackTransaction(ctx context.Context) error {
return nil return nil
} }
func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) { func (store *MemDbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
if _, err = store.FindEntry(entry.FullPath); err != nil { // println("inserting", entry.FullPath)
store.treeLock.Lock()
store.tree.ReplaceOrInsert(entryItem{entry})
store.treeLock.Unlock()
return nil
}
func (store *MemDbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
if _, err = store.FindEntry(ctx, entry.FullPath); err != nil {
return fmt.Errorf("no such file %s : %v", entry.FullPath, err) return fmt.Errorf("no such file %s : %v", entry.FullPath, err)
} }
store.treeLock.Lock()
store.tree.ReplaceOrInsert(entryItem{entry}) store.tree.ReplaceOrInsert(entryItem{entry})
store.treeLock.Unlock()
return nil return nil
} }
func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { func (store *MemDbStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}}) item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}})
if item == nil { if item == nil {
return nil, filer2.ErrNotFound return nil, filer2.ErrNotFound
@ -56,12 +73,14 @@ func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entr
return entry, nil return entry, nil
} }
func (store *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (err error) { func (store *MemDbStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
store.treeLock.Lock()
store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}}) store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}})
store.treeLock.Unlock()
return nil return nil
} }
func (store *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { func (store *MemDbStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) {
startFrom := string(fullpath) startFrom := string(fullpath)
if startFileName != "" { if startFileName != "" {
@ -1,17 +1,20 @@
package memdb package memdb
import ( import (
"context"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"testing" "testing"
) )
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler(nil) filer := filer2.NewFiler(nil, nil)
store := &MemDbStore{} store := &MemDbStore{}
store.Initialize(nil) store.Initialize(nil)
filer.SetStore(store) filer.SetStore(store)
filer.DisableDirectoryCache() filer.DisableDirectoryCache()
ctx := context.Background()
fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg")
entry1 := &filer2.Entry{ entry1 := &filer2.Entry{
@ -23,12 +26,12 @@ func TestCreateAndFind(t *testing.T) {
}, },
} }
if err := filer.CreateEntry(entry1); err != nil { if err := filer.CreateEntry(ctx, entry1); err != nil {
t.Errorf("create entry %v: %v", entry1.FullPath, err) t.Errorf("create entry %v: %v", entry1.FullPath, err)
return return
} }
entry, err := filer.FindEntry(fullpath) entry, err := filer.FindEntry(ctx, fullpath)
if err != nil { if err != nil {
t.Errorf("find entry: %v", err) t.Errorf("find entry: %v", err)
@ -43,12 +46,14 @@ func TestCreateAndFind(t *testing.T) {
} }
func TestCreateFileAndList(t *testing.T) { func TestCreateFileAndList(t *testing.T) {
filer := filer2.NewFiler(nil) filer := filer2.NewFiler(nil, nil)
store := &MemDbStore{} store := &MemDbStore{}
store.Initialize(nil) store.Initialize(nil)
filer.SetStore(store) filer.SetStore(store)
filer.DisableDirectoryCache() filer.DisableDirectoryCache()
ctx := context.Background()
entry1 := &filer2.Entry{ entry1 := &filer2.Entry{
FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"), FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"),
Attr: filer2.Attr{ Attr: filer2.Attr{
@ -67,11 +72,11 @@ func TestCreateFileAndList(t *testing.T) {
}, },
} }
filer.CreateEntry(entry1) filer.CreateEntry(ctx, entry1)
filer.CreateEntry(entry2) filer.CreateEntry(ctx, entry2)
// checking the 2 files // checking the 2 files
entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "", false, 100)
if err != nil { if err != nil {
t.Errorf("list entries: %v", err) t.Errorf("list entries: %v", err)
@ -94,21 +99,21 @@ func TestCreateFileAndList(t *testing.T) {
} }
// checking the offset // checking the offset
entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) entries, err = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100)
if len(entries) != 1 { if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries)) t.Errorf("list entries count: %v", len(entries))
return return
} }
// checking one upper directory // checking one upper directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 1 { if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries)) t.Errorf("list entries count: %v", len(entries))
return return
} }
// checking root directory // checking root directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100)
if len(entries) != 1 { if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries)) t.Errorf("list entries count: %v", len(entries))
return return
@ -124,18 +129,18 @@ func TestCreateFileAndList(t *testing.T) {
Gid: 5678, Gid: 5678,
}, },
} }
filer.CreateEntry(entry3) filer.CreateEntry(ctx, entry3)
// checking one upper directory // checking one upper directory
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 2 { if len(entries) != 2 {
t.Errorf("list entries count: %v", len(entries)) t.Errorf("list entries count: %v", len(entries))
return return
} }
// delete file and count // delete file and count
filer.DeleteEntryMetaAndData(file3Path, false, false) filer.DeleteEntryMetaAndData(ctx, file3Path, false, false)
entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is"), "", false, 100)
if len(entries) != 1 { if len(entries) != 1 {
t.Errorf("list entries count: %v", len(entries)) t.Errorf("list entries count: %v", len(entries))
return return
@ -9,8 +9,8 @@ $PGHOME/bin/psql --username=postgres --password seaweedfs
CREATE TABLE IF NOT EXISTS filemeta ( CREATE TABLE IF NOT EXISTS filemeta (
dirhash BIGINT, dirhash BIGINT,
name VARCHAR(1000), name VARCHAR(65535),
directory VARCHAR(4096), directory VARCHAR(65535),
meta bytea, meta bytea,
PRIMARY KEY (dirhash, name) PRIMARY KEY (dirhash, name)
); );
@ -21,12 +21,14 @@ func (store *RedisClusterStore) GetName() string {
func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) { func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) {
return store.initialize( return store.initialize(
configuration.GetStringSlice("addresses"), configuration.GetStringSlice("addresses"),
configuration.GetString("password"),
) )
} }
func (store *RedisClusterStore) initialize(addresses []string) (err error) { func (store *RedisClusterStore) initialize(addresses []string, password string) (err error) {
store.Client = redis.NewClusterClient(&redis.ClusterOptions{ store.Client = redis.NewClusterClient(&redis.ClusterOptions{
Addrs: addresses, Addrs: addresses,
Password: password,
}) })
return return
} }
@ -1,6 +1,7 @@
package redis package redis
import ( import (
"context"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -18,7 +19,17 @@ type UniversalRedisStore struct {
Client redis.UniversalClient Client redis.UniversalClient
} }
func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) { func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error) {
return ctx, nil
}
func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error {
return nil
}
func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error {
return nil
}
func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
value, err := entry.EncodeAttributesAndChunks() value, err := entry.EncodeAttributesAndChunks()
if err != nil { if err != nil {
@ -42,12 +53,12 @@ func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) {
return nil return nil
} }
func (store *UniversalRedisStore) UpdateEntry(entry *filer2.Entry) (err error) { func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
return store.InsertEntry(entry) return store.InsertEntry(ctx, entry)
} }
func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) {
data, err := store.Client.Get(string(fullpath)).Result() data, err := store.Client.Get(string(fullpath)).Result()
if err == redis.Nil { if err == redis.Nil {
@ -69,7 +80,7 @@ func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *fi
return entry, nil return entry, nil
} }
func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) { func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) {
_, err = store.Client.Del(string(fullpath)).Result() _, err = store.Client.Del(string(fullpath)).Result()
@ -88,7 +99,7 @@ func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err err
return nil return nil
} }
func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
limit int) (entries []*filer2.Entry, err error) { limit int) (entries []*filer2.Entry, err error) {
members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result()
@ -126,7 +137,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath,
// fetch entry meta // fetch entry meta
for _, fileName := range members { for _, fileName := range members {
path := filer2.NewFullPath(string(fullpath), fileName) path := filer2.NewFullPath(string(fullpath), fileName)
entry, err := store.FindEntry(path) entry, err := store.FindEntry(ctx, path)
if err != nil { if err != nil {
glog.V(0).Infof("list %s : %v", path, err) glog.V(0).Infof("list %s : %v", path, err)
} else { } else {
weed/filer2/stream.go (new file, 41 lines)
@ -0,0 +1,41 @@
package filer2
import (
"io"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error {
chunkViews := ViewFromChunks(chunks, offset, size)
fileId2Url := make(map[string]string)
for _, chunkView := range chunkViews {
urlString, err := masterClient.LookupFileId(chunkView.FileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}
fileId2Url[chunkView.FileId] = urlString
}
for _, chunkView := range chunkViews {
urlString := fileId2Url[chunkView.FileId]
_, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) {
w.Write(data)
})
if err != nil {
glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
return err
}
}
return nil
}
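`StreamContent` resolves each chunk's volume URL up front, then copies every ranged read straight into the writer, so a filer HTTP handler can serve file bodies without buffering whole files in memory. A hedged sketch of such a handler body (the handler shape and entry lookup are assumed; only `StreamContent`'s signature comes from the code above):

```go
package main

import (
	"net/http"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

// serveEntry streams an entry's chunks into the HTTP response.
func serveEntry(mc *wdclient.MasterClient, w http.ResponseWriter, entry *filer_pb.Entry, size int) {
	if err := filer2.StreamContent(mc, w, entry.Chunks, 0, size); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```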
@ -4,7 +4,6 @@ import (
"context" "context"
"os" "os"
"path" "path"
"path/filepath"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
@ -29,15 +28,13 @@ var _ = fs.NodeRemover(&Dir{})
var _ = fs.NodeRenamer(&Dir{}) var _ = fs.NodeRenamer(&Dir{})
var _ = fs.NodeSetattrer(&Dir{}) var _ = fs.NodeSetattrer(&Dir{})
func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error { func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
// https://github.com/bazil/fuse/issues/196 // https://github.com/bazil/fuse/issues/196
attr.Valid = time.Second attr.Valid = time.Second
if dir.Path == dir.wfs.option.FilerMountRootPath { if dir.Path == dir.wfs.option.FilerMountRootPath {
attr.Uid = dir.wfs.option.MountUid dir.setRootDirAttributes(attr)
attr.Gid = dir.wfs.option.MountGid
attr.Mode = dir.wfs.option.MountMode
return nil return nil
} }
@ -54,40 +51,14 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
return nil return nil
} }
parent, name := filepath.Split(dir.Path) entry, err := filer2.GetEntry(ctx, dir.wfs, dir.Path)
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: parent,
Name: name,
}
glog.V(1).Infof("read dir %s attr: %v", dir.Path, request)
resp, err := client.LookupDirectoryEntry(context, request)
if err != nil {
if err == filer2.ErrNotFound {
return nil
}
glog.V(0).Infof("read dir %s attr %v: %v", dir.Path, request, err)
return err
}
if resp.Entry != nil {
dir.attributes = resp.Entry.Attributes
}
// dir.wfs.listDirectoryEntriesCache.Set(dir.Path, resp.Entry, dir.wfs.option.EntryCacheTtl)
return nil
})
if err != nil { if err != nil {
glog.V(2).Infof("read dir %s attr: %v, error: %v", dir.Path, dir.attributes, err)
return err return err
} }
dir.attributes = entry.Attributes
// glog.V(1).Infof("dir %s: %v", dir.Path, attributes) glog.V(2).Infof("dir %s: %v perm: %v", dir.Path, dir.attributes, os.FileMode(dir.attributes.FileMode))
// glog.V(1).Infof("dir %s permission: %v", dir.Path, os.FileMode(attributes.FileMode))
attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir
@ -99,6 +70,16 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
return nil return nil
} }
func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
attr.Uid = dir.wfs.option.MountUid
attr.Gid = dir.wfs.option.MountGid
attr.Mode = dir.wfs.option.MountMode
attr.Crtime = dir.wfs.option.MountCtime
attr.Ctime = dir.wfs.option.MountCtime
attr.Mtime = dir.wfs.option.MountMtime
attr.Atime = dir.wfs.option.MountMtime
}
func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File {
return &File{ return &File{
Name: name, Name: name,
@ -132,7 +113,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
glog.V(1).Infof("create: %v", request) glog.V(1).Infof("create: %v", request)
if request.Entry.IsDirectory { if request.Entry.IsDirectory {
if err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
if _, err := client.CreateEntry(ctx, request); err != nil { if _, err := client.CreateEntry(ctx, request); err != nil {
glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err)
return fuse.EIO return fuse.EIO
@ -155,7 +136,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{ request := &filer_pb.CreateEntryRequest{
Directory: dir.Path, Directory: dir.Path,
@ -192,33 +173,18 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
var entry *filer_pb.Entry var entry *filer_pb.Entry
fullFilePath := path.Join(dir.Path, req.Name)
item := dir.wfs.listDirectoryEntriesCache.Get(path.Join(dir.Path, req.Name)) item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath)
if item != nil && !item.Expired() { if item != nil && !item.Expired() {
entry = item.Value().(*filer_pb.Entry) entry = item.Value().(*filer_pb.Entry)
} }
if entry == nil { if entry == nil {
err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath)
if err != nil {
request := &filer_pb.LookupDirectoryEntryRequest{ return nil, err
Directory: dir.Path, }
Name: req.Name,
}
glog.V(4).Infof("lookup directory entry: %v", request)
resp, err := client.LookupDirectoryEntry(ctx, request)
if err != nil {
// glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err)
return fuse.ENOENT
}
entry = resp.Entry
// dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, dir.wfs.option.EntryCacheTtl)
return nil
})
} }
if entry != nil { if entry != nil {
@ -243,7 +209,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { err = dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
paginationLimit := 1024 paginationLimit := 1024
remaining := dir.wfs.option.DirListingLimit remaining := dir.wfs.option.DirListingLimit
@ -305,33 +271,14 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error {
var entry *filer_pb.Entry entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name))
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{
Directory: dir.Path,
Name: req.Name,
}
glog.V(4).Infof("lookup to-be-removed entry: %v", request)
resp, err := client.LookupDirectoryEntry(ctx, request)
if err != nil {
// glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err)
return fuse.ENOENT
}
entry = resp.Entry
return nil
})
if err != nil { if err != nil {
return err return err
} }
dir.wfs.deleteFileChunks(entry.Chunks) dir.wfs.deleteFileChunks(ctx, entry.Chunks)
return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.DeleteEntryRequest{ request := &filer_pb.DeleteEntryRequest{
Directory: dir.Path, Directory: dir.Path,
@ -355,7 +302,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro
func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error {
return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.DeleteEntryRequest{ request := &filer_pb.DeleteEntryRequest{
Directory: dir.Path, Directory: dir.Path,
@ -379,6 +326,10 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
if dir.attributes == nil {
return nil
}
glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle)
if req.Valid.Mode() { if req.Valid.Mode() {
dir.attributes.FileMode = uint32(req.Mode) dir.attributes.FileMode = uint32(req.Mode)
@ -397,7 +348,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
} }
parentDir, name := filer2.FullPath(dir.Path).DirAndName() parentDir, name := filer2.FullPath(dir.Path).DirAndName()
return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{ request := &filer_pb.UpdateEntryRequest{
Directory: parentDir, Directory: parentDir,
@ -35,7 +35,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
}, },
} }
err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
if _, err := client.CreateEntry(ctx, request); err != nil { if _, err := client.CreateEntry(ctx, request); err != nil {
glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err)
return fuse.EIO return fuse.EIO
@ -2,118 +2,32 @@ package filesys
import ( import (
"context" "context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs" "github.com/seaweedfs/fuse/fs"
"math"
"path/filepath"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
) )
func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {
newDir := newDirectory.(*Dir) newDir := newDirectory.(*Dir)
return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
// find existing entry request := &filer_pb.AtomicRenameEntryRequest{
request := &filer_pb.LookupDirectoryEntryRequest{ OldDirectory: dir.Path,
Directory: dir.Path, OldName: req.OldName,
Name: req.OldName, NewDirectory: newDir.Path,
NewName: req.NewName,
} }
glog.V(4).Infof("find existing directory entry: %v", request) _, err := client.AtomicRenameEntry(ctx, request)
resp, err := client.LookupDirectoryEntry(ctx, request)
if err != nil { if err != nil {
glog.V(3).Infof("renaming find %s/%s: %v", dir.Path, req.OldName, err) return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err)
return fuse.ENOENT
} }
entry := resp.Entry return nil
glog.V(4).Infof("found existing directory entry resp: %+v", resp)
return moveEntry(ctx, client, dir.Path, entry, newDir.Path, req.NewName)
}) })
} }
func moveEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, oldParent string, entry *filer_pb.Entry, newParent, newName string) error {
if entry.IsDirectory {
currentDirPath := filepath.Join(oldParent, entry.Name)
lastFileName := ""
includeLastFile := false
limit := math.MaxInt32
for limit > 0 {
request := &filer_pb.ListEntriesRequest{
Directory: currentDirPath,
StartFromFileName: lastFileName,
InclusiveStartFrom: includeLastFile,
Limit: 1024,
}
glog.V(4).Infof("read directory: %v", request)
resp, err := client.ListEntries(ctx, request)
if err != nil {
glog.V(0).Infof("list %s: %v", oldParent, err)
return fuse.EIO
}
if len(resp.Entries) == 0 {
break
}
for _, item := range resp.Entries {
lastFileName = item.Name
err := moveEntry(ctx, client, currentDirPath, item, filepath.Join(newParent, newName), item.Name)
if err != nil {
return err
}
limit--
}
if len(resp.Entries) < 1024 {
break
}
}
}
// add to new directory
{
request := &filer_pb.CreateEntryRequest{
Directory: newParent,
Entry: &filer_pb.Entry{
Name: newName,
IsDirectory: entry.IsDirectory,
Attributes: entry.Attributes,
Chunks: entry.Chunks,
},
}
glog.V(1).Infof("create new entry: %v", request)
if _, err := client.CreateEntry(ctx, request); err != nil {
glog.V(0).Infof("renaming create %s/%s: %v", newParent, newName, err)
return fuse.EIO
}
}
// delete old entry
{
request := &filer_pb.DeleteEntryRequest{
Directory: oldParent,
Name: entry.Name,
IsDeleteData: false,
}
glog.V(1).Infof("remove old entry: %v", request)
_, err := client.DeleteEntry(ctx, request)
if err != nil {
glog.V(0).Infof("renaming delete %s/%s: %v", oldParent, entry.Name, err)
return fuse.EIO
}
}
return nil
}


@ -4,13 +4,14 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"sync" "github.com/chrislusf/seaweedfs/weed/security"
) )
type ContinuousDirtyPages struct { type ContinuousDirtyPages struct {
@ -109,7 +110,7 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6
// flush existing // flush existing
if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
if chunk != nil { if chunk != nil {
glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
chunks = append(chunks, chunk) chunks = append(chunks, chunk)
} }
} else { } else {
@ -122,7 +123,7 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6
// flush the new page // flush the new page
if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {
if chunk != nil { if chunk != nil {
glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId)
chunks = append(chunks, chunk) chunks = append(chunks, chunk)
} }
} else { } else {
@ -164,8 +165,9 @@ func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Contex
func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {
var fileId, host string var fileId, host string
var auth security.EncodedJwt
if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{ request := &filer_pb.AssignVolumeRequest{
Count: 1, Count: 1,
@ -181,7 +183,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte
return err return err
} }
fileId, host = resp.FileId, resp.Url fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
return nil return nil
}); err != nil { }); err != nil {
@ -190,7 +192,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
bufReader := bytes.NewReader(buf) bufReader := bytes.NewReader(buf)
uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "") uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, auth)
if err != nil { if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err)
return nil, fmt.Errorf("upload data: %v", err) return nil, fmt.Errorf("upload data: %v", err)


@ -74,10 +74,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
return err return err
} }
if file.isOpen {
return nil
}
glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes)
if req.Valid.Size() { if req.Valid.Size() {
@ -109,7 +105,11 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.entry.Attributes.Mtime = req.Mtime.Unix() file.entry.Attributes.Mtime = req.Mtime.Unix()
} }
return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { if file.isOpen {
return nil
}
return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{ request := &filer_pb.UpdateEntryRequest{
Directory: file.dir.Path, Directory: file.dir.Path,
@ -144,7 +144,7 @@ func (file *File) maybeLoadAttributes(ctx context.Context) error {
file.setEntry(entry) file.setEntry(entry)
// glog.V(1).Infof("file attr read cached %v attributes", file.Name) // glog.V(1).Infof("file attr read cached %v attributes", file.Name)
} else { } else {
err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { err := file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.LookupDirectoryEntryRequest{ request := &filer_pb.LookupDirectoryEntryRequest{
Name: file.Name, Name: file.Name,
@ -194,6 +194,8 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
newVisibles = t newVisibles = t
} }
glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
file.entry.Chunks = append(file.entry.Chunks, chunks...) file.entry.Chunks = append(file.entry.Chunks, chunks...)
} }


@ -3,17 +3,16 @@ package filesys
import ( import (
"context" "context"
"fmt" "fmt"
"mime"
"path"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/gabriel-vasile/mimetype"
"github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs" "github.com/seaweedfs/fuse/fs"
"net/http"
"strings"
"sync"
"time"
) )
type FileHandle struct { type FileHandle struct {
@ -65,75 +64,14 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size)
var vids []string totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset)
for _, chunkView := range chunkViews {
vids = append(vids, volumeId(chunkView.FileId))
}
vid2Locations := make(map[string]*filer_pb.Locations)
err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
if err != nil {
return err
}
vid2Locations = resp.LocationsMap
return nil
})
if err != nil {
glog.V(4).Infof("%v/%v read fh lookup volume ids: %v", fh.f.dir.Path, fh.f.Name, err)
return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
}
var totalRead int64
var wg sync.WaitGroup
for _, chunkView := range chunkViews {
wg.Add(1)
go func(chunkView *filer2.ChunkView) {
defer wg.Done()
glog.V(4).Infof("read fh reading chunk: %+v", chunkView)
locations := vid2Locations[volumeId(chunkView.FileId)]
if locations == nil || len(locations.Locations) == 0 {
glog.V(0).Infof("failed to locate %s", chunkView.FileId)
err = fmt.Errorf("failed to locate %s", chunkView.FileId)
return
}
var n int64
n, err = util.ReadUrl(
fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
chunkView.Offset,
int(chunkView.Size),
buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)],
!chunkView.IsFullChunk)
if err != nil {
glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.f.dir.Path, fh.f.Name, locations.Locations[0].Url, chunkView.FileId, n, err)
err = fmt.Errorf("failed to read http://%s/%s: %v",
locations.Locations[0].Url, chunkView.FileId, err)
return
}
glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView)
totalRead += n
}(chunkView)
}
wg.Wait()
resp.Data = buff[:totalRead] resp.Data = buff[:totalRead]
if err != nil {
glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
return err return err
} }
@ -153,7 +91,13 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
resp.Size = len(req.Data) resp.Size = len(req.Data)
if req.Offset == 0 { if req.Offset == 0 {
fh.contentType = http.DetectContentType(req.Data) // detect mime type
var possibleExt string
fh.contentType, possibleExt = mimetype.Detect(req.Data)
if ext := path.Ext(fh.f.Name); ext != possibleExt {
fh.contentType = mime.TypeByExtension(ext)
}
fh.dirtyMetadata = true fh.dirtyMetadata = true
} }
@ -196,7 +140,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
return nil return nil
} }
return fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
if fh.f.entry.Attributes != nil { if fh.f.entry.Attributes != nil {
fh.f.entry.Attributes.Mime = fh.contentType fh.f.entry.Attributes.Mime = fh.contentType
@ -212,70 +156,25 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
Entry: fh.f.entry, Entry: fh.f.entry,
} }
//glog.V(1).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks))
//for i, chunk := range fh.f.entry.Chunks { for i, chunk := range fh.f.entry.Chunks {
// glog.V(4).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
//} }
chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks)
fh.f.entry.Chunks = chunks fh.f.entry.Chunks = chunks
// fh.f.entryViewCache = nil // fh.f.entryViewCache = nil
fh.f.wfs.deleteFileChunks(garbages)
if _, err := client.CreateEntry(ctx, request); err != nil { if _, err := client.CreateEntry(ctx, request); err != nil {
glog.Errorf("update fh: %v", err)
return fmt.Errorf("update fh: %v", err) return fmt.Errorf("update fh: %v", err)
} }
fh.f.wfs.deleteFileChunks(ctx, garbages)
for i, chunk := range garbages {
glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
}
return nil return nil
}) })
} }
func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, fileIds []string) error {
var vids []string
for _, fileId := range fileIds {
vids = append(vids, volumeId(fileId))
}
lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
m := make(map[string]operation.LookupResult)
glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
if err != nil {
return m, err
}
for _, vid := range vids {
lr := operation.LookupResult{
VolumeId: vid,
Locations: nil,
}
locations := resp.LocationsMap[vid]
for _, loc := range locations.Locations {
lr.Locations = append(lr.Locations, operation.Location{
Url: loc.Url,
PublicUrl: loc.PublicUrl,
})
}
m[vid] = lr
}
return m, err
}
_, err := operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
return err
}
func volumeId(fileId string) string {
lastCommaIndex := strings.LastIndex(fileId, ",")
if lastCommaIndex > 0 {
return fileId[:lastCommaIndex]
}
return fileId
}


@ -19,6 +19,7 @@ import (
type Option struct { type Option struct {
FilerGrpcAddress string FilerGrpcAddress string
GrpcDialOption grpc.DialOption
FilerMountRootPath string FilerMountRootPath string
Collection string Collection string
Replication string Replication string
@ -28,9 +29,11 @@ type Option struct {
DirListingLimit int DirListingLimit int
EntryCacheTtl time.Duration EntryCacheTtl time.Duration
MountUid uint32 MountUid uint32
MountGid uint32 MountGid uint32
MountMode os.FileMode MountMode os.FileMode
MountCtime time.Time
MountMtime time.Time
} }
var _ = fs.FS(&WFS{}) var _ = fs.FS(&WFS{})
@ -46,8 +49,6 @@ type WFS struct {
pathToHandleLock sync.Mutex pathToHandleLock sync.Mutex
bufPool sync.Pool bufPool sync.Pool
fileIdsDeletionChan chan []string
stats statsCache stats statsCache
} }
type statsCache struct { type statsCache struct {
@ -65,11 +66,8 @@ func NewSeaweedFileSystem(option *Option) *WFS {
return make([]byte, option.ChunkSizeLimit) return make([]byte, option.ChunkSizeLimit)
}, },
}, },
fileIdsDeletionChan: make(chan []string, 32),
} }
go wfs.loopProcessingDeletion()
return wfs return wfs
} }
@ -77,12 +75,12 @@ func (wfs *WFS) Root() (fs.Node, error) {
return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil
} }
func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
client := filer_pb.NewSeaweedFilerClient(grpcConnection) client := filer_pb.NewSeaweedFilerClient(grpcConnection)
return fn(client) return fn(client)
}, wfs.option.FilerGrpcAddress) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
} }
@ -137,7 +135,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
if wfs.stats.lastChecked < time.Now().Unix()-20 { if wfs.stats.lastChecked < time.Now().Unix()-20 {
err := wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.StatisticsRequest{ request := &filer_pb.StatisticsRequest{
Collection: wfs.option.Collection, Collection: wfs.option.Collection,


@ -2,57 +2,68 @@ package filesys
import ( import (
"context" "context"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"google.golang.org/grpc"
) )
func (wfs *WFS) loopProcessingDeletion() { func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) {
ticker := time.NewTicker(2 * time.Second)
wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
var fileIds []string
for {
select {
case fids := <-wfs.fileIdsDeletionChan:
fileIds = append(fileIds, fids...)
if len(fileIds) >= 1024 {
glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
deleteFileIds(context.Background(), client, fileIds)
fileIds = fileIds[:0]
}
case <-ticker.C:
if len(fileIds) > 0 {
glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
deleteFileIds(context.Background(), client, fileIds)
fileIds = fileIds[:0]
}
}
}
})
}
func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
if len(chunks) == 0 { if len(chunks) == 0 {
return return
} }
var fileIds []string var fileIds []string
for _, chunk := range chunks { for _, chunk := range chunks {
fileIds = append(fileIds, chunk.FileId) fileIds = append(fileIds, chunk.GetFileIdString())
} }
var async = false wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
if async { deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds)
wfs.fileIdsDeletionChan <- fileIds
return
}
wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
deleteFileIds(context.Background(), client, fileIds)
return nil return nil
}) })
} }
func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error {
var vids []string
for _, fileId := range fileIds {
vids = append(vids, filer2.VolumeId(fileId))
}
lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) {
m := make(map[string]operation.LookupResult)
glog.V(4).Infof("remove file lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})
if err != nil {
return m, err
}
for _, vid := range vids {
lr := operation.LookupResult{
VolumeId: vid,
Locations: nil,
}
locations := resp.LocationsMap[vid]
for _, loc := range locations.Locations {
lr.Locations = append(lr.Locations, operation.Location{
Url: loc.Url,
PublicUrl: loc.PublicUrl,
})
}
m[vid] = lr
}
return m, err
}
_, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
return err
}

weed/glide.lock generated (190 lines removed)

@ -1,190 +0,0 @@
hash: 2e3a065472829938d25e879451b6d1aa43e55270e1166a9c044803ef8a3b9eb1
updated: 2018-06-28T22:01:35.910567-07:00
imports:
- name: github.com/seaweedfs/fuse
version: 65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e
subpackages:
- fs
- fuseutil
- name: github.com/boltdb/bolt
version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8
- name: github.com/chrislusf/raft
version: 5f7ddd8f479583daf05879d3d3b174aa202c8fb7
subpackages:
- protobuf
- name: github.com/dgrijalva/jwt-go
version: 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e
- name: github.com/disintegration/imaging
version: bbcee2f5c9d5e94ca42c8b50ec847fec64a6c134
- name: github.com/fsnotify/fsnotify
version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
- name: github.com/go-redis/redis
version: 83fb42932f6145ce52df09860384a4653d2d332a
subpackages:
- internal
- internal/consistenthash
- internal/hashtag
- internal/pool
- internal/proto
- internal/singleflight
- internal/util
- name: github.com/go-sql-driver/mysql
version: d523deb1b23d913de5bdada721a6071e71283618
- name: github.com/gocql/gocql
version: e06f8c1bcd787e6bf0608288b314522f08cc7848
subpackages:
- internal/lru
- internal/murmur
- internal/streams
- name: github.com/gogo/protobuf
version: 30cf7ac33676b5786e78c746683f0d4cd64fa75b
subpackages:
- proto
- name: github.com/golang/protobuf
version: b4deda0973fb4c70b50d226b1af49f3da59f5265
subpackages:
- proto
- protoc-gen-go/descriptor
- ptypes
- ptypes/any
- ptypes/duration
- ptypes/timestamp
- name: github.com/golang/snappy
version: 2e65f85255dbc3072edf28d6b5b8efc472979f5a
- name: github.com/google/btree
version: e89373fe6b4a7413d7acd6da1725b83ef713e6e4
- name: github.com/gorilla/context
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
- name: github.com/gorilla/mux
version: e3702bed27f0d39777b0b37b664b6280e8ef8fbf
- name: github.com/hailocab/go-hostpool
version: e80d13ce29ede4452c43dea11e79b9bc8a15b478
- name: github.com/hashicorp/hcl
version: ef8a98b0bbce4a65b5aa4c368430a80ddc533168
subpackages:
- hcl/ast
- hcl/parser
- hcl/printer
- hcl/scanner
- hcl/strconv
- hcl/token
- json/parser
- json/scanner
- json/token
- name: github.com/karlseguin/ccache
version: b425c9ca005a2050ebe723f6a0cddcb907354ab7
- name: github.com/klauspost/crc32
version: cb6bfca970f6908083f26f39a79009d608efd5cd
- name: github.com/lib/pq
version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8
subpackages:
- oid
- name: github.com/magiconair/properties
version: c2353362d570a7bfa228149c62842019201cfb71
- name: github.com/mitchellh/mapstructure
version: bb74f1db0675b241733089d5a1faa5dd8b0ef57b
- name: github.com/pelletier/go-toml
version: c01d1270ff3e442a8a57cddc1c92dc1138598194
- name: github.com/rwcarlsen/goexif
version: 8d986c03457a2057c7b0fb0a48113f7dd48f9619
subpackages:
- exif
- tiff
- name: github.com/soheilhy/cmux
version: e09e9389d85d8492d313d73d1469c029e710623f
- name: github.com/spf13/afero
version: 787d034dfe70e44075ccc060d346146ef53270ad
subpackages:
- mem
- name: github.com/spf13/cast
version: 8965335b8c7107321228e3e3702cab9832751bac
- name: github.com/spf13/jwalterweatherman
version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394
- name: github.com/spf13/pflag
version: 3ebe029320b2676d667ae88da602a5f854788a8a
- name: github.com/spf13/viper
version: 15738813a09db5c8e5b60a19d67d3f9bd38da3a4
- name: github.com/syndtr/goleveldb
version: 0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697
subpackages:
- leveldb
- leveldb/cache
- leveldb/comparer
- leveldb/errors
- leveldb/filter
- leveldb/iterator
- leveldb/journal
- leveldb/memdb
- leveldb/opt
- leveldb/storage
- leveldb/table
- leveldb/util
- name: golang.org/x/image
version: cc896f830cedae125428bc9fe1b0362aa91b3fb1
subpackages:
- bmp
- tiff
- tiff/lzw
- name: golang.org/x/net
version: 4cb1c02c05b0e749b0365f61ae859a8e0cfceed9
subpackages:
- context
- http/httpguts
- http2
- http2/hpack
- idna
- internal/timeseries
- trace
- name: golang.org/x/sys
version: 7138fd3d9dc8335c567ca206f4333fb75eb05d56
subpackages:
- unix
- name: golang.org/x/text
version: 5cec4b58c438bd98288aeb248bab2c1840713d21
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/appengine
version: b1f26356af11148e710935ed1ac8a7f5702c7612
subpackages:
- cloudsql
- name: google.golang.org/genproto
version: ff3583edef7de132f219f0efc00e097cabcc0ec0
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8
subpackages:
- balancer
- balancer/base
- balancer/roundrobin
- codes
- connectivity
- credentials
- encoding
- encoding/proto
- grpclog
- internal
- internal/backoff
- internal/channelz
- internal/grpcrand
- keepalive
- metadata
- naming
- peer
- reflection
- reflection/grpc_reflection_v1alpha
- resolver
- resolver/dns
- resolver/passthrough
- stats
- status
- tap
- transport
- name: gopkg.in/inf.v0
version: d2d2541c53f18d2a059457998ce2876cc8e67cbf
- name: gopkg.in/yaml.v2
version: 5420a8b6744d3b0345ab293f6fcba19c978f1183
testImports: []


@ -1,44 +1,117 @@
package: github.com/chrislusf/seaweedfs/weed package: github.com/chrislusf/seaweedfs/weed
import: import:
- package: github.com/seaweedfs/fuse - package: cloud.google.com/go
version: ^0.40.0
subpackages: subpackages:
- fs - pubsub
- package: github.com/boltdb/bolt - storage
version: ^1.3.1 - package: github.com/Azure/azure-storage-blob-go
version: ^0.7.0
subpackages:
- azblob
- package: github.com/Shopify/sarama
version: ^1.22.1
- package: github.com/aws/aws-sdk-go
version: ^1.20.12
subpackages:
- aws
- aws/awserr
- aws/credentials
- aws/session
- service/s3
- service/s3/s3iface
- service/sqs
- package: github.com/chrislusf/raft - package: github.com/chrislusf/raft
subpackages:
- protobuf
- package: github.com/dgrijalva/jwt-go - package: github.com/dgrijalva/jwt-go
version: ^3.2.0 version: ^3.2.0
- package: github.com/disintegration/imaging - package: github.com/disintegration/imaging
version: ^1.4.1 version: ^1.6.0
- package: github.com/dustin/go-humanize
version: ^1.0.0
- package: github.com/gabriel-vasile/mimetype
version: ^0.3.14
- package: github.com/go-redis/redis - package: github.com/go-redis/redis
version: ^6.10.2 version: ^6.15.3
- package: github.com/go-sql-driver/mysql - package: github.com/go-sql-driver/mysql
version: ^1.3.0 version: ^1.4.1
- package: github.com/gocql/gocql - package: github.com/gocql/gocql
- package: github.com/golang/protobuf - package: github.com/golang/protobuf
version: ^1.0.0 version: ^1.3.1
subpackages: subpackages:
- proto - proto
- package: github.com/google/btree - package: github.com/google/btree
version: ^1.0.0
- package: github.com/gorilla/mux - package: github.com/gorilla/mux
version: ^1.6.1 version: ^1.7.3
- package: github.com/jacobsa/daemonize
- package: github.com/kardianos/osext
- package: github.com/karlseguin/ccache
version: ^2.0.3
- package: github.com/klauspost/crc32 - package: github.com/klauspost/crc32
version: ^1.1.0 version: ^1.2.0
- package: github.com/klauspost/reedsolomon
version: ^1.9.2
- package: github.com/kurin/blazer
version: ^0.5.3
subpackages:
- b2
- package: github.com/lib/pq - package: github.com/lib/pq
version: ^1.1.1
- package: github.com/peterh/liner
version: ^1.1.0
- package: github.com/prometheus/client_golang
version: ^1.0.0
subpackages:
- prometheus
- prometheus/push
- package: github.com/rakyll/statik
version: ^0.1.6
subpackages:
- fs
- package: github.com/rwcarlsen/goexif - package: github.com/rwcarlsen/goexif
subpackages: subpackages:
- exif - exif
- package: github.com/soheilhy/cmux - package: github.com/satori/go.uuid
version: ^0.1.4 version: ^1.2.0
- package: github.com/seaweedfs/fuse
subpackages:
- fs
- package: github.com/spf13/viper
version: ^1.4.0
- package: github.com/syndtr/goleveldb - package: github.com/syndtr/goleveldb
version: ^1.0.0
subpackages: subpackages:
- leveldb - leveldb
- leveldb/opt
- leveldb/util - leveldb/util
- package: github.com/willf/bloom
version: ^2.0.3
- package: gocloud.dev
version: ^0.15.0
subpackages:
- pubsub
- pubsub/awssnssqs
- pubsub/azuresb
- pubsub/gcppubsub
- pubsub/natspubsub
- pubsub/rabbitpubsub
- package: golang.org/x/net - package: golang.org/x/net
subpackages: subpackages:
- context - context
- package: google.golang.org/grpc - webdav
version: ^1.11.3 - package: golang.org/x/tools
subpackages: subpackages:
- godoc/util
- package: google.golang.org/api
version: ^0.7.0
subpackages:
- option
- package: google.golang.org/grpc
version: ^1.21.1
subpackages:
- credentials
- keepalive
- peer - peer
- reflection - reflection


@ -0,0 +1,71 @@
// Package gocdk_pub_sub supports the Go CDK (Cloud Development Kit) PubSub API,
// which in turn supports many providers, including Amazon SNS/SQS, Azure Service Bus,
// Google Cloud PubSub, and RabbitMQ.
//
// In the config, select a provider and topic using a URL. See
// https://godoc.org/gocloud.dev/pubsub and its sub-packages for details.
//
// The Go CDK PubSub API does not support administrative operations like topic
// creation. Create the topic using a UI, CLI or provider-specific API before running
// weed.
//
// The Go CDK obtains credentials via environment variables and other
// provider-specific default mechanisms. See the provider's documentation for
// details.
package gocdk_pub_sub
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
_ "gocloud.dev/pubsub/azuresb"
_ "gocloud.dev/pubsub/gcppubsub"
_ "gocloud.dev/pubsub/natspubsub"
_ "gocloud.dev/pubsub/rabbitpubsub"
)
func init() {
notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{})
}
type GoCDKPubSub struct {
topicURL string
topic *pubsub.Topic
}
func (k *GoCDKPubSub) GetName() string {
return "gocdk_pub_sub"
}
func (k *GoCDKPubSub) Initialize(config util.Configuration) error {
k.topicURL = config.GetString("topic_url")
glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
topic, err := pubsub.OpenTopic(context.Background(), k.topicURL)
if err != nil {
glog.Fatalf("Failed to open topic: %v", err)
}
k.topic = topic
return nil
}
func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error {
bytes, err := proto.Marshal(message)
if err != nil {
return err
}
ctx := context.Background()
err = k.topic.Send(ctx, &pubsub.Message{
Body: bytes,
Metadata: map[string]string{"key": key},
})
if err != nil {
return fmt.Errorf("send message via Go CDK pubsub %s: %v", k.topicURL, err)
}
return nil
}
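
For the subscribing side, the Go CDK exposes the mirror-image API. Below is a minimal consumer sketch, not part of this commit: it assumes the filer publishes filer_pb.EventNotification messages (as the other notification queues do), and the rabbit://myqueue URL is illustrative only.

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/golang/protobuf/proto"
	"gocloud.dev/pubsub"
	_ "gocloud.dev/pubsub/rabbitpubsub" // driver for the rabbit:// scheme
)

func consume(ctx context.Context) error {
	sub, err := pubsub.OpenSubscription(ctx, "rabbit://myqueue") // hypothetical URL
	if err != nil {
		return err
	}
	defer sub.Shutdown(ctx)
	for {
		msg, err := sub.Receive(ctx)
		if err != nil {
			return err // subscription closed or errored
		}
		event := &filer_pb.EventNotification{}
		if err := proto.Unmarshal(msg.Body, event); err == nil {
			// SendMessage stores the file path under the "key" metadata entry
			fmt.Printf("%s: %+v\n", msg.Metadata["key"], event)
		}
		msg.Ack()
	}
}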


@ -3,9 +3,11 @@ package operation
import ( import (
"context" "context"
"fmt" "fmt"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc"
"strings"
) )
type VolumeAssignRequest struct { type VolumeAssignRequest struct {
@ -19,14 +21,15 @@ type VolumeAssignRequest struct {
} }
type AssignResult struct { type AssignResult struct {
Fid string `json:"fid,omitempty"` Fid string `json:"fid,omitempty"`
Url string `json:"url,omitempty"` Url string `json:"url,omitempty"`
PublicUrl string `json:"publicUrl,omitempty"` PublicUrl string `json:"publicUrl,omitempty"`
Count uint64 `json:"count,omitempty"` Count uint64 `json:"count,omitempty"`
Error string `json:"error,omitempty"` Error string `json:"error,omitempty"`
Auth security.EncodedJwt `json:"auth,omitempty"`
} }
func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
var requests []*VolumeAssignRequest var requests []*VolumeAssignRequest
requests = append(requests, primaryRequest) requests = append(requests, primaryRequest)
@ -40,9 +43,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
continue continue
} }
lastError = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()
req := &master_pb.AssignRequest{ req := &master_pb.AssignRequest{
Count: primaryRequest.Count, Count: primaryRequest.Count,
@ -53,7 +54,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
Rack: primaryRequest.Rack, Rack: primaryRequest.Rack,
DataNode: primaryRequest.DataNode, DataNode: primaryRequest.DataNode,
} }
resp, grpcErr := masterClient.Assign(ctx, req) resp, grpcErr := masterClient.Assign(context.Background(), req)
if grpcErr != nil { if grpcErr != nil {
return grpcErr return grpcErr
} }
@ -63,6 +64,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
ret.Url = resp.Url ret.Url = resp.Url
ret.PublicUrl = resp.PublicUrl ret.PublicUrl = resp.PublicUrl
ret.Error = resp.Error ret.Error = resp.Error
ret.Auth = security.EncodedJwt(resp.Auth)
return nil return nil
@ -81,3 +83,17 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
return ret, lastError return ret, lastError
} }
func LookupJwt(master string, fileId string) security.EncodedJwt {
tokenStr := ""
if h, e := util.Head(fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId)); e == nil {
bearer := h.Get("Authorization")
if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" {
tokenStr = bearer[7:]
}
}
return security.EncodedJwt(tokenStr)
}
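
Together with the Upload changes later in this commit, the new Auth field means callers no longer sign JWTs themselves; they forward the master-issued token. A sketch of the assign-then-upload flow (the master address, file name, and content are illustrative; grpcDialOption comes from the caller):

ar := &operation.VolumeAssignRequest{Count: 1}
assigned, err := operation.Assign("localhost:9333", grpcDialOption, ar)
if err != nil {
	return err
}
fileUrl := "http://" + assigned.Url + "/" + assigned.Fid
// AssignResult.Auth carries the master-issued JWT that authorizes this write
_, err = operation.Upload(fileUrl, "hello.txt",
	strings.NewReader("hello, seaweed"), false, "text/plain", nil, assigned.Auth)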


@ -5,9 +5,12 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"sort" "sort"
"google.golang.org/grpc"
"sync" "sync"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -53,7 +56,7 @@ func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) { func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
if isGzipped { if isGzipped {
var err error var err error
if buffer, err = UnGzipData(buffer); err != nil { if buffer, err = util.UnGzipData(buffer); err != nil {
return nil, err return nil, err
} }
} }
@ -69,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) {
return json.Marshal(cm) return json.Marshal(cm)
} }
func (cm *ChunkManifest) DeleteChunks(master string) error { func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error {
var fileIds []string var fileIds []string
for _, ci := range cm.Chunks { for _, ci := range cm.Chunks {
fileIds = append(fileIds, ci.Fid) fileIds = append(fileIds, ci.Fid)
} }
results, err := DeleteFiles(master, fileIds) results, err := DeleteFiles(master, grpcDialOption, fileIds)
if err != nil { if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err) glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err) return fmt.Errorf("chunk delete: %v", err)
@ -102,7 +105,10 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64,
if err != nil { if err != nil {
return written, err return written, err
} }
defer resp.Body.Close() defer func() {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}()
switch resp.StatusCode { switch resp.StatusCode {
case http.StatusRequestedRangeNotSatisfiable: case http.StatusRequestedRangeNotSatisfiable:


@ -2,6 +2,5 @@ package operation
type JoinResult struct { type JoinResult struct {
VolumeSizeLimit uint64 `json:"VolumeSizeLimit,omitempty"` VolumeSizeLimit uint64 `json:"VolumeSizeLimit,omitempty"`
SecretKey string `json:"secretKey,omitempty"`
Error string `json:"error,omitempty"` Error string `json:"error,omitempty"`
} }


@ -4,12 +4,12 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"google.golang.org/grpc"
"net/http" "net/http"
"strings" "strings"
"sync" "sync"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
) )
type DeleteResult struct { type DeleteResult struct {
@ -28,17 +28,17 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
} }
// DeleteFiles batch deletes a list of fileIds // DeleteFiles batch deletes a list of fileIds
func DeleteFiles(master string, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
lookupFunc := func(vids []string) (map[string]LookupResult, error) { lookupFunc := func(vids []string) (map[string]LookupResult, error) {
return LookupVolumeIds(master, vids) return LookupVolumeIds(master, grpcDialOption, vids)
} }
return DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
} }
func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) {
var ret []*volume_server_pb.DeleteResult var ret []*volume_server_pb.DeleteResult
@ -48,7 +48,7 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin
vid, _, err := ParseFileId(fileId) vid, _, err := ParseFileId(fileId)
if err != nil { if err != nil {
ret = append(ret, &volume_server_pb.DeleteResult{ ret = append(ret, &volume_server_pb.DeleteResult{
FileId: vid, FileId: fileId,
Status: http.StatusBadRequest, Status: http.StatusBadRequest,
Error: err.Error()}, Error: err.Error()},
) )
@ -85,38 +85,43 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin
} }
} }
resultChan := make(chan []*volume_server_pb.DeleteResult, len(server_to_fileIds))
var wg sync.WaitGroup var wg sync.WaitGroup
for server, fidList := range server_to_fileIds { for server, fidList := range server_to_fileIds {
wg.Add(1) wg.Add(1)
go func(server string, fidList []string) { go func(server string, fidList []string) {
defer wg.Done() defer wg.Done()
if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, fidList); deleteErr != nil { if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil {
err = deleteErr err = deleteErr
} else { } else {
ret = append(ret, deleteResults...) resultChan <- deleteResults
} }
}(server, fidList) }(server, fidList)
} }
wg.Wait() wg.Wait()
close(resultChan)
for result := range resultChan {
ret = append(ret, result...)
}
glog.V(0).Infof("deleted %d items", len(ret))
return ret, err return ret, err
} }
// DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc
func DeleteFilesAtOneVolumeServer(volumeServer string, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) {
err = WithVolumeServerClient(volumeServer, func(volumeServerClient volume_server_pb.VolumeServerClient) error { err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()
req := &volume_server_pb.BatchDeleteRequest{ req := &volume_server_pb.BatchDeleteRequest{
FileIds: fileIds, FileIds: fileIds,
} }
resp, err := volumeServerClient.BatchDelete(ctx, req) resp, err := volumeServerClient.BatchDelete(context.Background(), req)
// fmt.Printf("deleted %v %v: %v\n", fileIds, err, resp) // fmt.Printf("deleted %v %v: %v\n", fileIds, err, resp)
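
Funneling per-server results through the buffered channel (sized to the number of servers) also fixes the data race in the old code, where goroutines appended to the shared ret slice concurrently. The caller-facing batch API is unchanged; a sketch with an illustrative file id:

results, err := operation.DeleteFiles("localhost:9333", grpcDialOption,
	[]string{"3,01637037d6"})
if err != nil {
	glog.V(0).Infof("batch delete: %v", err)
}
for _, r := range results {
	if r.Error != "" {
		glog.V(0).Infof("delete %s: %s", r.FileId, r.Error)
	}
}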


@ -1,34 +1,30 @@
package operation package operation
import ( import (
"context"
"fmt" "fmt"
"strconv"
"strings"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/grpc" "google.golang.org/grpc"
"strconv"
"strings"
) )
var ( func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error {
grpcClients = make(map[string]*grpc.ClientConn)
grpcClientsLock sync.Mutex
)
func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.VolumeServerClient) error) error { ctx := context.Background()
grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) grpcAddress, err := toVolumeServerGrpcAddress(volumeServer)
if err != nil { if err != nil {
return err return err
} }
return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
client := volume_server_pb.NewVolumeServerClient(grpcConnection) client := volume_server_pb.NewVolumeServerClient(grpcConnection)
return fn(client) return fn(client)
}, grpcAddress) }, grpcAddress, grpcDialOption)
} }
@ -42,16 +38,18 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil
} }
func withMasterServerClient(masterServer string, fn func(masterClient master_pb.SeaweedClient) error) error { func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error {
masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0) ctx := context.Background()
masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer)
if parseErr != nil { if parseErr != nil {
return fmt.Errorf("failed to parse master grpc %v", masterServer) return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr)
} }
return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
client := master_pb.NewSeaweedClient(grpcConnection) client := master_pb.NewSeaweedClient(grpcConnection)
return fn(client) return fn(client)
}, masterGrpcAddress) }, masterGrpcAddress, grpcDialOption)
} }


@ -1,32 +0,0 @@
package operation
import (
"encoding/json"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
)
type ClusterStatusResult struct {
IsLeader bool `json:"IsLeader,omitempty"`
Leader string `json:"Leader,omitempty"`
Peers []string `json:"Peers,omitempty"`
}
func ListMasters(server string) (leader string, peers []string, err error) {
jsonBlob, err := util.Get("http://" + server + "/cluster/status")
glog.V(2).Info("list masters result :", string(jsonBlob))
if err != nil {
return "", nil, err
}
var ret ClusterStatusResult
err = json.Unmarshal(jsonBlob, &ret)
if err != nil {
return "", nil, err
}
peers = ret.Peers
if ret.IsLeader {
peers = append(peers, ret.Leader)
}
return ret.Leader, peers, nil
}


@ -5,6 +5,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"google.golang.org/grpc"
"math/rand" "math/rand"
"net/url" "net/url"
"strings" "strings"
@ -78,7 +79,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) {
} }
// LookupVolumeIds find volume locations by cache and actual lookup // LookupVolumeIds find volume locations by cache and actual lookup
func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, error) { func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
ret := make(map[string]LookupResult) ret := make(map[string]LookupResult)
var unknown_vids []string var unknown_vids []string
@ -98,14 +99,12 @@ func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, err
//only query unknown_vids //only query unknown_vids
err := withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()
req := &master_pb.LookupVolumeRequest{ req := &master_pb.LookupVolumeRequest{
VolumeIds: unknown_vids, VolumeIds: unknown_vids,
} }
resp, grpcErr := masterClient.LookupVolume(ctx, req) resp, grpcErr := masterClient.LookupVolume(context.Background(), req)
if grpcErr != nil { if grpcErr != nil {
return grpcErr return grpcErr
} }


@ -2,18 +2,16 @@ package operation
import ( import (
"context" "context"
"time" "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
) )
func Statistics(server string, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) {
err = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()
grpcResponse, grpcErr := masterClient.Statistics(ctx, req) grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req)
if grpcErr != nil { if grpcErr != nil {
return grpcErr return grpcErr
} }


@ -2,6 +2,7 @@ package operation
import ( import (
"bytes" "bytes"
"google.golang.org/grpc"
"io" "io"
"mime" "mime"
"net/url" "net/url"
@ -36,10 +37,8 @@ type SubmitResult struct {
Error string `json:"error,omitempty"` Error string `json:"error,omitempty"`
} }
func SubmitFiles(master string, files []FilePart, func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart,
replication string, collection string, dataCenter string, ttl string, maxMB int, replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) {
secret security.Secret,
) ([]SubmitResult, error) {
results := make([]SubmitResult, len(files)) results := make([]SubmitResult, len(files))
for index, file := range files { for index, file := range files {
results[index].FileName = file.FileName results[index].FileName = file.FileName
@ -51,7 +50,7 @@ func SubmitFiles(master string, files []FilePart,
DataCenter: dataCenter, DataCenter: dataCenter,
Ttl: ttl, Ttl: ttl,
} }
ret, err := Assign(master, ar) ret, err := Assign(master, grpcDialOption, ar)
if err != nil { if err != nil {
for index, _ := range files { for index, _ := range files {
results[index].Error = err.Error() results[index].Error = err.Error()
@ -67,7 +66,7 @@ func SubmitFiles(master string, files []FilePart,
file.Replication = replication file.Replication = replication
file.Collection = collection file.Collection = collection
file.DataCenter = dataCenter file.DataCenter = dataCenter
results[index].Size, err = file.Upload(maxMB, master, secret) results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption)
if err != nil { if err != nil {
results[index].Error = err.Error() results[index].Error = err.Error()
} }
@ -110,8 +109,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
return ret, nil return ret, nil
} }
func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (retSize uint32, err error) { func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
jwt := security.GenJwt(secret, fi.Fid)
fileUrl := "http://" + fi.Server + "/" + fi.Fid fileUrl := "http://" + fi.Server + "/" + fi.Fid
if fi.ModTime != 0 { if fi.ModTime != 0 {
fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
@ -139,7 +137,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
Collection: fi.Collection, Collection: fi.Collection,
Ttl: fi.Ttl, Ttl: fi.Ttl,
} }
ret, err = Assign(master, ar) ret, err = Assign(master, grpcDialOption, ar)
if err != nil { if err != nil {
return return
} }
@ -152,10 +150,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
Collection: fi.Collection, Collection: fi.Collection,
Ttl: fi.Ttl, Ttl: fi.Ttl,
} }
ret, err = Assign(master, ar) ret, err = Assign(master, grpcDialOption, ar)
if err != nil { if err != nil {
// delete all uploaded chunks // delete all uploaded chunks
cm.DeleteChunks(master) cm.DeleteChunks(master, grpcDialOption)
return return
} }
id = ret.Fid id = ret.Fid
@ -170,10 +168,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
baseName+"-"+strconv.FormatInt(i+1, 10), baseName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(fi.Reader, chunkSize), io.LimitReader(fi.Reader, chunkSize),
master, fileUrl, master, fileUrl,
jwt) ret.Auth)
if e != nil { if e != nil {
// delete all uploaded chunks // delete all uploaded chunks
cm.DeleteChunks(master) cm.DeleteChunks(master, grpcDialOption)
return 0, e return 0, e
} }
cm.Chunks = append(cm.Chunks, cm.Chunks = append(cm.Chunks,
@ -188,7 +186,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
err = upload_chunked_file_manifest(fileUrl, &cm, jwt) err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil { if err != nil {
// delete all uploaded chunks // delete all uploaded chunks
cm.DeleteChunks(master) cm.DeleteChunks(master, grpcDialOption)
} }
} else { } else {
ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt)


@ -2,63 +2,19 @@ package operation
import ( import (
"context" "context"
"fmt"
"io"
"time"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
. "github.com/chrislusf/seaweedfs/weed/storage/types" "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/util"
) )
func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) {
WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error { WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()
resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{ resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{
VolumdId: vid, VolumeId: vid,
}) })
return nil return nil
}) })
return return
} }
func GetVolumeIdxEntries(server string, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error {
return WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error {
stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{
VolumdId: vid,
})
if err != nil {
return err
}
var indexFileContent []byte
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("read index entries: %v", err)
}
indexFileContent = append(indexFileContent, resp.IndexFileContent...)
}
dataSize := len(indexFileContent)
for idx := 0; idx+NeedleEntrySize <= dataSize; idx += NeedleEntrySize {
line := indexFileContent[idx : idx+NeedleEntrySize]
key := BytesToNeedleId(line[:NeedleIdSize])
offset := BytesToOffset(line[NeedleIdSize : NeedleIdSize+OffsetSize])
size := util.BytesToUint32(line[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize])
eachEntryFn(key, offset, size)
}
return nil
})
}


@ -0,0 +1,82 @@
package operation
import (
"context"
"fmt"
"io"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"google.golang.org/grpc"
)
func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error {
// find volume location, replication, ttl info
lookup, err := Lookup(master, vid.String())
if err != nil {
return fmt.Errorf("look up volume %d: %v", vid, err)
}
if len(lookup.Locations) == 0 {
return fmt.Errorf("unable to locate volume %d", vid)
}
volumeServer := lookup.Locations[0].Url
return TailVolumeFromSource(volumeServer, grpcDialOption, vid, sinceNs, timeoutSeconds, fn)
}
func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error {
return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{
VolumeId: uint32(vid),
SinceNs: sinceNs,
IdleTimeoutSeconds: uint32(idleTimeoutSeconds),
})
if err != nil {
return err
}
for {
resp, recvErr := stream.Recv()
if recvErr != nil {
if recvErr == io.EOF {
break
} else {
return recvErr
}
}
needleHeader := resp.NeedleHeader
needleBody := resp.NeedleBody
if len(needleHeader) == 0 {
continue
}
for !resp.IsLastChunk {
resp, recvErr = stream.Recv()
if recvErr != nil {
if recvErr == io.EOF {
break
} else {
return recvErr
}
}
needleBody = append(needleBody, resp.NeedleBody...)
}
n := new(needle.Needle)
n.ParseNeedleHeader(needleHeader)
n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion)
err = fn(n)
if err != nil {
return err
}
}
return nil
})
}
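
A usage sketch for the new tailing API (the addresses and volume id are illustrative; the dial option would normally come from security config, grpc.WithInsecure() is just the simplest choice). TailVolume resolves the volume location via the master first, while TailVolumeFromSource targets a known volume server directly:

err := operation.TailVolume("localhost:9333", grpc.WithInsecure(),
	needle.VolumeId(7),
	0,  // sinceNs: start from the beginning
	30, // idle timeout in seconds
	func(n *needle.Needle) error {
		fmt.Printf("needle %v: %d bytes\n", n.Id, n.Size)
		return nil
	})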


@ -2,6 +2,8 @@ package operation
import ( import (
"bytes" "bytes"
"compress/flate"
"compress/gzip"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -16,6 +18,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
) )
type UploadResult struct { type UploadResult struct {
@ -37,13 +40,43 @@ func init() {
var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
// Upload sends a POST request to a volume server to upload the content // Upload sends a POST request to a volume server to upload the content with adjustable compression level
func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) {
return upload_content(uploadUrl, func(w io.Writer) (err error) { if compressionLevel < 1 {
_, err = io.Copy(w, reader) compressionLevel = 1
return }
}, filename, isGzipped, mtype, pairMap, jwt) if compressionLevel > 9 {
compressionLevel = 9
}
return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt)
} }
// Upload sends a POST request to a volume server to upload the content with fast compression
func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt)
}
func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) {
contentIsGzipped := isGzipped
shouldGzipNow := false
if !isGzipped {
if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped {
shouldGzipNow = true
contentIsGzipped = true
}
}
return upload_content(uploadUrl, func(w io.Writer) (err error) {
if shouldGzipNow {
gzWriter, _ := gzip.NewWriterLevel(w, compression)
_, err = io.Copy(gzWriter, reader)
gzWriter.Close()
} else {
_, err = io.Copy(w, reader)
}
return
}, filename, contentIsGzipped, mtype, pairMap, jwt)
}
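
A usage sketch for the level-adjustable variant (the URL, file name, and jwt are placeholders): level 9 trades CPU for the smallest payload, while plain Upload keeps flate.BestSpeed.

res, err := operation.UploadWithLocalCompressionLevel(
	"http://localhost:8080/3,01637037d6", "notes.txt",
	strings.NewReader("some compressible text ..."),
	false,        // not gzipped yet; doUpload gzips if the type is gzippable
	"text/plain", // the mime type feeds the IsGzippableFileType check
	nil, jwt,
	9) // 9 == flate.BestCompression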
func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) {
body_buf := bytes.NewBufferString("") body_buf := bytes.NewBufferString("")
body_writer := multipart.NewWriter(body_buf) body_writer := multipart.NewWriter(body_buf)
@ -58,9 +91,6 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
if isGzipped { if isGzipped {
h.Set("Content-Encoding", "gzip") h.Set("Content-Encoding", "gzip")
} }
if jwt != "" {
h.Set("Authorization", "BEARER "+string(jwt))
}
file_writer, cp_err := body_writer.CreatePart(h) file_writer, cp_err := body_writer.CreatePart(h)
if cp_err != nil { if cp_err != nil {
@ -86,18 +116,15 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
for k, v := range pairMap { for k, v := range pairMap {
req.Header.Set(k, v) req.Header.Set(k, v)
} }
if jwt != "" {
req.Header.Set("Authorization", "BEARER "+string(jwt))
}
resp, post_err := client.Do(req) resp, post_err := client.Do(req)
if post_err != nil { if post_err != nil {
glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error()) glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error())
return nil, post_err return nil, post_err
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode < http.StatusOK ||
resp.StatusCode > http.StatusIMUsed {
return nil, errors.New(http.StatusText(resp.StatusCode))
}
etag := getEtag(resp) etag := getEtag(resp)
resp_body, ra_err := ioutil.ReadAll(resp.Body) resp_body, ra_err := ioutil.ReadAll(resp.Body)
if ra_err != nil { if ra_err != nil {


@ -24,6 +24,9 @@ service SeaweedFiler {
rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
} }
rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
}
rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
}
@@ -36,6 +39,9 @@ service SeaweedFiler {
rpc Statistics (StatisticsRequest) returns (StatisticsResponse) {
}
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
}
}
//////////////////////////////////////////////////
@@ -69,19 +75,33 @@ message Entry {
map<string, bytes> extended = 5;
}
message FullEntry {
string dir = 1;
Entry entry = 2;
}
message EventNotification {
Entry old_entry = 1;
Entry new_entry = 2;
bool delete_chunks = 3;
string new_parent_path = 4;
}
message FileChunk {
string file_id = 1; // to be deprecated
int64 offset = 2;
uint64 size = 3;
int64 mtime = 4;
string e_tag = 5;
string source_file_id = 6; // to be deprecated
FileId fid = 7;
FileId source_fid = 8;
}
message FileId {
uint32 volume_id = 1;
uint64 file_key = 2;
fixed32 cookie = 3;
}
message FuseAttributes {
@@ -126,6 +146,16 @@ message DeleteEntryRequest {
message DeleteEntryResponse {
}
message AtomicRenameEntryRequest {
string old_directory = 1;
string old_name = 2;
string new_directory = 3;
string new_name = 4;
}
message AtomicRenameEntryResponse {
}
message AssignVolumeRequest {
int32 count = 1;
string collection = 2;
@@ -139,6 +169,7 @@ message AssignVolumeResponse {
string url = 2;
string public_url = 3;
int32 count = 4;
string auth = 5;
}
message LookupVolumeRequest {
@@ -177,3 +208,12 @@ message StatisticsResponse {
uint64 used_size = 5;
uint64 file_count = 6;
}
message GetFilerConfigurationRequest {
}
message GetFilerConfigurationResponse {
repeated string masters = 1;
string replication = 2;
string collection = 3;
uint32 max_mb = 4;
}
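Taken together, the two new rpcs can be driven from the generated Go stubs further down in this diff. A minimal, illustrative sketch follows; connection setup is elided, conn is a placeholder, and the directory and file names are invented for the example.

package main

import (
	"context"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func demo(conn *grpc.ClientConn) {
	client := filer_pb.NewSeaweedFilerClient(conn)

	// Rename /dir1/old.txt to /dir2/new.txt in one call, atomically on the filer.
	if _, err := client.AtomicRenameEntry(context.Background(), &filer_pb.AtomicRenameEntryRequest{
		OldDirectory: "/dir1", OldName: "old.txt",
		NewDirectory: "/dir2", NewName: "new.txt",
	}); err != nil {
		log.Println("rename:", err)
	}

	// Read the filer's own settings instead of duplicating them on the client side.
	conf, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
	if err != nil {
		log.Println("config:", err)
		return
	}
	log.Println("masters:", conf.Masters, "replication:", conf.Replication, "maxMB:", conf.MaxMb)
}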


@@ -14,8 +14,10 @@ It has these top-level messages:
ListEntriesRequest
ListEntriesResponse
Entry
FullEntry
EventNotification
FileChunk
FileId
FuseAttributes
CreateEntryRequest
CreateEntryResponse
@@ -23,6 +25,8 @@ It has these top-level messages:
UpdateEntryResponse
DeleteEntryRequest
DeleteEntryResponse
AtomicRenameEntryRequest
AtomicRenameEntryResponse
AssignVolumeRequest
AssignVolumeResponse
LookupVolumeRequest
@@ -33,6 +37,8 @@ It has these top-level messages:
DeleteCollectionResponse
StatisticsRequest
StatisticsResponse
GetFilerConfigurationRequest
GetFilerConfigurationResponse
*/
package filer_pb
@@ -208,16 +214,41 @@ func (m *Entry) GetExtended() map[string][]byte {
return nil
}
type FullEntry struct {
Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
}
func (m *FullEntry) Reset() { *m = FullEntry{} }
func (m *FullEntry) String() string { return proto.CompactTextString(m) }
func (*FullEntry) ProtoMessage() {}
func (*FullEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FullEntry) GetDir() string {
if m != nil {
return m.Dir
}
return ""
}
func (m *FullEntry) GetEntry() *Entry {
if m != nil {
return m.Entry
}
return nil
}
type EventNotification struct {
OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"`
NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"`
DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"`
NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath" json:"new_parent_path,omitempty"`
}
func (m *EventNotification) Reset() { *m = EventNotification{} }
func (m *EventNotification) String() string { return proto.CompactTextString(m) }
func (*EventNotification) ProtoMessage() {}
func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *EventNotification) GetOldEntry() *Entry {
if m != nil {
@@ -240,19 +271,28 @@ func (m *EventNotification) GetDeleteChunks() bool {
return false
}
func (m *EventNotification) GetNewParentPath() string {
if m != nil {
return m.NewParentPath
}
return ""
}
type FileChunk struct {
FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"`
Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"`
ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"`
SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"`
Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"`
SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"`
}
func (m *FileChunk) Reset() { *m = FileChunk{} }
func (m *FileChunk) String() string { return proto.CompactTextString(m) }
func (*FileChunk) ProtoMessage() {}
func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *FileChunk) GetFileId() string {
if m != nil {
@@ -296,6 +336,52 @@ func (m *FileChunk) GetSourceFileId() string {
return ""
}
func (m *FileChunk) GetFid() *FileId {
if m != nil {
return m.Fid
}
return nil
}
func (m *FileChunk) GetSourceFid() *FileId {
if m != nil {
return m.SourceFid
}
return nil
}
type FileId struct {
VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"`
Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie" json:"cookie,omitempty"`
}
func (m *FileId) Reset() { *m = FileId{} }
func (m *FileId) String() string { return proto.CompactTextString(m) }
func (*FileId) ProtoMessage() {}
func (*FileId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *FileId) GetVolumeId() uint32 {
if m != nil {
return m.VolumeId
}
return 0
}
func (m *FileId) GetFileKey() uint64 {
if m != nil {
return m.FileKey
}
return 0
}
func (m *FileId) GetCookie() uint32 {
if m != nil {
return m.Cookie
}
return 0
}
type FuseAttributes struct {
FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"`
@@ -315,7 +401,7 @@ type FuseAttributes struct {
func (m *FuseAttributes) Reset() { *m = FuseAttributes{} }
func (m *FuseAttributes) String() string { return proto.CompactTextString(m) }
func (*FuseAttributes) ProtoMessage() {}
func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *FuseAttributes) GetFileSize() uint64 {
if m != nil {
@@ -416,7 +502,7 @@ type CreateEntryRequest struct {
func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} }
func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) }
func (*CreateEntryRequest) ProtoMessage() {}
func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *CreateEntryRequest) GetDirectory() string {
if m != nil {
@@ -438,7 +524,7 @@ type CreateEntryResponse struct {
func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} }
func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) }
func (*CreateEntryResponse) ProtoMessage() {}
func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
type UpdateEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
@@ -448,7 +534,7 @@ type UpdateEntryRequest struct {
func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} }
func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateEntryRequest) ProtoMessage() {}
func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
func (m *UpdateEntryRequest) GetDirectory() string {
if m != nil {
@@ -470,7 +556,7 @@ type UpdateEntryResponse struct {
func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} }
func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) }
func (*UpdateEntryResponse) ProtoMessage() {}
func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
type DeleteEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
@@ -483,7 +569,7 @@ type DeleteEntryRequest struct {
func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} }
func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryRequest) ProtoMessage() {}
func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
func (m *DeleteEntryRequest) GetDirectory() string {
if m != nil {
@@ -519,7 +605,55 @@ type DeleteEntryResponse struct {
func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} }
func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteEntryResponse) ProtoMessage() {}
func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
type AtomicRenameEntryRequest struct {
OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"`
OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"`
NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory" json:"new_directory,omitempty"`
NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName" json:"new_name,omitempty"`
}
func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} }
func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) }
func (*AtomicRenameEntryRequest) ProtoMessage() {}
func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
func (m *AtomicRenameEntryRequest) GetOldDirectory() string {
if m != nil {
return m.OldDirectory
}
return ""
}
func (m *AtomicRenameEntryRequest) GetOldName() string {
if m != nil {
return m.OldName
}
return ""
}
func (m *AtomicRenameEntryRequest) GetNewDirectory() string {
if m != nil {
return m.NewDirectory
}
return ""
}
func (m *AtomicRenameEntryRequest) GetNewName() string {
if m != nil {
return m.NewName
}
return ""
}
type AtomicRenameEntryResponse struct {
}
func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} }
func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) }
func (*AtomicRenameEntryResponse) ProtoMessage() {}
func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
type AssignVolumeRequest struct {
Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
@@ -532,7 +666,7 @@ type AssignVolumeRequest struct {
func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} }
func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeRequest) ProtoMessage() {}
func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *AssignVolumeRequest) GetCount() int32 {
if m != nil {
@@ -574,12 +708,13 @@ type AssignVolumeResponse struct {
Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"`
}
func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} }
func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*AssignVolumeResponse) ProtoMessage() {}
func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
func (m *AssignVolumeResponse) GetFileId() string {
if m != nil {
@@ -609,6 +744,13 @@ func (m *AssignVolumeResponse) GetCount() int32 {
return 0
}
func (m *AssignVolumeResponse) GetAuth() string {
if m != nil {
return m.Auth
}
return ""
}
type LookupVolumeRequest struct {
VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
}
@@ -616,7 +758,7 @@ type LookupVolumeRequest struct {
func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} }
func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) }
func (*LookupVolumeRequest) ProtoMessage() {}
func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *LookupVolumeRequest) GetVolumeIds() []string {
if m != nil {
@@ -632,7 +774,7 @@ type Locations struct {
func (m *Locations) Reset() { *m = Locations{} }
func (m *Locations) String() string { return proto.CompactTextString(m) }
func (*Locations) ProtoMessage() {}
func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
func (m *Locations) GetLocations() []*Location {
if m != nil {
@@ -649,7 +791,7 @@ type Location struct {
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
func (m *Location) GetUrl() string {
if m != nil {
@@ -672,7 +814,7 @@ type LookupVolumeResponse struct {
func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} }
func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) }
func (*LookupVolumeResponse) ProtoMessage() {}
func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
if m != nil {
@@ -688,7 +830,7 @@ type DeleteCollectionRequest struct {
func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} }
func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteCollectionRequest) ProtoMessage() {}
func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
func (m *DeleteCollectionRequest) GetCollection() string {
if m != nil {
@@ -703,7 +845,7 @@ type DeleteCollectionResponse struct {
func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} }
func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteCollectionResponse) ProtoMessage() {}
func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
type StatisticsRequest struct {
Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"`
@@ -714,7 +856,7 @@ type StatisticsRequest struct {
func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} }
func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) }
func (*StatisticsRequest) ProtoMessage() {}
func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
func (m *StatisticsRequest) GetReplication() string {
if m != nil {
@@ -749,7 +891,7 @@ type StatisticsResponse struct {
func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} }
func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) }
func (*StatisticsResponse) ProtoMessage() {}
func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
func (m *StatisticsResponse) GetReplication() string {
if m != nil {
@@ -793,14 +935,64 @@ func (m *StatisticsResponse) GetFileCount() uint64 {
return 0
}
type GetFilerConfigurationRequest struct {
}
func (m *GetFilerConfigurationRequest) Reset() { *m = GetFilerConfigurationRequest{} }
func (m *GetFilerConfigurationRequest) String() string { return proto.CompactTextString(m) }
func (*GetFilerConfigurationRequest) ProtoMessage() {}
func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
type GetFilerConfigurationResponse struct {
Masters []string `protobuf:"bytes,1,rep,name=masters" json:"masters,omitempty"`
Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"`
Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"`
}
func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} }
func (m *GetFilerConfigurationResponse) String() string { return proto.CompactTextString(m) }
func (*GetFilerConfigurationResponse) ProtoMessage() {}
func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
func (m *GetFilerConfigurationResponse) GetMasters() []string {
if m != nil {
return m.Masters
}
return nil
}
func (m *GetFilerConfigurationResponse) GetReplication() string {
if m != nil {
return m.Replication
}
return ""
}
func (m *GetFilerConfigurationResponse) GetCollection() string {
if m != nil {
return m.Collection
}
return ""
}
func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 {
if m != nil {
return m.MaxMb
}
return 0
}
func init() {
proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse")
proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest")
proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse")
proto.RegisterType((*Entry)(nil), "filer_pb.Entry")
proto.RegisterType((*FullEntry)(nil), "filer_pb.FullEntry")
proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification")
proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk")
proto.RegisterType((*FileId)(nil), "filer_pb.FileId")
proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes")
proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest")
proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
@@ -808,6 +1000,8 @@ func init() {
proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest")
proto.RegisterType((*AtomicRenameEntryResponse)(nil), "filer_pb.AtomicRenameEntryResponse")
proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest")
proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse")
proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest")
@@ -818,6 +1012,8 @@ func init() {
proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse")
proto.RegisterType((*StatisticsRequest)(nil), "filer_pb.StatisticsRequest")
proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse")
proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest")
proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -836,10 +1032,12 @@ type SeaweedFilerClient interface {
CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error)
AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
}
type seaweedFilerClient struct {
@@ -895,6 +1093,15 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq
return out, nil
}
func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) {
out := new(AtomicRenameEntryResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) {
out := new(AssignVolumeResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...)
@@ -931,6 +1138,15 @@ func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsReque
return out, nil
}
func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) {
out := new(GetFilerConfigurationResponse)
err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for SeaweedFiler service
type SeaweedFilerServer interface {
@@ -939,10 +1155,12 @@ type SeaweedFilerServer interface {
CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error)
AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
}
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -1039,6 +1257,24 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AtomicRenameEntryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/filer_pb.SeaweedFiler/AtomicRenameEntry",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, req.(*AtomicRenameEntryRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AssignVolumeRequest)
if err := dec(in); err != nil {
@@ -1111,6 +1347,24 @@ func _SeaweedFiler_Statistics_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetFilerConfigurationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/filer_pb.SeaweedFiler/GetFilerConfiguration",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, req.(*GetFilerConfigurationRequest))
}
return interceptor(ctx, in, info, handler)
}
var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServiceName: "filer_pb.SeaweedFiler",
HandlerType: (*SeaweedFilerServer)(nil),
@@ -1135,6 +1389,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "DeleteEntry",
Handler: _SeaweedFiler_DeleteEntry_Handler,
},
{
MethodName: "AtomicRenameEntry",
Handler: _SeaweedFiler_AtomicRenameEntry_Handler,
},
{
MethodName: "AssignVolume",
Handler: _SeaweedFiler_AssignVolume_Handler,
@@ -1151,6 +1409,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
MethodName: "Statistics",
Handler: _SeaweedFiler_Statistics_Handler,
},
{
MethodName: "GetFilerConfiguration",
Handler: _SeaweedFiler_GetFilerConfiguration_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "filer.proto",
@@ -1159,86 +1421,104 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 1583 bytes of a gzipped FileDescriptorProto
// (gzipped descriptor byte data omitted)
}


@@ -0,0 +1,69 @@
package filer_pb
import (
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
// toFileIdObject parses a string file id such as "3,01637037d6" into its structured FileId form.
func toFileIdObject(fileIdStr string) (*FileId, error) {
t, err := needle.ParseFileIdFromString(fileIdStr)
if err != nil {
return nil, err
}
return &FileId{
VolumeId: uint32(t.VolumeId),
Cookie: uint32(t.Cookie),
FileKey: uint64(t.Key),
}, nil
}
func (fid *FileId) toFileIdString() string {
return needle.NewFileId(needle.VolumeId(fid.VolumeId), fid.FileKey, fid.Cookie).String()
}
func (c *FileChunk) GetFileIdString() string {
if c.FileId != "" {
return c.FileId
}
if c.Fid != nil {
c.FileId = c.Fid.toFileIdString()
return c.FileId
}
return ""
}
// BeforeEntrySerialization swaps string file ids for the compact FileId structs before an entry is persisted.
func BeforeEntrySerialization(chunks []*FileChunk) {
for _, chunk := range chunks {
if chunk.FileId != "" {
if fid, err := toFileIdObject(chunk.FileId); err == nil {
chunk.Fid = fid
chunk.FileId = ""
}
}
if chunk.SourceFileId != "" {
if fid, err := toFileIdObject(chunk.SourceFileId); err == nil {
chunk.SourceFid = fid
chunk.SourceFileId = ""
}
}
}
}
// AfterEntryDeserialization restores the string file ids after an entry is loaded.
func AfterEntryDeserialization(chunks []*FileChunk) {
for _, chunk := range chunks {
if chunk.Fid != nil && chunk.FileId == "" {
chunk.FileId = chunk.Fid.toFileIdString()
}
if chunk.SourceFid != nil && chunk.SourceFileId == "" {
chunk.SourceFileId = chunk.SourceFid.toFileIdString()
}
}
}
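A sketch of the intended round trip (the sample file id and the Size value are illustrative): chunks carry the string id in memory, are converted to the compact Fid form just before an entry is marshaled, and converted back right after it is unmarshaled.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	chunks := []*filer_pb.FileChunk{{FileId: "3,1637037d6", Size: 12}}

	// Before persisting an entry: replace the string id with the structured form.
	filer_pb.BeforeEntrySerialization(chunks)
	fmt.Println(chunks[0].FileId, chunks[0].Fid) // expect "" and a populated *FileId

	// After loading an entry: restore the string form for the rest of the code base.
	filer_pb.AfterEntryDeserialization(chunks)
	fmt.Println(chunks[0].FileId) // expect "3,1637037d6" again
}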


@@ -0,0 +1,17 @@
package filer_pb
import (
"testing"
"github.com/golang/protobuf/proto"
)
// TestFileIdSize prints the size of a string file id next to its protobuf-encoded FileId form.
func TestFileIdSize(t *testing.T) {
fileIdStr := "11745,0293434534cbb9892b"
fid, _ := toFileIdObject(fileIdStr)
bytes, _ := proto.Marshal(fid)
println(len(fileIdStr))
println(len(bytes))
}
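For this sample id, the string form is 24 bytes, while the marshaled FileId message should come out around 14 bytes (two varints plus a fixed32 cookie), which is the saving the compact representation is after.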

Some files were not shown because too many files have changed in this diff.