mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
commit
9601880e32
2
.gitignore
vendored
2
.gitignore
vendored
|
@ -1 +1,3 @@
|
|||
weed
|
||||
tags
|
||||
*.swp
|
||||
|
|
5
.project
5
.project
|
@ -5,11 +5,6 @@
|
|||
<projects>
|
||||
</projects>
|
||||
<buildSpec>
|
||||
<buildCommand>
|
||||
<name>com.googlecode.goclipse.goBuilder</name>
|
||||
<arguments>
|
||||
</arguments>
|
||||
</buildCommand>
|
||||
</buildSpec>
|
||||
<natures>
|
||||
<nature>goclipse.goNature</nature>
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.2
|
||||
- 1.3
|
||||
- release
|
||||
- tip
|
||||
|
|
19
Dockerfile
19
Dockerfile
|
@ -1,6 +1,21 @@
|
|||
FROM cydev/go
|
||||
RUN go get code.google.com/p/weed-fs/go/weed
|
||||
FROM progrium/busybox
|
||||
|
||||
WORKDIR /opt/weed
|
||||
|
||||
RUN opkg-install curl
|
||||
RUN echo insecure >> ~/.curlrc
|
||||
|
||||
RUN \
|
||||
curl -Lks https://bintray.com$(curl -Lk http://bintray.com/chrislusf/Weed-FS/seaweed/_latestVersion | grep linux_amd64.tar.gz | sed -n "/href/ s/.*href=['\"]\([^'\"]*\)['\"].*/\1/gp") | gunzip | tar -xf - -C /opt/weed/ && \
|
||||
mv weed_* bin && \
|
||||
chmod +x ./bin/weed
|
||||
|
||||
EXPOSE 8080
|
||||
EXPOSE 9333
|
||||
|
||||
VOLUME /data
|
||||
|
||||
ENV WEED_HOME /opt/weed
|
||||
ENV PATH ${PATH}:${WEED_HOME}/bin
|
||||
|
||||
ENTRYPOINT ["weed"]
|
6
Dockerfile.go_build
Normal file
6
Dockerfile.go_build
Normal file
|
@ -0,0 +1,6 @@
|
|||
FROM cydev/go
|
||||
RUN go get github.com/chrislusf/weed-fs/go/weed
|
||||
EXPOSE 8080
|
||||
EXPOSE 9333
|
||||
VOLUME /data
|
||||
ENTRYPOINT ["weed"]
|
42
README.md
42
README.md
|
@ -5,16 +5,6 @@ Seaweed File System
|
|||
[![GoDoc](https://godoc.org/github.com/chrislusf/weed-fs/go?status.svg)](https://godoc.org/github.com/chrislusf/weed-fs/go)
|
||||
[![RTD](https://readthedocs.org/projects/weed-fs/badge/?version=latest)](http://weed-fs.readthedocs.org/en/latest/)
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
go get github.com/chrislusf/weed-fs/go/weed
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
For pre-compiled releases,
|
||||
https://bintray.com/chrislusf/Weed-FS/seaweed
|
||||
|
||||
## Introduction
|
||||
|
||||
|
@ -246,12 +236,34 @@ More tools and documentation, on how to maintain and scale the system. For examp
|
|||
|
||||
This is a super exciting project! And I need helpers!
|
||||
|
||||
## Contributions ##
|
||||
|
||||
To make contributions easier, I have mirrored a repo in github.com
|
||||
```
|
||||
https://github.com/chrislusf/weed-fs.git
|
||||
```
|
||||
## Installation guide for users who are not familiar with golang
|
||||
|
||||
step 1: install go on your machine and setup the environment by following the instructions from the following link:
|
||||
|
||||
https://golang.org/doc/install
|
||||
|
||||
make sure you set up your $GOPATH
|
||||
|
||||
|
||||
step 2: also you may need to install Mercurial by following the instructions below
|
||||
|
||||
http://mercurial.selenic.com/downloads
|
||||
|
||||
|
||||
step 3: download, compile, and install the project by executing the following command
|
||||
|
||||
go get github.com/chrislusf/weed-fs/go/weed
|
||||
|
||||
once this is done, you should see the executable "weed" under $GOPATH/bin
|
||||
|
||||
step 4: after you modify your code locally, you could start a local build by calling "go install" under $GOPATH/src/github.com/chrislusf/weed-fs/go/weed
|
||||
|
||||
## Reference
|
||||
|
||||
For pre-compiled releases,
|
||||
https://bintray.com/chrislusf/Weed-FS/seaweed
|
||||
|
||||
## Disk Related topics ##
|
||||
|
||||
### Hard Drive Performance ###
|
||||
|
|
|
@ -187,10 +187,11 @@ Upload File Directly
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -F file=@/home/chris/myphoto.jpg http://localhost:8080/submit
|
||||
curl -F file=@/home/chris/myphoto.jpg http://localhost:9333/submit
|
||||
{"fid":"3,01fbe0dc6f1f38","fileName":"myphoto.jpg","fileUrl":"localhost:8080/3,01fbe0dc6f1f38","size":68231}
|
||||
|
||||
This API is a little convenient. The volume server would contact the master to get an file id and store it to the right volume server(not necessarily itself).
|
||||
This API is just for convenience. The master server would get a file id and store the file to the right volume server.
|
||||
It is a convenient API and does not support different parameters when assigning file id. (or you can add the support and send a push request.)
|
||||
|
||||
Delete File
|
||||
***********************************
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
Benchmarks
|
||||
======================
|
||||
|
||||
Do we really need the benchmark? People always use benchmark to compare systems. But benchmarks are misleading. The resources, e.g., CPU, disk, memory, network, all matter a lot. And with Weed File System, single node vs multiple nodes, benchmarking on one machine vs several multiple machines, all matter a lot.
|
||||
Do we really need the benchmark? People always use benchmark to compare systems.
|
||||
But benchmarks are misleading. The resources, e.g., CPU, disk, memory, network,
|
||||
all matter a lot. And with Seaweed File System, single node vs multiple nodes,
|
||||
benchmarking on one machine vs several multiple machines, all matter a lot.
|
||||
|
||||
Here are the steps on how to run the benchmark if you really need some numbers.
|
||||
|
||||
|
@ -25,9 +28,13 @@ For more realistic tests, please start them on different machines.
|
|||
What does the test do?
|
||||
#############################
|
||||
|
||||
By default, the benchmark command would start writing 1 million files, each having 1KB size, uncompressed. For each file, one request is sent to assign a file key, and a second request is sent to post the file to the volume server. The written file keys are stored in a temp file.
|
||||
By default, the benchmark command would start writing 1 million files, each having 1KB size, uncompressed.
|
||||
For each file, one request is sent to assign a file key, and a second request is sent to post the file to the volume server.
|
||||
The written file keys are stored in a temp file.
|
||||
|
||||
Then the benchmark command would read the list of file keys, randomly read 1 million files. For each volume, the volume id is cached, so there is several request to lookup the volume id, and all the rest requests are to get the file content.
|
||||
Then the benchmark command would read the list of file keys, randomly read 1 million files.
|
||||
For each volume, the volume id is cached, so there are only a few requests to look up the volume id,
|
||||
and all the rest requests are to get the file content.
|
||||
|
||||
Many options are configurable. Please check the help content:
|
||||
|
||||
|
@ -35,10 +42,11 @@ Many options are options are configurable. Please check the help content:
|
|||
|
||||
./weed benchmark -h
|
||||
|
||||
Common Problems
|
||||
Different Benchmark Target
|
||||
###############################
|
||||
|
||||
The most common problem is "too many open files" error. This is because the test itself starts too many network connections on one single machine. In my local macbook, if I ran "random read" following writing right away, the error happens always. I have to run "weed benchmark -write=false" to run the reading test only. Also, changing the concurrency level to "-c=16" would also help.
|
||||
The default "weed benchmark" uses 1 million 1KB file. This is to stress the number of files per second.
|
||||
Increasing the file size to 100KB or more can show much larger number of IO throughput in KB/second.
|
||||
|
||||
My own unscientific single machine results
|
||||
###################################################
|
||||
|
@ -156,4 +164,6 @@ Create benchmark volumes directly
|
|||
99% 9.4 ms
|
||||
100% 256.9 ms
|
||||
How can the replication 001 write faster than no replication?
|
||||
I could not tell. Very likely, the computer was in turbo mode. I can not reproduce it consistently either. Posted the number here just to illustrate that number lies. Don't quote on the exact number, just get an idea of the performance would be good enough.
|
||||
I could not tell. Very likely, the computer was in turbo mode.
|
||||
I can not reproduce it consistently either. Posted the number here just to illustrate that number lies.
|
||||
Don't quote on the exact number, just get an idea of the performance would be good enough.
|
275
docs/changelist.rst
Normal file
275
docs/changelist.rst
Normal file
|
@ -0,0 +1,275 @@
|
|||
Change List
|
||||
===================================
|
||||
|
||||
Introduction
|
||||
############
|
||||
This file contains list of recent changes, important features, usage changes, data format changes, etc. Do read this if you upgrade.
|
||||
|
||||
|
||||
v0.67
|
||||
#####
|
||||
1. Increase "weed benchmark" performance to pump in more data. The bottleneck is on the client side. Duh...
|
||||
|
||||
v0.65
|
||||
#####
|
||||
|
||||
1. Reset the cluster configuration if "-peers" is not empty.
|
||||
|
||||
v0.64
|
||||
#####
|
||||
|
||||
1. Add TTL support!
|
||||
1. filer: resolve directory log file error, avoid possible race condition
|
||||
|
||||
v0.63
|
||||
#####
|
||||
|
||||
1. Compiled with Go 1.3.1 to fix a rare crashing issue.
|
||||
|
||||
v0.62
|
||||
#####
|
||||
|
||||
1. Add support for Etag.
|
||||
2. Add /admin/mv to move a file or a folder.
|
||||
3. Add client Go API to pre-process the images.
|
||||
|
||||
v0.61
|
||||
#####
|
||||
|
||||
1. Reduce memory requirements for "weed fix"
|
||||
2. Guess mime type by file name extensions when stored mime type is "application/octstream"
|
||||
3. Added simple volume id lookup caching expiring by time.
|
||||
|
||||
v0.60
|
||||
#####
|
||||
|
||||
Fix file missing error caused by .idx file overwriting. The problem shows up if the weed volume server is restarted after 2 times. But the actual .idx file may have already been overwritten on second restart.
|
||||
|
||||
To fix this issue, please run "weed fix -dir=... -volumeId=..." to re-generate the .idx file.
|
||||
|
||||
v0.59
|
||||
#####
|
||||
|
||||
1. Add option to automatically fix jpeg picture orientation.
|
||||
2. Add volume id lookup caching
|
||||
3. Support Partial Content and Range Requests. http status code == 206.
|
||||
|
||||
v0.57
|
||||
#####
|
||||
|
||||
Add hidden dynamic image resizing feature
|
||||
|
||||
Add a hidden feature: for images (jpg/png/gif), if you append the URL parameters &width=xxx or &height=xxx or both, the image will be dynamically resized. However, resizing the image would cause high CPU and memory usage. Not recommended except for special use cases. So this would not be documented anywhere else.
|
||||
|
||||
v0.56 Major Command line options change
|
||||
#####
|
||||
|
||||
|
||||
Adjust command line options.
|
||||
|
||||
1. switch to use -publicIp instead of -publicUrl
|
||||
2. -ip can be empty. It will listen to all available interfaces.
|
||||
3. For "weed server", these options are changed:
|
||||
- -masterPort => -master.port
|
||||
- -peers => -master.peers
|
||||
- -mdir => -master.dir
|
||||
- -volumeSizeLimitMB => -master.volumeSizeLimitMB
|
||||
- -conf => -master.conf
|
||||
- -defaultReplicaPlacement => -master.defaultReplicaPlacement
|
||||
- -port => -volume.port
|
||||
- -max => -volume.max
|
||||
|
||||
v0.55 Recursive folder deletion for Filer
|
||||
#####
|
||||
|
||||
Now folders with sub folders or files can be deleted recursively.
|
||||
|
||||
Also, for filer, avoid showing files under the first created directory when listing the root directory.
|
||||
|
||||
v0.54 Misc improvements
|
||||
#####
|
||||
|
||||
No need to persist metadata for master sequence number generation. This shall avoid possible issues where file are lost due to duplicated sequence number generated in rare cases.
|
||||
|
||||
More robust handling of "peers" in master node clustering mode.
|
||||
|
||||
Added logging instructions.
|
||||
|
||||
v0.53 Miscellaneous improvements
|
||||
#####
|
||||
|
||||
Added retry logic to wait for cluster peers during cluster bootstrapping. Previously the cluster bootstrapping is ordered. This make it tricky to deploy automatically and repeatedly. The fix make the commands repeatable.
|
||||
|
||||
Also, when growing volumes, additional preferred "rack" and "dataNode" parameters are also provided, works together with existing "dataCenter" parameter.
|
||||
|
||||
Fix important bug where settings for non-"000" replications are read back wrong, if volume server is restarted.
|
||||
|
||||
v0.52 Added "filer" server
|
||||
#####
|
||||
|
||||
A "weed filer" server is added, to provide more "common" file storage. Currently the fullFileName-to-fileId mapping is stored with an efficient embedded leveldb. So it's not linearly scalable yet. But it can handle LOTS of files.
|
||||
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
//POST a file and read it back
|
||||
curl -F "filename=@README.md" "http://localhost:8888/path/to/sources/"
|
||||
curl "http://localhost:8888/path/to/sources/README.md"
|
||||
//POST a file with a new name and read it back
|
||||
curl -F "filename=@Makefile" "http://localhost:8888/path/to/sources/new_name"
|
||||
curl "http://localhost:8888/path/to/sources/new_name"
|
||||
//list sub folders and files
|
||||
curl "http://localhost:8888/path/to/sources/?pretty=y"
|
||||
|
||||
|
||||
v0.51 Idle Timeout
|
||||
#####
|
||||
|
||||
Previously the timeout setting is "-readTimeout", which is the time limit of the whole http connection. This is inconvenient for large files or for slow internet connections. Now this option is replaced with "-idleTimeout", and default to 10 seconds. Ideally, you should not need to tweak it based on your use case.
|
||||
|
||||
v0.50 Improved Locking
|
||||
#####
|
||||
|
||||
1. All read operation switched to thread-safe pread, no read locks now.
|
||||
2. When vacuuming large volumes, a lock was preventing heartbeats to master node. This is fixed now.
|
||||
3. Fix volume compaction error for collections.
|
||||
|
||||
v0.49 Bug Fixes
|
||||
#####
|
||||
|
||||
With the new benchmark tool to bombard the system, many bugs are found and fixed, especially on clustering, http connection reuse.
|
||||
|
||||
v0.48 added benchmark command!
|
||||
#####
|
||||
|
||||
Benchmark! Enough said.
|
||||
|
||||
v0.47 Improving replication
|
||||
#####
|
||||
|
||||
Support more replication types.
|
||||
|
||||
v0.46 Adding failover master server
|
||||
#####
|
||||
|
||||
Automatically fail over master servers!
|
||||
|
||||
v0.46 Add "weed server" command
|
||||
#####
|
||||
|
||||
Now you can start one master server and one volume server in just one command!
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
weed server
|
||||
|
||||
|
||||
v0.45 Add support for extra large file
|
||||
#####
|
||||
|
||||
For extra large file, this example will split the file into 100MB chunks.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
weed upload -maxMB=100 the_file_name
|
||||
|
||||
|
||||
Also, Added "download" command, for simple files or chunked files.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
weed download file_id [file_id3](file_id2)
|
||||
|
||||
|
||||
v0.34 Add support for multiple directories on volume server
|
||||
#####
|
||||
|
||||
For volume server, add support for multiple folders and multiple max limit. For example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
weed volume -dir=folder1,folder2,folder3 -max=7,8,9
|
||||
|
||||
|
||||
v0.33 Add Nicer URL support
|
||||
#####
|
||||
|
||||
For HTTP GET request
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
http://localhost:8080/3,01637037d6
|
||||
|
||||
Can also be retrieved by
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
http://localhost:8080/3/01637037d6/my_preferred_name.jpg
|
||||
|
||||
|
||||
v0.32 Add support for Last-Modified header
|
||||
#####
|
||||
|
||||
The last modified timestamp is stored with 5 additional bytes.
|
||||
|
||||
Return http code 304 if the file is not modified.
|
||||
|
||||
Also, the writing are more solid with the fix for issue#26.
|
||||
|
||||
v0.31 Allocate File Key on specific data center
|
||||
#####
|
||||
|
||||
Volume servers can start with a specific data center name.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
weed volume -dir=/tmp/1 -port=8080 -dataCenter=dc1
|
||||
weed volume -dir=/tmp/2 -port=8081 -dataCenter=dc2
|
||||
|
||||
Or the master server can determine the data center via volume server's IP address and settings in weed.conf file.
|
||||
|
||||
Now when requesting a file key, an optional "dataCenter" parameter can limit the assigned volume to the specific data center. For example, this specifies the data center dc1:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
http://localhost:9333/dir/assign?dataCenter=dc1
|
||||
|
||||
v0.26 Storing File Name and Mime Type
|
||||
#####
|
||||
|
||||
In order to keep one single disk read for each file, a new storage format is implemented to store: is gzipped or not, file name and mime type (used when downloading files), and possibly other future new attributes. The volumes with old storage format are treated as read only and deprecated.
|
||||
|
||||
Also, you can pre-gzip and submit your file directly, for example, gzip "my.css" into "my.css.gz", and submit. In this case, "my.css" will be stored as the file name. This should save some transmission time, and allow you to force gzipped storage or customize the gzip compression level.
|
||||
|
||||
v0.25 Adding reclaiming garbage spaces
|
||||
|
||||
Garbage spaces are reclaimed by an automatic compacting process. Garbage spaces are generated when updating or deleting files. If they exceed a configurable threshold, 0.3 by default (meaning 30% of the used disk space is garbage), the volume will be marked as readonly, compacted and garbage spaces are reclaimed, and then marked as writable.
|
||||
|
||||
v0.19 Adding rack and data center aware replication
|
||||
#####
|
||||
|
||||
Now when you have one rack, or multiple racks, or multiple data centers, you can choose your own replication strategy.
|
||||
|
||||
v0.18 Detect disconnected volume servers
|
||||
#####
|
||||
|
||||
The disconnected volume servers would not be assigned when generating the file keys. Volume servers by default send a heartbeat to master server every 5~10 seconds. Master thinks the volume server is disconnected after 5 times of the heartbeat interval, or 25 seconds by default.
|
||||
|
||||
v0.16 Change to single executable file to do everything
|
||||
#####
|
||||
|
||||
If you are using v0.15 or earlier, you would use
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
>weedvolume -dir="/tmp" -volumes=0-4 -mserver="localhost:9333" -port=8080 -publicUrl="localhost:8080"
|
||||
|
||||
With v0.16 or later, you would need to do this instead:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
>weed volume -dir="/tmp" -volumes=0-4 -mserver="localhost:9333" -port=8080 -publicUrl="localhost:8080"
|
||||
|
||||
And more new commands, in addition to "server","volume","fix", etc, will be added.
|
||||
|
||||
This provides a simple deliverable file, and the file size is much smaller since Go language statically compile the commands. Combining commands into one file would avoid lots of duplication.
|
|
@ -5,20 +5,20 @@ Clients
|
|||
###################################
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| Name | Author | Language |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
+=================================================================================+==============+===========+
|
||||
| `WeedPHP <https://github.com/micjohnson/weed-php/>`_ | Mic Johnson | PHP |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Seaweed-FS Symfony bundle <https://github.com/micjohnson/weed-php-bundle>`_ | Mic Johnson | PHP |
|
||||
| `Seaweed-FS Symfony bundle <https://github.com/micjohnson/weed-php-bundle>`_ | Mic Johnson | PHP |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Seaweed-FS Node.js client <https://github.com/cruzrr/node-weedfs>`_ | Aaron Blakely| Javascript|
|
||||
| `Seaweed-FS Node.js client <https://github.com/cruzrr/node-weedfs>`_ | Aaron Blakely| Javascript|
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Amazon S3 API for Seaweed-FS <https://github.com/tgulacsi/s3weed>`_ | Tamás Gulácsi| Go |
|
||||
| `Amazon S3 API for Seaweed-FS <https://github.com/tgulacsi/s3weed>`_ | Tamás Gulácsi| Go |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `File store upload test <https://github.com/tgulacsi/filestore-upload-test>`_ | Tamás Gulácsi| Go |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Java Seaweed-FS client <https://github.com/simplebread/WeedFSClient>`_ | Xu Zhang | Java |
|
||||
| `Java Seaweed-FS client <https://github.com/simplebread/WeedFSClient>`_ | Xu Zhang | Java |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Java Seaweed-FS client 2 <https://github.com/zenria/Weed-FS-Java-Client>`_ | Zenria | Java |
|
||||
| `Java Seaweed-FS client 2 <https://github.com/zenria/Weed-FS-Java-Client>`_ | Zenria | Java |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Python-weed <https://github.com/darkdarkfruit/python-weed>`_ | Darkdarkfruit| Python |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
|
@ -26,7 +26,7 @@ Clients
|
|||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Camlistore blobserver Storage <https://github.com/tgulacsi/camli-weed>`_ | Tamás Gulácsi| Go |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Scala Seaweed-FS client <https://github.com/chiradip/WeedFsScalaClient>`_ | Chiradip | Scala |
|
||||
| `Scala Seaweed-FS client <https://github.com/chiradip/WeedFsScalaClient>`_ | Chiradip | Scala |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
| `Module for kohana <https://github.com/bububa/kohanaphp-weedfs>`_ | Bububa | PHP |
|
||||
+---------------------------------------------------------------------------------+--------------+-----------+
|
||||
|
|
12
docs/conf.py
12
docs/conf.py
|
@ -43,17 +43,17 @@ source_suffix = '.rst'
|
|||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'weed-fs'
|
||||
copyright = u'2014, chrislusf, ernado'
|
||||
project = u'seaweed-fs'
|
||||
copyright = u'2015, chrislusf, ernado'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '0.63'
|
||||
version = '0.67'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '0.63'
|
||||
release = '0.67'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
|
@ -240,8 +240,8 @@ man_pages = [
|
|||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'weed-fs', u'weed-fs Documentation',
|
||||
u'chrislusf, ernado', 'weed-fs', 'One line description of project.',
|
||||
('index', 'seaweed-fs', u'seaweed-fs Documentation',
|
||||
u'chrislusf, ernado', 'seaweed-fs', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
Directories and files
|
||||
===========================
|
||||
|
||||
When talking about file systems, many people would assume directories, list files under a directory, etc. These are expected if we want to hook up Weed File System with linux by FUSE, or with Hadoop, etc.
|
||||
When talking about file systems, many people would assume directories,
|
||||
list files under a directory, etc. These are expected if we want to hook up
|
||||
Seaweed File System with linux by FUSE, or with Hadoop, etc.
|
||||
|
||||
Sample usage
|
||||
#####################
|
||||
|
||||
Two ways to start a weed filer
|
||||
Two ways to start a weed filer in standalone mode:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
@ -39,7 +41,7 @@ A common file system would use inode to store meta data for each folder and file
|
|||
|
||||
Seaweed-FS wants to make as small number of disk access as possible, yet still be able to store a lot of file metadata. So we need to think very differently.
|
||||
|
||||
From a full file path to get to the file content, there are several steps:
|
||||
We can take the following steps to map a full file path to the actual data block:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
|
@ -48,7 +50,7 @@ From a full file path to get to the file content, there are several steps:
|
|||
file_id => data_block
|
||||
|
||||
|
||||
Because default Seaweed-FS only provides file_id=>data_block mapping, the first 2 steps need to be implemented.
|
||||
Because default Seaweed-FS only provides file_id=>data_block mapping, only the first 2 steps need to be implemented.
|
||||
|
||||
|
||||
There are several data features I noticed:
|
||||
|
@ -72,7 +74,7 @@ I believe these are reasonable assumptions:
|
|||
Data structure
|
||||
#################
|
||||
|
||||
This difference lead to the design that the metadata for directories and files should have different data structure.
|
||||
This assumed differences between directories and files lead to the design that the metadata for directories and files should have different data structure.
|
||||
|
||||
* Store directories in memory
|
||||
|
||||
|
@ -100,16 +102,18 @@ For file renaming, it's just trivially delete and then add a row in leveldb.
|
|||
Details
|
||||
########################
|
||||
|
||||
In the current first version, the path_to_file=>file_id mapping is stored with an efficient embedded leveldb. Being embedded, it runs on single machine. So it's not linearly scalable yet. However, it can handle LOTS AND LOTS of files on weed-fs on other servers. Using an external distributed database is possible. Your contribution is welcome!
|
||||
In the current first version, the path_to_file=>file_id mapping is stored with an efficient embedded leveldb. Being embedded, it runs on single machine. So it's not linearly scalable yet. However, it can handle LOTS AND LOTS of files on Seaweed-FS on other master/volume servers.
|
||||
|
||||
The in-memory directory structure can improve on memory efficiency. Current simple map in memory works when the number of directories is less than 1 million, which will use about 500MB memory. But I would highly doubt any common use case would have more than 100 directories.
|
||||
Switching from the embedded leveldb to an external distributed database is very feasible. Your contribution is welcome!
|
||||
|
||||
The in-memory directory structure can improve on memory efficiency. Current simple map in memory works when the number of directories is less than 1 million, which will use about 500MB memory. But I would expect common use case would have a few, not even more than 100 directories.
|
||||
|
||||
Use Cases
|
||||
#########################
|
||||
|
||||
Clients can assess one "weed filer" via HTTP, list files under a directory, create files via HTTP POST, read files via HTTP POST directly.
|
||||
|
||||
Although one "weed filer" can only sits in one machine, you can start multiple "weed filer" on several machines, each "weed filer" instance running in its own collection, having its own namespace, but sharing the same weed-fs.
|
||||
Although one "weed filer" can only sit on one machine, you can start multiple "weed filer" instances on several machines, each "weed filer" instance running in its own collection, having its own namespace, but sharing the same Seaweed-FS storage.
|
||||
|
||||
Future
|
||||
###################
|
||||
|
@ -127,6 +131,6 @@ Later, FUSE or HCFS plugins will be created, to really integrate Seaweed-FS to e
|
|||
Helps Wanted
|
||||
########################
|
||||
|
||||
This is a big step towards more interesting weed-fs usage and integration with existing systems.
|
||||
This is a big step towards more interesting Seaweed-FS usage and integration with existing systems.
|
||||
|
||||
If you can help to refactor and implement other directory meta data, or file meta data storage, please do so.
|
118
docs/distributed_filer.rst
Normal file
118
docs/distributed_filer.rst
Normal file
|
@ -0,0 +1,118 @@
|
|||
Distributed Filer
|
||||
===========================
|
||||
|
||||
The default weed filer is in standalone mode, storing file metadata on disk.
|
||||
It is quite efficient to go through deep directory path and can handle
|
||||
millions of files.
|
||||
|
||||
However, no SPOF is a must-have requirement for many projects.
|
||||
|
||||
Luckily, SeaweedFS is so flexible that we can use a completely different way
|
||||
to manage file metadata.
|
||||
|
||||
This distributed filer uses Redis or Cassandra to store the metadata.
|
||||
|
||||
Redis Setup
|
||||
#####################
|
||||
No setup required.
|
||||
|
||||
Cassandra Setup
|
||||
#####################
|
||||
Here is the CQL to create the table used by CassandraStore.
|
||||
Optionally you can adjust the keyspace name and replication settings.
|
||||
For production, you would want to set replication_factor to 3
|
||||
if there are at least 3 Cassandra servers.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
create keyspace seaweed WITH replication = {
|
||||
'class':'SimpleStrategy',
|
||||
'replication_factor':1
|
||||
};
|
||||
|
||||
use seaweed;
|
||||
|
||||
CREATE TABLE seaweed_files (
|
||||
path varchar,
|
||||
fids list<varchar>,
|
||||
PRIMARY KEY (path)
|
||||
);
|
||||
|
||||
|
||||
Sample usage
|
||||
#####################
|
||||
|
||||
To start a weed filer in distributed mode with Redis:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# assuming you already started weed master and weed volume
|
||||
weed filer -redis.server=localhost:6379
|
||||
|
||||
To start a weed filer in distributed mode with Cassandra:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# assuming you already started weed master and weed volume
|
||||
weed filer -cassandra.server=localhost
|
||||
|
||||
Now you can add/delete files
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# POST a file and read it back
|
||||
curl -F "filename=@README.md" "http://localhost:8888/path/to/sources/"
|
||||
curl "http://localhost:8888/path/to/sources/README.md"
|
||||
# POST a file with a new name and read it back
|
||||
curl -F "filename=@Makefile" "http://localhost:8888/path/to/sources/new_name"
|
||||
curl "http://localhost:8888/path/to/sources/new_name"
|
||||
|
||||
Limitation
|
||||
############
|
||||
List sub folders and files are not supported because Redis or Cassandra
|
||||
does not support prefix search.
|
||||
|
||||
Flat Namespace Design
|
||||
############
|
||||
Instead of using both directory and file metadata, this implementation uses
|
||||
a flat namespace.
|
||||
|
||||
If each directory's metadata were stored separately, there would be multiple
|
||||
network round trips to fetch directory information for deep directories,
|
||||
impeding system performance.
|
||||
|
||||
A flat namespace would take more space because the parent directories are
|
||||
repeatedly stored. But disk space is a lesser concern especially for
|
||||
distributed systems.
|
||||
|
||||
So either Redis or Cassandra is a simple file_full_path ~ file_id mapping.
|
||||
(Actually Cassandra is a file_full_path ~ list_of_file_ids mapping
|
||||
with the hope to support easy file appending for streaming files.)
|
||||
|
||||
Complexity
|
||||
###################
|
||||
|
||||
For one file retrieval, the full_filename=>file_id lookup will be O(logN)
|
||||
using Redis or Cassandra. But very likely the one additional network hop would
|
||||
take longer than the actual lookup.
|
||||
|
||||
Use Cases
|
||||
#########################
|
||||
|
||||
Clients can access one "weed filer" via HTTP, create files via HTTP POST,
|
||||
read files via HTTP GET directly.
|
||||
|
||||
Future
|
||||
###################
|
||||
|
||||
SeaweedFS can support other distributed databases. It will be better
|
||||
if that database can support prefix search, in order to list files
|
||||
under a directory.
|
||||
|
||||
Help Wanted
|
||||
########################
|
||||
|
||||
Please implement your preferred metadata store!
|
||||
|
||||
Just follow the cassandra_store/cassandra_store.go file and send me a pull
|
||||
request. I will handle the rest.
|
|
@ -7,7 +7,7 @@ Introduction
|
|||
|
||||
Some user will ask for no single point of failure. Although google runs its file system with a single master for years, no SPOF seems becoming a criteria for architects to pick solutions.
|
||||
|
||||
Luckily, it's not too difficult to enable Weed File System with failover master servers.
|
||||
Luckily, it's not too difficult to enable Seaweed File System with failover master servers.
|
||||
|
||||
Cheat Sheet: Startup multiple servers
|
||||
########################################
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
Getting started
|
||||
===================================
|
||||
Installing Weed-Fs
|
||||
Installing Seaweed-FS
|
||||
###################################
|
||||
|
||||
Download a proper version from `Seaweed-FS download page <https://bintray.com/chrislusf/Weed-FS/weed/>`_.
|
||||
|
@ -57,7 +57,7 @@ Actually, forget about previous commands. You can setup one master server and on
|
|||
# use "weed server -h" to find out more
|
||||
./weed server -master.port=9333 -volume.port=8080 -dir="./data"
|
||||
|
||||
Testing Weed-Fs
|
||||
Testing Seaweed-FS
|
||||
###################################
|
||||
|
||||
With the master and volume server up, now what? Let's pump in a lot of files into the system!
|
||||
|
@ -77,11 +77,14 @@ Then, you can simply check "du -m -s /some/big/folder" to see the actual disk us
|
|||
|
||||
Now you can use your tools to hit weed-fs as hard as you can.
|
||||
|
||||
Using Weed-Fs in docker
|
||||
Using Seaweed-FS in docker
|
||||
####################################
|
||||
|
||||
You can use image "cydev/weed" or build your own with `dockerfile <https://github.com/chrislusf/weed-fs/blob/master/Dockerfile>`_ in the root of repo.
|
||||
|
||||
Using pre-built Docker image
|
||||
**************************************************************
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker run --name weed cydev/weed server
|
||||
|
@ -98,6 +101,29 @@ And in another terminal
|
|||
}
|
||||
# use $IP as host for api queries
|
||||
|
||||
Building image from dockerfile
|
||||
**************************************************************
|
||||
|
||||
Make a local copy of weed-fs from github
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone https://github.com/chrislusf/weed-fs.git
|
||||
|
||||
Minimal Image (~19.6 MB)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker build --no-cache -t 'cydev/weed' .
|
||||
|
||||
Go-Build Docker Image (~764 MB)
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
mv Dockerfile Dockerfile.minimal
|
||||
mv Dockerfile.go_build Dockerfile
|
||||
docker build --no-cache -t 'cydev/weed' .
|
||||
|
||||
In production
|
||||
**************************************************************
|
||||
|
||||
|
|
|
@ -3,19 +3,14 @@
|
|||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to weed-fs's documentation!
|
||||
Welcome to Seaweed-FS documentation!
|
||||
===================================
|
||||
|
||||
An official mirror of code.google.com/p/weed-fs .
|
||||
Moving to github.com to make cooperations easier.
|
||||
This repo and the google code repo will be kept synchronized.
|
||||
|
||||
|
||||
For documents and bug reporting, Please visit
|
||||
http://weed-fs.googlecode.com
|
||||
This is the official site for Seaweed-FS.
|
||||
The one on google code is deprecated.
|
||||
|
||||
For pre-compiled releases,
|
||||
https://bintray.com/chrislusf/Weed-FS/weed
|
||||
https://bintray.com/chrislusf/Weed-FS/seaweed
|
||||
|
||||
|
||||
Contents:
|
||||
|
@ -27,17 +22,11 @@ Contents:
|
|||
clients
|
||||
api
|
||||
replication
|
||||
ttl
|
||||
failover
|
||||
usecases
|
||||
directories
|
||||
distributed_filer
|
||||
usecases
|
||||
optimization
|
||||
benchmarks
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
||||
changelist
|
||||
|
|
85
docs/ttl.rst
Normal file
85
docs/ttl.rst
Normal file
|
@ -0,0 +1,85 @@
|
|||
Store file with a Time To Live
|
||||
===================
|
||||
|
||||
Introduction
|
||||
#############################
|
||||
|
||||
Seaweed is a key~file store, and files can optionally expire with a Time To Live (TTL).
|
||||
|
||||
How to use it?
|
||||
#############################
|
||||
|
||||
Assume we want to store a file with TTL of 3 minutes.
|
||||
|
||||
First, ask the master to assign a file id to a volume with a 3-minute TTL:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
> curl http://localhost:9333/dir/assign?ttl=3m
|
||||
{"count":1,"fid":"5,01637037d6","url":"127.0.0.1:8080","publicUrl":"localhost:8080"}
|
||||
|
||||
Secondly, use the file id to store on the volume server
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
> curl -F "file=@x.go" http://127.0.0.1:8080/5,01637037d6?ttl=3m
|
||||
|
||||
After writing, the file content will be returned as usual if read before the TTL expiry. But if read after the TTL expiry, the file will be reported as missing and return the http response status as not found.
|
||||
|
||||
For next writes with ttl=3m, the same set of volumes with ttl=3m will be used until:
|
||||
|
||||
1. the ttl=3m volumes are full. If so, new volumes will be created.
|
||||
2. there are no write activities for 3 minutes. If so, these volumes will be stopped and deleted.
|
||||
|
||||
Advanced Usage
|
||||
#############################
|
||||
|
||||
As you may have noticed, the "ttl=3m" is used twice! One for assigning file id, and one for uploading the actual file. The first one is for master to pick a matching volume, while the second one is written together with the file.
|
||||
|
||||
These two TTL values are not required to be the same. As long as the volume TTL is larger than file TTL, it should be OK.
|
||||
|
||||
This gives some flexibility to fine-tune the file TTL, while reducing the number of volume TTL variations, which simplifies managing the TTL volumes.
|
||||
|
||||
Supported TTL format
|
||||
#############################
|
||||
|
||||
The TTL is in the format of one integer number followed by one unit. The unit can be 'm', 'h', 'd', 'w', 'M', 'y'.
|
||||
|
||||
Supported TTL format examples:
|
||||
|
||||
- 3m: 3 minutes
|
||||
- 4h: 4 hours
|
||||
- 5d: 5 days
|
||||
- 6w: 6 weeks
|
||||
- 7M: 7 months
|
||||
- 8y: 8 years
|
||||
|
||||
|
||||
How efficient it is?
|
||||
#############################
|
||||
|
||||
TTL seems easy to implement since we just need to report the file as missing if the time is over the TTL. However, the real difficulty is to efficiently reclaim disk space from expired files, similar to JVM memory garbage collection, which is a sophisticated piece of work with many man-years of effort.
|
||||
|
||||
Memcached also supports TTL. It gets around this problem by putting entries into fix-sized slabs. If one slab is expired, no work is required and the slab can be overwritten right away. However, this fix-sized slab approach is not applicable to files since the file contents rarely fit in slabs exactly.
|
||||
|
||||
Seaweed-FS efficiently resolves this disk space garbage collection problem with great simplicity. One of key differences from "normal" implementation is that the TTL is associated with the volume, together with each specific file.
|
||||
|
||||
During the file id assigning step, the file id will be assigned to a volume with matching TTL. The volumes are checked periodically (every 5~10 seconds by default). If the latest expiration time has been reached, all the files in the whole volume will have expired, and the volume can be safely deleted.
|
||||
|
||||
Implementation Details
|
||||
#############################
|
||||
1. When assigning file key, the master would pick one TTL volume with matching TTL. If such volumes do not exist, create a few.
|
||||
2. Volume servers will write the file with expiration time. When serving file, if the file is expired, the file will be reported as not found.
|
||||
3. Volume servers will track each volume's largest expiration time, and stop reporting the expired volumes to the master server.
|
||||
4. Master server will think the previously existed volumes are dead, and stop assigning write requests to them.
|
||||
5. After about 10% of the TTL time, or at most 10 minutes, the volume servers will delete the expired volume.
|
||||
|
||||
Deployment
|
||||
#############################
|
||||
|
||||
For deploying to production, the TTL volume maximum size should be taken into consideration. If the writes are frequent, the TTL volume will grow to the max volume size. So when the disk space is not ample enough, it's better to reduce the maximum volume size.
|
||||
|
||||
It's recommended not to mix the TTL volumes and non TTL volumes in the same cluster. This is because the volume maximum size, default to 30GB, is configured on the volume master at the cluster level.
|
||||
|
||||
We could implement the configuration for max volume size for each TTL. However, it could get fairly verbose. Maybe later if it is strongly desired.
|
||||
|
87
go/filer/cassandra_store/cassandra_store.go
Normal file
87
go/filer/cassandra_store/cassandra_store.go
Normal file
|
@ -0,0 +1,87 @@
|
|||
package cassandra_store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
Basically you need a table just like this:
|
||||
|
||||
CREATE TABLE seaweed_files (
|
||||
path varchar,
|
||||
fids list<varchar>,
|
||||
PRIMARY KEY (path)
|
||||
);
|
||||
|
||||
Need to match flat_namespace.FlatNamespaceStore interface
|
||||
Put(fullFileName string, fid string) (err error)
|
||||
Get(fullFileName string) (fid string, err error)
|
||||
Delete(fullFileName string) (fid string, err error)
|
||||
|
||||
*/
|
||||
// CassandraStore maps a full file path to a list of file ids in a
// Cassandra table (see the schema in the comment above). It implements
// the flat_namespace.FlatNamespaceStore interface.
type CassandraStore struct {
	cluster *gocql.ClusterConfig // cluster settings: hosts, keyspace, consistency
	session *gocql.Session       // session created from cluster; may be nil if CreateSession failed
}
||||
|
||||
func NewCassandraStore(keyspace string, hosts ...string) (c *CassandraStore, err error) {
|
||||
c = &CassandraStore{}
|
||||
c.cluster = gocql.NewCluster(hosts...)
|
||||
c.cluster.Keyspace = keyspace
|
||||
c.cluster.Consistency = gocql.Quorum
|
||||
c.session, err = c.cluster.CreateSession()
|
||||
if err != nil {
|
||||
glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *CassandraStore) Put(fullFileName string, fid string) (err error) {
|
||||
var input []string
|
||||
input = append(input, fid)
|
||||
if err := c.session.Query(
|
||||
`INSERT INTO seaweed_files (path, fids) VALUES (?, ?)`,
|
||||
fullFileName, input).Exec(); err != nil {
|
||||
glog.V(0).Infof("Failed to save file %s with id %s: %v", fullFileName, fid, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (c *CassandraStore) Get(fullFileName string) (fid string, err error) {
|
||||
var output []string
|
||||
if err := c.session.Query(
|
||||
`select fids FROM seaweed_files WHERE path = ? LIMIT 1`,
|
||||
fullFileName).Consistency(gocql.One).Scan(&output); err != nil {
|
||||
if err != gocql.ErrNotFound {
|
||||
glog.V(0).Infof("Failed to find file %s: %v", fullFileName, fid, err)
|
||||
}
|
||||
}
|
||||
if len(output) == 0 {
|
||||
return "", fmt.Errorf("No file id found for %s", fullFileName)
|
||||
}
|
||||
return output[0], nil
|
||||
}
|
||||
|
||||
// Currently the fid is not returned
|
||||
func (c *CassandraStore) Delete(fullFileName string) (fid string, err error) {
|
||||
if err := c.session.Query(
|
||||
`DELETE FROM seaweed_files WHERE path = ?`,
|
||||
fullFileName).Exec(); err != nil {
|
||||
if err != gocql.ErrNotFound {
|
||||
glog.V(0).Infof("Failed to delete file %s: %v", fullFileName, err)
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (c *CassandraStore) Close() {
|
||||
if c.session != nil {
|
||||
c.session.Close()
|
||||
}
|
||||
}
|
22
go/filer/cassandra_store/schema.cql
Normal file
22
go/filer/cassandra_store/schema.cql
Normal file
|
@ -0,0 +1,22 @@
|
|||
/*
|
||||
|
||||
Here is the CQL to create the table used by CassandraStore.
|
||||
|
||||
Optionally you can adjust the keyspace name and replication settings.
|
||||
|
||||
For production server, very likely you want to set replication_factor to 3
|
||||
|
||||
*/
|
||||
|
||||
create keyspace seaweed WITH replication = {
|
||||
'class':'SimpleStrategy',
|
||||
'replication_factor':1
|
||||
};
|
||||
|
||||
use seaweed;
|
||||
|
||||
CREATE TABLE seaweed_files (
|
||||
path varchar,
|
||||
fids list<varchar>,
|
||||
PRIMARY KEY (path)
|
||||
);
|
|
@ -1,12 +1,12 @@
|
|||
package filer
|
||||
|
||||
import ()
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
|
||||
"net/url"
|
||||
)
|
||||
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
package filer
|
||||
|
||||
import ()
|
||||
|
||||
type DirectoryId int32
|
||||
|
||||
type DirectoryEntry struct {
|
||||
Name string //dir name without path
|
||||
Id DirectoryId
|
||||
}
|
||||
|
||||
type DirectoryManager interface {
|
||||
FindDirectory(dirPath string) (DirectoryId, error)
|
||||
ListDirectories(dirPath string) (dirs []DirectoryEntry, err error)
|
||||
MakeDirectory(currentDirPath string, dirName string) (DirectoryId, error)
|
||||
MoveUnderDirectory(oldDirPath string, newParentDirPath string) error
|
||||
DeleteDirectory(dirPath string) error
|
||||
//functions used by FUSE
|
||||
FindDirectoryById(DirectoryId, error)
|
||||
}
|
15
go/filer/embedded_filer/directory.go
Normal file
15
go/filer/embedded_filer/directory.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package embedded_filer
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/filer"
|
||||
)
|
||||
|
||||
// DirectoryManager maintains the directory tree for the embedded filer:
// lookup, listing, creation, move and deletion of directories.
type DirectoryManager interface {
	// FindDirectory resolves a directory path to its id.
	FindDirectory(dirPath string) (filer.DirectoryId, error)
	// ListDirectories returns the immediate sub-directories of dirPath.
	ListDirectories(dirPath string) (dirs []filer.DirectoryEntry, err error)
	// MakeDirectory creates dirName under currentDirPath and returns its id.
	MakeDirectory(currentDirPath string, dirName string) (filer.DirectoryId, error)
	// MoveUnderDirectory re-parents oldDirPath under newParentDirPath.
	MoveUnderDirectory(oldDirPath string, newParentDirPath string) error
	// DeleteDirectory removes the directory at dirPath.
	DeleteDirectory(dirPath string) error
	//functions used by FUSE
	// NOTE(review): this declares a method taking (filer.DirectoryId, error)
	// and returning nothing — the `error` parameter looks unintended;
	// confirm whether it should be a result instead.
	FindDirectoryById(filer.DirectoryId, error)
}
|
|
@ -1,8 +1,7 @@
|
|||
package filer
|
||||
package embedded_filer
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
@ -10,6 +9,9 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/filer"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
var writeLock sync.Mutex //serialize changes to dir.log
|
||||
|
@ -18,12 +20,12 @@ type DirectoryEntryInMap struct {
|
|||
Name string
|
||||
Parent *DirectoryEntryInMap
|
||||
SubDirectories map[string]*DirectoryEntryInMap
|
||||
Id DirectoryId
|
||||
Id filer.DirectoryId
|
||||
}
|
||||
|
||||
type DirectoryManagerInMap struct {
|
||||
Root *DirectoryEntryInMap
|
||||
max DirectoryId
|
||||
max filer.DirectoryId
|
||||
logFile *os.File
|
||||
isLoading bool
|
||||
}
|
||||
|
@ -82,7 +84,7 @@ func (dm *DirectoryManagerInMap) processEachLine(line string) error {
|
|||
if pe != nil {
|
||||
return pe
|
||||
}
|
||||
if e := dm.loadDirectory(parts[1], DirectoryId(v)); e != nil {
|
||||
if e := dm.loadDirectory(parts[1], filer.DirectoryId(v)); e != nil {
|
||||
return e
|
||||
}
|
||||
case "mov":
|
||||
|
@ -141,7 +143,7 @@ func (dm *DirectoryManagerInMap) findDirectory(dirPath string) (*DirectoryEntryI
|
|||
}
|
||||
return dir, nil
|
||||
}
|
||||
func (dm *DirectoryManagerInMap) FindDirectory(dirPath string) (DirectoryId, error) {
|
||||
func (dm *DirectoryManagerInMap) FindDirectory(dirPath string) (filer.DirectoryId, error) {
|
||||
d, e := dm.findDirectory(dirPath)
|
||||
if e == nil {
|
||||
return d.Id, nil
|
||||
|
@ -149,7 +151,7 @@ func (dm *DirectoryManagerInMap) FindDirectory(dirPath string) (DirectoryId, err
|
|||
return dm.Root.Id, e
|
||||
}
|
||||
|
||||
func (dm *DirectoryManagerInMap) loadDirectory(dirPath string, dirId DirectoryId) error {
|
||||
func (dm *DirectoryManagerInMap) loadDirectory(dirPath string, dirId filer.DirectoryId) error {
|
||||
dirPath = filepath.Clean(dirPath)
|
||||
if dirPath == "/" {
|
||||
return nil
|
||||
|
@ -200,7 +202,7 @@ func (dm *DirectoryManagerInMap) makeDirectory(dirPath string) (dir *DirectoryEn
|
|||
return dir, created
|
||||
}
|
||||
|
||||
func (dm *DirectoryManagerInMap) MakeDirectory(dirPath string) (DirectoryId, error) {
|
||||
func (dm *DirectoryManagerInMap) MakeDirectory(dirPath string) (filer.DirectoryId, error) {
|
||||
dir, _ := dm.makeDirectory(dirPath)
|
||||
return dir.Id, nil
|
||||
}
|
||||
|
@ -227,13 +229,13 @@ func (dm *DirectoryManagerInMap) MoveUnderDirectory(oldDirPath string, newParent
|
|||
return nil
|
||||
}
|
||||
|
||||
func (dm *DirectoryManagerInMap) ListDirectories(dirPath string) (dirNames []DirectoryEntry, err error) {
|
||||
func (dm *DirectoryManagerInMap) ListDirectories(dirPath string) (dirNames []filer.DirectoryEntry, err error) {
|
||||
d, e := dm.findDirectory(dirPath)
|
||||
if e != nil {
|
||||
return dirNames, e
|
||||
}
|
||||
for k, v := range d.SubDirectories {
|
||||
dirNames = append(dirNames, DirectoryEntry{Name: k, Id: v.Id})
|
||||
dirNames = append(dirNames, filer.DirectoryEntry{Name: k, Id: v.Id})
|
||||
}
|
||||
return dirNames, nil
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package filer
|
||||
package embedded_filer
|
||||
|
||||
import (
|
||||
"os"
|
|
@ -1,11 +1,13 @@
|
|||
package filer
|
||||
package embedded_filer
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/filer"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
)
|
||||
|
||||
type FilerEmbedded struct {
|
||||
|
@ -47,13 +49,13 @@ func (filer *FilerEmbedded) FindFile(filePath string) (fid string, err error) {
|
|||
}
|
||||
return filer.files.FindFile(dirId, file)
|
||||
}
|
||||
func (filer *FilerEmbedded) FindDirectory(dirPath string) (dirId DirectoryId, err error) {
|
||||
func (filer *FilerEmbedded) FindDirectory(dirPath string) (dirId filer.DirectoryId, err error) {
|
||||
return filer.directories.FindDirectory(dirPath)
|
||||
}
|
||||
func (filer *FilerEmbedded) ListDirectories(dirPath string) (dirs []DirectoryEntry, err error) {
|
||||
func (filer *FilerEmbedded) ListDirectories(dirPath string) (dirs []filer.DirectoryEntry, err error) {
|
||||
return filer.directories.ListDirectories(dirPath)
|
||||
}
|
||||
func (filer *FilerEmbedded) ListFiles(dirPath string, lastFileName string, limit int) (files []FileEntry, err error) {
|
||||
func (filer *FilerEmbedded) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
|
||||
dirId, e := filer.directories.FindDirectory(dirPath)
|
||||
if e != nil {
|
||||
return nil, e
|
|
@ -1,7 +1,9 @@
|
|||
package filer
|
||||
package embedded_filer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/filer"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
@ -26,7 +28,7 @@ func NewFileListInLevelDb(dir string) (fl *FileListInLevelDb, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func genKey(dirId DirectoryId, fileName string) []byte {
|
||||
func genKey(dirId filer.DirectoryId, fileName string) []byte {
|
||||
ret := make([]byte, 0, 4+len(fileName))
|
||||
for i := 3; i >= 0; i-- {
|
||||
ret = append(ret, byte(dirId>>(uint(i)*8)))
|
||||
|
@ -35,25 +37,25 @@ func genKey(dirId DirectoryId, fileName string) []byte {
|
|||
return ret
|
||||
}
|
||||
|
||||
func (fl *FileListInLevelDb) CreateFile(dirId DirectoryId, fileName string, fid string) (err error) {
|
||||
func (fl *FileListInLevelDb) CreateFile(dirId filer.DirectoryId, fileName string, fid string) (err error) {
|
||||
glog.V(4).Infoln("directory", dirId, "fileName", fileName, "fid", fid)
|
||||
return fl.db.Put(genKey(dirId, fileName), []byte(fid), nil)
|
||||
}
|
||||
func (fl *FileListInLevelDb) DeleteFile(dirId DirectoryId, fileName string) (fid string, err error) {
|
||||
func (fl *FileListInLevelDb) DeleteFile(dirId filer.DirectoryId, fileName string) (fid string, err error) {
|
||||
if fid, err = fl.FindFile(dirId, fileName); err != nil {
|
||||
return
|
||||
}
|
||||
err = fl.db.Delete(genKey(dirId, fileName), nil)
|
||||
return fid, err
|
||||
}
|
||||
func (fl *FileListInLevelDb) FindFile(dirId DirectoryId, fileName string) (fid string, err error) {
|
||||
func (fl *FileListInLevelDb) FindFile(dirId filer.DirectoryId, fileName string) (fid string, err error) {
|
||||
data, e := fl.db.Get(genKey(dirId, fileName), nil)
|
||||
if e != nil {
|
||||
return "", e
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
func (fl *FileListInLevelDb) ListFiles(dirId DirectoryId, lastFileName string, limit int) (files []FileEntry) {
|
||||
func (fl *FileListInLevelDb) ListFiles(dirId filer.DirectoryId, lastFileName string, limit int) (files []filer.FileEntry) {
|
||||
glog.V(4).Infoln("directory", dirId, "lastFileName", lastFileName, "limit", limit)
|
||||
dirKey := genKey(dirId, "")
|
||||
iter := fl.db.NewIterator(&util.Range{Start: genKey(dirId, lastFileName)}, nil)
|
||||
|
@ -73,7 +75,7 @@ func (fl *FileListInLevelDb) ListFiles(dirId DirectoryId, lastFileName string, l
|
|||
break
|
||||
}
|
||||
}
|
||||
files = append(files, FileEntry{Name: fileName, Id: FileId(string(iter.Value()))})
|
||||
files = append(files, filer.FileEntry{Name: fileName, Id: filer.FileId(string(iter.Value()))})
|
||||
}
|
||||
iter.Release()
|
||||
return
|
|
@ -1,7 +1,5 @@
|
|||
package filer
|
||||
|
||||
import ()
|
||||
|
||||
type FileId string //file id on weedfs
|
||||
|
||||
type FileEntry struct {
|
||||
|
@ -9,13 +7,22 @@ type FileEntry struct {
|
|||
Id FileId `json:"fid,omitempty"`
|
||||
}
|
||||
|
||||
type DirectoryId int32
|
||||
|
||||
type DirectoryEntry struct {
|
||||
Name string //dir name without path
|
||||
Id DirectoryId
|
||||
}
|
||||
|
||||
type Filer interface {
|
||||
CreateFile(filePath string, fid string) (err error)
|
||||
FindFile(filePath string) (fid string, err error)
|
||||
CreateFile(fullFileName string, fid string) (err error)
|
||||
FindFile(fullFileName string) (fid string, err error)
|
||||
DeleteFile(fullFileName string) (fid string, err error)
|
||||
|
||||
//Optional functions. embedded filer support these
|
||||
FindDirectory(dirPath string) (dirId DirectoryId, err error)
|
||||
ListDirectories(dirPath string) (dirs []DirectoryEntry, err error)
|
||||
ListFiles(dirPath string, lastFileName string, limit int) (files []FileEntry, err error)
|
||||
DeleteDirectory(dirPath string, recursive bool) (err error)
|
||||
DeleteFile(filePath string) (fid string, err error)
|
||||
Move(fromPath string, toPath string) (err error)
|
||||
}
|
||||
|
|
50
go/filer/flat_namespace/flat_namespace_filer.go
Normal file
50
go/filer/flat_namespace/flat_namespace_filer.go
Normal file
|
@ -0,0 +1,50 @@
|
|||
package flat_namespace
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/filer"
|
||||
)
|
||||
|
||||
// FlatNamesapceFiler is a Filer implementation that keeps no directory
// tree: it delegates full-path -> fid mappings to a FlatNamespaceStore.
// (The misspelling "Namesapce" is kept: the name is exported and renaming
// would break callers.)
type FlatNamesapceFiler struct {
	master string             // weed master server address
	store  FlatNamespaceStore // backing key-value store (e.g. Redis, Cassandra)
}
|
||||
|
||||
var (
	// NotImplemented is returned by the directory-oriented operations,
	// which a flat namespace store cannot support.
	// NOTE(review): Go convention would name this ErrNotImplemented, but
	// the name is exported and renaming would break callers.
	NotImplemented = errors.New("Not Implemented for flat namespace meta data store!")
)
|
||||
|
||||
func NewFlatNamesapceFiler(master string, store FlatNamespaceStore) *FlatNamesapceFiler {
|
||||
return &FlatNamesapceFiler{
|
||||
master: master,
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
func (filer *FlatNamesapceFiler) CreateFile(fullFileName string, fid string) (err error) {
|
||||
return filer.store.Put(fullFileName, fid)
|
||||
}
|
||||
func (filer *FlatNamesapceFiler) FindFile(fullFileName string) (fid string, err error) {
|
||||
return filer.store.Get(fullFileName)
|
||||
}
|
||||
func (filer *FlatNamesapceFiler) FindDirectory(dirPath string) (dirId filer.DirectoryId, err error) {
|
||||
return 0, NotImplemented
|
||||
}
|
||||
func (filer *FlatNamesapceFiler) ListDirectories(dirPath string) (dirs []filer.DirectoryEntry, err error) {
|
||||
return nil, NotImplemented
|
||||
}
|
||||
func (filer *FlatNamesapceFiler) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
|
||||
return nil, NotImplemented
|
||||
}
|
||||
func (filer *FlatNamesapceFiler) DeleteDirectory(dirPath string, recursive bool) (err error) {
|
||||
return NotImplemented
|
||||
}
|
||||
|
||||
func (filer *FlatNamesapceFiler) DeleteFile(fullFileName string) (fid string, err error) {
|
||||
return filer.store.Delete(fullFileName)
|
||||
}
|
||||
|
||||
func (filer *FlatNamesapceFiler) Move(fromPath string, toPath string) error {
|
||||
return NotImplemented
|
||||
}
|
9
go/filer/flat_namespace/flat_namespace_store.go
Normal file
9
go/filer/flat_namespace/flat_namespace_store.go
Normal file
|
@ -0,0 +1,9 @@
|
|||
package flat_namespace
|
||||
|
||||
import ()
|
||||
|
||||
// FlatNamespaceStore is the minimal key-value contract a metadata store
// must satisfy to back a FlatNamesapceFiler: full file path -> fid.
type FlatNamespaceStore interface {
	// Put stores fid under fullFileName.
	Put(fullFileName string, fid string) (err error)
	// Get returns the fid stored under fullFileName.
	Get(fullFileName string) (fid string, err error)
	// Delete removes the mapping for fullFileName.
	Delete(fullFileName string) (fid string, err error)
}
|
48
go/filer/redis_store/redis_store.go
Normal file
48
go/filer/redis_store/redis_store.go
Normal file
|
@ -0,0 +1,48 @@
|
|||
package redis_store
|
||||
|
||||
import (
|
||||
redis "gopkg.in/redis.v2"
|
||||
)
|
||||
|
||||
// RedisStore maps full file names to file ids in a single Redis server.
// It satisfies the flat namespace store contract (Put/Get/Delete).
type RedisStore struct {
	Client *redis.Client // shared connection; released by Close
}
|
||||
|
||||
func NewRedisStore(hostPort string, database int) *RedisStore {
|
||||
client := redis.NewTCPClient(&redis.Options{
|
||||
Addr: hostPort,
|
||||
Password: "", // no password set
|
||||
DB: int64(database),
|
||||
})
|
||||
return &RedisStore{Client: client}
|
||||
}
|
||||
|
||||
func (s *RedisStore) Get(fullFileName string) (fid string, err error) {
|
||||
fid, err = s.Client.Get(fullFileName).Result()
|
||||
if err == redis.Nil {
|
||||
err = nil
|
||||
}
|
||||
return fid, err
|
||||
}
|
||||
func (s *RedisStore) Put(fullFileName string, fid string) (err error) {
|
||||
_, err = s.Client.Set(fullFileName, fid).Result()
|
||||
if err == redis.Nil {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Currently the fid is not returned
|
||||
func (s *RedisStore) Delete(fullFileName string) (fid string, err error) {
|
||||
_, err = s.Client.Del(fullFileName).Result()
|
||||
if err == redis.Nil {
|
||||
err = nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
func (c *RedisStore) Close() {
|
||||
if c.Client != nil {
|
||||
c.Client.Close()
|
||||
}
|
||||
}
|
|
@ -1,7 +1,5 @@
|
|||
package glog
|
||||
|
||||
import ()
|
||||
|
||||
/*
|
||||
Copying the original glog because it is missing several convenient methods.
|
||||
1. remove nano time in log format
|
||||
|
|
|
@ -2,11 +2,12 @@ package images
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/rwcarlsen/goexif/exif"
|
||||
"image"
|
||||
"image/draw"
|
||||
"image/jpeg"
|
||||
"log"
|
||||
|
||||
"github.com/rwcarlsen/goexif/exif"
|
||||
)
|
||||
|
||||
//many code is copied from http://camlistore.org/pkg/images/images.go
|
||||
|
|
|
@ -2,11 +2,12 @@ package images
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/disintegration/imaging"
|
||||
"image"
|
||||
"image/gif"
|
||||
"image/jpeg"
|
||||
"image/png"
|
||||
|
||||
"github.com/disintegration/imaging"
|
||||
)
|
||||
|
||||
func Resized(ext string, data []byte, width, height int) (resized []byte, w int, h int) {
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
package operation
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type AssignResult struct {
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package operation
|
||||
|
||||
import ()
|
||||
|
||||
type JoinResult struct {
|
||||
VolumeSizeLimit uint64 `json:"VolumeSizeLimit,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
package operation
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type DeleteResult struct {
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
package operation
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type ClusterStatusResult struct {
|
||||
|
|
|
@ -1,14 +1,15 @@
|
|||
package operation
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
_ "fmt"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type Location struct {
|
||||
|
@ -21,6 +22,10 @@ type LookupResult struct {
|
|||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (lr *LookupResult) String() string {
|
||||
return fmt.Sprintf("VolumeId:%s, Locations:%v, Error:%s", lr.VolumeId, lr.Locations, lr.Error)
|
||||
}
|
||||
|
||||
var (
|
||||
vc VidCache
|
||||
)
|
||||
|
|
|
@ -2,13 +2,14 @@ package operation
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"io"
|
||||
"mime"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
type FilePart struct {
|
||||
|
|
|
@ -14,7 +14,7 @@ It has these top-level messages:
|
|||
*/
|
||||
package operation
|
||||
|
||||
import proto "code.google.com/p/goprotobuf/proto"
|
||||
import "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
package operation
|
||||
|
||||
import (
|
||||
proto "code.google.com/p/goprotobuf/proto"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestSerialDeserial(t *testing.T) {
|
||||
|
|
|
@ -2,7 +2,6 @@ package operation
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -14,6 +13,8 @@ import (
|
|||
"net/textproto"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
type UploadResult struct {
|
||||
|
|
146
go/security/guard.go
Normal file
146
go/security/guard.go
Normal file
|
@ -0,0 +1,146 @@
|
|||
package security
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/dgrijalva/jwt-go"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnauthorized = errors.New("unauthorized token")
|
||||
)
|
||||
|
||||
/*
|
||||
Guard is to ensure data access security.
|
||||
There are 2 ways to check access:
|
||||
1. white list. It's checking request ip address.
|
||||
2. JSON Web Token(JWT) generated from secretKey.
|
||||
The jwt can come from:
|
||||
1. url parameter jwt=...
|
||||
2. request header "Authorization"
|
||||
3. cookie with the name "jwt"
|
||||
|
||||
The white list is checked first because it is easy.
|
||||
Then the JWT is checked.
|
||||
|
||||
The Guard will also check these claims if provided:
|
||||
1. "exp" Expiration Time
|
||||
2. "nbf" Not Before
|
||||
|
||||
Generating JWT:
|
||||
1. use HS256 to sign
|
||||
2. optionally set "exp", "nbf" fields, in Unix time,
|
||||
the number of seconds elapsed since January 1, 1970 UTC.
|
||||
|
||||
Referenced:
|
||||
https://github.com/pkieltyka/jwtauth/blob/master/jwtauth.go
|
||||
|
||||
*/
|
||||
type Guard struct {
|
||||
whiteList []string
|
||||
secretKey string
|
||||
|
||||
isActive bool
|
||||
}
|
||||
|
||||
func NewGuard(whiteList []string, secretKey string) *Guard {
|
||||
g := &Guard{whiteList: whiteList, secretKey: secretKey}
|
||||
g.isActive = len(g.whiteList) != 0 || len(g.secretKey) != 0
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *Guard) Secure(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
if !g.isActive {
|
||||
//if no security needed, just skip all checkings
|
||||
return f
|
||||
}
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := g.doCheck(w, r); err != nil {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
f(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func (g *Guard) NewToken() (tokenString string, err error) {
|
||||
m := make(map[string]interface{})
|
||||
m["exp"] = time.Now().Unix() + 10
|
||||
return g.Encode(m)
|
||||
}
|
||||
|
||||
func (g *Guard) Encode(claims map[string]interface{}) (tokenString string, err error) {
|
||||
if !g.isActive {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
t := jwt.New(jwt.GetSigningMethod("HS256"))
|
||||
t.Claims = claims
|
||||
return t.SignedString(g.secretKey)
|
||||
}
|
||||
|
||||
func (g *Guard) Decode(tokenString string) (token *jwt.Token, err error) {
|
||||
return jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
|
||||
return g.secretKey, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (g *Guard) doCheck(w http.ResponseWriter, r *http.Request) error {
|
||||
if len(g.whiteList) != 0 {
|
||||
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err == nil {
|
||||
for _, ip := range g.whiteList {
|
||||
if ip == host {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(g.secretKey) != 0 {
|
||||
|
||||
// Get token from query params
|
||||
tokenStr := r.URL.Query().Get("jwt")
|
||||
|
||||
// Get token from authorization header
|
||||
if tokenStr == "" {
|
||||
bearer := r.Header.Get("Authorization")
|
||||
if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" {
|
||||
tokenStr = bearer[7:]
|
||||
}
|
||||
}
|
||||
|
||||
// Get token from cookie
|
||||
if tokenStr == "" {
|
||||
cookie, err := r.Cookie("jwt")
|
||||
if err == nil {
|
||||
tokenStr = cookie.Value
|
||||
}
|
||||
}
|
||||
|
||||
if tokenStr == "" {
|
||||
return ErrUnauthorized
|
||||
}
|
||||
|
||||
// Verify the token
|
||||
token, err := g.Decode(tokenStr)
|
||||
if err != nil {
|
||||
glog.V(1).Infof("Token verification error from %s: %v", r.RemoteAddr, err)
|
||||
return ErrUnauthorized
|
||||
}
|
||||
if !token.Valid {
|
||||
glog.V(1).Infof("Token invliad from %s: %v", r.RemoteAddr, tokenStr)
|
||||
return ErrUnauthorized
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
glog.V(1).Infof("No permission from %s", r.RemoteAddr)
|
||||
return fmt.Errorf("No write permisson from %s", r.RemoteAddr)
|
||||
}
|
|
@ -1,7 +1,5 @@
|
|||
package sequence
|
||||
|
||||
import ()
|
||||
|
||||
type Sequencer interface {
|
||||
NextFileId(count int) (uint64, int)
|
||||
SetMax(uint64)
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package stats
|
||||
|
||||
import ()
|
||||
|
||||
type DiskStatus struct {
|
||||
Dir string
|
||||
All uint64
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
package stats
|
||||
|
||||
import ()
|
||||
|
||||
func (disk *DiskStatus) fillInStatus() {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
package stats
|
||||
|
||||
import ()
|
||||
|
||||
func (mem *MemStatus) fillInStatus() {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/tgulacsi/go-cdb"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/tgulacsi/go-cdb"
|
||||
)
|
||||
|
||||
// CDB-backed read-only needle map
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
var testIndexFilename string = "../../test/sample.idx"
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package storage
|
||||
|
||||
import ()
|
||||
|
||||
type NeedleValue struct {
|
||||
Key Key
|
||||
Offset uint32 `comment:"Volume offset"` //since aligned to 8 bytes, range is 4G*8=32G
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
func TestMemoryUsage(t *testing.T) {
|
||||
|
|
|
@ -2,11 +2,12 @@ package storage
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
var table = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type FileId struct {
|
||||
|
|
|
@ -1,11 +1,9 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/images"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
|
@ -13,6 +11,10 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/images"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -23,6 +25,7 @@ const (
|
|||
)
|
||||
|
||||
/*
|
||||
* A Needle means a uploaded and stored file.
|
||||
* Needle file size is limited to 4GB for now.
|
||||
*/
|
||||
type Needle struct {
|
||||
|
@ -44,6 +47,11 @@ type Needle struct {
|
|||
Padding []byte `comment:"Aligned to 8 bytes"`
|
||||
}
|
||||
|
||||
func (n *Needle) String() (str string) {
|
||||
str = fmt.Sprintf("Cookie:%d, Id:%d, Size:%d, DataSize:%d, Name: %s, Mime: %s", n.Cookie, n.Id, n.Size, n.DataSize, n.Name, n.Mime)
|
||||
return
|
||||
}
|
||||
|
||||
func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string, isGzipped bool, modifiedTime uint64, ttl *TTL, e error) {
|
||||
form, fe := r.MultipartReader()
|
||||
if fe != nil {
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type NeedleMapper interface {
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
proto "code.google.com/p/goprotobuf/proto"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -12,6 +8,11 @@ import (
|
|||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -32,6 +33,10 @@ type MasterNodes struct {
|
|||
lastNode int
|
||||
}
|
||||
|
||||
func (mn *MasterNodes) String() string {
|
||||
return fmt.Sprintf("nodes:%v, lastNode:%d", mn.nodes, mn.lastNode)
|
||||
}
|
||||
|
||||
func NewMasterNodes(bootstrapNode string) (mn *MasterNodes) {
|
||||
mn = &MasterNodes{nodes: []string{bootstrapNode}, lastNode: -1}
|
||||
return
|
||||
|
@ -64,6 +69,9 @@ func (mn *MasterNodes) findMaster() (string, error) {
|
|||
return mn.nodes[mn.lastNode], nil
|
||||
}
|
||||
|
||||
/*
|
||||
* A VolumeServer contains one Store
|
||||
*/
|
||||
type Store struct {
|
||||
Port int
|
||||
Ip string
|
||||
|
@ -76,6 +84,11 @@ type Store struct {
|
|||
masterNodes *MasterNodes
|
||||
}
|
||||
|
||||
func (s *Store) String() (str string) {
|
||||
str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d, masterNodes:%s", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.volumeSizeLimit, s.masterNodes)
|
||||
return
|
||||
}
|
||||
|
||||
func NewStore(port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int) (s *Store) {
|
||||
s = &Store{Port: port, Ip: ip, PublicUrl: publicUrl}
|
||||
s.Locations = make([]*DiskLocation, 0)
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
func (s *Store) CheckCompactVolume(volumeIdString string, garbageThresholdString string) (error, bool) {
|
||||
|
|
|
@ -2,7 +2,6 @@ package storage
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -10,6 +9,8 @@ import (
|
|||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
type Volume struct {
|
||||
|
@ -32,6 +33,10 @@ func NewVolume(dirname string, collection string, id VolumeId, replicaPlacement
|
|||
e = v.load(true, true)
|
||||
return
|
||||
}
|
||||
func (v *Volume) String() string {
|
||||
return fmt.Sprintf("Id:%v, dir:%s, Collection:%s, dataFile:%v, nm:%v, readOnly:%v", v.Id, v.dir, v.Collection, v.dataFile, v.nm, v.readOnly)
|
||||
}
|
||||
|
||||
func loadVolumeWithoutIndex(dirname string, collection string, id VolumeId) (v *Volume, e error) {
|
||||
v = &Volume{dir: dirname, Collection: collection, Id: id}
|
||||
v.SuperBlock = SuperBlock{}
|
||||
|
@ -72,7 +77,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool) error {
|
|||
|
||||
if e != nil {
|
||||
if !os.IsPermission(e) {
|
||||
return fmt.Errorf("cannot load Volume Data %s.dat: %s", fileName, e.Error())
|
||||
return fmt.Errorf("cannot load Volume Data %s.dat: %v", fileName, e)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -92,12 +97,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool) error {
|
|||
if v.readOnly {
|
||||
glog.V(1).Infoln("open to read file", fileName+".idx")
|
||||
if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDONLY, 0644); e != nil {
|
||||
return fmt.Errorf("cannot read Volume Index %s.idx: %s", fileName, e.Error())
|
||||
return fmt.Errorf("cannot read Volume Index %s.idx: %v", fileName, e)
|
||||
}
|
||||
} else {
|
||||
glog.V(1).Infoln("open to write file", fileName+".idx")
|
||||
if indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); e != nil {
|
||||
return fmt.Errorf("cannot write Volume Index %s.idx: %s", fileName, e.Error())
|
||||
return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, e)
|
||||
}
|
||||
}
|
||||
glog.V(0).Infoln("loading file", fileName+".idx", "readonly", v.readOnly)
|
||||
|
@ -115,7 +120,7 @@ func (v *Volume) Size() int64 {
|
|||
if e == nil {
|
||||
return stat.Size()
|
||||
}
|
||||
glog.V(0).Infof("Failed to read file size %s %s", v.dataFile.Name(), e.Error())
|
||||
glog.V(0).Infof("Failed to read file size %s %v", v.dataFile.Name(), e)
|
||||
return -1
|
||||
}
|
||||
func (v *Volume) Close() {
|
||||
|
@ -134,7 +139,7 @@ func (v *Volume) isFileUnchanged(n *Needle) bool {
|
|||
oldNeedle := new(Needle)
|
||||
oldNeedle.Read(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version())
|
||||
if oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
|
||||
n.Size = oldNeedle.Size
|
||||
n.DataSize = oldNeedle.DataSize
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -164,12 +169,13 @@ func (v *Volume) write(n *Needle) (size uint32, err error) {
|
|||
v.accessLock.Lock()
|
||||
defer v.accessLock.Unlock()
|
||||
if v.isFileUnchanged(n) {
|
||||
size = n.Size
|
||||
size = n.DataSize
|
||||
glog.V(4).Infof("needle is unchanged!")
|
||||
return
|
||||
}
|
||||
var offset int64
|
||||
if offset, err = v.dataFile.Seek(0, 2); err != nil {
|
||||
glog.V(0).Infof("faile to seek the end of file: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -177,21 +183,21 @@ func (v *Volume) write(n *Needle) (size uint32, err error) {
|
|||
if offset%NeedlePaddingSize != 0 {
|
||||
offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
|
||||
if offset, err = v.dataFile.Seek(offset, 0); err != nil {
|
||||
glog.V(4).Infof("failed to align in datafile %s: %s", v.dataFile.Name(), err.Error())
|
||||
glog.V(0).Infof("failed to align in datafile %s: %v", v.dataFile.Name(), err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if size, err = n.Append(v.dataFile, v.Version()); err != nil {
|
||||
if e := v.dataFile.Truncate(offset); e != nil {
|
||||
err = fmt.Errorf("%s\ncannot truncate %s: %s", err, v.dataFile.Name(), e.Error())
|
||||
err = fmt.Errorf("%s\ncannot truncate %s: %v", err, v.dataFile.Name(), e)
|
||||
}
|
||||
return
|
||||
}
|
||||
nv, ok := v.nm.Get(n.Id)
|
||||
if !ok || int64(nv.Offset)*NeedlePaddingSize < offset {
|
||||
if _, err = v.nm.Put(n.Id, uint32(offset/NeedlePaddingSize), n.Size); err != nil {
|
||||
glog.V(4).Infof("failed to save in needle map %d: %s", n.Id, err.Error())
|
||||
glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
|
||||
}
|
||||
}
|
||||
if v.lastModifiedTime < n.LastModified {
|
||||
|
@ -292,13 +298,13 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId,
|
|||
offset := int64(SuperBlockSize)
|
||||
n, rest, e := ReadNeedleHeader(v.dataFile, version, offset)
|
||||
if e != nil {
|
||||
err = fmt.Errorf("cannot read needle header: %s", e)
|
||||
err = fmt.Errorf("cannot read needle header: %v", e)
|
||||
return
|
||||
}
|
||||
for n != nil {
|
||||
if readNeedleBody {
|
||||
if err = n.ReadNeedleBody(v.dataFile, version, offset+int64(NeedleHeaderSize), rest); err != nil {
|
||||
err = fmt.Errorf("cannot read needle body: %s", err)
|
||||
err = fmt.Errorf("cannot read needle body: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -310,7 +316,7 @@ func ScanVolumeFile(dirname string, collection string, id VolumeId,
|
|||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("cannot read needle header: %s", err)
|
||||
return fmt.Errorf("cannot read needle header: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -360,7 +366,7 @@ func (v *Volume) ensureConvertIdxToCdb(fileName string) (cdbCanRead bool) {
|
|||
defer indexFile.Close()
|
||||
glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
|
||||
if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
|
||||
glog.V(0).Infof("error converting %s.idx to %s.cdb: %s", fileName, fileName, e.Error())
|
||||
glog.V(0).Infof("error converting %s.idx to %s.cdb: %v", fileName, fileName, e)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
)
|
||||
|
||||
|
@ -36,3 +37,7 @@ func NewVolumeInfo(m *operation.VolumeInformationMessage) (vi VolumeInfo, err er
|
|||
vi.Ttl = LoadTTLFromUint32(*m.Ttl)
|
||||
return vi, nil
|
||||
}
|
||||
|
||||
func (vi VolumeInfo) String() string {
|
||||
return fmt.Sprintf("Id:%s, Size:%d, ReplicaPlacement:%s, Collection:%s, Version:%v, FileCount:%d, DeleteCount:%d, DeletedByteCount:%d, ReadOnly:%v", vi.Id, vi.Size, vi.ReplicaPlacement, vi.Collection, vi.Version, vi.FileCount, vi.DeleteCount, vi.DeletedByteCount, vi.ReadOnly)
|
||||
}
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
func (v *Volume) garbageLevel() float64 {
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package storage
|
||||
|
||||
import ()
|
||||
|
||||
type Version uint8
|
||||
|
||||
const (
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type AllocateVolumeResult struct {
|
||||
|
|
|
@ -1,36 +1,43 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type Collection struct {
|
||||
Name string
|
||||
volumeSizeLimit uint64
|
||||
storageType2VolumeLayout map[string]*VolumeLayout
|
||||
storageType2VolumeLayout *util.ConcurrentReadMap
|
||||
}
|
||||
|
||||
func NewCollection(name string, volumeSizeLimit uint64) *Collection {
|
||||
c := &Collection{Name: name, volumeSizeLimit: volumeSizeLimit}
|
||||
c.storageType2VolumeLayout = make(map[string]*VolumeLayout)
|
||||
c.storageType2VolumeLayout = util.NewConcurrentReadMap()
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collection) String() string {
|
||||
return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout)
|
||||
}
|
||||
|
||||
func (c *Collection) GetOrCreateVolumeLayout(rp *storage.ReplicaPlacement, ttl *storage.TTL) *VolumeLayout {
|
||||
keyString := rp.String()
|
||||
if ttl != nil {
|
||||
keyString += ttl.String()
|
||||
}
|
||||
if c.storageType2VolumeLayout[keyString] == nil {
|
||||
c.storageType2VolumeLayout[keyString] = NewVolumeLayout(rp, ttl, c.volumeSizeLimit)
|
||||
}
|
||||
return c.storageType2VolumeLayout[keyString]
|
||||
vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {
|
||||
return NewVolumeLayout(rp, ttl, c.volumeSizeLimit)
|
||||
})
|
||||
return vl.(*VolumeLayout)
|
||||
}
|
||||
|
||||
func (c *Collection) Lookup(vid storage.VolumeId) []*DataNode {
|
||||
for _, vl := range c.storageType2VolumeLayout {
|
||||
for _, vl := range c.storageType2VolumeLayout.Items {
|
||||
if vl != nil {
|
||||
if list := vl.Lookup(vid); list != nil {
|
||||
if list := vl.(*VolumeLayout).Lookup(vid); list != nil {
|
||||
return list
|
||||
}
|
||||
}
|
||||
|
@ -39,9 +46,9 @@ func (c *Collection) Lookup(vid storage.VolumeId) []*DataNode {
|
|||
}
|
||||
|
||||
func (c *Collection) ListVolumeServers() (nodes []*DataNode) {
|
||||
for _, vl := range c.storageType2VolumeLayout {
|
||||
for _, vl := range c.storageType2VolumeLayout.Items {
|
||||
if vl != nil {
|
||||
if list := vl.ListVolumeServers(); list != nil {
|
||||
if list := vl.(*VolumeLayout).ListVolumeServers(); list != nil {
|
||||
nodes = append(nodes, list...)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package topology
|
||||
|
||||
import ()
|
||||
|
||||
type DataCenter struct {
|
||||
NodeImpl
|
||||
}
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type DataNode struct {
|
||||
|
@ -25,6 +27,10 @@ func NewDataNode(id string) *DataNode {
|
|||
return s
|
||||
}
|
||||
|
||||
func (dn *DataNode) String() string {
|
||||
return fmt.Sprintf("NodeImpl:%s ,volumes:%v, Ip:%s, Port:%d, PublicUrl:%s, Dead:%v", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl, dn.Dead)
|
||||
}
|
||||
|
||||
func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) {
|
||||
if _, ok := dn.volumes[v.Id]; !ok {
|
||||
dn.volumes[v.Id] = v
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
type NodeId string
|
||||
|
|
|
@ -2,12 +2,13 @@ package topology
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func ReplicatedWrite(masterNode string, s *storage.Store, volumeId storage.VolumeId, needle *storage.Needle, r *http.Request) (size uint32, errorStatus string) {
|
||||
|
|
|
@ -1,20 +1,22 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/sequence"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"errors"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/goraft/raft"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
)
|
||||
|
||||
type Topology struct {
|
||||
NodeImpl
|
||||
|
||||
collectionMap map[string]*Collection
|
||||
collectionMap *util.ConcurrentReadMap
|
||||
|
||||
pulse int64
|
||||
|
||||
|
@ -37,7 +39,7 @@ func NewTopology(id string, confFile string, seq sequence.Sequencer, volumeSizeL
|
|||
t.nodeType = "Topology"
|
||||
t.NodeImpl.value = t
|
||||
t.children = make(map[NodeId]Node)
|
||||
t.collectionMap = make(map[string]*Collection)
|
||||
t.collectionMap = util.NewConcurrentReadMap()
|
||||
t.pulse = int64(pulse)
|
||||
t.volumeSizeLimit = volumeSizeLimit
|
||||
|
||||
|
@ -89,14 +91,14 @@ func (t *Topology) loadConfiguration(configurationFile string) error {
|
|||
func (t *Topology) Lookup(collection string, vid storage.VolumeId) []*DataNode {
|
||||
//maybe an issue if lots of collections?
|
||||
if collection == "" {
|
||||
for _, c := range t.collectionMap {
|
||||
if list := c.Lookup(vid); list != nil {
|
||||
for _, c := range t.collectionMap.Items {
|
||||
if list := c.(*Collection).Lookup(vid); list != nil {
|
||||
return list
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if c, ok := t.collectionMap[collection]; ok {
|
||||
return c.Lookup(vid)
|
||||
if c, ok := t.collectionMap.Items[collection]; ok {
|
||||
return c.(*Collection).Lookup(vid)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -109,7 +111,7 @@ func (t *Topology) NextVolumeId() storage.VolumeId {
|
|||
return next
|
||||
}
|
||||
|
||||
func (t *Topology) HasWriableVolume(option *VolumeGrowOption) bool {
|
||||
func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
|
||||
vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl)
|
||||
return vl.GetActiveVolumeCount(option) > 0
|
||||
}
|
||||
|
@ -124,20 +126,18 @@ func (t *Topology) PickForWrite(count int, option *VolumeGrowOption) (string, in
|
|||
}
|
||||
|
||||
func (t *Topology) GetVolumeLayout(collectionName string, rp *storage.ReplicaPlacement, ttl *storage.TTL) *VolumeLayout {
|
||||
_, ok := t.collectionMap[collectionName]
|
||||
if !ok {
|
||||
t.collectionMap[collectionName] = NewCollection(collectionName, t.volumeSizeLimit)
|
||||
}
|
||||
return t.collectionMap[collectionName].GetOrCreateVolumeLayout(rp, ttl)
|
||||
return t.collectionMap.Get(collectionName, func() interface{} {
|
||||
return NewCollection(collectionName, t.volumeSizeLimit)
|
||||
}).(*Collection).GetOrCreateVolumeLayout(rp, ttl)
|
||||
}
|
||||
|
||||
func (t *Topology) GetCollection(collectionName string) (collection *Collection, ok bool) {
|
||||
collection, ok = t.collectionMap[collectionName]
|
||||
return
|
||||
func (t *Topology) GetCollection(collectionName string) (*Collection, bool) {
|
||||
c, hasCollection := t.collectionMap.Items[collectionName]
|
||||
return c.(*Collection), hasCollection
|
||||
}
|
||||
|
||||
func (t *Topology) DeleteCollection(collectionName string) {
|
||||
delete(t.collectionMap, collectionName)
|
||||
delete(t.collectionMap.Items, collectionName)
|
||||
}
|
||||
|
||||
func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package topology
|
||||
|
||||
import ()
|
||||
|
||||
func (t *Topology) ToMap() interface{} {
|
||||
m := make(map[string]interface{})
|
||||
m["Max"] = t.GetMaxVolumeCount()
|
||||
|
@ -13,10 +11,11 @@ func (t *Topology) ToMap() interface{} {
|
|||
}
|
||||
m["DataCenters"] = dcs
|
||||
var layouts []interface{}
|
||||
for _, c := range t.collectionMap {
|
||||
for _, layout := range c.storageType2VolumeLayout {
|
||||
for _, col := range t.collectionMap.Items {
|
||||
c := col.(*Collection)
|
||||
for _, layout := range c.storageType2VolumeLayout.Items {
|
||||
if layout != nil {
|
||||
tmp := layout.ToMap()
|
||||
tmp := layout.(*VolumeLayout).ToMap()
|
||||
tmp["collection"] = c.Name
|
||||
layouts = append(layouts, tmp)
|
||||
}
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold string) bool {
|
||||
|
@ -79,13 +80,15 @@ func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlis
|
|||
return isCommitSuccess
|
||||
}
|
||||
func (t *Topology) Vacuum(garbageThreshold string) int {
|
||||
for _, c := range t.collectionMap {
|
||||
for _, vl := range c.storageType2VolumeLayout {
|
||||
for _, col := range t.collectionMap.Items {
|
||||
c := col.(*Collection)
|
||||
for _, vl := range c.storageType2VolumeLayout.Items {
|
||||
if vl != nil {
|
||||
for vid, locationlist := range vl.vid2location {
|
||||
if batchVacuumVolumeCheck(vl, vid, locationlist, garbageThreshold) {
|
||||
if batchVacuumVolumeCompact(vl, vid, locationlist) {
|
||||
batchVacuumVolumeCommit(vl, vid, locationlist)
|
||||
volumeLayout := vl.(*VolumeLayout)
|
||||
for vid, locationlist := range volumeLayout.vid2location {
|
||||
if batchVacuumVolumeCheck(volumeLayout, vid, locationlist, garbageThreshold) {
|
||||
if batchVacuumVolumeCompact(volumeLayout, vid, locationlist) {
|
||||
batchVacuumVolumeCommit(volumeLayout, vid, locationlist)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@ -29,6 +30,10 @@ type VolumeGrowth struct {
|
|||
accessLock sync.Mutex
|
||||
}
|
||||
|
||||
func (o *VolumeGrowOption) String() string {
|
||||
return fmt.Sprintf("Collection:%s, ReplicaPlacement:%v, Ttl:%v, DataCenter:%s, Rack:%s, DataNode:%s", o.Collection, o.ReplicaPlacement, o.Ttl, o.DataCenter, o.Rack, o.DataNode)
|
||||
}
|
||||
|
||||
func NewDefaultVolumeGrowth() *VolumeGrowth {
|
||||
return &VolumeGrowth{}
|
||||
}
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/sequence"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/sequence"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
var topologyLayout = `
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
// mapping from volume to its locations, inverted from server to volume
|
||||
|
@ -28,6 +30,10 @@ func NewVolumeLayout(rp *storage.ReplicaPlacement, ttl *storage.TTL, volumeSizeL
|
|||
}
|
||||
}
|
||||
|
||||
func (vl *VolumeLayout) String() string {
|
||||
return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
|
||||
}
|
||||
|
||||
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
|
||||
vl.accessLock.Lock()
|
||||
defer vl.accessLock.Unlock()
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
package topology
|
||||
|
||||
import ()
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type VolumeLocationList struct {
|
||||
list []*DataNode
|
||||
|
@ -10,6 +12,10 @@ func NewVolumeLocationList() *VolumeLocationList {
|
|||
return &VolumeLocationList{}
|
||||
}
|
||||
|
||||
func (dnll *VolumeLocationList) String() string {
|
||||
return fmt.Sprintf("%v", dnll.list)
|
||||
}
|
||||
|
||||
func (dnll *VolumeLocationList) Head() *DataNode {
|
||||
return dnll.list[0]
|
||||
}
|
||||
|
|
39
go/util/concurrent_read_map.go
Normal file
39
go/util/concurrent_read_map.go
Normal file
|
@ -0,0 +1,39 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A mostly for read map, which can thread-safely
|
||||
// initialize the map entries.
|
||||
type ConcurrentReadMap struct {
|
||||
rmutex sync.RWMutex
|
||||
mutex sync.Mutex
|
||||
Items map[string]interface{}
|
||||
}
|
||||
|
||||
func NewConcurrentReadMap() *ConcurrentReadMap {
|
||||
return &ConcurrentReadMap{Items: make(map[string]interface{})}
|
||||
}
|
||||
|
||||
func (m *ConcurrentReadMap) initMapEntry(key string, newEntry func() interface{}) (value interface{}) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
if value, ok := m.Items[key]; ok {
|
||||
return value
|
||||
}
|
||||
value = newEntry()
|
||||
m.Items[key] = value
|
||||
return value
|
||||
}
|
||||
|
||||
func (m *ConcurrentReadMap) Get(key string, newEntry func() interface{}) interface{} {
|
||||
m.rmutex.RLock()
|
||||
if value, ok := m.Items[key]; ok {
|
||||
m.rmutex.RUnlock()
|
||||
return value
|
||||
} else {
|
||||
m.rmutex.RUnlock()
|
||||
return m.initMapEntry(key, newEntry)
|
||||
}
|
||||
}
|
|
@ -10,9 +10,10 @@ package util
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package util
|
||||
|
||||
import ()
|
||||
|
||||
const (
|
||||
VERSION = "0.64"
|
||||
VERSION = "0.67"
|
||||
)
|
||||
|
|
|
@ -2,9 +2,10 @@ package util
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
func TestFolderWritable(folder string) (err error) {
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/stats"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/stats"
|
||||
)
|
||||
|
||||
// Listener wraps a net.Listener, and gives a place to store the timeout
|
||||
|
|
|
@ -2,9 +2,6 @@ package main
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
|
@ -16,6 +13,10 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
type BenchmarkOptions struct {
|
||||
|
@ -30,11 +31,14 @@ type BenchmarkOptions struct {
|
|||
sequentialRead *bool
|
||||
collection *string
|
||||
cpuprofile *string
|
||||
maxCpu *int
|
||||
vid2server map[string]string //cache for vid locations
|
||||
|
||||
}
|
||||
|
||||
var (
|
||||
b BenchmarkOptions
|
||||
b BenchmarkOptions
|
||||
sharedBytes []byte
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -50,8 +54,10 @@ func init() {
|
|||
b.read = cmdBenchmark.Flag.Bool("read", true, "enable read")
|
||||
b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file")
|
||||
b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection")
|
||||
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "write cpu profile to file")
|
||||
b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file")
|
||||
b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
|
||||
b.vid2server = make(map[string]string)
|
||||
sharedBytes = make([]byte, 1024)
|
||||
}
|
||||
|
||||
var cmdBenchmark = &Command{
|
||||
|
@ -87,18 +93,17 @@ var cmdBenchmark = &Command{
|
|||
}
|
||||
|
||||
var (
|
||||
wait sync.WaitGroup
|
||||
writeStats *stats
|
||||
readStats *stats
|
||||
serverLimitChan map[string]chan bool
|
||||
wait sync.WaitGroup
|
||||
writeStats *stats
|
||||
readStats *stats
|
||||
)
|
||||
|
||||
func init() {
|
||||
serverLimitChan = make(map[string]chan bool)
|
||||
}
|
||||
|
||||
func runbenchmark(cmd *Command, args []string) bool {
|
||||
fmt.Printf("This is Seaweed File System version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
|
||||
if *b.maxCpu < 1 {
|
||||
*b.maxCpu = runtime.NumCPU()
|
||||
}
|
||||
runtime.GOMAXPROCS(*b.maxCpu)
|
||||
if *b.cpuprofile != "" {
|
||||
f, err := os.Create(*b.cpuprofile)
|
||||
if err != nil {
|
||||
|
@ -122,12 +127,12 @@ func runbenchmark(cmd *Command, args []string) bool {
|
|||
func bench_write() {
|
||||
fileIdLineChan := make(chan string)
|
||||
finishChan := make(chan bool)
|
||||
writeStats = newStats()
|
||||
writeStats = newStats(*b.concurrency)
|
||||
idChan := make(chan int)
|
||||
wait.Add(*b.concurrency)
|
||||
go writeFileIds(*b.idListFile, fileIdLineChan, finishChan)
|
||||
for i := 0; i < *b.concurrency; i++ {
|
||||
go writeFiles(idChan, fileIdLineChan, writeStats)
|
||||
wait.Add(1)
|
||||
go writeFiles(idChan, fileIdLineChan, &writeStats.localStats[i])
|
||||
}
|
||||
writeStats.start = time.Now()
|
||||
writeStats.total = *b.numberOfFiles
|
||||
|
@ -138,28 +143,30 @@ func bench_write() {
|
|||
close(idChan)
|
||||
wait.Wait()
|
||||
writeStats.end = time.Now()
|
||||
wait.Add(1)
|
||||
wait.Add(2)
|
||||
finishChan <- true
|
||||
finishChan <- true
|
||||
close(finishChan)
|
||||
wait.Wait()
|
||||
close(finishChan)
|
||||
writeStats.printStats()
|
||||
}
|
||||
|
||||
func bench_read() {
|
||||
fileIdLineChan := make(chan string)
|
||||
finishChan := make(chan bool)
|
||||
readStats = newStats()
|
||||
wait.Add(*b.concurrency)
|
||||
readStats = newStats(*b.concurrency)
|
||||
go readFileIds(*b.idListFile, fileIdLineChan)
|
||||
readStats.start = time.Now()
|
||||
readStats.total = *b.numberOfFiles
|
||||
go readStats.checkProgress("Randomly Reading Benchmark", finishChan)
|
||||
for i := 0; i < *b.concurrency; i++ {
|
||||
go readFiles(fileIdLineChan, readStats)
|
||||
wait.Add(1)
|
||||
go readFiles(fileIdLineChan, &readStats.localStats[i])
|
||||
}
|
||||
wait.Wait()
|
||||
wait.Add(1)
|
||||
finishChan <- true
|
||||
wait.Wait()
|
||||
close(finishChan)
|
||||
readStats.end = time.Now()
|
||||
readStats.printStats()
|
||||
|
@ -170,126 +177,102 @@ type delayedFile struct {
|
|||
fp *operation.FilePart
|
||||
}
|
||||
|
||||
func writeFiles(idChan chan int, fileIdLineChan chan string, s *stats) {
|
||||
func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
|
||||
defer wait.Done()
|
||||
delayedDeleteChan := make(chan *delayedFile, 100)
|
||||
var waitForDeletions sync.WaitGroup
|
||||
for i := 0; i < 7; i++ {
|
||||
waitForDeletions.Add(1)
|
||||
go func() {
|
||||
waitForDeletions.Add(1)
|
||||
defer waitForDeletions.Done()
|
||||
for df := range delayedDeleteChan {
|
||||
if df == nil {
|
||||
break
|
||||
}
|
||||
if df.enterTime.After(time.Now()) {
|
||||
time.Sleep(df.enterTime.Sub(time.Now()))
|
||||
}
|
||||
fp := df.fp
|
||||
serverLimitChan[fp.Server] <- true
|
||||
if e := util.Delete("http://" + fp.Server + "/" + fp.Fid); e == nil {
|
||||
if e := util.Delete("http://" + df.fp.Server + "/" + df.fp.Fid); e == nil {
|
||||
s.completed++
|
||||
} else {
|
||||
s.failed++
|
||||
}
|
||||
<-serverLimitChan[fp.Server]
|
||||
}
|
||||
waitForDeletions.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
for {
|
||||
if id, ok := <-idChan; ok {
|
||||
start := time.Now()
|
||||
fileSize := int64(*b.fileSize + rand.Intn(64))
|
||||
fp := &operation.FilePart{Reader: &FakeReader{id: uint64(id), size: fileSize}, FileSize: fileSize}
|
||||
if assignResult, err := operation.Assign(*b.server, 1, "", *b.collection, ""); err == nil {
|
||||
fp.Server, fp.Fid, fp.Collection = assignResult.PublicUrl, assignResult.Fid, *b.collection
|
||||
if _, ok := serverLimitChan[fp.Server]; !ok {
|
||||
serverLimitChan[fp.Server] = make(chan bool, 7)
|
||||
}
|
||||
serverLimitChan[fp.Server] <- true
|
||||
if _, err := fp.Upload(0, *b.server); err == nil {
|
||||
if rand.Intn(100) < *b.deletePercentage {
|
||||
s.total++
|
||||
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
|
||||
} else {
|
||||
fileIdLineChan <- fp.Fid
|
||||
}
|
||||
s.completed++
|
||||
s.transferred += fileSize
|
||||
for id := range idChan {
|
||||
start := time.Now()
|
||||
fileSize := int64(*b.fileSize + rand.Intn(64))
|
||||
fp := &operation.FilePart{Reader: &FakeReader{id: uint64(id), size: fileSize}, FileSize: fileSize}
|
||||
if assignResult, err := operation.Assign(*b.server, 1, "", *b.collection, ""); err == nil {
|
||||
fp.Server, fp.Fid, fp.Collection = assignResult.PublicUrl, assignResult.Fid, *b.collection
|
||||
if _, err := fp.Upload(0, *b.server); err == nil {
|
||||
if rand.Intn(100) < *b.deletePercentage {
|
||||
s.total++
|
||||
delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}
|
||||
} else {
|
||||
s.failed++
|
||||
}
|
||||
writeStats.addSample(time.Now().Sub(start))
|
||||
<-serverLimitChan[fp.Server]
|
||||
if *cmdBenchmark.IsDebug {
|
||||
fmt.Printf("writing %d file %s\n", id, fp.Fid)
|
||||
fileIdLineChan <- fp.Fid
|
||||
}
|
||||
s.completed++
|
||||
s.transferred += fileSize
|
||||
} else {
|
||||
s.failed++
|
||||
println("writing file error:", err.Error())
|
||||
fmt.Printf("Failed to write with error:%v\n", err)
|
||||
}
|
||||
writeStats.addSample(time.Now().Sub(start))
|
||||
if *cmdBenchmark.IsDebug {
|
||||
fmt.Printf("writing %d file %s\n", id, fp.Fid)
|
||||
}
|
||||
} else {
|
||||
break
|
||||
s.failed++
|
||||
println("writing file error:", err.Error())
|
||||
}
|
||||
}
|
||||
close(delayedDeleteChan)
|
||||
waitForDeletions.Wait()
|
||||
wait.Done()
|
||||
}
|
||||
|
||||
func readFiles(fileIdLineChan chan string, s *stats) {
|
||||
serverLimitChan := make(map[string]chan bool)
|
||||
func readFiles(fileIdLineChan chan string, s *stat) {
|
||||
defer wait.Done()
|
||||
masterLimitChan := make(chan bool, 1)
|
||||
for {
|
||||
if fid, ok := <-fileIdLineChan; ok {
|
||||
if len(fid) == 0 {
|
||||
continue
|
||||
}
|
||||
if fid[0] == '#' {
|
||||
continue
|
||||
}
|
||||
if *cmdBenchmark.IsDebug {
|
||||
fmt.Printf("reading file %s\n", fid)
|
||||
}
|
||||
parts := strings.SplitN(fid, ",", 2)
|
||||
vid := parts[0]
|
||||
start := time.Now()
|
||||
if server, ok := b.vid2server[vid]; !ok {
|
||||
masterLimitChan <- true
|
||||
if _, now_ok := b.vid2server[vid]; !now_ok {
|
||||
if ret, err := operation.Lookup(*b.server, vid); err == nil {
|
||||
if len(ret.Locations) > 0 {
|
||||
server = ret.Locations[0].PublicUrl
|
||||
b.vid2server[vid] = server
|
||||
}
|
||||
for fid := range fileIdLineChan {
|
||||
if len(fid) == 0 {
|
||||
continue
|
||||
}
|
||||
if fid[0] == '#' {
|
||||
continue
|
||||
}
|
||||
if *cmdBenchmark.IsDebug {
|
||||
fmt.Printf("reading file %s\n", fid)
|
||||
}
|
||||
parts := strings.SplitN(fid, ",", 2)
|
||||
vid := parts[0]
|
||||
start := time.Now()
|
||||
if server, ok := b.vid2server[vid]; !ok {
|
||||
masterLimitChan <- true
|
||||
if _, now_ok := b.vid2server[vid]; !now_ok {
|
||||
if ret, err := operation.Lookup(*b.server, vid); err == nil {
|
||||
if len(ret.Locations) > 0 {
|
||||
server = ret.Locations[0].PublicUrl
|
||||
b.vid2server[vid] = server
|
||||
}
|
||||
}
|
||||
<-masterLimitChan
|
||||
}
|
||||
if server, ok := b.vid2server[vid]; ok {
|
||||
if _, ok := serverLimitChan[server]; !ok {
|
||||
serverLimitChan[server] = make(chan bool, 7)
|
||||
}
|
||||
serverLimitChan[server] <- true
|
||||
url := "http://" + server + "/" + fid
|
||||
if bytesRead, err := util.Get(url); err == nil {
|
||||
s.completed++
|
||||
s.transferred += int64(len(bytesRead))
|
||||
readStats.addSample(time.Now().Sub(start))
|
||||
} else {
|
||||
s.failed++
|
||||
println("!!!! Failed to read from ", url, " !!!!!")
|
||||
}
|
||||
<-serverLimitChan[server]
|
||||
<-masterLimitChan
|
||||
}
|
||||
if server, ok := b.vid2server[vid]; ok {
|
||||
url := "http://" + server + "/" + fid
|
||||
if bytesRead, err := util.Get(url); err == nil {
|
||||
s.completed++
|
||||
s.transferred += int64(len(bytesRead))
|
||||
readStats.addSample(time.Now().Sub(start))
|
||||
} else {
|
||||
s.failed++
|
||||
println("!!!! volume id ", vid, " location not found!!!!!")
|
||||
fmt.Printf("Failed to read %s error:%v\n", url, err)
|
||||
}
|
||||
} else {
|
||||
break
|
||||
s.failed++
|
||||
println("!!!! volume id ", vid, " location not found!!!!!")
|
||||
}
|
||||
}
|
||||
wait.Done()
|
||||
}
|
||||
|
||||
func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
|
||||
|
@ -353,20 +336,28 @@ const (
|
|||
|
||||
// An efficient statics collecting and rendering
|
||||
type stats struct {
|
||||
data []int
|
||||
overflow []int
|
||||
data []int
|
||||
overflow []int
|
||||
localStats []stat
|
||||
start time.Time
|
||||
end time.Time
|
||||
total int
|
||||
}
|
||||
type stat struct {
|
||||
completed int
|
||||
failed int
|
||||
total int
|
||||
transferred int64
|
||||
start time.Time
|
||||
end time.Time
|
||||
}
|
||||
|
||||
var percentages = []int{50, 66, 75, 80, 90, 95, 98, 99, 100}
|
||||
|
||||
func newStats() *stats {
|
||||
return &stats{data: make([]int, benchResolution), overflow: make([]int, 0)}
|
||||
func newStats(n int) *stats {
|
||||
return &stats{
|
||||
data: make([]int, benchResolution),
|
||||
overflow: make([]int, 0),
|
||||
localStats: make([]stat, n),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stats) addSample(d time.Duration) {
|
||||
|
@ -387,28 +378,41 @@ func (s *stats) checkProgress(testName string, finishChan chan bool) {
|
|||
for {
|
||||
select {
|
||||
case <-finishChan:
|
||||
wait.Done()
|
||||
return
|
||||
case t := <-ticker:
|
||||
completed, transferred, taken := s.completed-lastCompleted, s.transferred-lastTransferred, t.Sub(lastTime)
|
||||
completed, transferred, taken, total := 0, int64(0), t.Sub(lastTime), s.total
|
||||
for _, localStat := range s.localStats {
|
||||
completed += localStat.completed
|
||||
transferred += localStat.transferred
|
||||
total += localStat.total
|
||||
}
|
||||
fmt.Printf("Completed %d of %d requests, %3.1f%% %3.1f/s %3.1fMB/s\n",
|
||||
s.completed, s.total, float64(s.completed)*100/float64(s.total),
|
||||
float64(completed)*float64(int64(time.Second))/float64(int64(taken)),
|
||||
float64(transferred)*float64(int64(time.Second))/float64(int64(taken))/float64(1024*1024),
|
||||
completed, total, float64(completed)*100/float64(total),
|
||||
float64(completed-lastCompleted)*float64(int64(time.Second))/float64(int64(taken)),
|
||||
float64(transferred-lastTransferred)*float64(int64(time.Second))/float64(int64(taken))/float64(1024*1024),
|
||||
)
|
||||
lastCompleted, lastTransferred, lastTime = s.completed, s.transferred, t
|
||||
lastCompleted, lastTransferred, lastTime = completed, transferred, t
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stats) printStats() {
|
||||
completed, failed, transferred, total := 0, 0, int64(0), s.total
|
||||
for _, localStat := range s.localStats {
|
||||
completed += localStat.completed
|
||||
failed += localStat.failed
|
||||
transferred += localStat.transferred
|
||||
total += localStat.total
|
||||
}
|
||||
timeTaken := float64(int64(s.end.Sub(s.start))) / 1000000000
|
||||
fmt.Printf("\nConcurrency Level: %d\n", *b.concurrency)
|
||||
fmt.Printf("Time taken for tests: %.3f seconds\n", timeTaken)
|
||||
fmt.Printf("Complete requests: %d\n", s.completed)
|
||||
fmt.Printf("Failed requests: %d\n", s.failed)
|
||||
fmt.Printf("Total transferred: %d bytes\n", s.transferred)
|
||||
fmt.Printf("Requests per second: %.2f [#/sec]\n", float64(s.completed)/timeTaken)
|
||||
fmt.Printf("Transfer rate: %.2f [Kbytes/sec]\n", float64(s.transferred)/1024/timeTaken)
|
||||
fmt.Printf("Complete requests: %d\n", completed)
|
||||
fmt.Printf("Failed requests: %d\n", failed)
|
||||
fmt.Printf("Total transferred: %d bytes\n", transferred)
|
||||
fmt.Printf("Requests per second: %.2f [#/sec]\n", float64(completed)/timeTaken)
|
||||
fmt.Printf("Transfer rate: %.2f [Kbytes/sec]\n", float64(transferred)/1024/timeTaken)
|
||||
n, sum := 0, 0
|
||||
min, max := 10000000, 0
|
||||
for i := 0; i < len(s.data); i++ {
|
||||
|
@ -496,15 +500,32 @@ func (l *FakeReader) Read(p []byte) (n int, err error) {
|
|||
} else {
|
||||
n = len(p)
|
||||
}
|
||||
for i := 0; i < n-8; i += 8 {
|
||||
for s := uint(0); s < 8; s++ {
|
||||
p[i] = byte(l.id >> (s * 8))
|
||||
if n >= 8 {
|
||||
for i := 0; i < 8; i++ {
|
||||
p[i] = byte(l.id >> uint(i*8))
|
||||
}
|
||||
}
|
||||
l.size -= int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *FakeReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
size := int(l.size)
|
||||
bufferSize := len(sharedBytes)
|
||||
for size > 0 {
|
||||
tempBuffer := sharedBytes
|
||||
if size < bufferSize {
|
||||
tempBuffer = sharedBytes[0:size]
|
||||
}
|
||||
count, e := w.Write(tempBuffer)
|
||||
if e != nil {
|
||||
return int64(size), e
|
||||
}
|
||||
size -= count
|
||||
}
|
||||
return l.size, nil
|
||||
}
|
||||
|
||||
func Readln(r *bufio.Reader) ([]byte, error) {
|
||||
var (
|
||||
isPrefix bool = true
|
||||
|
|
|
@ -12,7 +12,7 @@ func init() {
|
|||
|
||||
var cmdCompact = &Command{
|
||||
UsageLine: "compact -dir=/tmp -volumeId=234",
|
||||
Short: "run weed tool compact on volume file if corrupted",
|
||||
Short: "run weed tool compact on volume file",
|
||||
Long: `Force an compaction to remove deleted files from volume files.
|
||||
The compacted .dat file is stored as .cpd file.
|
||||
The compacted .idx file is stored as .cpx file.
|
||||
|
|
|
@ -1,14 +1,15 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -3,8 +3,6 @@ package main
|
|||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
@ -12,6 +10,9 @@ import (
|
|||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -36,7 +37,7 @@ var cmdExport = &Command{
|
|||
var (
|
||||
exportVolumePath = cmdExport.Flag.String("dir", "/tmp", "input data directory to store volume data files")
|
||||
exportCollection = cmdExport.Flag.String("collection", "", "the volume collection name")
|
||||
exportVolumeId = cmdExport.Flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
|
||||
exportVolumeId = cmdExport.Flag.Int("volumeId", -1, "a volume id. The volume .dat and .idx files should already exist in the dir.")
|
||||
dest = cmdExport.Flag.String("o", "", "output tar file name, must ends with .tar, or just a \"-\" for stdout")
|
||||
format = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename format, default to {{.Mime}}/{{.Id}}:{{.Name}}")
|
||||
tarFh *tar.Writer
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -20,6 +21,11 @@ type FilerOptions struct {
|
|||
collection *string
|
||||
defaultReplicaPlacement *string
|
||||
dir *string
|
||||
redirectOnRead *bool
|
||||
cassandra_server *string
|
||||
cassandra_keyspace *string
|
||||
redis_server *string
|
||||
redis_database *int
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -28,7 +34,12 @@ func init() {
|
|||
f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
|
||||
f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
|
||||
f.dir = cmdFiler.Flag.String("dir", os.TempDir(), "directory to store meta data")
|
||||
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "Default replication type if not specified.")
|
||||
f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified")
|
||||
f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
|
||||
f.cassandra_server = cmdFiler.Flag.String("cassandra.server", "", "host[:port] of the cassandra server")
|
||||
f.cassandra_keyspace = cmdFiler.Flag.String("cassandra.keyspace", "seaweed", "keyspace of the cassandra server")
|
||||
f.redis_server = cmdFiler.Flag.String("redis.server", "", "host:port of the redis server, e.g., 127.0.0.1:6379")
|
||||
f.redis_database = cmdFiler.Flag.Int("redis.database", 0, "the database on the redis server")
|
||||
}
|
||||
|
||||
var cmdFiler = &Command{
|
||||
|
@ -54,12 +65,17 @@ var cmdFiler = &Command{
|
|||
}
|
||||
|
||||
func runFiler(cmd *Command, args []string) bool {
|
||||
|
||||
if err := util.TestFolderWritable(*f.dir); err != nil {
|
||||
glog.Fatalf("Check Meta Folder (-dir) Writable %s : %s", *f.dir, err)
|
||||
}
|
||||
|
||||
r := http.NewServeMux()
|
||||
_, nfs_err := weed_server.NewFilerServer(r, *f.port, *f.master, *f.dir, *f.collection)
|
||||
_, nfs_err := weed_server.NewFilerServer(r, *f.port, *f.master, *f.dir, *f.collection,
|
||||
*f.defaultReplicaPlacement, *f.redirectOnRead,
|
||||
*f.cassandra_server, *f.cassandra_keyspace,
|
||||
*f.redis_server, *f.redis_database,
|
||||
)
|
||||
if nfs_err != nil {
|
||||
glog.Fatalf(nfs_err.Error())
|
||||
}
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -16,7 +17,7 @@ func init() {
|
|||
var cmdFix = &Command{
|
||||
UsageLine: "fix -dir=/tmp -volumeId=234",
|
||||
Short: "run weed tool fix on index file if corrupted",
|
||||
Long: `Fix runs the WeedFS fix command to re-create the index .idx file.
|
||||
Long: `Fix runs the SeeweedFS fix command to re-create the index .idx file.
|
||||
|
||||
`,
|
||||
}
|
||||
|
|
|
@ -1,16 +1,17 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
"github.com/gorilla/mux"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -29,6 +30,7 @@ var cmdMaster = &Command{
|
|||
var (
|
||||
mport = cmdMaster.Flag.Int("port", 9333, "http listen port")
|
||||
masterIp = cmdMaster.Flag.String("ip", "", "master listening ip address, default to listen on all network interfaces")
|
||||
masterBindIp = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
|
||||
mPublicIp = cmdMaster.Flag.String("publicIp", "", "peer accessible <ip>|<server_name>")
|
||||
metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
|
||||
masterPeers = cmdMaster.Flag.String("peers", "", "other master nodes in comma separated ip:port list")
|
||||
|
@ -40,6 +42,7 @@ var (
|
|||
mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
|
||||
garbageThreshold = cmdMaster.Flag.String("garbageThreshold", "0.3", "threshold to vacuum and reclaim spaces")
|
||||
masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
|
||||
masterSecureKey = cmdMaster.Flag.String("secure.key", "", "secret key to check permission")
|
||||
|
||||
masterWhiteList []string
|
||||
)
|
||||
|
@ -58,10 +61,11 @@ func runMaster(cmd *Command, args []string) bool {
|
|||
|
||||
r := mux.NewRouter()
|
||||
ms := weed_server.NewMasterServer(r, *mport, *metaFolder,
|
||||
*volumeSizeLimitMB, *mpulse, *confFile, *defaultReplicaPlacement, *garbageThreshold, masterWhiteList,
|
||||
*volumeSizeLimitMB, *mpulse, *confFile, *defaultReplicaPlacement, *garbageThreshold,
|
||||
masterWhiteList, *masterSecureKey,
|
||||
)
|
||||
|
||||
listeningAddress := *masterIp + ":" + strconv.Itoa(*mport)
|
||||
listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport)
|
||||
|
||||
glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", listeningAddress)
|
||||
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
package main
|
||||
|
||||
import ()
|
||||
|
||||
type MountOptions struct {
|
||||
filer *string
|
||||
dir *string
|
||||
|
|
|
@ -3,15 +3,16 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"bazil.org/fuse"
|
||||
"bazil.org/fuse/fs"
|
||||
"github.com/chrislusf/weed-fs/go/filer"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/storage"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func runMount(cmd *Command, args []string) bool {
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
"github.com/gorilla/mux"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
|
@ -13,6 +9,11 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
type ServerOptions struct {
|
||||
|
@ -55,6 +56,7 @@ var (
|
|||
serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name")
|
||||
serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
|
||||
serverPeers = cmdServer.Flag.String("master.peers", "", "other master nodes in comma separated ip:masterPort list")
|
||||
serverSecureKey = cmdServer.Flag.String("secure.key", "", "secret key to ensure authenticated access")
|
||||
serverGarbageThreshold = cmdServer.Flag.String("garbageThreshold", "0.3", "threshold to vacuum and reclaim spaces")
|
||||
masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
|
||||
masterMetaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
|
||||
|
@ -72,12 +74,18 @@ var (
|
|||
)
|
||||
|
||||
func init() {
|
||||
serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "write cpu profile to file")
|
||||
serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file")
|
||||
filerOptions.master = cmdServer.Flag.String("filer.master", "", "default to current master server")
|
||||
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
|
||||
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
|
||||
filerOptions.dir = cmdServer.Flag.String("filer.dir", "", "directory to store meta data, default to a 'filer' sub directory of what -mdir is specified")
|
||||
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.")
|
||||
filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request")
|
||||
filerOptions.cassandra_server = cmdFiler.Flag.String("filer.cassandra.server", "", "host[:port] of the cassandra server")
|
||||
filerOptions.cassandra_keyspace = cmdFiler.Flag.String("filer.cassandra.keyspace", "seaweed", "keyspace of the cassandra server")
|
||||
filerOptions.redis_server = cmdServer.Flag.String("filer.redis.server", "", "host:port of the redis server, e.g., 127.0.0.1:6379")
|
||||
filerOptions.redis_database = cmdFiler.Flag.Int("filer.redis.database", 0, "the database on the redis server")
|
||||
|
||||
}
|
||||
|
||||
func runServer(cmd *Command, args []string) bool {
|
||||
|
@ -98,6 +106,10 @@ func runServer(cmd *Command, args []string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
if *filerOptions.redirectOnRead {
|
||||
*isStartingFiler = true
|
||||
}
|
||||
|
||||
*filerOptions.master = *serverPublicIp + ":" + strconv.Itoa(*masterPort)
|
||||
|
||||
if *filerOptions.defaultReplicaPlacement == "" {
|
||||
|
@ -149,7 +161,11 @@ func runServer(cmd *Command, args []string) bool {
|
|||
if *isStartingFiler {
|
||||
go func() {
|
||||
r := http.NewServeMux()
|
||||
_, nfs_err := weed_server.NewFilerServer(r, *filerOptions.port, *filerOptions.master, *filerOptions.dir, *filerOptions.collection)
|
||||
_, nfs_err := weed_server.NewFilerServer(r, *filerOptions.port, *filerOptions.master, *filerOptions.dir, *filerOptions.collection,
|
||||
*filerOptions.defaultReplicaPlacement, *filerOptions.redirectOnRead,
|
||||
"", "",
|
||||
"", 0,
|
||||
)
|
||||
if nfs_err != nil {
|
||||
glog.Fatalf(nfs_err.Error())
|
||||
}
|
||||
|
@ -176,7 +192,8 @@ func runServer(cmd *Command, args []string) bool {
|
|||
go func() {
|
||||
r := mux.NewRouter()
|
||||
ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder,
|
||||
*masterVolumeSizeLimitMB, *volumePulse, *masterConfFile, *masterDefaultReplicaPlacement, *serverGarbageThreshold, serverWhiteList,
|
||||
*masterVolumeSizeLimitMB, *volumePulse, *masterConfFile, *masterDefaultReplicaPlacement, *serverGarbageThreshold,
|
||||
serverWhiteList, *serverSecureKey,
|
||||
)
|
||||
|
||||
glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", *serverIp+":"+strconv.Itoa(*masterPort))
|
||||
|
@ -208,8 +225,8 @@ func runServer(cmd *Command, args []string) bool {
|
|||
time.Sleep(100 * time.Millisecond)
|
||||
r := http.NewServeMux()
|
||||
volumeServer := weed_server.NewVolumeServer(r, *serverIp, *volumePort, *serverPublicIp, folders, maxCounts,
|
||||
*serverIp+":"+strconv.Itoa(*masterPort), *volumePulse, *serverDataCenter, *serverRack, serverWhiteList,
|
||||
*volumeFixJpgOrientation,
|
||||
*serverIp+":"+strconv.Itoa(*masterPort), *volumePulse, *serverDataCenter, *serverRack,
|
||||
serverWhiteList, *volumeFixJpgOrientation,
|
||||
)
|
||||
|
||||
glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "at", *serverIp+":"+strconv.Itoa(*volumePort))
|
||||
|
|
|
@ -2,9 +2,10 @@ package main
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -2,7 +2,5 @@
|
|||
|
||||
package main
|
||||
|
||||
import ()
|
||||
|
||||
func OnInterrupt(fn func()) {
|
||||
}
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/operation"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
)
|
||||
|
||||
var cmdVersion = &Command{
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/weed-fs/go/glog"
|
||||
"github.com/chrislusf/weed-fs/go/util"
|
||||
"github.com/chrislusf/weed-fs/go/weed/weed_server"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -26,11 +27,12 @@ var cmdVolume = &Command{
|
|||
|
||||
var (
|
||||
vport = cmdVolume.Flag.Int("port", 8080, "http listen port")
|
||||
volumeSecurePort = cmdVolume.Flag.Int("port.secure", 8443, "https listen port, active when SSL certs are specified. Not ready yet.")
|
||||
volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
|
||||
maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...")
|
||||
ip = cmdVolume.Flag.String("ip", "", "ip or server name")
|
||||
publicIp = cmdVolume.Flag.String("publicIp", "", "Publicly accessible <ip|server_name>")
|
||||
bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
|
||||
volumeBindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
|
||||
masterNode = cmdVolume.Flag.String("mserver", "localhost:9333", "master server location")
|
||||
vpulse = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
|
||||
vTimeout = cmdVolume.Flag.Int("idleTimeout", 10, "connection idle seconds")
|
||||
|
@ -69,6 +71,7 @@ func runVolume(cmd *Command, args []string) bool {
|
|||
|
||||
if *publicIp == "" {
|
||||
if *ip == "" {
|
||||
*ip = "127.0.0.1"
|
||||
*publicIp = "localhost"
|
||||
} else {
|
||||
*publicIp = *ip
|
||||
|
@ -81,11 +84,12 @@ func runVolume(cmd *Command, args []string) bool {
|
|||
r := http.NewServeMux()
|
||||
|
||||
volumeServer := weed_server.NewVolumeServer(r, *ip, *vport, *publicIp, folders, maxCounts,
|
||||
*masterNode, *vpulse, *dataCenter, *rack, volumeWhiteList,
|
||||
*masterNode, *vpulse, *dataCenter, *rack,
|
||||
volumeWhiteList,
|
||||
*fixJpgOrientation,
|
||||
)
|
||||
|
||||
listeningAddress := *bindIp + ":" + strconv.Itoa(*vport)
|
||||
listeningAddress := *volumeBindIp + ":" + strconv.Itoa(*vport)
|
||||
|
||||
glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "at", listeningAddress)
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue