Merge pull request #81 from chrislusf/master

sync
hilimd 2021-09-13 10:34:33 +08:00 committed by GitHub
commit 1de733fda5
214 changed files with 9968 additions and 5237 deletions

62
.github/workflows/binaries_dev.yml vendored Normal file

@ -0,0 +1,62 @@
name: "go: build dev binaries"
on:
push:
branches: [ master ]
jobs:
build-latest-docker-image:
runs-on: [ubuntu-latest]
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@v3
with:
images: |
chrislusf/seaweedfs
ghcr.io/chrislusf/seaweedfs
tags: |
type=raw,value=latest
labels: |
org.opencontainers.image.title=seaweedfs
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
buildkitd-flags: "--debug"
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Login to GHCR
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USERNAME }}
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
uses: docker/build-push-action@v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile
platforms: linux/amd64, linux/arm, linux/arm64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

63
.github/workflows/binaries_release.yml vendored Normal file

@ -0,0 +1,63 @@
# This is a basic workflow to help you get started with Actions
name: "go: build versioned binaries"
on:
push:
tags:
- '*'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
build-release-binaries:
runs-on: ubuntu-latest
strategy:
matrix:
goos: [linux, windows, darwin, freebsd]
goarch: [amd64, arm, arm64]
exclude:
- goarch: arm
goos: darwin
- goarch: 386
goos: darwin
- goarch: arm
goos: windows
- goarch: arm64
goos: windows
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@v1.20
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@v1.20
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"

View file

@ -1,22 +0,0 @@
name: Cleanup
on:
push:
branches: [ master ]
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Delete old release assets
uses: mknejp/delete-release-assets@v1
with:
token: ${{ github.token }}
tag: dev
fail-if-no-assets: false
assets: |
weed-*

63
.github/workflows/container_dev.yml vendored Normal file

@ -0,0 +1,63 @@
name: "docker: build dev containers"
on:
push:
branches: [ master ]
workflow_dispatch: []
jobs:
build-dev-containers:
runs-on: [ubuntu-latest]
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Docker meta
id: docker_meta
uses: docker/metadata-action@v3
with:
images: |
chrislusf/seaweedfs
ghcr.io/chrislusf/seaweedfs
tags: |
type=raw,value=dev
labels: |
org.opencontainers.image.title=seaweedfs
org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
buildkitd-flags: "--debug"
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Login to GHCR
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USERNAME }}
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
uses: docker/build-push-action@v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

View file

@ -1,66 +1,15 @@
name: Build Latest Containers
name: "docker: build latest container"
on:
push:
branches:
- master
branches: [ master ]
workflow_dispatch: []
jobs:
build-latest:
runs-on: [ubuntu-latest]
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v2
with:
images: |
chrislusf/seaweedfs
ghcr.io/chrislusf/seaweedfs
tags: |
type=raw,value=latest
labels: |
org.opencontainers.image.title=seaweedfs
org.opencontainers.image.vendor=Chris Lu
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
buildkitd-flags: "--debug"
-
name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Login to GHCR
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ secrets.GHCR_USERNAME }}
password: ${{ secrets.GHCR_TOKEN }}
-
name: Build
uses: docker/build-push-action@v2
with:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile
platforms: linux/amd64
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}
build-dev:
build-latest-container:
runs-on: [ubuntu-latest]
steps:
-
name: Checkout
@ -68,7 +17,7 @@ jobs:
-
name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v2
uses: docker/metadata-action@v3
with:
images: |
chrislusf/seaweedfs
@ -109,6 +58,6 @@ jobs:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
platforms: linux/amd64
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

View file

@ -1,4 +1,5 @@
name: Build Release Containers
name: "docker: build release containers"
on:
push:
tags:
@ -6,8 +7,9 @@ on:
workflow_dispatch: []
jobs:
build-default:
build-default-release-container:
runs-on: [ubuntu-latest]
steps:
-
name: Checkout
@ -15,7 +17,7 @@ jobs:
-
name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v2
uses: docker/metadata-action@v3
with:
images: |
chrislusf/seaweedfs
@ -58,11 +60,12 @@ jobs:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build
platforms: linux/amd64
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}
build-large:
build-large-release-container:
runs-on: [ubuntu-latest]
steps:
-
name: Checkout
@ -70,7 +73,7 @@ jobs:
-
name: Docker meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v2
uses: docker/metadata-action@v3
with:
images: |
chrislusf/seaweedfs
@ -113,6 +116,6 @@ jobs:
context: ./docker
push: ${{ github.event_name != 'pull_request' }}
file: ./docker/Dockerfile.go_build_large
platforms: linux/amd64
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
tags: ${{ steps.docker_meta.outputs.tags }}
labels: ${{ steps.docker_meta.outputs.labels }}

View file

@ -1,4 +1,4 @@
name: Go
name: "go: build binary"
on:
push:
@ -6,6 +6,10 @@ on:
pull_request:
branches: [ master ]
concurrency:
group: ${{ github.head_ref }}/go
cancel-in-progress: true
jobs:
build:

View file

@ -1,66 +0,0 @@
name: Release
on:
push:
branches: [ master ]
jobs:
build:
name: Build
runs-on: ubuntu-latest
strategy:
matrix:
goos: [linux, windows, darwin, freebsd ]
goarch: [amd64, arm]
exclude:
- goarch: arm
goos: darwin
- goarch: arm
goos: windows
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Wait for the deletion
uses: jakejarvis/wait-action@master
with:
time: '30s'
- name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.17
with:
goversion: 1.16
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed-large-disk
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries
uses: wangyoucao577/go-release-action@v1.17
with:
goversion: 1.16
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
binary_name: weed
asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

View file

@ -1,46 +0,0 @@
sudo: false
language: go
go:
- 1.16.x
before_install:
- export PATH=/home/travis/gopath/bin:$PATH
install:
- export CGO_ENABLED="0"
- go env
script:
- env GO111MODULE=on go test ./weed/...
before_deploy:
- make release
deploy:
provider: releases
skip_cleanup: true
api_key:
secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI=
file:
- build/linux_arm.tar.gz
- build/linux_arm64.tar.gz
- build/linux_386.tar.gz
- build/linux_amd64.tar.gz
- build/linux_amd64_large_disk.tar.gz
- build/darwin_amd64.tar.gz
- build/darwin_amd64_large_disk.tar.gz
- build/windows_386.zip
- build/windows_amd64.zip
- build/windows_amd64_large_disk.zip
- build/freebsd_arm.tar.gz
- build/freebsd_amd64.tar.gz
- build/freebsd_386.tar.gz
- build/netbsd_arm.tar.gz
- build/netbsd_amd64.tar.gz
- build/netbsd_386.tar.gz
- build/openbsd_arm.tar.gz
- build/openbsd_amd64.tar.gz
- build/openbsd_386.tar.gz
on:
tags: true
repo: chrislusf/seaweedfs
go: 1.16.x

144
Makefile

@ -1,144 +0,0 @@
BINARY = weed/weed
package = github.com/chrislusf/seaweedfs/weed
GO_FLAGS = #-v
SOURCE_DIR = ./weed/
appname := weed
sources := $(wildcard *.go)
COMMIT ?= $(shell git rev-parse --short HEAD)
LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT}
build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3)
zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3)
build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR)
tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3)
zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3)
all: build
.PHONY : clean deps build linux release windows_build darwin_build linux_build bsd_build clean
clean:
go clean -i $(GO_FLAGS) $(SOURCE_DIR)
rm -f $(BINARY)
rm -rf build/
deps:
go get $(GO_FLAGS) -d $(SOURCE_DIR)
rm -rf /home/travis/gopath/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace
rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace
build: deps
go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR)
install: deps
go install $(GO_FLAGS) -ldflags "$(LDFLAGS)" $(SOURCE_DIR)
linux: deps
mkdir -p linux
GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR)
release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_arm64_build 5_byte_darwin_build 5_byte_windows_build
##### LINUX BUILDS #####
5_byte_linux_build:
$(call build_large,linux,amd64,)
$(call tar_large,linux,amd64)
5_byte_darwin_build:
$(call build_large,darwin,amd64,)
$(call tar_large,darwin,amd64)
5_byte_windows_build:
$(call build_large,windows,amd64,.exe)
$(call zip_large,windows,amd64,.exe)
5_byte_arm_build: $(sources)
$(call build_large,linux,arm,)
$(call tar_large,linux,arm)
5_byte_arm64_build: $(sources)
$(call build_large,linux,arm64,)
$(call tar_large,linux,arm64)
linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz
build/linux_386.tar.gz: $(sources)
$(call build,linux,386,)
$(call tar,linux,386)
build/linux_amd64.tar.gz: $(sources)
$(call build,linux,amd64,)
$(call tar,linux,amd64)
build/linux_arm.tar.gz: $(sources)
$(call build,linux,arm,)
$(call tar,linux,arm)
build/linux_arm64.tar.gz: $(sources)
$(call build,linux,arm64,)
$(call tar,linux,arm64)
##### DARWIN (MAC) BUILDS #####
darwin_build: build/darwin_amd64.tar.gz
build/darwin_amd64.tar.gz: $(sources)
$(call build,darwin,amd64,)
$(call tar,darwin,amd64)
##### WINDOWS BUILDS #####
windows_build: build/windows_386.zip build/windows_amd64.zip
build/windows_386.zip: $(sources)
$(call build,windows,386,.exe)
$(call zip,windows,386,.exe)
build/windows_amd64.zip: $(sources)
$(call build,windows,amd64,.exe)
$(call zip,windows,amd64,.exe)
##### BSD BUILDS #####
bsd_build: build/freebsd_arm.tar.gz build/freebsd_386.tar.gz build/freebsd_amd64.tar.gz \
build/netbsd_arm.tar.gz build/netbsd_386.tar.gz build/netbsd_amd64.tar.gz \
build/openbsd_arm.tar.gz build/openbsd_386.tar.gz build/openbsd_amd64.tar.gz
build/freebsd_386.tar.gz: $(sources)
$(call build,freebsd,386,)
$(call tar,freebsd,386)
build/freebsd_amd64.tar.gz: $(sources)
$(call build,freebsd,amd64,)
$(call tar,freebsd,amd64)
build/freebsd_arm.tar.gz: $(sources)
$(call build,freebsd,arm,)
$(call tar,freebsd,arm)
build/netbsd_386.tar.gz: $(sources)
$(call build,netbsd,386,)
$(call tar,netbsd,386)
build/netbsd_amd64.tar.gz: $(sources)
$(call build,netbsd,amd64,)
$(call tar,netbsd,amd64)
build/netbsd_arm.tar.gz: $(sources)
$(call build,netbsd,arm,)
$(call tar,netbsd,arm)
build/openbsd_386.tar.gz: $(sources)
$(call build,openbsd,386,)
$(call tar,openbsd,386)
build/openbsd_amd64.tar.gz: $(sources)
$(call build,openbsd,amd64,)
$(call tar,openbsd,amd64)
build/openbsd_arm.tar.gz: $(sources)
$(call build,openbsd,arm,)
$(call tar,openbsd,arm)

View file

@ -3,7 +3,7 @@
[![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY)
[![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs)
[![Build Status](https://travis-ci.com/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.com/chrislusf/seaweedfs)
[![Build Status](https://img.shields.io/github/workflow/status/chrislusf/seaweedfs/Go)](https://github.com/chrislusf/seaweedfs/actions/workflows/go.yml)
[![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed)
[![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki)
[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/)
@ -146,7 +146,7 @@ Faster and Cheaper than direct cloud storage!
* [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices.
* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
* [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
* [Cloud Data Accelerator][RemoteStorage] transparently read and write existing cloud data at local speed with content cache, metadata cache, and asynchronous write back.
* [Cloud Drive][CloudDrive] mounts cloud storage to local cluster, cached for fast read and write with asynchronous write back.
## Kubernetes ##
* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)
@ -169,7 +169,7 @@ Faster and Cheaper than direct cloud storage!
[ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization
[FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication
[KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store
[RemoteStorage]: https://github.com/chrislusf/seaweedfs/wiki/Remote-Storage-Architecture
[CloudDrive]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Drive-Architecture
[Back to TOC](#table-of-contents)
@ -590,6 +590,7 @@ The text of this page is available for modification and reuse under the terms of
[Back to TOC](#table-of-contents)
## Stargazers over time ##
## Stargazers over time
[![Stargazers over time](https://starchart.cc/chrislusf/seaweedfs.svg)](https://starchart.cc/chrislusf/seaweedfs)
[![Stargazers over time](https://starcharts.herokuapp.com/chrislusf/seaweedfs.svg)](https://starcharts.herokuapp.com/chrislusf/seaweedfs)

View file

@ -8,7 +8,9 @@ RUN \
elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \
elif [ $(uname -m) == "armv6l" ]; then echo "arm"; \
elif [ $(uname -m) == "s390x" ]; then echo "s390x"; \
elif [ $(uname -m) == "ppc64le" ]; then echo "ppc64le"; fi;) && \
echo "Building for $ARCH" 1>&2 && \
SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
@ -16,7 +18,7 @@ RUN \
# Install SeaweedFS and Supercronic ( for cron job mode )
apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
apk add fuse && \
wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz" | head -n 1) && \
tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
curl -fsSLO "$SUPERCRONIC_URL" && \
echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \

View file

@ -0,0 +1,43 @@
FROM gcc:11 as builder
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
ARG BRANCH=${BRANCH:-master}
RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
&& apt-get update \
&& apt-get install -y golang-src \
&& export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
&& CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}" -compiler=gccgo -tags gccgo,noasm
FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse # for weed mount
# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

View file

@ -1,4 +1,4 @@
FROM amd64/golang:1.16-alpine as builder
FROM amd64/golang:1.17-alpine as builder
RUN apk add git g++ fuse
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs

View file

@ -1,4 +1,4 @@
FROM amd64/golang:1.16-alpine as builder
FROM amd64/golang:1.17-alpine as builder
RUN apk add git g++ fuse
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs

158
go.mod

@ -1,22 +1,34 @@
module github.com/chrislusf/seaweedfs
go 1.16
go 1.17
require (
cloud.google.com/go v0.94.1 // indirect
cloud.google.com/go/pubsub v1.3.1
cloud.google.com/go/storage v1.9.0
github.com/Azure/azure-storage-blob-go v0.9.0
cloud.google.com/go/storage v1.16.1
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 // indirect
github.com/OneOfOne/xxhash v1.2.2
github.com/Shopify/sarama v1.23.1
github.com/aws/aws-sdk-go v1.34.30
github.com/aws/aws-sdk-go v1.35.3
github.com/beorn7/perks v1.0.1 // indirect
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
github.com/bwmarrin/snowflake v0.3.0
github.com/cespare/xxhash v1.1.0
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/chrislusf/raft v1.0.7
github.com/colinmarc/hdfs/v2 v2.2.0
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/disintegration/imaging v1.6.2
github.com/dustin/go-humanize v1.0.0
github.com/eapache/go-resiliency v1.2.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
@ -24,77 +36,167 @@ require (
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/fclairamb/ftpserverlib v0.8.0
github.com/frankban/quicktest v1.7.2 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-errors/errors v1.1.1 // indirect
github.com/go-redis/redis/v8 v8.4.4
github.com/go-sql-driver/mysql v1.5.0
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-zookeeper/zk v1.0.2 // indirect
github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
github.com/golang-jwt/jwt v3.2.1+incompatible
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
github.com/golang/protobuf v1.4.3
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/golang/protobuf v1.5.2
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.0.0
github.com/google/uuid v1.1.1
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/uuid v1.3.0
github.com/google/wire v0.4.0 // indirect
github.com/googleapis/gax-go v2.0.2+incompatible // indirect
github.com/googleapis/gax-go/v2 v2.1.0 // indirect
github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.1
github.com/jinzhu/copier v0.2.8
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.11
github.com/karlseguin/ccache/v2 v2.0.7
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.10.9 // indirect
github.com/klauspost/cpuid v1.2.1 // indirect
github.com/klauspost/crc32 v1.2.0
github.com/klauspost/reedsolomon v1.9.2
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.0
github.com/magiconair/properties v1.8.1 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/mailru/easyjson v0.7.1 // indirect
github.com/mattn/go-ieproxy v0.0.1 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nats-io/jwt v1.0.1 // indirect
github.com/nats-io/nats.go v1.10.0 // indirect
github.com/nats-io/nkeys v0.2.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/olivere/elastic/v7 v7.0.19
github.com/pelletier/go-toml v1.7.0 // indirect
github.com/peterh/liner v1.1.0
github.com/pierrec/lz4 v2.2.7+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/posener/complete v1.2.3
github.com/pquerna/cachecontrol v0.1.0
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
github.com/seaweedfs/fuse v1.1.8
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/seaweedfs/fuse v1.2.0
github.com/seaweedfs/goexif v1.0.2
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/sirupsen/logrus v1.6.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.3.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.4.0
github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71
github.com/stretchr/testify v1.6.1
github.com/syndtr/goleveldb v1.0.0
github.com/stretchr/testify v1.7.0
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
github.com/tidwall/gjson v1.8.1
github.com/tidwall/match v1.0.3
github.com/tidwall/pretty v1.1.0 // indirect
github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210824090536-16d902a3c7e5
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
github.com/valyala/bytebufferpool v1.0.0
github.com/viant/assertly v0.5.4 // indirect
github.com/viant/ptrie v0.3.0
github.com/viant/toolbox v0.33.2 // indirect
github.com/willf/bitset v1.1.10 // indirect
github.com/willf/bloom v2.0.3+incompatible
go.etcd.io/etcd v3.3.15+incompatible
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.0.2 // indirect
github.com/xdg-go/stringprep v1.0.2 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.etcd.io/etcd v3.3.25+incompatible
go.mongodb.org/mongo-driver v1.7.0
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel v0.15.0 // indirect
gocloud.dev v0.20.0
gocloud.dev/pubsub/natspubsub v0.20.0
gocloud.dev/pubsub/rabbitpubsub v0.20.0
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78
google.golang.org/api v0.26.0
google.golang.org/grpc v1.29.1
google.golang.org/protobuf v1.26.0-rc.1
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect
golang.org/x/image v0.0.0-20200119044424-58c23975cae1
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.5
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/api v0.56.0
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 // indirect
google.golang.org/grpc v1.40.0
google.golang.org/protobuf v1.27.1
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
modernc.org/b v1.0.0 // indirect
modernc.org/cc/v3 v3.33.5 // indirect
modernc.org/ccgo/v3 v3.9.4 // indirect
modernc.org/libc v1.9.5 // indirect
modernc.org/mathutil v1.2.2 // indirect
modernc.org/memory v1.0.4 // indirect
modernc.org/opt v0.1.1 // indirect
modernc.org/sqlite v1.10.7
modernc.org/strutil v1.1.0 // indirect
modernc.org/token v1.0.0 // indirect
)
require (
github.com/coreos/etcd v3.3.10+incompatible // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
github.com/d4l3k/messagediff v1.2.1 // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.2 // indirect
github.com/mattn/go-runewidth v0.0.7 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd // indirect
github.com/pingcap/kvproto v0.0.0-20210806074406-317f69fb54b4 // indirect
github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 // indirect
github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d // indirect
github.com/twmb/murmur3 v1.1.3 // indirect
go.etcd.io/etcd/api/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/v3 v3.5.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
// replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
// replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft
replace go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547

617
go.sum

File diff suppressed because it is too large.

View file

@ -1,5 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "2.62"
version: "2.62"
appVersion: "2.67"
version: "2.67"

View file

@ -4,7 +4,6 @@ import com.google.common.base.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
@ -59,15 +58,24 @@ public class FilerClient extends FilerGrpcClient {
public static FilerProto.Entry afterEntryDeserialization(FilerProto.Entry entry) {
if (entry.getChunksList().size() <= 0) {
if (entry.getContent().isEmpty()) {
return entry;
} else {
if (entry.getAttributes().getFileSize() <= 0) {
FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
FilerProto.FuseAttributes.Builder attrBuilder = entry.getAttributes().toBuilder();
attrBuilder.setFileSize(entry.getContent().size());
entryBuilder.setAttributes(attrBuilder);
return entryBuilder.build();
}
}
String fileId = entry.getChunks(0).getFileId();
if (fileId != null && fileId.length() != 0) {
return entry;
}
} else {
FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
entryBuilder.clearChunks();
long fileSize = 0;
for (FilerProto.FileChunk chunk : entry.getChunksList()) {
fileSize = Math.max(fileSize, chunk.getOffset()+chunk.getSize());
FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder();
chunkBuilder.setFileId(toFileId(chunk.getFid()));
String sourceFileId = toFileId(chunk.getSourceFid());
@ -76,8 +84,14 @@ public class FilerClient extends FilerGrpcClient {
}
entryBuilder.addChunks(chunkBuilder);
}
if (entry.getAttributes().getFileSize() <= 0) {
FilerProto.FuseAttributes.Builder attrBuilder = entry.getAttributes().toBuilder();
attrBuilder.setFileSize(fileSize);
entryBuilder.setAttributes(attrBuilder);
}
return entryBuilder.build();
}
}
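When file_size is missing, the deserialization above reconstructs it as the largest offset + size seen across the chunks. Below is a self-contained Go sketch of the same rule; the chunk struct is illustrative, not the actual filer_pb.FileChunk type:

package main

import "fmt"

// chunk is an illustrative stand-in, not the actual filer_pb.FileChunk type.
type chunk struct {
	Offset int64
	Size   int64
}

// fileSizeFromChunks mirrors the fallback above: the file size is the largest
// offset+size observed across all chunks.
func fileSizeFromChunks(chunks []chunk) int64 {
	var size int64
	for _, c := range chunks {
		if end := c.Offset + c.Size; end > size {
			size = end
		}
	}
	return size
}

func main() {
	fmt.Println(fileSizeFromChunks([]chunk{{0, 1024}, {1024, 512}})) // prints 1536
}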
public boolean mkdirs(String path, int mode) {
String currentUser = System.getProperty("user.name");
@ -93,9 +107,9 @@ public class FilerClient extends FilerGrpcClient {
if ("/".equals(path)) {
return true;
}
File pathFile = new File(path);
String parent = pathFile.getParent().replace('\\','/');
String name = pathFile.getName();
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String name = dirAndName[1];
mkdirs(parent, mode, uid, gid, userName, groupNames);
@ -114,35 +128,32 @@ public class FilerClient extends FilerGrpcClient {
public boolean mv(String oldPath, String newPath) {
File oldPathFile = new File(oldPath);
String oldParent = oldPathFile.getParent().replace('\\','/');
String oldName = oldPathFile.getName();
String[] oldDirAndName = SeaweedUtil.toDirAndName(oldPath);
String oldParent = oldDirAndName[0];
String oldName = oldDirAndName[1];
File newPathFile = new File(newPath);
String newParent = newPathFile.getParent().replace('\\','/');
String newName = newPathFile.getName();
String[] newDirAndName = SeaweedUtil.toDirAndName(newPath);
String newParent = newDirAndName[0];
String newName = newDirAndName[1];
return atomicRenameEntry(oldParent, oldName, newParent, newName);
}
public boolean exists(String path){
File pathFile = new File(path);
String parent = pathFile.getParent();
String entryName = pathFile.getName();
if(parent == null) {
parent = path;
entryName ="";
}
return lookupEntry(parent, entryName) != null;
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String entryName = dirAndName[1];
return lookupEntry(parent, entryName) != null;
}
public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {
File pathFile = new File(path);
String parent = pathFile.getParent().replace('\\','/');
String name = pathFile.getName();
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String name = dirAndName[1];
return deleteEntry(
parent,
@ -153,17 +164,19 @@ public class FilerClient extends FilerGrpcClient {
}
public boolean touch(String path, int mode) {
String currentUser = System.getProperty("user.name");
long now = System.currentTimeMillis() / 1000L;
return touch(path, now, mode, 0, 0, currentUser, new String[]{});
}
public boolean touch(String path, long modifiedTimeSecond, int mode, int uid, int gid, String userName, String[] groupNames) {
File pathFile = new File(path);
String parent = pathFile.getParent().replace('\\','/');
String name = pathFile.getName();
String[] dirAndName = SeaweedUtil.toDirAndName(path);
String parent = dirAndName[0];
String name = dirAndName[1];
FilerProto.Entry entry = lookupEntry(parent, name);
if (entry == null) {

View file

@ -54,7 +54,7 @@ public class FilerGrpcClient {
.negotiationType(NegotiationType.TLS)
.sslContext(sslContext));
filerAddress = String.format("%s:%d", host, grpcPort - 10000);
filerAddress = SeaweedUtil.joinHostPort(host, grpcPort - 10000);
FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
this.getBlockingStub().getFilerConfiguration(

View file

@ -64,10 +64,11 @@ public class SeaweedRead {
startOffset += gap;
}
FilerProto.Locations locations = knownLocations.get(parseVolumeId(chunkView.fileId));
String volumeId = parseVolumeId(chunkView.fileId);
FilerProto.Locations locations = knownLocations.get(volumeId);
if (locations == null || locations.getLocationsCount() == 0) {
LOG.error("failed to locate {}", chunkView.fileId);
// log here!
volumeIdCache.clearLocations(volumeId);
return 0;
}

View file

@ -27,4 +27,30 @@ public class SeaweedUtil {
public static CloseableHttpClient getClosableHttpClient() {
return httpClient;
}
public static String[] toDirAndName(String fullpath) {
if (fullpath == null) {
return new String[]{"/", ""};
}
if (fullpath.endsWith("/")) {
fullpath = fullpath.substring(0, fullpath.length() - 1);
}
if (fullpath.length() == 0) {
return new String[]{"/", ""};
}
int sep = fullpath.lastIndexOf("/");
String parent = sep == 0 ? "/" : fullpath.substring(0, sep);
String name = fullpath.substring(sep + 1);
return new String[]{parent, name};
}
public static String joinHostPort(String host, int port) {
if (host.startsWith("[") && host.endsWith("]")) {
return host + ":" + port;
}
if (host.indexOf(':')>=0) {
return "[" + host + "]:" + port;
}
return host + ":" + port;
}
}

View file

@ -26,6 +26,13 @@ public class VolumeIdCache {
return this.cache.getIfPresent(volumeId);
}
public void clearLocations(String volumeId) {
if (this.cache == null) {
return;
}
this.cache.invalidate(volumeId);
}
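Together with the SeaweedRead change above, clearLocations drops a cached volume entry once a lookup comes back empty, so the next read re-resolves the volume instead of reusing a stale location. A hypothetical Go sketch of the same invalidate-on-failure pattern (names and types here are not SeaweedFS APIs):

package volumecache

import "sync"

// locationCache is a hypothetical stand-in for the Java VolumeIdCache.
type locationCache struct {
	mu   sync.Mutex
	byID map[string][]string // volume id -> server URLs
}

// Clear mirrors clearLocations above: forget a volume's locations after a
// failed lookup so the next read re-resolves them rather than reusing a
// stale cache entry.
func (c *locationCache) Clear(volumeID string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.byID, volumeID)
}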
public void setLocations(String volumeId, FilerProto.Locations locations) {
if (this.cache == null) {
return;

View file

@ -305,6 +305,7 @@ message GetFilerConfigurationResponse {
string metrics_address = 9;
int32 metrics_interval_sec = 10;
string version = 11;
string cluster_id = 12;
}
message SubscribeMetadataRequest {
@ -312,6 +313,7 @@ message SubscribeMetadataRequest {
string path_prefix = 2;
int64 since_ns = 3;
int32 signature = 4;
repeated string path_prefixes = 6;
}
message SubscribeMetadataResponse {
string directory = 1;
@ -336,6 +338,7 @@ message KeepConnectedResponse {
message LocateBrokerRequest {
string resource = 1;
}
message LocateBrokerResponse {
bool found = 1;
// if found, send the exact address
@ -386,23 +389,6 @@ message FilerConf {
/////////////////////////
// Remote Storage related
/////////////////////////
message RemoteConf {
string type = 1;
string name = 2;
string s3_access_key = 4;
string s3_secret_key = 5;
string s3_region = 6;
string s3_endpoint = 7;
}
message RemoteStorageMapping {
map<string,RemoteStorageLocation> mappings = 1;
}
message RemoteStorageLocation {
string name = 1;
string bucket = 2;
string path = 3;
}
message DownloadToLocalRequest {
string directory = 1;
string name = 2;

File diff suppressed because it is too large.

View file

@ -16,6 +16,7 @@ var (
n = flag.Int("n", 100, "the number of metadata")
tailFiler = flag.String("filer", "localhost:8888", "the filer address")
isWrite = flag.Bool("write", false, "only write")
writeInterval = flag.Duration("writeInterval", 0, "write interval, e.g., 1s")
)
func main() {
@ -54,6 +55,7 @@ func startGenerateMetadata() {
for i := 0; i < *n; i++ {
name := fmt.Sprintf("file%d", i)
glog.V(0).Infof("write %s/%s", *dir, name)
if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
Directory: *dir,
Entry: &filer_pb.Entry{
@ -63,6 +65,9 @@ func startGenerateMetadata() {
fmt.Printf("create entry %s: %v\n", name, err)
return err
}
if *writeInterval > 0 {
time.Sleep(*writeInterval)
}
}
return nil
@ -72,8 +77,7 @@ func startGenerateMetadata() {
func startSubscribeMetadata(eachEntryFunc func(event *filer_pb.SubscribeMetadataResponse) error) {
tailErr := pb.FollowMetadata(*tailFiler, grpc.WithInsecure(), "tail",
*dir, 0, 0, eachEntryFunc, false)
tailErr := pb.FollowMetadata(*tailFiler, grpc.WithInsecure(), "tail", *dir, nil, 0, 0, eachEntryFunc, false)
if tailErr != nil {
fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)

View file

@ -65,7 +65,16 @@ func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, st
targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
_, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth)
uploadOption := &operation.UploadOption{
UploadUrl: targetUrl,
Filename: fmt.Sprintf("test%d", i),
Cipher: false,
IsInputCompressed: true,
MimeType: "bench/test",
PairMap: nil,
Jwt: assignResult.Auth,
}
_, err = operation.UploadData(data, uploadOption)
if err != nil {
log.Fatalf("upload: %v", err)
}
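This commit repeatedly replaces the long positional argument lists of operation.Upload and operation.UploadData with a single UploadOption struct. The struct definition itself is not included in the diff; the sketch below only infers its fields from the call sites in this commit, so the field types (PairMap and Jwt in particular) are assumptions:

package operationsketch

// UploadOption is reconstructed from the call sites in this commit, not copied
// from weed/operation; treat the field types as assumptions.
type UploadOption struct {
	UploadUrl         string
	Filename          string
	Cipher            bool              // encrypt the payload before upload
	IsInputCompressed bool              // payload is already compressed
	MimeType          string
	PairMap           map[string]string // extra header pairs (assumed type)
	Jwt               string            // security.EncodedJwt in the real code
}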

View file

@ -38,9 +38,13 @@ debug_filer_copy:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h
debug_filer_remote_sync:
debug_filer_remote_sync_dir:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=10000h
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=1h
debug_filer_remote_sync_buckets:
go build -gcflags="all=-N -l"
dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -createBucketAt=cloud1 -timeAgo=1h
debug_master_follower:
go build -gcflags="all=-N -l"

View file

@ -4,7 +4,6 @@ import (
"fmt"
"net/http"
"os"
"strconv"
"strings"
"time"
@ -134,7 +133,7 @@ func runFiler(cmd *Command, args []string) bool {
go stats_collect.StartMetricsServer(*f.metricsHttpPort)
filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
filerAddress := util.JoinHostPort(*f.ip, *f.port)
startDelay := time.Duration(2)
if *filerStartS3 {
filerS3Options.filer = &filerAddress
@ -207,7 +206,7 @@ func (fo *FilerOptions) startFiler() {
}
if *fo.publicPort != 0 {
publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
publicListeningAddress := util.JoinHostPort(*fo.bindIp, *fo.publicPort)
glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, 0)
if e != nil {
@ -222,7 +221,7 @@ func (fo *FilerOptions) startFiler() {
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
filerListener, e := util.NewListener(
*fo.bindIp+":"+strconv.Itoa(*fo.port),
util.JoinHostPort(*fo.bindIp, *fo.port),
time.Duration(10)*time.Second,
)
if e != nil {
@ -231,7 +230,7 @@ func (fo *FilerOptions) startFiler() {
// starting grpc server
grpcPort := *fo.port + 10000
grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0)
grpcL, err := util.NewListener(util.JoinHostPort(*fo.bindIp, grpcPort), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
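The command code now builds listen addresses with util.JoinHostPort instead of string concatenation, mirroring the joinHostPort helper added to the Java client earlier in this diff, which brackets IPv6 literals. util.JoinHostPort's body is not shown here; the standard library's net.JoinHostPort gives the same behavior, as in this small sketch:

package main

import (
	"fmt"
	"net"
	"strconv"
)

// joinHostPort shows the behavior the new helpers provide: IPv6 literals are
// wrapped in brackets so the port separator stays unambiguous.
func joinHostPort(host string, port int) string {
	return net.JoinHostPort(host, strconv.Itoa(port))
}

func main() {
	fmt.Println(joinHostPort("localhost", 8888)) // localhost:8888
	fmt.Println(joinHostPort("::1", 18888))      // [::1]:18888
}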

View file

@ -113,6 +113,6 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
})
return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_"+dataSink.GetName(),
sourcePath, startFrom.UnixNano(), 0, processEventFnWithOffset, false)
sourcePath, nil, startFrom.UnixNano(), 0, processEventFnWithOffset, false)
}

View file

@ -108,6 +108,11 @@ func runFilerCat(cmd *Command, args []string) bool {
return err
}
if len(respLookupEntry.Entry.Content) > 0 {
_, err = writer.Write(respLookupEntry.Entry.Content)
return err
}
filerCat.filerClient = client
return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)

View file

@ -115,7 +115,7 @@ func runCopy(cmd *Command, args []string) bool {
}
filerGrpcPort := filerPort + 10000
filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
filerGrpcAddress := util.JoinHostPort(filerUrl.Hostname(), int(filerGrpcPort))
copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
@ -392,8 +392,16 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
}
targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth))
uploadOption := &operation.UploadOption{
UploadUrl: targetUrl,
Filename: fileName,
Cipher: worker.options.cipher,
IsInputCompressed: false,
MimeType: mimeType,
PairMap: nil,
Jwt: security.EncodedJwt(assignResult.Auth),
}
uploadResult, err := operation.UploadData(data, uploadOption)
if err != nil {
return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
}
@ -498,7 +506,16 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
replication = assignResult.Replication
}
uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth))
uploadOption := &operation.UploadOption{
UploadUrl: targetUrl,
Filename: fileName+"-"+strconv.FormatInt(i+1, 10),
Cipher: worker.options.cipher,
IsInputCompressed: false,
MimeType: "",
PairMap: nil,
Jwt: security.EncodedJwt(assignResult.Auth),
}
uploadResult, err, _ := operation.Upload(io.NewSectionReader(f, i*chunkSize, chunkSize), uploadOption)
if err != nil {
uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
return
@ -531,6 +548,11 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
return uploadError
}
manifestedChunks, manifestErr := filer.MaybeManifestize(worker.saveDataAsChunk, chunks)
if manifestErr != nil {
return fmt.Errorf("create manifest: %v", manifestErr)
}
if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.CreateEntryRequest{
Directory: task.destinationUrlPath,
@ -548,7 +570,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
Collection: collection,
TtlSec: worker.options.ttlSec,
},
Chunks: chunks,
Chunks: manifestedChunks,
},
}
@ -583,3 +605,63 @@ func detectMimeType(f *os.File) string {
}
return mimeType
}
func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
var fileId, host string
var auth security.EncodedJwt
if flushErr := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
ctx := context.Background()
assignErr := util.Retry("assignVolume", func() error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: *worker.options.replication,
Collection: *worker.options.collection,
TtlSec: worker.options.ttlSec,
DiskType: *worker.options.diskType,
Path: name,
}
resp, err := client.AssignVolume(ctx, request)
if err != nil {
return fmt.Errorf("assign volume failure %v: %v", request, err)
}
if resp.Error != "" {
return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
}
fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
collection, replication = resp.Collection, resp.Replication
return nil
})
if assignErr != nil {
return assignErr
}
return nil
}); flushErr != nil {
return nil, collection, replication, fmt.Errorf("filerGrpcAddress assign volume: %v", flushErr)
}
uploadOption := &operation.UploadOption{
UploadUrl: fmt.Sprintf("http://%s/%s", host, fileId),
Filename: name,
Cipher: worker.options.cipher,
IsInputCompressed: false,
MimeType: "",
PairMap: nil,
Jwt: auth,
}
uploadResult, flushErr, _ := operation.Upload(reader, uploadOption)
if flushErr != nil {
return nil, collection, replication, fmt.Errorf("upload data: %v", flushErr)
}
if uploadResult.Error != "" {
return nil, collection, replication, fmt.Errorf("upload result: %v", uploadResult.Error)
}
return uploadResult.ToPbFileChunk(fileId, offset), collection, replication, nil
}

View file

@ -196,7 +196,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
})
return pb.FollowMetadata(*metaBackup.filerAddress, metaBackup.grpcDialOption, "meta_backup",
*metaBackup.filerDirectory, startTime.UnixNano(), 0, processEventFnWithOffset, false)
*metaBackup.filerDirectory, nil, startTime.UnixNano(), 0, processEventFnWithOffset, false)
}
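Each pb.FollowMetadata call in this commit gains one extra argument, passed as nil here, which lines up with the repeated path_prefixes field added to SubscribeMetadataRequest in filer.proto above. The real definition lives in weed/pb and is not part of this diff; the type below is reconstructed from the call sites only, so parameter names are guesses:

package pbsketch

import (
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

// followMetadata is an assumed shape for pb.FollowMetadata, reconstructed from
// the call sites in this commit; parameter names are guesses.
type followMetadata func(
	filerAddress string,
	grpcDialOption grpc.DialOption,
	clientName string,
	pathPrefix string,
	additionalPathPrefixes []string, // the new nil argument at the call sites
	lastTsNs int64,
	selfSignature int32,
	eachEntryFunc func(*filer_pb.SubscribeMetadataResponse) error,
	fatalOnError bool,
) error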

View file

@ -104,7 +104,7 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
}
tailErr := pb.FollowMetadata(*tailFiler, grpcDialOption, "tail",
*tailTarget, time.Now().Add(-*tailStart).UnixNano(), 0,
*tailTarget, nil, time.Now().Add(-*tailStart).UnixNano(), 0,
func(resp *filer_pb.SubscribeMetadataResponse) error {
if !shouldPrint(resp) {
return nil

View file

@ -3,16 +3,15 @@ package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"os"
"time"
)
@ -23,11 +22,13 @@ type RemoteSyncOptions struct {
debug *bool
timeAgo *time.Duration
dir *string
}
createBucketAt *string
createBucketRandomSuffix *bool
const (
RemoteSyncKeyPrefix = "remote.sync."
)
mappings *remote_pb.RemoteStorageMapping
remoteConfs map[string]*remote_pb.RemoteConf
bucketsDir string
}
var _ = filer_pb.FilerClient(&RemoteSyncOptions{})
@ -47,20 +48,38 @@ var (
func init() {
cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "/", "a mounted directory on filer")
remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "", "a mounted directory on filer")
remoteSyncOptions.createBucketAt = cmdFilerRemoteSynchronize.Flag.String("createBucketAt", "", "one remote storage name to create new buckets in")
remoteSyncOptions.createBucketRandomSuffix = cmdFilerRemoteSynchronize.Flag.Bool("createBucketWithRandomSuffix", true, "add randomized suffix to bucket name to avoid conflicts")
remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
remoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool("debug", false, "debug mode to print out filer updated remote files")
remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
}
var cmdFilerRemoteSynchronize = &Command{
UsageLine: "filer.remote.sync -filer=<filerHost>:<filerPort> -dir=/mount/s3_on_cloud",
Short: "resumable continuously write back updates to remote storage if the directory is mounted to the remote storage",
Long: `resumable continuously write back updates to remote storage if the directory is mounted to the remote storage
UsageLine: "filer.remote.sync",
Short: "resumable continuously write back updates to remote storage",
Long: `resumable continuously write back updates to remote storage
filer.remote.sync listens on filer update events.
If any mounted remote file is updated, it will fetch the updated content,
and write to the remote storage.
There are two modes:
1) By default, watch the /buckets folder and write back all changes.
# if there is only one remote storage configured
weed filer.remote.sync
# if there are multiple remote storages configured
# specify a remote storage to create new buckets.
weed filer.remote.sync -createBucketAt=cloud1
2) Write back one mounted folder to remote storage
weed filer.remote.sync -dir=/mount/s3_on_cloud
`,
}
@ -70,188 +89,57 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
remoteSyncOptions.grpcDialOption = grpcDialOption
// read filer remote storage mount mappings
mappings, readErr := filer.ReadMountMappings(grpcDialOption, *remoteSyncOptions.filerAddress)
if readErr != nil {
fmt.Printf("read mount mapping: %v", readErr)
return false
}
dir := *remoteSyncOptions.dir
filerAddress := *remoteSyncOptions.filerAddress
filerSource := &source.FilerSource{}
filerSource.DoInitialize(
*remoteSyncOptions.filerAddress,
pb.ServerToGrpcAddress(*remoteSyncOptions.filerAddress),
filerAddress,
pb.ServerToGrpcAddress(filerAddress),
"/", // does not matter
*remoteSyncOptions.readChunkFromFiler,
)
var found bool
for dir, remoteStorageMountLocation := range mappings.Mappings {
if *remoteSyncOptions.dir == dir {
found = true
storageConf, readErr := filer.ReadRemoteStorageConf(grpcDialOption, *remoteSyncOptions.filerAddress, remoteStorageMountLocation.Name)
if readErr != nil {
fmt.Printf("read remote storage configuration for %s: %v", dir, readErr)
continue
}
fmt.Printf("synchronize %s to remote storage...\n", *remoteSyncOptions.dir)
if err := util.Retry("filer.remote.sync "+dir, func() error {
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir, storageConf, remoteStorageMountLocation)
}); err != nil {
fmt.Printf("synchronize %s: %v\n", *remoteSyncOptions.dir, err)
}
break
}
}
if !found {
fmt.Printf("directory %s is not mounted to any remote storage\n", *remoteSyncOptions.dir)
return false
}
return true
}
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string, remoteStorage *filer_pb.RemoteConf, remoteStorageMountLocation *filer_pb.RemoteStorageLocation) error {
dirHash := util.HashStringToLong(mountedDir)
// 1. specified by timeAgo
// 2. last offset timestamp for this directory
// 3. directory creation time
var lastOffsetTs time.Time
if *option.timeAgo == 0 {
mountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))
if err != nil {
return fmt.Errorf("lookup %s: %v", mountedDir, err)
}
lastOffsetTsNs, err := getOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash))
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
glog.V(0).Infof("resume from %v", lastOffsetTs)
} else {
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
}
} else {
lastOffsetTs = time.Now().Add(-*option.timeAgo)
}
client, err := remote_storage.GetRemoteStorage(remoteStorage)
remoteSyncOptions.bucketsDir = "/buckets"
// check buckets again
remoteSyncOptions.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return err
}
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
if message.OldEntry == nil && message.NewEntry == nil {
remoteSyncOptions.bucketsDir = resp.DirBuckets
return nil
}
if message.OldEntry == nil && message.NewEntry != nil {
if len(message.NewEntry.Chunks) == 0 {
return nil
}
fmt.Printf("create: %+v\n", resp)
if !shouldSendToRemote(message.NewEntry) {
fmt.Printf("skipping creating: %+v\n", resp)
return nil
}
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if message.NewEntry.IsDirectory {
return client.WriteDirectory(dest, message.NewEntry)
}
reader := filer.NewChunkStreamReader(filerSource, message.NewEntry.Chunks)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
if message.OldEntry != nil && message.NewEntry == nil {
fmt.Printf("delete: %+v\n", resp)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
return client.DeleteFile(dest)
}
if message.OldEntry != nil && message.NewEntry != nil {
oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if !shouldSendToRemote(message.NewEntry) {
fmt.Printf("skipping updating: %+v\n", resp)
return nil
}
if message.NewEntry.IsDirectory {
return client.WriteDirectory(dest, message.NewEntry)
}
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
if isSameChunks(message.OldEntry.Chunks, message.NewEntry.Chunks) {
fmt.Printf("update meta: %+v\n", resp)
return client.UpdateFileMetadata(dest, message.NewEntry)
}
}
fmt.Printf("update: %+v\n", resp)
if err := client.DeleteFile(oldDest); err != nil {
return err
}
reader := filer.NewChunkStreamReader(filerSource, message.NewEntry.Chunks)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
return nil
}
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
lastTime := time.Unix(0, lastTsNs)
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
return setOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash), lastTsNs)
})
return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption,
"filer.remote.sync", mountedDir, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
}
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *filer_pb.RemoteStorageLocation) *filer_pb.RemoteStorageLocation {
source := string(sourcePath[len(mountDir):])
dest := util.FullPath(remoteMountLocation.Path).Child(source)
return &filer_pb.RemoteStorageLocation{
Name: remoteMountLocation.Name,
Bucket: remoteMountLocation.Bucket,
Path: string(dest),
}
}
func isSameChunks(a, b []*filer_pb.FileChunk) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
x, y := a[i], b[i]
if !proto.Equal(x, y) {
return false
}
if dir != "" && dir != remoteSyncOptions.bucketsDir {
fmt.Printf("synchronize %s to remote storage...\n", dir)
util.RetryForever("filer.remote.sync "+dir, func() error {
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
}, func(err error) bool {
if err != nil {
glog.Errorf("synchronize %s: %v", dir, err)
}
return true
}
func shouldSendToRemote(entry *filer_pb.Entry) bool {
if entry.RemoteEntry == nil {
return true
}
if entry.RemoteEntry.LastLocalSyncTsNs/1e9 < entry.Attributes.Mtime {
return true
}
return false
}
func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {
entry.RemoteEntry = remoteEntry
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
Directory: dir,
Entry: entry,
})
return err
return true
}
// read filer remote storage mount mappings
if detectErr := remoteSyncOptions.collectRemoteStorageConf(); detectErr != nil {
fmt.Fprintf(os.Stderr, "read mount info: %v\n", detectErr)
return true
}
// synchronize /buckets folder
fmt.Printf("synchronize buckets in %s ...\n", remoteSyncOptions.bucketsDir)
util.RetryForever("filer.remote.sync buckets", func() error {
return remoteSyncOptions.followBucketUpdatesAndUploadToRemote(filerSource)
}, func(err error) bool {
if err != nil {
glog.Errorf("synchronize %s: %v", remoteSyncOptions.bucketsDir, err)
}
return true
})
return true
}

View file

@ -0,0 +1,387 @@
package command
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"math"
"math/rand"
"strings"
"time"
)
func (option *RemoteSyncOptions) followBucketUpdatesAndUploadToRemote(filerSource *source.FilerSource) error {
// read filer remote storage mount mappings
if detectErr := option.collectRemoteStorageConf(); detectErr != nil {
return fmt.Errorf("read mount info: %v", detectErr)
}
eachEntryFunc, err := option.makeBucketedEventProcessor(filerSource)
if err != nil {
return err
}
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
lastTime := time.Unix(0, lastTsNs)
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
return remote_storage.SetSyncOffset(option.grpcDialOption, *option.filerAddress, option.bucketsDir, lastTsNs)
})
lastOffsetTs := collectLastSyncOffset(option, option.bucketsDir)
return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption, "filer.remote.sync",
option.bucketsDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
}
func (option *RemoteSyncOptions) makeBucketedEventProcessor(filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {
handleCreateBucket := func(entry *filer_pb.Entry) error {
if !entry.IsDirectory {
return nil
}
if entry.RemoteEntry != nil {
// this directory is imported from "remote.mount.buckets" or "remote.mount"
return nil
}
if option.mappings.PrimaryBucketStorageName != "" && *option.createBucketAt == "" {
*option.createBucketAt = option.mappings.PrimaryBucketStorageName
glog.V(0).Infof("%s is set as the primary remote storage", *option.createBucketAt)
}
if len(option.mappings.Mappings) == 1 && *option.createBucketAt == "" {
for k := range option.mappings.Mappings {
*option.createBucketAt = k
glog.V(0).Infof("%s is set as the only remote storage", *option.createBucketAt)
}
}
if *option.createBucketAt == "" {
return nil
}
remoteConf, found := option.remoteConfs[*option.createBucketAt]
if !found {
return fmt.Errorf("un-configured remote storage %s", *option.createBucketAt)
}
client, err := remote_storage.GetRemoteStorage(remoteConf)
if err != nil {
return err
}
bucketName := strings.ToLower(entry.Name)
if *option.createBucketRandomSuffix {
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
if len(bucketName)+5 > 63 {
bucketName = bucketName[:58]
}
bucketName = fmt.Sprintf("%s-%4d", bucketName, rand.Uint32()%10000)
}
glog.V(0).Infof("create bucket %s", bucketName)
if err := client.CreateBucket(bucketName); err != nil {
return fmt.Errorf("create bucket %s in %s: %v", bucketName, remoteConf.Name, err)
}
bucketPath := util.FullPath(option.bucketsDir).Child(entry.Name)
remoteLocation := &remote_pb.RemoteStorageLocation{
Name: *option.createBucketAt,
Bucket: bucketName,
Path: "/",
}
// need to add the new mapping here before receiving updates from metadata tailing
option.mappings.Mappings[string(bucketPath)] = remoteLocation
return filer.InsertMountMapping(option, string(bucketPath), remoteLocation)
}
handleDeleteBucket := func(entry *filer_pb.Entry) error {
if !entry.IsDirectory {
return nil
}
client, remoteStorageMountLocation, err := option.findRemoteStorageClient(entry.Name)
if err != nil {
return fmt.Errorf("findRemoteStorageClient %s: %v", entry.Name, err)
}
glog.V(0).Infof("delete remote bucket %s", remoteStorageMountLocation.Bucket)
if err := client.DeleteBucket(remoteStorageMountLocation.Bucket); err != nil {
return fmt.Errorf("delete remote bucket %s: %v", remoteStorageMountLocation.Bucket, err)
}
bucketPath := util.FullPath(option.bucketsDir).Child(entry.Name)
return filer.DeleteMountMapping(option, string(bucketPath))
}
handleEtcRemoteChanges := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
if message.NewEntry != nil {
// update
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
newMappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
if readErr != nil {
return fmt.Errorf("unmarshal mappings: %v", readErr)
}
option.mappings = newMappings
}
if strings.HasSuffix(message.NewEntry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
conf := &remote_pb.RemoteConf{}
if err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.NewEntry.Name, err)
}
option.remoteConfs[conf.Name] = conf
}
} else if message.OldEntry != nil {
// deletion
if strings.HasSuffix(message.OldEntry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
conf := &remote_pb.RemoteConf{}
if err := proto.Unmarshal(message.OldEntry.Content, conf); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.OldEntry.Name, err)
}
delete(option.remoteConfs, conf.Name)
}
}
return nil
}
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
if strings.HasPrefix(resp.Directory, filer.DirectoryEtcRemote) {
return handleEtcRemoteChanges(resp)
}
if message.OldEntry == nil && message.NewEntry == nil {
return nil
}
if message.OldEntry == nil && message.NewEntry != nil {
if message.NewParentPath == option.bucketsDir {
return handleCreateBucket(message.NewEntry)
}
if !filer.HasData(message.NewEntry) {
return nil
}
bucket, remoteStorageMountLocation, remoteStorage, ok := option.detectBucketInfo(message.NewParentPath)
if !ok {
return nil
}
client, err := remote_storage.GetRemoteStorage(remoteStorage)
if err != nil {
return err
}
glog.V(2).Infof("create: %+v", resp)
if !shouldSendToRemote(message.NewEntry) {
glog.V(2).Infof("skipping creating: %+v", resp)
return nil
}
dest := toRemoteStorageLocation(bucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if message.NewEntry.IsDirectory {
glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
return client.WriteDirectory(dest, message.NewEntry)
}
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
reader := filer.NewFileReader(filerSource, message.NewEntry)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
if message.OldEntry != nil && message.NewEntry == nil {
if resp.Directory == option.bucketsDir {
return handleDeleteBucket(message.OldEntry)
}
bucket, remoteStorageMountLocation, remoteStorage, ok := option.detectBucketInfo(resp.Directory)
if !ok {
return nil
}
client, err := remote_storage.GetRemoteStorage(remoteStorage)
if err != nil {
return err
}
glog.V(2).Infof("delete: %+v", resp)
dest := toRemoteStorageLocation(bucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
if message.OldEntry.IsDirectory {
glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
return client.RemoveDirectory(dest)
}
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
return client.DeleteFile(dest)
}
if message.OldEntry != nil && message.NewEntry != nil {
if resp.Directory == option.bucketsDir {
if message.NewParentPath == option.bucketsDir {
if message.OldEntry.Name == message.NewEntry.Name {
return nil
}
if err := handleCreateBucket(message.NewEntry); err != nil {
return err
}
if err := handleDeleteBucket(message.OldEntry); err != nil {
return err
}
}
}
oldBucket, oldRemoteStorageMountLocation, oldRemoteStorage, oldOk := option.detectBucketInfo(resp.Directory)
newBucket, newRemoteStorageMountLocation, newRemoteStorage, newOk := option.detectBucketInfo(message.NewParentPath)
if oldOk && newOk {
if !shouldSendToRemote(message.NewEntry) {
glog.V(2).Infof("skipping updating: %+v", resp)
return nil
}
client, err := remote_storage.GetRemoteStorage(oldRemoteStorage)
if err != nil {
return err
}
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
// update the same entry
if message.NewEntry.IsDirectory {
// update directory property
return nil
}
if filer.IsSameData(message.OldEntry, message.NewEntry) {
glog.V(2).Infof("update meta: %+v", resp)
oldDest := toRemoteStorageLocation(oldBucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), oldRemoteStorageMountLocation)
return client.UpdateFileMetadata(oldDest, message.OldEntry, message.NewEntry)
} else {
newDest := toRemoteStorageLocation(newBucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), newRemoteStorageMountLocation)
reader := filer.NewFileReader(filerSource, message.NewEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
}
}
// the following is entry rename
if oldOk {
client, err := remote_storage.GetRemoteStorage(oldRemoteStorage)
if err != nil {
return err
}
oldDest := toRemoteStorageLocation(oldBucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), oldRemoteStorageMountLocation)
if message.OldEntry.IsDirectory {
return client.RemoveDirectory(oldDest)
}
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
if err := client.DeleteFile(oldDest); err != nil {
return err
}
}
if newOk {
if !shouldSendToRemote(message.NewEntry) {
glog.V(2).Infof("skipping updating: %+v", resp)
return nil
}
client, err := remote_storage.GetRemoteStorage(newRemoteStorage)
if err != nil {
return err
}
newDest := toRemoteStorageLocation(newBucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), newRemoteStorageMountLocation)
if message.NewEntry.IsDirectory {
return client.WriteDirectory(newDest, message.NewEntry)
}
reader := filer.NewFileReader(filerSource, message.NewEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
}
return nil
}
return eachEntryFunc, nil
}
func (option *RemoteSyncOptions) findRemoteStorageClient(bucketName string) (client remote_storage.RemoteStorageClient, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, err error) {
bucket := util.FullPath(option.bucketsDir).Child(bucketName)
var isMounted bool
remoteStorageMountLocation, isMounted = option.mappings.Mappings[string(bucket)]
if !isMounted {
return nil, remoteStorageMountLocation, fmt.Errorf("%s is not mounted", bucket)
}
remoteConf, hasClient := option.remoteConfs[remoteStorageMountLocation.Name]
if !hasClient {
return nil, remoteStorageMountLocation, fmt.Errorf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
}
client, err = remote_storage.GetRemoteStorage(remoteConf)
if err != nil {
return nil, remoteStorageMountLocation, err
}
return client, remoteStorageMountLocation, nil
}
func (option *RemoteSyncOptions) detectBucketInfo(actualDir string) (bucket util.FullPath, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, remoteConf *remote_pb.RemoteConf, ok bool) {
bucket, ok = extractBucketPath(option.bucketsDir, actualDir)
if !ok {
return "", nil, nil, false
}
var isMounted bool
remoteStorageMountLocation, isMounted = option.mappings.Mappings[string(bucket)]
if !isMounted {
glog.Warningf("%s is not mounted", bucket)
return "", nil, nil, false
}
var hasClient bool
remoteConf, hasClient = option.remoteConfs[remoteStorageMountLocation.Name]
if !hasClient {
glog.Warningf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
return "", nil, nil, false
}
return bucket, remoteStorageMountLocation, remoteConf, true
}
func extractBucketPath(bucketsDir, dir string) (util.FullPath, bool) {
if !strings.HasPrefix(dir, bucketsDir+"/") {
return "", false
}
parts := strings.SplitN(dir[len(bucketsDir)+1:], "/", 2)
return util.FullPath(bucketsDir).Child(parts[0]), true
}
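extractBucketPath trims an absolute directory down to its bucket root under the buckets directory. A minimal stand-alone sketch of the expected behavior, assuming the default "/buckets" prefix and using plain strings instead of util.FullPath:
package main
import (
	"fmt"
	"strings"
)
// extractBucketPath mirrors the helper above: it keeps only the first path
// segment after bucketsDir, or reports false for paths outside of it.
func extractBucketPath(bucketsDir, dir string) (string, bool) {
	if !strings.HasPrefix(dir, bucketsDir+"/") {
		return "", false
	}
	parts := strings.SplitN(dir[len(bucketsDir)+1:], "/", 2)
	return bucketsDir + "/" + parts[0], true
}
func main() {
	fmt.Println(extractBucketPath("/buckets", "/buckets/photos/2021/08")) // /buckets/photos true
	fmt.Println(extractBucketPath("/buckets", "/tmp/scratch"))            //  false
}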
func (option *RemoteSyncOptions) collectRemoteStorageConf() (err error) {
if mappings, err := filer.ReadMountMappings(option.grpcDialOption, *option.filerAddress); err != nil {
return err
} else {
option.mappings = mappings
}
option.remoteConfs = make(map[string]*remote_pb.RemoteConf)
var lastConfName string
err = filer_pb.List(option, filer.DirectoryEtcRemote, "", func(entry *filer_pb.Entry, isLast bool) error {
if !strings.HasSuffix(entry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
return nil
}
conf := &remote_pb.RemoteConf{}
if err := proto.Unmarshal(entry.Content, conf); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, entry.Name, err)
}
option.remoteConfs[conf.Name] = conf
lastConfName = conf.Name
return nil
}, "", false, math.MaxUint32)
if option.mappings.PrimaryBucketStorageName == "" && len(option.remoteConfs) == 1 {
glog.V(0).Infof("%s is set to the default remote storage", lastConfName)
option.mappings.PrimaryBucketStorageName = lastConfName
}
return
}

View file

@ -0,0 +1,221 @@
package command
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"os"
"strings"
"time"
)
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error {
// read filer remote storage mount mappings
_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, *option.filerAddress, mountedDir)
if detectErr != nil {
return fmt.Errorf("read mount info: %v", detectErr)
}
eachEntryFunc, err := makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource)
if err != nil {
return err
}
processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
lastTime := time.Unix(0, lastTsNs)
glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
return remote_storage.SetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir, lastTsNs)
})
lastOffsetTs := collectLastSyncOffset(option, mountedDir)
return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption, "filer.remote.sync",
mountedDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
}
func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {
client, err := remote_storage.GetRemoteStorage(remoteStorage)
if err != nil {
return nil, err
}
handleEtcRemoteChanges := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
if message.NewEntry == nil {
return nil
}
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
mappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
if readErr != nil {
return fmt.Errorf("unmarshal mappings: %v", readErr)
}
if remoteLoc, found := mappings.Mappings[mountedDir]; found {
if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {
glog.Fatalf("Unexpected mount changes %+v => %+v", remoteStorageMountLocation, remoteLoc)
}
} else {
glog.V(0).Infof("unmounted %s exiting ...", mountedDir)
os.Exit(0)
}
}
if message.NewEntry.Name == remoteStorage.Name+filer.REMOTE_STORAGE_CONF_SUFFIX {
conf := &remote_pb.RemoteConf{}
if err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.NewEntry.Name, err)
}
remoteStorage = conf
if newClient, err := remote_storage.GetRemoteStorage(remoteStorage); err == nil {
client = newClient
} else {
return err
}
}
return nil
}
eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {
message := resp.EventNotification
if strings.HasPrefix(resp.Directory, filer.DirectoryEtcRemote) {
return handleEtcRemoteChanges(resp)
}
if message.OldEntry == nil && message.NewEntry == nil {
return nil
}
if message.OldEntry == nil && message.NewEntry != nil {
if !filer.HasData(message.NewEntry) {
return nil
}
glog.V(2).Infof("create: %+v", resp)
if !shouldSendToRemote(message.NewEntry) {
glog.V(2).Infof("skipping creating: %+v", resp)
return nil
}
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if message.NewEntry.IsDirectory {
glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
return client.WriteDirectory(dest, message.NewEntry)
}
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
reader := filer.NewFileReader(filerSource, message.NewEntry)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
if message.OldEntry != nil && message.NewEntry == nil {
glog.V(2).Infof("delete: %+v", resp)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
if message.OldEntry.IsDirectory {
glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
return client.RemoveDirectory(dest)
}
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
return client.DeleteFile(dest)
}
if message.OldEntry != nil && message.NewEntry != nil {
oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
if !shouldSendToRemote(message.NewEntry) {
glog.V(2).Infof("skipping updating: %+v", resp)
return nil
}
if message.NewEntry.IsDirectory {
return client.WriteDirectory(dest, message.NewEntry)
}
if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
if filer.IsSameData(message.OldEntry, message.NewEntry) {
glog.V(2).Infof("update meta: %+v", resp)
return client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)
}
}
glog.V(2).Infof("update: %+v", resp)
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
if err := client.DeleteFile(oldDest); err != nil {
return err
}
reader := filer.NewFileReader(filerSource, message.NewEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
if writeErr != nil {
return writeErr
}
return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)
}
return nil
}
return eachEntryFunc, nil
}
func collectLastSyncOffset(option *RemoteSyncOptions, mountedDir string) time.Time {
// 1. specified by timeAgo
// 2. last offset timestamp for this directory
// 3. directory creation time
var lastOffsetTs time.Time
if *option.timeAgo == 0 {
mountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))
if err != nil {
glog.V(0).Infof("get mounted directory %s: %v", mountedDir, err)
return time.Now()
}
lastOffsetTsNs, err := remote_storage.GetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir)
if mountedDirEntry != nil {
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
glog.V(0).Infof("resume from %v", lastOffsetTs)
} else {
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
}
} else {
lastOffsetTs = time.Now()
}
} else {
lastOffsetTs = time.Now().Add(-*option.timeAgo)
}
return lastOffsetTs
}
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *remote_pb.RemoteStorageLocation) *remote_pb.RemoteStorageLocation {
source := string(sourcePath[len(mountDir):])
dest := util.FullPath(remoteMountLocation.Path).Child(source)
return &remote_pb.RemoteStorageLocation{
Name: remoteMountLocation.Name,
Bucket: remoteMountLocation.Bucket,
Path: string(dest),
}
}
func shouldSendToRemote(entry *filer_pb.Entry) bool {
if entry.RemoteEntry == nil {
return true
}
if entry.RemoteEntry.RemoteMtime < entry.Attributes.Mtime {
return true
}
return false
}
func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {
remoteEntry.LastLocalSyncTsNs = time.Now().UnixNano()
entry.RemoteEntry = remoteEntry
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
Directory: dir,
Entry: entry,
})
return err
})
}

View file

@ -171,7 +171,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
})
return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+targetFiler,
sourcePath, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
sourcePath, nil, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
}

View file

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package command

View file

@ -3,6 +3,9 @@ package command
import (
_ "net/http/pprof"
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/azure"
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/gcs"
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/hdfs"
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
@ -27,4 +30,5 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
_ "github.com/chrislusf/seaweedfs/weed/filer/tikv"
)

View file

@ -7,7 +7,6 @@ import (
"net/http"
"os"
"sort"
"strconv"
"strings"
"time"
@ -116,7 +115,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
listeningAddress := util.JoinHostPort(*masterOption.ipBind, *masterOption.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
@ -132,7 +131,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
// starting grpc server
grpcPort := *masterOption.port + 10000
grpcL, err := util.NewListener(*masterOption.ipBind+":"+strconv.Itoa(grpcPort), 0)
grpcL, err := util.NewListener(util.JoinHostPort(*masterOption.ipBind, grpcPort), 0)
if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
@ -163,7 +162,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
masterAddress = util.JoinHostPort(masterIp, masterPort)
if peers != "" {
cleanedPeers = strings.Split(peers, ",")
}

View file

@ -13,7 +13,6 @@ import (
"github.com/gorilla/mux"
"google.golang.org/grpc/reflection"
"net/http"
"strconv"
"strings"
"time"
)
@ -114,7 +113,7 @@ func startMasterFollower(masterOptions MasterOptions) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, option, masters)
listeningAddress := *masterOptions.ipBind + ":" + strconv.Itoa(*masterOptions.port)
listeningAddress := util.JoinHostPort(*masterOptions.ipBind, *masterOptions.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
@ -123,7 +122,7 @@ func startMasterFollower(masterOptions MasterOptions) {
// starting grpc server
grpcPort := *masterOptions.port + 10000
grpcL, err := util.NewListener(*masterOptions.ipBind+":"+strconv.Itoa(grpcPort), 0)
grpcL, err := util.NewListener(util.JoinHostPort(*masterOptions.ipBind, grpcPort), 0)
if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}

View file

@ -1,6 +1,5 @@
// +build !linux
// +build !darwin
// +build !freebsd
//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd
package command

View file

@ -1,3 +1,4 @@
//go:build linux || darwin || freebsd
// +build linux darwin freebsd
package command

View file

@ -3,7 +3,6 @@ package command
import (
"context"
"fmt"
"strconv"
"time"
"google.golang.org/grpc/reflection"
@ -100,7 +99,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
}, grpcDialOption)
// start grpc listener
grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
grpcL, err := util.NewListener(util.JoinHostPort("", *msgBrokerOpt.port), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
}

View file

@ -44,7 +44,7 @@ dbFile = "./filer.db" # sqlite db file
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
# directory TEXT COMMENT 'full path to parent directory',
# directory TEXT BINARY COMMENT 'full path to parent directory',
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
@ -69,7 +69,7 @@ createTable = """
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
dirhash BIGINT,
name VARCHAR(1000) BINARY,
directory TEXT,
directory TEXT BINARY,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
@ -230,3 +230,11 @@ location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
[tikv]
enabled = false
# If you have multiple pd addresses, separate them with ',', for example:
# pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
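For reference, an enabled configuration might look like the sketch below; the three PD endpoints are placeholders, and only the keys introduced here are shown:
[tikv]
enabled = true
# comma-separated PD endpoints of the TiKV cluster
pdaddrs = "pd1:2379,pd2:2379,pd3:2379"
# number of concurrent TiKV delete-range workers
deleterange_concurrency = 4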

View file

@ -194,7 +194,7 @@ func runServer(cmd *Command, args []string) bool {
filerOptions.disableHttp = serverDisableHttp
masterOptions.disableHttp = serverDisableHttp
filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port)
filerAddress := util.JoinHostPort(*serverIp, *filerOptions.port)
s3Options.filer = &filerAddress
webdavOptions.filer = &filerAddress
msgBrokerOptions.filer = &filerAddress

View file

@ -194,7 +194,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
*v.publicPort = *v.port
}
if *v.publicUrl == "" {
*v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort)
*v.publicUrl = util.JoinHostPort(*v.ip, *v.publicPort)
}
volumeMux := http.NewServeMux()
@ -308,7 +308,7 @@ func (v VolumeServerOptions) isSeparatedPublicPort() bool {
func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server {
grpcPort := *v.port + 10000
grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
grpcL, err := util.NewListener(util.JoinHostPort(*v.bindIp, grpcPort), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
@ -324,7 +324,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
}
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
publicListeningAddress := util.JoinHostPort(*v.bindIp, *v.publicPort)
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
@ -351,7 +351,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
keyFile = viper.GetString("https.volume.key")
}
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
listeningAddress := util.JoinHostPort(*v.bindIp, *v.port)
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
@ -373,7 +373,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}
func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) {
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port+20000)
listeningAddress := util.JoinHostPort(*v.bindIp, *v.port+20000)
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress)
listener, e := util.NewListener(listeningAddress, 0)
if e != nil {

View file

@ -7,7 +7,7 @@ import (
"strings"
"time"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/client/v3"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"

View file

@ -6,6 +6,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/wdclient"
"io"
"math"
"net/url"
"strings"
"time"
"github.com/golang/protobuf/proto"
@ -108,6 +110,9 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
for _, urlString := range urlStrings {
receivedData = receivedData[:0]
if strings.Contains(urlString, "%") {
urlString = url.PathEscape(urlString)
}
shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
receivedData = append(receivedData, data...)
})

View file

@ -66,7 +66,16 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
// upload data
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
uploadOption := &operation.UploadOption{
UploadUrl: targetUrl,
Filename: "",
Cipher: f.Cipher,
IsInputCompressed: false,
MimeType: "",
PairMap: nil,
Jwt: assignResult.Auth,
}
uploadResult, err := operation.UploadData(data, uploadOption)
if err != nil {
return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
}

View file

@ -6,6 +6,7 @@ import (
"fmt"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"os"
@ -47,7 +48,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 10,
Filter: filter.NewBloomFilter(8), // false positive rate 0.02
}
if store.db, err = leveldb.OpenFile(dir, opts); err != nil {

View file

@ -48,7 +48,6 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
Filter: filter.NewBloomFilter(8), // false positive rate 0.02
}

View file

@ -68,14 +68,12 @@ func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) {
opts := &opt.Options{
BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
Filter: bloom,
}
if name != DEFAULT {
opts = &opt.Options{
BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB
WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB
CompactionTableSizeMultiplier: 4,
Filter: bloom,
}
}

View file

@ -2,8 +2,8 @@ package filer
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -11,21 +11,8 @@ func (entry *Entry) IsInRemoteOnly() bool {
return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
}
func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data []byte, err error) {
client, _, found := f.RemoteStorage.GetRemoteStorageClient(entry.Remote.StorageName)
if !found {
return nil, fmt.Errorf("remote storage %v not found", entry.Remote.StorageName)
}
mountDir, remoteLoation := f.RemoteStorage.FindMountDirectory(entry.FullPath)
sourceLoc := MapFullPathToRemoteStorageLocation(mountDir, remoteLoation, entry.FullPath)
return client.ReadFile(sourceLoc, offset, size)
}
func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, fp util.FullPath) *filer_pb.RemoteStorageLocation {
remoteLocation := &filer_pb.RemoteStorageLocation{
func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, fp util.FullPath) *remote_pb.RemoteStorageLocation {
remoteLocation := &remote_pb.RemoteStorageLocation{
Name: remoteMountedLocation.Name,
Bucket: remoteMountedLocation.Bucket,
Path: remoteMountedLocation.Path,
@ -34,11 +21,11 @@ func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMou
return remoteLocation
}
func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, remoteLocationPath string)(fp util.FullPath) {
func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, remoteLocationPath string) (fp util.FullPath) {
return localMountedDir.Child(remoteLocationPath[len(remoteMountedLocation.Path):])
}
func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *filer_pb.RemoteConf, remoteLocation *filer_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *remote_pb.RemoteConf, remoteLocation *remote_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
Directory: string(parent),

View file

@ -0,0 +1,121 @@
package filer
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
)
func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *remote_pb.RemoteStorageMapping, readErr error) {
var oldContent []byte
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
return readErr
}); readErr != nil {
return nil, readErr
}
mappings, readErr = UnmarshalRemoteStorageMappings(oldContent)
if readErr != nil {
return nil, fmt.Errorf("unmarshal mappings: %v", readErr)
}
return
}
func InsertMountMapping(filerClient filer_pb.FilerClient, dir string, remoteStorageLocation *remote_pb.RemoteStorageLocation) (err error) {
// read current mapping
var oldContent, newContent []byte
err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
oldContent, err = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
return err
})
if err != nil {
if err != filer_pb.ErrNotFound {
return fmt.Errorf("read existing mapping: %v", err)
}
}
// add new mapping
newContent, err = addRemoteStorageMapping(oldContent, dir, remoteStorageLocation)
if err != nil {
return fmt.Errorf("add mapping %s~%s: %v", dir, remoteStorageLocation, err)
}
// save back
err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent)
})
if err != nil {
return fmt.Errorf("save mapping: %v", err)
}
return nil
}
func DeleteMountMapping(filerClient filer_pb.FilerClient, dir string) (err error) {
// read current mapping
var oldContent, newContent []byte
err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
oldContent, err = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
return err
})
if err != nil {
if err != filer_pb.ErrNotFound {
return fmt.Errorf("read existing mapping: %v", err)
}
}
// add new mapping
newContent, err = removeRemoteStorageMapping(oldContent, dir)
if err != nil {
return fmt.Errorf("delete mount %s: %v", dir, err)
}
// save back
err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent)
})
if err != nil {
return fmt.Errorf("save mapping: %v", err)
}
return nil
}
func addRemoteStorageMapping(oldContent []byte, dir string, storageLocation *remote_pb.RemoteStorageLocation) (newContent []byte, err error) {
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
if unmarshalErr != nil {
// skip: tolerate unmarshal errors and proceed with whatever mappings were returned
}
// set the new mapping
mappings.Mappings[dir] = storageLocation
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
}
return
}
func removeRemoteStorageMapping(oldContent []byte, dir string) (newContent []byte, err error) {
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
if unmarshalErr != nil {
return nil, unmarshalErr
}
// set the new mapping
delete(mappings.Mappings, dir)
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
}
return
}

View file

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -21,13 +22,13 @@ const REMOTE_STORAGE_MOUNT_FILE = "mount.mapping"
type FilerRemoteStorage struct {
rules ptrie.Trie
storageNameToConf map[string]*filer_pb.RemoteConf
storageNameToConf map[string]*remote_pb.RemoteConf
}
func NewFilerRemoteStorage() (rs *FilerRemoteStorage) {
rs = &FilerRemoteStorage{
rules: ptrie.New(),
storageNameToConf: make(map[string]*filer_pb.RemoteConf),
storageNameToConf: make(map[string]*remote_pb.RemoteConf),
}
return rs
}
@ -56,7 +57,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
if !strings.HasSuffix(entry.Name(), REMOTE_STORAGE_CONF_SUFFIX) {
return nil
}
conf := &filer_pb.RemoteConf{}
conf := &remote_pb.RemoteConf{}
if err := proto.Unmarshal(entry.Content, conf); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, entry.Name(), err)
}
@ -66,7 +67,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
}
func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err error) {
mappings := &filer_pb.RemoteStorageMapping{}
mappings := &remote_pb.RemoteStorageMapping{}
if err := proto.Unmarshal(data, mappings); err != nil {
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, err)
}
@ -76,23 +77,23 @@ func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err er
return nil
}
func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *filer_pb.RemoteStorageLocation) {
func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *remote_pb.RemoteStorageLocation) {
rs.rules.Put([]byte(dir+"/"), loc)
}
func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *filer_pb.RemoteStorageLocation) {
func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *remote_pb.RemoteStorageLocation) {
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
mountDir = util.FullPath(string(key[:len(key)-1]))
remoteLocation = value.(*filer_pb.RemoteStorageLocation)
remoteLocation = value.(*remote_pb.RemoteStorageLocation)
return true
})
return
}
func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
var storageLocation *filer_pb.RemoteStorageLocation
func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) {
var storageLocation *remote_pb.RemoteStorageLocation
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
storageLocation = value.(*filer_pb.RemoteStorageLocation)
storageLocation = value.(*remote_pb.RemoteStorageLocation)
return true
})
@ -104,7 +105,7 @@ func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client r
return rs.GetRemoteStorageClient(storageLocation.Name)
}
func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) {
remoteConf, found = rs.storageNameToConf[storageName]
if !found {
return
@ -118,9 +119,9 @@ func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client
return
}
func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.RemoteStorageMapping, err error) {
mappings = &filer_pb.RemoteStorageMapping{
Mappings: make(map[string]*filer_pb.RemoteStorageLocation),
func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.RemoteStorageMapping, err error) {
mappings = &remote_pb.RemoteStorageMapping{
Mappings: make(map[string]*remote_pb.RemoteStorageLocation),
}
if len(oldContent) > 0 {
if err = proto.Unmarshal(oldContent, mappings); err != nil {
@ -130,56 +131,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.Remot
return
}
func AddRemoteStorageMapping(oldContent []byte, dir string, storageLocation *filer_pb.RemoteStorageLocation) (newContent []byte, err error) {
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
if unmarshalErr != nil {
// skip
}
// set the new mapping
mappings.Mappings[dir] = storageLocation
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
}
return
}
func RemoveRemoteStorageMapping(oldContent []byte, dir string) (newContent []byte, err error) {
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
if unmarshalErr != nil {
return nil, unmarshalErr
}
// set the new mapping
delete(mappings.Mappings, dir)
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
}
return
}
func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *filer_pb.RemoteStorageMapping, readErr error) {
var oldContent []byte
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
return readErr
}); readErr != nil {
return nil, readErr
}
mappings, readErr = UnmarshalRemoteStorageMappings(oldContent)
if readErr != nil {
return nil, fmt.Errorf("unmarshal mappings: %v", readErr)
}
return
}
func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *filer_pb.RemoteConf, readErr error) {
func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *remote_pb.RemoteConf, readErr error) {
var oldContent []byte
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX)
@ -189,7 +141,7 @@ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string,
}
// unmarshal storage configuration
conf = &filer_pb.RemoteConf{}
conf = &remote_pb.RemoteConf{}
if unMarshalErr := proto.Unmarshal(oldContent, conf); unMarshalErr != nil {
readErr = fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
return
@ -197,3 +149,33 @@ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string,
return
}
func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir string) (*remote_pb.RemoteStorageMapping, string, *remote_pb.RemoteStorageLocation, *remote_pb.RemoteConf, error) {
mappings, listErr := ReadMountMappings(grpcDialOption, filerAddress)
if listErr != nil {
return nil, "", nil, nil, listErr
}
if dir == "" {
return mappings, "", nil, nil, fmt.Errorf("need to specify '-dir' option")
}
var localMountedDir string
var remoteStorageMountedLocation *remote_pb.RemoteStorageLocation
for k, loc := range mappings.Mappings {
if strings.HasPrefix(dir, k) {
localMountedDir, remoteStorageMountedLocation = k, loc
}
}
if localMountedDir == "" {
return mappings, localMountedDir, remoteStorageMountedLocation, nil, fmt.Errorf("%s is not mounted", dir)
}
// find remote storage configuration
remoteStorageConf, err := ReadRemoteStorageConf(grpcDialOption, filerAddress, remoteStorageMountedLocation.Name)
if err != nil {
return mappings, localMountedDir, remoteStorageMountedLocation, remoteStorageConf, err
}
return mappings, localMountedDir, remoteStorageMountedLocation, remoteStorageConf, nil
}

View file

@ -1,20 +1,20 @@
package filer
import (
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/stretchr/testify/assert"
"testing"
)
func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
conf := &filer_pb.RemoteConf{
conf := &remote_pb.RemoteConf{
Name: "s7",
Type: "s3",
}
rs := NewFilerRemoteStorage()
rs.storageNameToConf[conf.Name] = conf
rs.mapDirectoryToRemoteStorage("/a/b/c", &filer_pb.RemoteStorageLocation{
rs.mapDirectoryToRemoteStorage("/a/b/c", &remote_pb.RemoteStorageLocation{
Name: "s7",
Bucket: "some",
Path: "/dir",

View file

@ -1,3 +1,4 @@
//go:build linux || darwin || windows
// +build linux darwin windows
// limited GOOS due to modernc.org/libc/unistd

View file

@ -1,3 +1,4 @@
//go:build !linux && !darwin && !windows && !s390 && !ppc64le && !mips64
// +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64
// limited GOOS due to modernc.org/libc/unistd

View file

@ -7,6 +7,7 @@ import (
"math"
"sort"
"strings"
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@ -16,6 +17,49 @@ import (
"github.com/chrislusf/seaweedfs/weed/wdclient"
)
func HasData(entry *filer_pb.Entry) bool {
if len(entry.Content) > 0 {
return true
}
return len(entry.Chunks) > 0
}
func IsSameData(a, b *filer_pb.Entry) bool {
if len(a.Content) > 0 || len(b.Content) > 0 {
return bytes.Equal(a.Content, b.Content)
}
return isSameChunks(a.Chunks, b.Chunks)
}
func isSameChunks(a, b []*filer_pb.FileChunk) bool {
if len(a) != len(b) {
return false
}
sort.Slice(a, func(i, j int) bool {
return strings.Compare(a[i].ETag, a[j].ETag) < 0
})
sort.Slice(b, func(i, j int) bool {
return strings.Compare(b[i].ETag, b[j].ETag) < 0
})
for i := 0; i < len(a); i++ {
if a[i].ETag != b[i].ETag {
return false
}
}
return true
}
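HasData, IsSameData and isSameChunks above let the sync skip re-uploads when only metadata changed: two entries count as the same data when their inline Content is equal, or when their chunk lists carry the same ETags regardless of order. A rough illustration of that rule with a hypothetical stand-in chunk type (not the generated filer_pb.FileChunk):
package main
import (
	"fmt"
	"sort"
)
type chunk struct{ ETag string }
// sameChunks reports whether two chunk lists reference the same data,
// comparing ETags after sorting so ordering does not matter.
func sameChunks(a, b []chunk) bool {
	if len(a) != len(b) {
		return false
	}
	sort.Slice(a, func(i, j int) bool { return a[i].ETag < a[j].ETag })
	sort.Slice(b, func(i, j int) bool { return b[i].ETag < b[j].ETag })
	for i := range a {
		if a[i].ETag != b[i].ETag {
			return false
		}
	}
	return true
}
func main() {
	x := []chunk{{"aa"}, {"bb"}}
	y := []chunk{{"bb"}, {"aa"}}
	fmt.Println(sameChunks(x, y))                       // true: same ETags, different order
	fmt.Println(sameChunks(x, []chunk{{"aa"}, {"cc"}})) // false
}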
func NewFileReader(filerClient filer_pb.FilerClient, entry *filer_pb.Entry) io.Reader {
if len(entry.Content) > 0 {
return bytes.NewReader(entry.Content)
}
return NewChunkStreamReader(filerClient, entry.Chunks)
}
func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks)
@ -88,8 +132,8 @@ type ChunkStreamReader struct {
logicOffset int64
buffer []byte
bufferOffset int64
bufferPos int
nextChunkViewIndex int
bufferLock sync.Mutex
chunk string
lookupFileId wdclient.LookupFileIdFunctionType
}
@ -132,26 +176,29 @@ func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.F
}
func (c *ChunkStreamReader) ReadAt(p []byte, off int64) (n int, err error) {
if err = c.prepareBufferFor(c.logicOffset); err != nil {
c.bufferLock.Lock()
defer c.bufferLock.Unlock()
if err = c.prepareBufferFor(off); err != nil {
return
}
return c.Read(p)
c.logicOffset = off
return c.doRead(p)
}
func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
c.bufferLock.Lock()
defer c.bufferLock.Unlock()
return c.doRead(p)
}
func (c *ChunkStreamReader) doRead(p []byte) (n int, err error) {
// fmt.Printf("do read [%d,%d) at %s[%d,%d)\n", c.logicOffset, c.logicOffset+int64(len(p)), c.chunk, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
for n < len(p) {
if c.isBufferEmpty() {
if c.nextChunkViewIndex >= len(c.chunkViews) {
return n, io.EOF
}
chunkView := c.chunkViews[c.nextChunkViewIndex]
if err = c.fetchChunkToBuffer(chunkView); err != nil {
// println("read", c.logicOffset)
if err = c.prepareBufferFor(c.logicOffset); err != nil {
return
}
c.nextChunkViewIndex++
}
t := copy(p[n:], c.buffer[c.bufferPos:])
c.bufferPos += t
t := copy(p[n:], c.buffer[c.logicOffset-c.bufferOffset:])
n += t
c.logicOffset += int64(t)
}
@ -159,10 +206,12 @@ func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
}
func (c *ChunkStreamReader) isBufferEmpty() bool {
return len(c.buffer) <= c.bufferPos
return len(c.buffer) <= int(c.logicOffset-c.bufferOffset)
}
func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
c.bufferLock.Lock()
defer c.bufferLock.Unlock()
var err error
switch whence {
@ -182,33 +231,59 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
}
func insideChunk(offset int64, chunk *ChunkView) bool {
return chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size)
}
func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
// stay in the same chunk
if !c.isBufferEmpty() {
if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) {
c.bufferPos = int(offset - c.bufferOffset)
return nil
}
}
// fmt.Printf("fetch for offset %d\n", offset)
// need to seek to a different chunk
currentChunkIndex := sort.Search(len(c.chunkViews), func(i int) bool {
return c.chunkViews[i].LogicOffset <= offset
return offset < c.chunkViews[i].LogicOffset
})
if currentChunkIndex == len(c.chunkViews) {
// not found
if insideChunk(offset, c.chunkViews[0]) {
// fmt.Printf("select0 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId)
currentChunkIndex = 0
} else if insideChunk(offset, c.chunkViews[len(c.chunkViews)-1]) {
currentChunkIndex = len(c.chunkViews) - 1
// fmt.Printf("select last chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId)
} else {
return io.EOF
}
} else if currentChunkIndex > 0 {
if insideChunk(offset, c.chunkViews[currentChunkIndex]) {
// good hit
} else if insideChunk(offset, c.chunkViews[currentChunkIndex-1]) {
currentChunkIndex -= 1
// fmt.Printf("select -1 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId)
} else {
// glog.Fatalf("unexpected1 offset %d", offset)
return fmt.Errorf("unexpected1 offset %d", offset)
}
} else {
// glog.Fatalf("unexpected2 offset %d", offset)
return fmt.Errorf("unexpected2 offset %d", offset)
}
// positioning within the new chunk
chunk := c.chunkViews[currentChunkIndex]
if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
if insideChunk(offset, chunk) {
if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
if err = c.fetchChunkToBuffer(chunk); err != nil {
return
}
c.nextChunkViewIndex = currentChunkIndex + 1
}
c.bufferPos = int(offset - c.bufferOffset)
} else {
// glog.Fatalf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size))
return fmt.Errorf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size))
}
return
}
@ -239,10 +314,10 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
return err
}
c.buffer = buffer.Bytes()
c.bufferPos = 0
c.bufferOffset = chunkView.LogicOffset
c.chunk = chunkView.FileId
// glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
// glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
return nil
}
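The reworked prepareBufferFor locates the chunk covering a logical offset with sort.Search over chunkViews ordered by LogicOffset; because the predicate is now `offset < LogicOffset`, the search yields the first chunk that starts past the offset, and the code then probes that index and the one before it. A simplified, self-contained sketch of the same lookup, using a stand-in chunkView type instead of the real ChunkView:

package main

import (
	"fmt"
	"sort"
)

// chunkView is a stand-in for filer.ChunkView with only the fields used here.
type chunkView struct {
	LogicOffset int64
	Size        uint64
}

// findChunk returns the index of the chunk that contains offset, or -1.
func findChunk(views []chunkView, offset int64) int {
	// First chunk whose start lies strictly after the offset.
	i := sort.Search(len(views), func(i int) bool {
		return offset < views[i].LogicOffset
	})
	// The candidate is the chunk just before that position.
	if i > 0 {
		i--
	}
	v := views[i]
	if v.LogicOffset <= offset && offset < v.LogicOffset+int64(v.Size) {
		return i
	}
	return -1 // offset falls in a hole or past the end
}

func main() {
	views := []chunkView{{0, 8}, {8, 8}, {16, 4}}
	fmt.Println(findChunk(views, 0))  // 0
	fmt.Println(findChunk(views, 10)) // 1
	fmt.Println(findChunk(views, 25)) // -1
}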

5
weed/filer/tikv/tikv.go Normal file
View file

@ -0,0 +1,5 @@
package tikv
/*
* This empty file lets `go build` work without the tikv build tag.
*/
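This placeholder works together with build constraints on the rest of the package: the TiKV-backed files are compiled only when the tikv tag is supplied, while this unguarded empty file keeps the package valid for a plain go build. A minimal sketch of the constraint header those files carry (comments are illustrative):

//go:build tikv
// +build tikv

// Compiled only when building with: go build -tags tikv
// Without the tag, only the empty placeholder file is compiled,
// so the tikv package still builds as a no-op.
package tikv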

View file

@ -0,0 +1,389 @@
//go:build tikv
// +build tikv
package tikv
import (
"bytes"
"context"
"crypto/sha1"
"fmt"
"io"
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/tikv/client-go/v2/tikv"
"github.com/tikv/client-go/v2/txnkv"
)
var (
_ filer.FilerStore = ((*TikvStore)(nil))
)
func init() {
filer.Stores = append(filer.Stores, &TikvStore{})
}
type TikvStore struct {
client *tikv.KVStore
deleteRangeConcurrency int
}
// Basic APIs
func (store *TikvStore) GetName() string {
return "tikv"
}
func (store *TikvStore) Initialize(config util.Configuration, prefix string) error {
pdAddrs := []string{}
pdAddrsStr := config.GetString(prefix + "pdaddrs")
for _, item := range strings.Split(pdAddrsStr, ",") {
pdAddrs = append(pdAddrs, strings.TrimSpace(item))
}
drc := config.GetInt(prefix + "deleterange_concurrency")
if drc <= 0 {
drc = 1
}
store.deleteRangeConcurrency = drc
return store.initialize(pdAddrs)
}
func (store *TikvStore) initialize(pdAddrs []string) error {
client, err := tikv.NewTxnClient(pdAddrs)
store.client = client
return err
}
func (store *TikvStore) Shutdown() {
err := store.client.Close()
if err != nil {
glog.V(0).Infof("Shutdown TiKV client got error: %v", err)
}
}
// ~ Basic APIs
// Entry APIs
func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer.Entry) error {
dir, name := entry.DirAndName()
key := generateKey(dir, name)
value, err := entry.EncodeAttributesAndChunks()
if err != nil {
return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
}
txn, err := store.getTxn(ctx)
if err != nil {
return err
}
err = txn.RunInTxn(func(txn *txnkv.KVTxn) error {
return txn.Set(key, value)
})
if err != nil {
return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
}
return nil
}
func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer.Entry) error {
return store.InsertEntry(ctx, entry)
}
func (store *TikvStore) FindEntry(ctx context.Context, path util.FullPath) (*filer.Entry, error) {
dir, name := path.DirAndName()
key := generateKey(dir, name)
txn, err := store.getTxn(ctx)
if err != nil {
return nil, err
}
var value []byte = nil
err = txn.RunInTxn(func(txn *txnkv.KVTxn) error {
val, err := txn.Get(context.TODO(), key)
if err == nil {
value = val
}
return err
})
if isNotExists(err) || value == nil {
return nil, filer_pb.ErrNotFound
}
if err != nil {
return nil, fmt.Errorf("get %s : %v", path, err)
}
entry := &filer.Entry{
FullPath: path,
}
err = entry.DecodeAttributesAndChunks(value)
if err != nil {
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
}
return entry, nil
}
func (store *TikvStore) DeleteEntry(ctx context.Context, path util.FullPath) error {
dir, name := path.DirAndName()
key := generateKey(dir, name)
txn, err := store.getTxn(ctx)
if err != nil {
return err
}
err = txn.RunInTxn(func(txn *txnkv.KVTxn) error {
return txn.Delete(key)
})
if err != nil {
return fmt.Errorf("delete %s : %v", path, err)
}
return nil
}
// ~ Entry APIs
// Directory APIs
func (store *TikvStore) DeleteFolderChildren(ctx context.Context, path util.FullPath) error {
directoryPrefix := genDirectoryKeyPrefix(path, "")
txn, err := store.getTxn(ctx)
if err != nil {
return err
}
var (
startKey []byte = nil
endKey []byte = nil
)
err = txn.RunInTxn(func(txn *txnkv.KVTxn) error {
iter, err := txn.Iter(directoryPrefix, nil)
if err != nil {
return err
}
defer iter.Close()
for iter.Valid() {
key := iter.Key()
endKey = key
if !bytes.HasPrefix(key, directoryPrefix) {
break
}
if startKey == nil {
startKey = key
}
err = iter.Next()
if err != nil {
return err
}
}
// Only one key matched; just delete it.
if startKey != nil && bytes.Equal(startKey, endKey) {
return txn.Delete(startKey)
}
return nil
})
if err != nil {
return fmt.Errorf("delete %s : %v", path, err)
}
if startKey != nil && endKey != nil && !bytes.Equal(startKey, endKey) {
// Both startKey and endKey are set and differ, so delete the whole range.
_, err = store.client.DeleteRange(context.Background(), startKey, endKey, store.deleteRangeConcurrency)
if err != nil {
return fmt.Errorf("delete %s : %v", path, err)
}
}
return err
}
func (store *TikvStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (string, error) {
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}
func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (string, error) {
lastFileName := ""
directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix)
lastFileStart := directoryPrefix
if startFileName != "" {
lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName)
}
txn, err := store.getTxn(ctx)
if err != nil {
return lastFileName, err
}
err = txn.RunInTxn(func(txn *txnkv.KVTxn) error {
iter, err := txn.Iter(lastFileStart, nil)
if err != nil {
return err
}
defer iter.Close()
i := int64(0)
first := true
for iter.Valid() {
if first {
first = false
if !includeStartFile {
if iter.Valid() {
// Check whether the first item equals lastFileStart
if bytes.Equal(iter.Key(), lastFileStart) {
// The key equals lastFileStart and the start file is excluded,
// so skip it.
err = iter.Next()
if err != nil {
return err
}
continue
}
}
}
}
// Enforce the entry limit
if limit > 0 {
i++
if i > limit {
break
}
}
// Validate key prefix
key := iter.Key()
if !bytes.HasPrefix(key, directoryPrefix) {
break
}
value := iter.Value()
// Start process
fileName := getNameFromKey(key)
if fileName != "" {
// A file name was found, so build the Entry
entry := &filer.Entry{
FullPath: util.NewFullPath(string(dirPath), fileName),
}
// Update lastFileName
lastFileName = fileName
// Decode the value into the entry's attributes and chunks.
if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
// Decoding failed; log and return the decode error.
glog.V(0).Infof("list %s : %v", entry.FullPath, decodeErr)
return decodeErr
}
// Invoke the per-entry callback; if it returns false, stop iterating.
if !eachEntryFunc(entry) {
break
}
}
// End process
err = iter.Next()
if err != nil {
return err
}
}
return nil
})
if err != nil {
return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err)
}
return lastFileName, nil
}
// ~ Directory APIs
// Transaction Related APIs
func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) {
tx, err := store.client.Begin()
if err != nil {
return ctx, err
}
return context.WithValue(ctx, "tx", tx), nil
}
func (store *TikvStore) CommitTransaction(ctx context.Context) error {
if tx, ok := ctx.Value("tx").(*txnkv.KVTxn); ok {
return tx.Commit(context.Background())
}
return nil
}
func (store *TikvStore) RollbackTransaction(ctx context.Context) error {
if tx, ok := ctx.Value("tx").(*txnkv.KVTxn); ok {
return tx.Rollback()
}
return nil
}
// ~ Transaction Related APIs
// Transaction Wrapper
type TxnWrapper struct {
*txnkv.KVTxn
inContext bool
}
func (w *TxnWrapper) RunInTxn(f func(txn *txnkv.KVTxn) error) error {
err := f(w.KVTxn)
if !w.inContext {
if err != nil {
w.KVTxn.Rollback()
return err
}
w.KVTxn.Commit(context.Background())
return nil
}
return err
}
func (store *TikvStore) getTxn(ctx context.Context) (*TxnWrapper, error) {
if tx, ok := ctx.Value("tx").(*txnkv.KVTxn); ok {
return &TxnWrapper{tx, true}, nil
}
txn, err := store.client.Begin()
if err != nil {
return nil, err
}
return &TxnWrapper{txn, false}, nil
}
// ~ Transaction Wrapper
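getTxn either adopts a transaction that BeginTransaction stored on the context or starts a one-off transaction that RunInTxn commits on success and rolls back on error. A rough sketch of the context-scoped path, assuming the caller supplies two *filer.Entry values; insertPair is a hypothetical helper, not part of this change:

func insertPair(ctx context.Context, store *TikvStore, entryA, entryB *filer.Entry) error {
	// Both writes join the KVTxn that BeginTransaction places on the context.
	txCtx, err := store.BeginTransaction(ctx)
	if err != nil {
		return err
	}
	if err := store.InsertEntry(txCtx, entryA); err != nil {
		_ = store.RollbackTransaction(txCtx)
		return err
	}
	if err := store.InsertEntry(txCtx, entryB); err != nil {
		_ = store.RollbackTransaction(txCtx)
		return err
	}
	// A single commit covers both writes.
	return store.CommitTransaction(txCtx)
}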
// Encoding Functions
func hashToBytes(dir string) []byte {
h := sha1.New()
io.WriteString(h, dir)
b := h.Sum(nil)
return b
}
func generateKey(dirPath, fileName string) []byte {
key := hashToBytes(dirPath)
key = append(key, []byte(fileName)...)
return key
}
func getNameFromKey(key []byte) string {
return string(key[sha1.Size:])
}
func genDirectoryKeyPrefix(fullpath util.FullPath, startFileName string) (keyPrefix []byte) {
keyPrefix = hashToBytes(string(fullpath))
if len(startFileName) > 0 {
keyPrefix = append(keyPrefix, []byte(startFileName)...)
}
return keyPrefix
}
func isNotExists(err error) bool {
if err == nil {
return false
}
if err.Error() == "not exist" {
return true
}
return false
}
// ~ Encoding Functions
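Keys are the 20-byte SHA-1 of the directory path followed by the file name, so all children of a directory share a fixed-length prefix and getNameFromKey can recover the name by slicing past the hash. A small self-contained illustration of that layout (the paths are made up):

package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	dir, name := "/buckets/docs", "report.pdf"

	// Same construction as generateKey: sha1(dir) || name.
	h := sha1.Sum([]byte(dir))
	key := append(h[:], []byte(name)...)

	fmt.Printf("prefix: %x\n", key[:sha1.Size]) // 20-byte directory hash
	fmt.Printf("name:   %s\n", key[sha1.Size:]) // "report.pdf"
}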

View file

@ -0,0 +1,50 @@
//go:build tikv
// +build tikv
package tikv
import (
"context"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/tikv/client-go/v2/txnkv"
)
func (store *TikvStore) KvPut(ctx context.Context, key []byte, value []byte) error {
tw, err := store.getTxn(ctx)
if err != nil {
return err
}
return tw.RunInTxn(func(txn *txnkv.KVTxn) error {
return txn.Set(key, value)
})
}
func (store *TikvStore) KvGet(ctx context.Context, key []byte) ([]byte, error) {
tw, err := store.getTxn(ctx)
if err != nil {
return nil, err
}
var data []byte = nil
err = tw.RunInTxn(func(txn *txnkv.KVTxn) error {
val, err := txn.Get(context.TODO(), key)
if err == nil {
data = val
}
return err
})
if isNotExists(err) {
return data, filer.ErrKvNotFound
}
return data, err
}
func (store *TikvStore) KvDelete(ctx context.Context, key []byte) error {
tw, err := store.getTxn(ctx)
if err != nil {
return err
}
return tw.RunInTxn(func(txn *txnkv.KVTxn) error {
return txn.Delete(key)
})
}
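The KV helpers reuse the same transaction wrapper as the entry APIs, so a put/get/delete cycle follows the familiar pattern. A rough round-trip sketch, assuming context and fmt are imported; the key and value are placeholders:

func kvRoundTrip(ctx context.Context, store *TikvStore) error {
	key, value := []byte("example-key"), []byte("example-value") // placeholders
	if err := store.KvPut(ctx, key, value); err != nil {
		return err
	}
	got, err := store.KvGet(ctx, key) // returns filer.ErrKvNotFound if missing
	if err != nil {
		return err
	}
	if len(got) != len(value) {
		return fmt.Errorf("unexpected value length %d", len(got))
	}
	return store.KvDelete(ctx, key)
}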

View file

@ -39,11 +39,9 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
if err == nil {
if message.OldEntry != nil && message.NewEntry != nil {
if message.OldEntry.Name == message.NewEntry.Name {
// no need to invalidate
} else {
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
mc.invalidateFunc(oldKey)
if message.OldEntry.Name != message.NewEntry.Name {
newKey := util.NewFullPath(dir, message.NewEntry.Name)
mc.invalidateFunc(newKey)
}
@ -59,8 +57,12 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
}
return util.Retry("followMetaUpdates", func() error {
util.RetryForever("followMetaUpdates", func() error {
return pb.WithFilerClientFollowMetadata(client, "mount", dir, lastTsNs, selfSignature, processEventFn, true)
}, func(err error) bool {
glog.Errorf("follow metadata updates: %v", err)
return true
})
return nil
}
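The switch from util.Retry to util.RetryForever keeps the metadata subscription looping indefinitely; the error callback decides whether to continue, and here it logs and returns true. A stand-in sketch of that control flow, purely illustrative and not the real util implementation, which may also back off between attempts:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryForever is an illustrative stand-in for util.RetryForever: keep
// invoking fn, and on failure ask onError whether to try again.
func retryForever(name string, fn func() error, onError func(err error) bool) {
	for {
		if err := fn(); err == nil {
			return
		} else if !onError(err) {
			return
		}
		time.Sleep(100 * time.Millisecond) // assumed pause; the real helper may back off
	}
}

func main() {
	attempts := 0
	retryForever("demo", func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, func(err error) bool {
		fmt.Println("retrying after:", err)
		return true
	})
	fmt.Println("succeeded after", attempts, "attempts")
}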

View file

@ -58,7 +58,16 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath, writeOnly bool) filer.Sa
if wfs.option.VolumeServerAccess == "filerProxy" {
fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", wfs.getCurrentFiler(), fileId)
}
uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth)
uploadOption := &operation.UploadOption{
UploadUrl: fileUrl,
Filename: filename,
Cipher: wfs.option.Cipher,
IsInputCompressed: false,
MimeType: "",
PairMap: nil,
Jwt: auth,
}
uploadResult, err, data := operation.Upload(reader, uploadOption)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
return nil, "", "", fmt.Errorf("upload data: %v", err)
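The positional argument list of operation.Upload is replaced by an operation.UploadOption struct, so every parameter is named at the call site and new options can be added without touching each caller. Since this call and the message-broker call below build the same shape of option, a hypothetical shared constructor could look like the following (newPlainUploadOption is not part of the change):

// newPlainUploadOption covers the common case used by both refactored call
// sites: an uncompressed upload with no explicit mime type or extra pairs.
func newPlainUploadOption(uploadUrl, filename string, cipher bool, jwt security.EncodedJwt) *operation.UploadOption {
	return &operation.UploadOption{
		UploadUrl:         uploadUrl,
		Filename:          filename,
		Cipher:            cipher,
		IsInputCompressed: false,
		MimeType:          "",
		PairMap:           nil,
		Jwt:               jwt,
	}
}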

View file

@ -116,7 +116,7 @@ func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err err
// return a valid entry for the mount root
if string(fullpath) == wfs.option.FilerMountRootPath {
return &filer_pb.Entry{
Name: wfs.option.FilerMountRootPath,
Name: name,
IsDirectory: true,
Attributes: &filer_pb.FuseAttributes{
Mtime: wfs.option.MountMtime.Unix(),

View file

@ -3,8 +3,8 @@ package ftpd
import (
"crypto/tls"
"errors"
"fmt"
"net"
"github.com/chrislusf/seaweedfs/weed/util"
ftpserver "github.com/fclairamb/ftpserverlib"
"google.golang.org/grpc"
@ -51,7 +51,7 @@ func (s *SftpServer) GetSettings() (*ftpserver.Settings, error) {
return &ftpserver.Settings{
Listener: s.ftpListener,
ListenAddr: fmt.Sprintf("%s:%d", s.option.IpBind, s.option.Port),
ListenAddr: util.JoinHostPort(s.option.IpBind, s.option.Port),
PublicHost: s.option.IP,
PassiveTransferPortRange: portRange,
ActiveTransferPortNon20: true,
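Switching from fmt.Sprintf("%s:%d", ...) to util.JoinHostPort matters for IPv6 bind addresses, which need brackets around the host. The standard library's net.JoinHostPort shows the difference; the util helper presumably does the same while accepting an integer port:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// fmt.Sprintf produces an ambiguous address for IPv6 literals.
	fmt.Printf("%s:%d\n", "::1", 2121) // ::1:2121  (ambiguous)

	// net.JoinHostPort adds the required brackets.
	fmt.Println(net.JoinHostPort("::1", strconv.Itoa(2121))) // [::1]:2121
}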

View file

@ -88,7 +88,16 @@ func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConf
// upload data
targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth)
uploadOption := &operation.UploadOption{
UploadUrl: targetUrl,
Filename: "",
Cipher: broker.option.Cipher,
IsInputCompressed: false,
MimeType: "",
PairMap: nil,
Jwt: assignResult.Auth,
}
uploadResult, err := operation.UploadData(data, uploadOption)
if err != nil {
return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
}

View file

@ -22,6 +22,10 @@ type VolumeAssignRequest struct {
WritableVolumeCount uint32
}
type AssignResultReplica struct {
Url string `json:"url,omitempty"`
PublicUrl string `json:"publicUrl,omitempty"`
}
type AssignResult struct {
Fid string `json:"fid,omitempty"`
Url string `json:"url,omitempty"`
@ -29,6 +33,7 @@ type AssignResult struct {
Count uint64 `json:"count,omitempty"`
Error string `json:"error,omitempty"`
Auth security.EncodedJwt `json:"auth,omitempty"`
Replicas []AssignResultReplica `json:"replicas,omitempty"`
}
func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
@ -69,6 +74,12 @@ func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest
ret.PublicUrl = resp.PublicUrl
ret.Error = resp.Error
ret.Auth = security.EncodedJwt(resp.Auth)
for _, r := range resp.Replicas {
ret.Replicas = append(ret.Replicas, AssignResultReplica{
Url: r.Url,
PublicUrl: r.PublicUrl,
})
}
if resp.Error != "" {
return fmt.Errorf("assignRequest: %v", resp.Error)

Some files were not shown because too many files have changed in this diff.