mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
Merge branch 'master' into filerstore-tikv
This commit is contained in:
commit
e5f9ff983d
50
.github/workflows/binary_test.yml
vendored
Normal file
50
.github/workflows/binary_test.yml
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
name: "go: test building cross-platform binary"
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
workflow_dispatch: []
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
|
||||||
|
build:
|
||||||
|
name: Build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
goos: [linux, windows, darwin, freebsd, netbsd, openbsd]
|
||||||
|
goarch: [amd64, arm, arm64, 386]
|
||||||
|
exclude:
|
||||||
|
- goarch: arm
|
||||||
|
goos: darwin
|
||||||
|
- goarch: 386
|
||||||
|
goos: darwin
|
||||||
|
- goarch: arm
|
||||||
|
goos: windows
|
||||||
|
- goarch: arm64
|
||||||
|
goos: windows
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.head_ref }}/binary_test/${{ matrix.goos }}/${{ matrix.goarch }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
steps:
|
||||||
|
|
||||||
|
- name: Set up Go 1.x
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: ^1.13
|
||||||
|
id: go
|
||||||
|
|
||||||
|
- name: Check out code into the Go module directory
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Get dependencies
|
||||||
|
run: |
|
||||||
|
cd weed; go get -v -t -d ./...
|
||||||
|
if [ -f Gopkg.toml ]; then
|
||||||
|
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
|
||||||
|
dep ensure
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
run: cd weed; GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} go build -v .
|
2
.github/workflows/cleanup.yml
vendored
2
.github/workflows/cleanup.yml
vendored
|
@ -1,4 +1,4 @@
|
||||||
name: Cleanup
|
name: "chore: cleanup"
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
|
|
13
.github/workflows/container_latest.yml
vendored
13
.github/workflows/container_latest.yml
vendored
|
@ -1,4 +1,5 @@
|
||||||
name: Build Latest Containers
|
name: "docker: build latest containers"
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
|
@ -8,6 +9,7 @@ on:
|
||||||
jobs:
|
jobs:
|
||||||
build-latest:
|
build-latest:
|
||||||
runs-on: [ubuntu-latest]
|
runs-on: [ubuntu-latest]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
|
@ -15,7 +17,7 @@ jobs:
|
||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
id: docker_meta
|
id: docker_meta
|
||||||
uses: crazy-max/ghaction-docker-meta@v2
|
uses: docker/metadata-action@v3
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
chrislusf/seaweedfs
|
chrislusf/seaweedfs
|
||||||
|
@ -55,12 +57,13 @@ jobs:
|
||||||
context: ./docker
|
context: ./docker
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
file: ./docker/Dockerfile
|
file: ./docker/Dockerfile
|
||||||
platforms: linux/amd64
|
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
|
||||||
tags: ${{ steps.docker_meta.outputs.tags }}
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
labels: ${{ steps.docker_meta.outputs.labels }}
|
labels: ${{ steps.docker_meta.outputs.labels }}
|
||||||
|
|
||||||
build-dev:
|
build-dev:
|
||||||
runs-on: [ubuntu-latest]
|
runs-on: [ubuntu-latest]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
|
@ -68,7 +71,7 @@ jobs:
|
||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
id: docker_meta
|
id: docker_meta
|
||||||
uses: crazy-max/ghaction-docker-meta@v2
|
uses: docker/metadata-action@v3
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
chrislusf/seaweedfs
|
chrislusf/seaweedfs
|
||||||
|
@ -109,6 +112,6 @@ jobs:
|
||||||
context: ./docker
|
context: ./docker
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
file: ./docker/Dockerfile.go_build
|
file: ./docker/Dockerfile.go_build
|
||||||
platforms: linux/amd64
|
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
|
||||||
tags: ${{ steps.docker_meta.outputs.tags }}
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
labels: ${{ steps.docker_meta.outputs.labels }}
|
labels: ${{ steps.docker_meta.outputs.labels }}
|
||||||
|
|
13
.github/workflows/container_release.yml
vendored
13
.github/workflows/container_release.yml
vendored
|
@ -1,4 +1,5 @@
|
||||||
name: Build Release Containers
|
name: "docker: build release containers"
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
tags:
|
tags:
|
||||||
|
@ -8,6 +9,7 @@ on:
|
||||||
jobs:
|
jobs:
|
||||||
build-default:
|
build-default:
|
||||||
runs-on: [ubuntu-latest]
|
runs-on: [ubuntu-latest]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
|
@ -15,7 +17,7 @@ jobs:
|
||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
id: docker_meta
|
id: docker_meta
|
||||||
uses: crazy-max/ghaction-docker-meta@v2
|
uses: docker/metadata-action@v3
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
chrislusf/seaweedfs
|
chrislusf/seaweedfs
|
||||||
|
@ -58,11 +60,12 @@ jobs:
|
||||||
context: ./docker
|
context: ./docker
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
file: ./docker/Dockerfile.go_build
|
file: ./docker/Dockerfile.go_build
|
||||||
platforms: linux/amd64
|
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
|
||||||
tags: ${{ steps.docker_meta.outputs.tags }}
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
labels: ${{ steps.docker_meta.outputs.labels }}
|
labels: ${{ steps.docker_meta.outputs.labels }}
|
||||||
build-large:
|
build-large:
|
||||||
runs-on: [ubuntu-latest]
|
runs-on: [ubuntu-latest]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
|
@ -70,7 +73,7 @@ jobs:
|
||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
id: docker_meta
|
id: docker_meta
|
||||||
uses: crazy-max/ghaction-docker-meta@v2
|
uses: docker/metadata-action@v3
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
chrislusf/seaweedfs
|
chrislusf/seaweedfs
|
||||||
|
@ -113,6 +116,6 @@ jobs:
|
||||||
context: ./docker
|
context: ./docker
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
file: ./docker/Dockerfile.go_build_large
|
file: ./docker/Dockerfile.go_build_large
|
||||||
platforms: linux/amd64
|
platforms: linux/amd64, linux/arm, linux/arm64, linux/386
|
||||||
tags: ${{ steps.docker_meta.outputs.tags }}
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
labels: ${{ steps.docker_meta.outputs.labels }}
|
labels: ${{ steps.docker_meta.outputs.labels }}
|
||||||
|
|
53
.github/workflows/container_test.yml
vendored
Normal file
53
.github/workflows/container_test.yml
vendored
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
name: "docker: test building container images"
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
workflow_dispatch: []
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.head_ref }}/container_test
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-test:
|
||||||
|
runs-on: [ubuntu-latest]
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
platform: [ linux ]
|
||||||
|
arch: [ amd64, arm, arm64, 386 ]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Docker meta
|
||||||
|
id: docker_meta
|
||||||
|
uses: docker/metadata-action@v3
|
||||||
|
with:
|
||||||
|
images: |
|
||||||
|
chrislusf/seaweedfs
|
||||||
|
ghcr.io/chrislusf/seaweedfs
|
||||||
|
tags: |
|
||||||
|
type=raw,value=latest
|
||||||
|
labels: |
|
||||||
|
org.opencontainers.image.title=seaweedfs
|
||||||
|
org.opencontainers.image.vendor=Chris Lu
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v1
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v1
|
||||||
|
with:
|
||||||
|
buildkitd-flags: "--debug"
|
||||||
|
-
|
||||||
|
name: Build
|
||||||
|
uses: docker/build-push-action@v2
|
||||||
|
with:
|
||||||
|
context: ./docker
|
||||||
|
push: false
|
||||||
|
file: ./docker/Dockerfile
|
||||||
|
platforms: ${{ matrix.platform }}/${{ matrix.arch }}
|
||||||
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
|
labels: ${{ steps.docker_meta.outputs.labels }}
|
6
.github/workflows/go.yml
vendored
6
.github/workflows/go.yml
vendored
|
@ -1,4 +1,4 @@
|
||||||
name: Go
|
name: "go: test building binary"
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
|
@ -6,6 +6,10 @@ on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches: [ master ]
|
branches: [ master ]
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.head_ref }}/go
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
|
||||||
build:
|
build:
|
||||||
|
|
14
.github/workflows/release.yml
vendored
14
.github/workflows/release.yml
vendored
|
@ -1,4 +1,4 @@
|
||||||
name: Build Dev Binaries
|
name: "go: build dev binaries"
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
|
@ -11,17 +11,17 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
goos: [linux, windows, darwin, freebsd, netbsd, openbsd ]
|
goos: [linux, windows, darwin, freebsd, netbsd, openbsd]
|
||||||
goarch: [amd64, arm, arm64, 386]
|
goarch: [amd64, arm, arm64, 386]
|
||||||
exclude:
|
exclude:
|
||||||
- goarch: arm
|
- goarch: arm
|
||||||
goos: darwin
|
goos: darwin
|
||||||
- goarch: 386
|
- goarch: 386
|
||||||
goos: darwin
|
goos: darwin
|
||||||
- goarch: arm64
|
|
||||||
goos: windows
|
|
||||||
- goarch: arm
|
- goarch: arm
|
||||||
goos: windows
|
goos: windows
|
||||||
|
- goarch: arm64
|
||||||
|
goos: windows
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
|
@ -37,9 +37,8 @@ jobs:
|
||||||
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
|
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
|
||||||
|
|
||||||
- name: Go Release Binaries
|
- name: Go Release Binaries
|
||||||
uses: wangyoucao577/go-release-action@v1.17
|
uses: wangyoucao577/go-release-action@v1.20
|
||||||
with:
|
with:
|
||||||
goversion: 1.17
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
goos: ${{ matrix.goos }}
|
goos: ${{ matrix.goos }}
|
||||||
goarch: ${{ matrix.goarch }}
|
goarch: ${{ matrix.goarch }}
|
||||||
|
@ -54,9 +53,8 @@ jobs:
|
||||||
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
|
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
|
||||||
|
|
||||||
- name: Go Release Binaries
|
- name: Go Release Binaries
|
||||||
uses: wangyoucao577/go-release-action@v1.17
|
uses: wangyoucao577/go-release-action@v1.20
|
||||||
with:
|
with:
|
||||||
goversion: 1.17
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
goos: ${{ matrix.goos }}
|
goos: ${{ matrix.goos }}
|
||||||
goarch: ${{ matrix.goarch }}
|
goarch: ${{ matrix.goarch }}
|
||||||
|
|
16
.github/workflows/release_binaries.yml
vendored
16
.github/workflows/release_binaries.yml
vendored
|
@ -1,6 +1,6 @@
|
||||||
# This is a basic workflow to help you get started with Actions
|
# This is a basic workflow to help you get started with Actions
|
||||||
|
|
||||||
name: Build Versioned Releases
|
name: "go: build versioned binaries"
|
||||||
|
|
||||||
on:
|
on:
|
||||||
release:
|
release:
|
||||||
|
@ -15,22 +15,25 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
goos: [linux, windows, darwin, freebsd ]
|
goos: [linux, windows, darwin, freebsd, netbsd, openbsd]
|
||||||
goarch: [amd64, arm]
|
goarch: [amd64, arm, arm64, 386]
|
||||||
exclude:
|
exclude:
|
||||||
- goarch: arm
|
- goarch: arm
|
||||||
goos: darwin
|
goos: darwin
|
||||||
|
- goarch: 386
|
||||||
|
goos: darwin
|
||||||
- goarch: arm
|
- goarch: arm
|
||||||
goos: windows
|
goos: windows
|
||||||
|
- goarch: arm64
|
||||||
|
goos: windows
|
||||||
|
|
||||||
# Steps represent a sequence of tasks that will be executed as part of the job
|
# Steps represent a sequence of tasks that will be executed as part of the job
|
||||||
steps:
|
steps:
|
||||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v2
|
||||||
- name: Go Release Binaries
|
- name: Go Release Binaries
|
||||||
uses: wangyoucao577/go-release-action@v1.19
|
uses: wangyoucao577/go-release-action@v1.20
|
||||||
with:
|
with:
|
||||||
goversion: 1.17
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
goos: ${{ matrix.goos }}
|
goos: ${{ matrix.goos }}
|
||||||
goarch: ${{ matrix.goarch }}
|
goarch: ${{ matrix.goarch }}
|
||||||
|
@ -43,9 +46,8 @@ jobs:
|
||||||
binary_name: weed
|
binary_name: weed
|
||||||
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
|
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
|
||||||
- name: Go Release Large Disk Binaries
|
- name: Go Release Large Disk Binaries
|
||||||
uses: wangyoucao577/go-release-action@v1.19
|
uses: wangyoucao577/go-release-action@v1.20
|
||||||
with:
|
with:
|
||||||
goversion: 1.17
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
goos: ${{ matrix.goos }}
|
goos: ${{ matrix.goos }}
|
||||||
goarch: ${{ matrix.goarch }}
|
goarch: ${{ matrix.goarch }}
|
||||||
|
|
|
@ -8,7 +8,9 @@ RUN \
|
||||||
elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
|
elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
|
||||||
elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
|
elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \
|
||||||
elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
|
elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \
|
||||||
elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \
|
elif [ $(uname -m) == "armv6l" ]; then echo "arm"; \
|
||||||
|
elif [ $(uname -m) == "s390x" ]; then echo "s390x"; \
|
||||||
|
elif [ $(uname -m) == "ppc64le" ]; then echo "ppc64le"; fi;) && \
|
||||||
echo "Building for $ARCH" 1>&2 && \
|
echo "Building for $ARCH" 1>&2 && \
|
||||||
SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
|
SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \
|
||||||
SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
|
SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \
|
||||||
|
|
15
go.mod
15
go.mod
|
@ -19,6 +19,7 @@ require (
|
||||||
github.com/cespare/xxhash v1.1.0
|
github.com/cespare/xxhash v1.1.0
|
||||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||||
github.com/chrislusf/raft v1.0.7
|
github.com/chrislusf/raft v1.0.7
|
||||||
|
github.com/colinmarc/hdfs/v2 v2.2.0
|
||||||
github.com/coreos/go-semver v0.3.0 // indirect
|
github.com/coreos/go-semver v0.3.0 // indirect
|
||||||
github.com/coreos/go-systemd/v22 v22.0.0 // indirect
|
github.com/coreos/go-systemd/v22 v22.0.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
|
@ -58,9 +59,10 @@ require (
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
||||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||||
github.com/hashicorp/go-multierror v1.0.0 // indirect
|
github.com/hashicorp/go-multierror v1.0.0 // indirect
|
||||||
github.com/hashicorp/go-uuid v1.0.1 // indirect
|
github.com/hashicorp/go-uuid v1.0.2 // indirect
|
||||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
github.com/jcmturner/gofork v1.0.0 // indirect
|
github.com/jcmturner/gofork v1.0.0 // indirect
|
||||||
|
github.com/jcmturner/gokrb5/v8 v8.4.1
|
||||||
github.com/jinzhu/copier v0.2.8
|
github.com/jinzhu/copier v0.2.8
|
||||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||||
github.com/json-iterator/go v1.1.11
|
github.com/json-iterator/go v1.1.11
|
||||||
|
@ -118,12 +120,11 @@ require (
|
||||||
github.com/tikv/client-go v0.0.0-20210412055529-d811a08025fa
|
github.com/tikv/client-go v0.0.0-20210412055529-d811a08025fa
|
||||||
github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210824090536-16d902a3c7e5 // indirect
|
github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210824090536-16d902a3c7e5 // indirect
|
||||||
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
|
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
|
||||||
|
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
|
||||||
github.com/valyala/bytebufferpool v1.0.0
|
github.com/valyala/bytebufferpool v1.0.0
|
||||||
github.com/viant/assertly v0.5.4 // indirect
|
github.com/viant/assertly v0.5.4 // indirect
|
||||||
github.com/viant/ptrie v0.3.0
|
github.com/viant/ptrie v0.3.0
|
||||||
github.com/viant/toolbox v0.33.2 // indirect
|
github.com/viant/toolbox v0.33.2 // indirect
|
||||||
github.com/willf/bitset v1.1.10 // indirect
|
|
||||||
github.com/willf/bloom v2.0.3+incompatible
|
|
||||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||||
github.com/xdg-go/scram v1.0.2 // indirect
|
github.com/xdg-go/scram v1.0.2 // indirect
|
||||||
github.com/xdg-go/stringprep v1.0.2 // indirect
|
github.com/xdg-go/stringprep v1.0.2 // indirect
|
||||||
|
@ -166,6 +167,14 @@ require (
|
||||||
modernc.org/token v1.0.0 // indirect
|
modernc.org/token v1.0.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/d4l3k/messagediff v1.2.1 // indirect
|
||||||
|
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
|
||||||
|
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
|
||||||
|
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
|
||||||
|
github.com/jcmturner/rpc/v2 v2.0.2 // indirect
|
||||||
|
)
|
||||||
|
|
||||||
// replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
|
// replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse
|
||||||
// replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft
|
// replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft
|
||||||
|
|
||||||
|
|
26
go.sum
26
go.sum
|
@ -169,6 +169,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
|
||||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
|
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
|
||||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||||
|
github.com/colinmarc/hdfs/v2 v2.2.0 h1:4AaIlTq+/sWmeqYhI0dX8bD4YrMQM990tRjm636FkGM=
|
||||||
|
github.com/colinmarc/hdfs/v2 v2.2.0/go.mod h1:Wss6n3mtaZyRwWaqtSH+6ge01qT0rw9dJJmvoUnIQ/E=
|
||||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
github.com/coreos/etcd v3.3.25+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
github.com/coreos/etcd v3.3.25+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
|
@ -191,6 +193,8 @@ github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZ
|
||||||
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
|
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
|
||||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
|
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
|
||||||
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
|
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
|
||||||
|
github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U=
|
||||||
|
github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
@ -363,6 +367,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
||||||
github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
|
github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
|
||||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
@ -434,6 +439,10 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
|
||||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||||
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
|
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
|
||||||
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||||
|
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
|
||||||
|
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||||
|
github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=
|
||||||
|
github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||||
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
|
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
|
||||||
|
@ -466,8 +475,9 @@ github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa
|
||||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
|
||||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
@ -484,10 +494,20 @@ github.com/hypnoglow/gormzap v0.3.0/go.mod h1:5Wom8B7Jl2oK0Im9hs6KQ+Kl92w4Y7gKCr
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||||
|
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
|
||||||
|
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
|
||||||
|
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
|
||||||
|
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
|
||||||
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
||||||
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
|
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
|
||||||
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
|
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
|
||||||
|
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
|
||||||
|
github.com/jcmturner/gokrb5/v8 v8.4.1 h1:IGSJfqBzMS6TA0oJ7DxXdyzPK563QHa8T2IqER2ggyQ=
|
||||||
|
github.com/jcmturner/gokrb5/v8 v8.4.1/go.mod h1:T1hnNppQsBtxW0tCHMHTkAt8n/sABdzZgZdoFrZaZNM=
|
||||||
|
github.com/jcmturner/rpc/v2 v2.0.2 h1:gMB4IwRXYsWw4Bc6o/az2HJgFUA1ffSh90i26ZJ6Xl0=
|
||||||
|
github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||||
github.com/jinzhu/copier v0.2.8 h1:N8MbL5niMwE3P4dOwurJixz5rMkKfujmMRFmAanSzWE=
|
github.com/jinzhu/copier v0.2.8 h1:N8MbL5niMwE3P4dOwurJixz5rMkKfujmMRFmAanSzWE=
|
||||||
github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
|
github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
|
||||||
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
|
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
|
||||||
|
@ -675,6 +695,7 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
|
||||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
|
github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
|
||||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
|
github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
|
||||||
|
@ -906,6 +927,8 @@ github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 h1:6iRwZdrFUzbcVYZwa
|
||||||
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365/go.mod h1:zj0GJHGvyf1ed3Jm/Tb4830c/ZKDq+YoLsCt2rGQuT0=
|
github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365/go.mod h1:zj0GJHGvyf1ed3Jm/Tb4830c/ZKDq+YoLsCt2rGQuT0=
|
||||||
github.com/twmb/murmur3 v1.1.3 h1:D83U0XYKcHRYwYIpBKf3Pks91Z0Byda/9SJ8B6EMRcA=
|
github.com/twmb/murmur3 v1.1.3 h1:D83U0XYKcHRYwYIpBKf3Pks91Z0Byda/9SJ8B6EMRcA=
|
||||||
github.com/twmb/murmur3 v1.1.3/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
github.com/twmb/murmur3 v1.1.3/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||||
|
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 h1:QEePdg0ty2r0t1+qwfZmQ4OOl/MB2UXIeJSpIZv56lg=
|
||||||
|
github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43/go.mod h1:OYRfF6eb5wY9VRFkXJH8FFBi3plw2v+giaIu7P054pM=
|
||||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||||
github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0=
|
github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0=
|
||||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||||
|
@ -1026,6 +1049,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||||
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
description: SeaweedFS
|
description: SeaweedFS
|
||||||
name: seaweedfs
|
name: seaweedfs
|
||||||
appVersion: "2.64"
|
appVersion: "2.65"
|
||||||
version: "2.64"
|
version: "2.65"
|
||||||
|
|
|
@ -72,10 +72,6 @@ public class FilerClient extends FilerGrpcClient {
|
||||||
}
|
}
|
||||||
return entry;
|
return entry;
|
||||||
} else {
|
} else {
|
||||||
String fileId = entry.getChunks(0).getFileId();
|
|
||||||
if (fileId != null && fileId.length() != 0) {
|
|
||||||
return entry;
|
|
||||||
}
|
|
||||||
FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
|
FilerProto.Entry.Builder entryBuilder = entry.toBuilder();
|
||||||
entryBuilder.clearChunks();
|
entryBuilder.clearChunks();
|
||||||
long fileSize = 0;
|
long fileSize = 0;
|
||||||
|
|
|
@ -64,10 +64,11 @@ public class SeaweedRead {
|
||||||
startOffset += gap;
|
startOffset += gap;
|
||||||
}
|
}
|
||||||
|
|
||||||
FilerProto.Locations locations = knownLocations.get(parseVolumeId(chunkView.fileId));
|
String volumeId = parseVolumeId(chunkView.fileId);
|
||||||
|
FilerProto.Locations locations = knownLocations.get(volumeId);
|
||||||
if (locations == null || locations.getLocationsCount() == 0) {
|
if (locations == null || locations.getLocationsCount() == 0) {
|
||||||
LOG.error("failed to locate {}", chunkView.fileId);
|
LOG.error("failed to locate {}", chunkView.fileId);
|
||||||
// log here!
|
volumeIdCache.clearLocations(volumeId);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -26,6 +26,13 @@ public class VolumeIdCache {
|
||||||
return this.cache.getIfPresent(volumeId);
|
return this.cache.getIfPresent(volumeId);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void clearLocations(String volumeId) {
|
||||||
|
if (this.cache == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
this.cache.invalidate(volumeId);
|
||||||
|
}
|
||||||
|
|
||||||
public void setLocations(String volumeId, FilerProto.Locations locations) {
|
public void setLocations(String volumeId, FilerProto.Locations locations) {
|
||||||
if (this.cache == null) {
|
if (this.cache == null) {
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -305,6 +305,7 @@ message GetFilerConfigurationResponse {
|
||||||
string metrics_address = 9;
|
string metrics_address = 9;
|
||||||
int32 metrics_interval_sec = 10;
|
int32 metrics_interval_sec = 10;
|
||||||
string version = 11;
|
string version = 11;
|
||||||
|
string cluster_id = 12;
|
||||||
}
|
}
|
||||||
|
|
||||||
message SubscribeMetadataRequest {
|
message SubscribeMetadataRequest {
|
||||||
|
@ -336,6 +337,7 @@ message KeepConnectedResponse {
|
||||||
message LocateBrokerRequest {
|
message LocateBrokerRequest {
|
||||||
string resource = 1;
|
string resource = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message LocateBrokerResponse {
|
message LocateBrokerResponse {
|
||||||
bool found = 1;
|
bool found = 1;
|
||||||
// if found, send the exact address
|
// if found, send the exact address
|
||||||
|
@ -386,49 +388,6 @@ message FilerConf {
|
||||||
/////////////////////////
|
/////////////////////////
|
||||||
// Remote Storage related
|
// Remote Storage related
|
||||||
/////////////////////////
|
/////////////////////////
|
||||||
message RemoteConf {
|
|
||||||
string type = 1;
|
|
||||||
string name = 2;
|
|
||||||
string s3_access_key = 4;
|
|
||||||
string s3_secret_key = 5;
|
|
||||||
string s3_region = 6;
|
|
||||||
string s3_endpoint = 7;
|
|
||||||
string s3_storage_class = 8;
|
|
||||||
bool s3_force_path_style = 9;
|
|
||||||
|
|
||||||
string gcs_google_application_credentials = 10;
|
|
||||||
|
|
||||||
string azure_account_name = 15;
|
|
||||||
string azure_account_key = 16;
|
|
||||||
|
|
||||||
string backblaze_key_id = 20;
|
|
||||||
string backblaze_application_key = 21;
|
|
||||||
string backblaze_endpoint = 22;
|
|
||||||
|
|
||||||
string aliyun_access_key = 25;
|
|
||||||
string aliyun_secret_key = 26;
|
|
||||||
string aliyun_endpoint = 27;
|
|
||||||
string aliyun_region = 28;
|
|
||||||
|
|
||||||
string tencent_secret_id = 30;
|
|
||||||
string tencent_secret_key = 31;
|
|
||||||
string tencent_endpoint = 32;
|
|
||||||
|
|
||||||
string baidu_access_key = 35;
|
|
||||||
string baidu_secret_key = 36;
|
|
||||||
string baidu_endpoint = 37;
|
|
||||||
string baidu_region = 38;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
message RemoteStorageMapping {
|
|
||||||
map<string,RemoteStorageLocation> mappings = 1;
|
|
||||||
}
|
|
||||||
message RemoteStorageLocation {
|
|
||||||
string name = 1;
|
|
||||||
string bucket = 2;
|
|
||||||
string path = 3;
|
|
||||||
}
|
|
||||||
message DownloadToLocalRequest {
|
message DownloadToLocalRequest {
|
||||||
string directory = 1;
|
string directory = 1;
|
||||||
string name = 2;
|
string name = 2;
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -7,6 +7,7 @@ import (
|
||||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/replication/source"
|
"github.com/chrislusf/seaweedfs/weed/replication/source"
|
||||||
"github.com/chrislusf/seaweedfs/weed/security"
|
"github.com/chrislusf/seaweedfs/weed/security"
|
||||||
|
@ -72,13 +73,6 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
|
||||||
dir := *remoteSyncOptions.dir
|
dir := *remoteSyncOptions.dir
|
||||||
filerAddress := *remoteSyncOptions.filerAddress
|
filerAddress := *remoteSyncOptions.filerAddress
|
||||||
|
|
||||||
// read filer remote storage mount mappings
|
|
||||||
_, _, remoteStorageMountLocation, storageConf, detectErr := filer.DetectMountInfo(grpcDialOption, filerAddress, dir)
|
|
||||||
if detectErr != nil {
|
|
||||||
fmt.Printf("read mount info: %v", detectErr)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
filerSource := &source.FilerSource{}
|
filerSource := &source.FilerSource{}
|
||||||
filerSource.DoInitialize(
|
filerSource.DoInitialize(
|
||||||
filerAddress,
|
filerAddress,
|
||||||
|
@ -89,7 +83,7 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
|
||||||
|
|
||||||
fmt.Printf("synchronize %s to remote storage...\n", dir)
|
fmt.Printf("synchronize %s to remote storage...\n", dir)
|
||||||
util.RetryForever("filer.remote.sync "+dir, func() error {
|
util.RetryForever("filer.remote.sync "+dir, func() error {
|
||||||
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir, storageConf, remoteStorageMountLocation)
|
return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
|
||||||
}, func(err error) bool {
|
}, func(err error) bool {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("synchronize %s: %v", dir, err)
|
glog.Errorf("synchronize %s: %v", dir, err)
|
||||||
|
@ -100,7 +94,13 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string, remoteStorage *filer_pb.RemoteConf, remoteStorageMountLocation *filer_pb.RemoteStorageLocation) error {
|
func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error {
|
||||||
|
|
||||||
|
// read filer remote storage mount mappings
|
||||||
|
_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, *option.filerAddress, mountedDir)
|
||||||
|
if detectErr != nil {
|
||||||
|
return fmt.Errorf("read mount info: %v", detectErr)
|
||||||
|
}
|
||||||
|
|
||||||
dirHash := util.HashStringToLong(mountedDir)
|
dirHash := util.HashStringToLong(mountedDir)
|
||||||
|
|
||||||
|
@ -115,11 +115,15 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
|
||||||
}
|
}
|
||||||
|
|
||||||
lastOffsetTsNs, err := getOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash))
|
lastOffsetTsNs, err := getOffset(option.grpcDialOption, *option.filerAddress, RemoteSyncKeyPrefix, int32(dirHash))
|
||||||
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
|
if mountedDirEntry != nil {
|
||||||
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
|
if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
|
||||||
glog.V(0).Infof("resume from %v", lastOffsetTs)
|
lastOffsetTs = time.Unix(0, lastOffsetTsNs)
|
||||||
|
glog.V(0).Infof("resume from %v", lastOffsetTs)
|
||||||
|
} else {
|
||||||
|
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
|
lastOffsetTs = time.Now()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
lastOffsetTs = time.Now().Add(-*option.timeAgo)
|
lastOffsetTs = time.Now().Add(-*option.timeAgo)
|
||||||
|
@ -160,6 +164,10 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
|
||||||
if message.OldEntry != nil && message.NewEntry == nil {
|
if message.OldEntry != nil && message.NewEntry == nil {
|
||||||
glog.V(2).Infof("delete: %+v", resp)
|
glog.V(2).Infof("delete: %+v", resp)
|
||||||
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
|
||||||
|
if message.OldEntry.IsDirectory {
|
||||||
|
glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
|
||||||
|
return client.RemoveDirectory(dest)
|
||||||
|
}
|
||||||
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
|
glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
|
||||||
return client.DeleteFile(dest)
|
return client.DeleteFile(dest)
|
||||||
}
|
}
|
||||||
|
@ -206,10 +214,10 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
|
||||||
"filer.remote.sync", mountedDir, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
|
"filer.remote.sync", mountedDir, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *filer_pb.RemoteStorageLocation) *filer_pb.RemoteStorageLocation {
|
func toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *remote_pb.RemoteStorageLocation) *remote_pb.RemoteStorageLocation {
|
||||||
source := string(sourcePath[len(mountDir):])
|
source := string(sourcePath[len(mountDir):])
|
||||||
dest := util.FullPath(remoteMountLocation.Path).Child(source)
|
dest := util.FullPath(remoteMountLocation.Path).Child(source)
|
||||||
return &filer_pb.RemoteStorageLocation{
|
return &remote_pb.RemoteStorageLocation{
|
||||||
Name: remoteMountLocation.Name,
|
Name: remoteMountLocation.Name,
|
||||||
Bucket: remoteMountLocation.Bucket,
|
Bucket: remoteMountLocation.Bucket,
|
||||||
Path: string(dest),
|
Path: string(dest),
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
|
|
||||||
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/azure"
|
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/azure"
|
||||||
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/gcs"
|
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/gcs"
|
||||||
|
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/hdfs"
|
||||||
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3"
|
_ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3"
|
||||||
|
|
||||||
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
|
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
@ -21,13 +22,13 @@ const REMOTE_STORAGE_MOUNT_FILE = "mount.mapping"
|
||||||
|
|
||||||
type FilerRemoteStorage struct {
|
type FilerRemoteStorage struct {
|
||||||
rules ptrie.Trie
|
rules ptrie.Trie
|
||||||
storageNameToConf map[string]*filer_pb.RemoteConf
|
storageNameToConf map[string]*remote_pb.RemoteConf
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFilerRemoteStorage() (rs *FilerRemoteStorage) {
|
func NewFilerRemoteStorage() (rs *FilerRemoteStorage) {
|
||||||
rs = &FilerRemoteStorage{
|
rs = &FilerRemoteStorage{
|
||||||
rules: ptrie.New(),
|
rules: ptrie.New(),
|
||||||
storageNameToConf: make(map[string]*filer_pb.RemoteConf),
|
storageNameToConf: make(map[string]*remote_pb.RemoteConf),
|
||||||
}
|
}
|
||||||
return rs
|
return rs
|
||||||
}
|
}
|
||||||
|
@ -56,7 +57,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
|
||||||
if !strings.HasSuffix(entry.Name(), REMOTE_STORAGE_CONF_SUFFIX) {
|
if !strings.HasSuffix(entry.Name(), REMOTE_STORAGE_CONF_SUFFIX) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
conf := &filer_pb.RemoteConf{}
|
conf := &remote_pb.RemoteConf{}
|
||||||
if err := proto.Unmarshal(entry.Content, conf); err != nil {
|
if err := proto.Unmarshal(entry.Content, conf); err != nil {
|
||||||
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, entry.Name(), err)
|
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, entry.Name(), err)
|
||||||
}
|
}
|
||||||
|
@ -66,7 +67,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err error) {
|
func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err error) {
|
||||||
mappings := &filer_pb.RemoteStorageMapping{}
|
mappings := &remote_pb.RemoteStorageMapping{}
|
||||||
if err := proto.Unmarshal(data, mappings); err != nil {
|
if err := proto.Unmarshal(data, mappings); err != nil {
|
||||||
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, err)
|
return fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, err)
|
||||||
}
|
}
|
||||||
|
@ -76,23 +77,23 @@ func (rs *FilerRemoteStorage) loadRemoteStorageMountMapping(data []byte) (err er
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *filer_pb.RemoteStorageLocation) {
|
func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc *remote_pb.RemoteStorageLocation) {
|
||||||
rs.rules.Put([]byte(dir+"/"), loc)
|
rs.rules.Put([]byte(dir+"/"), loc)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *filer_pb.RemoteStorageLocation) {
|
func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *remote_pb.RemoteStorageLocation) {
|
||||||
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
|
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
|
||||||
mountDir = util.FullPath(string(key[:len(key)-1]))
|
mountDir = util.FullPath(string(key[:len(key)-1]))
|
||||||
remoteLocation = value.(*filer_pb.RemoteStorageLocation)
|
remoteLocation = value.(*remote_pb.RemoteStorageLocation)
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
|
func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) {
|
||||||
var storageLocation *filer_pb.RemoteStorageLocation
|
var storageLocation *remote_pb.RemoteStorageLocation
|
||||||
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
|
rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool {
|
||||||
storageLocation = value.(*filer_pb.RemoteStorageLocation)
|
storageLocation = value.(*remote_pb.RemoteStorageLocation)
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -104,7 +105,7 @@ func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client r
|
||||||
return rs.GetRemoteStorageClient(storageLocation.Name)
|
return rs.GetRemoteStorageClient(storageLocation.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *filer_pb.RemoteConf, found bool) {
|
func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) {
|
||||||
remoteConf, found = rs.storageNameToConf[storageName]
|
remoteConf, found = rs.storageNameToConf[storageName]
|
||||||
if !found {
|
if !found {
|
||||||
return
|
return
|
||||||
|
@ -118,9 +119,9 @@ func (rs *FilerRemoteStorage) GetRemoteStorageClient(storageName string) (client
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.RemoteStorageMapping, err error) {
|
func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.RemoteStorageMapping, err error) {
|
||||||
mappings = &filer_pb.RemoteStorageMapping{
|
mappings = &remote_pb.RemoteStorageMapping{
|
||||||
Mappings: make(map[string]*filer_pb.RemoteStorageLocation),
|
Mappings: make(map[string]*remote_pb.RemoteStorageLocation),
|
||||||
}
|
}
|
||||||
if len(oldContent) > 0 {
|
if len(oldContent) > 0 {
|
||||||
if err = proto.Unmarshal(oldContent, mappings); err != nil {
|
if err = proto.Unmarshal(oldContent, mappings); err != nil {
|
||||||
|
@ -130,7 +131,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *filer_pb.Remot
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func AddRemoteStorageMapping(oldContent []byte, dir string, storageLocation *filer_pb.RemoteStorageLocation) (newContent []byte, err error) {
|
func AddRemoteStorageMapping(oldContent []byte, dir string, storageLocation *remote_pb.RemoteStorageLocation) (newContent []byte, err error) {
|
||||||
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
|
mappings, unmarshalErr := UnmarshalRemoteStorageMappings(oldContent)
|
||||||
if unmarshalErr != nil {
|
if unmarshalErr != nil {
|
||||||
// skip
|
// skip
|
||||||
|
@ -162,7 +163,7 @@ func RemoveRemoteStorageMapping(oldContent []byte, dir string) (newContent []byt
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *filer_pb.RemoteStorageMapping, readErr error) {
|
func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *remote_pb.RemoteStorageMapping, readErr error) {
|
||||||
var oldContent []byte
|
var oldContent []byte
|
||||||
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||||
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
|
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)
|
||||||
|
@ -179,7 +180,7 @@ func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (map
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *filer_pb.RemoteConf, readErr error) {
|
func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *remote_pb.RemoteConf, readErr error) {
|
||||||
var oldContent []byte
|
var oldContent []byte
|
||||||
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
|
||||||
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX)
|
oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX)
|
||||||
|
@ -189,7 +190,7 @@ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string,
|
||||||
}
|
}
|
||||||
|
|
||||||
// unmarshal storage configuration
|
// unmarshal storage configuration
|
||||||
conf = &filer_pb.RemoteConf{}
|
conf = &remote_pb.RemoteConf{}
|
||||||
if unMarshalErr := proto.Unmarshal(oldContent, conf); unMarshalErr != nil {
|
if unMarshalErr := proto.Unmarshal(oldContent, conf); unMarshalErr != nil {
|
||||||
readErr = fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
|
readErr = fmt.Errorf("unmarshal %s/%s: %v", DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
|
||||||
return
|
return
|
||||||
|
@ -198,7 +199,7 @@ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string,
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir string) (*filer_pb.RemoteStorageMapping, string, *filer_pb.RemoteStorageLocation, *filer_pb.RemoteConf, error) {
|
func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir string) (*remote_pb.RemoteStorageMapping, string, *remote_pb.RemoteStorageLocation, *remote_pb.RemoteConf, error) {
|
||||||
|
|
||||||
mappings, listErr := ReadMountMappings(grpcDialOption, filerAddress)
|
mappings, listErr := ReadMountMappings(grpcDialOption, filerAddress)
|
||||||
if listErr != nil {
|
if listErr != nil {
|
||||||
|
@ -209,7 +210,7 @@ func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir st
|
||||||
}
|
}
|
||||||
|
|
||||||
var localMountedDir string
|
var localMountedDir string
|
||||||
var remoteStorageMountedLocation *filer_pb.RemoteStorageLocation
|
var remoteStorageMountedLocation *remote_pb.RemoteStorageLocation
|
||||||
for k, loc := range mappings.Mappings {
|
for k, loc := range mappings.Mappings {
|
||||||
if strings.HasPrefix(dir, k) {
|
if strings.HasPrefix(dir, k) {
|
||||||
localMountedDir, remoteStorageMountedLocation = k, loc
|
localMountedDir, remoteStorageMountedLocation = k, loc
|
||||||
|
|
|
@ -1,20 +1,20 @@
|
||||||
package filer
|
package filer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
|
func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
|
||||||
conf := &filer_pb.RemoteConf{
|
conf := &remote_pb.RemoteConf{
|
||||||
Name: "s7",
|
Name: "s7",
|
||||||
Type: "s3",
|
Type: "s3",
|
||||||
}
|
}
|
||||||
rs := NewFilerRemoteStorage()
|
rs := NewFilerRemoteStorage()
|
||||||
rs.storageNameToConf[conf.Name] = conf
|
rs.storageNameToConf[conf.Name] = conf
|
||||||
|
|
||||||
rs.mapDirectoryToRemoteStorage("/a/b/c", &filer_pb.RemoteStorageLocation{
|
rs.mapDirectoryToRemoteStorage("/a/b/c", &remote_pb.RemoteStorageLocation{
|
||||||
Name: "s7",
|
Name: "s7",
|
||||||
Bucket: "some",
|
Bucket: "some",
|
||||||
Path: "/dir",
|
Path: "/dir",
|
||||||
|
|
|
@ -2,8 +2,8 @@ package filer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -11,21 +11,8 @@ func (entry *Entry) IsInRemoteOnly() bool {
|
||||||
return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
|
return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data []byte, err error) {
|
func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, fp util.FullPath) *remote_pb.RemoteStorageLocation {
|
||||||
client, _, found := f.RemoteStorage.GetRemoteStorageClient(entry.Remote.StorageName)
|
remoteLocation := &remote_pb.RemoteStorageLocation{
|
||||||
if !found {
|
|
||||||
return nil, fmt.Errorf("remote storage %v not found", entry.Remote.StorageName)
|
|
||||||
}
|
|
||||||
|
|
||||||
mountDir, remoteLoation := f.RemoteStorage.FindMountDirectory(entry.FullPath)
|
|
||||||
|
|
||||||
sourceLoc := MapFullPathToRemoteStorageLocation(mountDir, remoteLoation, entry.FullPath)
|
|
||||||
|
|
||||||
return client.ReadFile(sourceLoc, offset, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, fp util.FullPath) *filer_pb.RemoteStorageLocation {
|
|
||||||
remoteLocation := &filer_pb.RemoteStorageLocation{
|
|
||||||
Name: remoteMountedLocation.Name,
|
Name: remoteMountedLocation.Name,
|
||||||
Bucket: remoteMountedLocation.Bucket,
|
Bucket: remoteMountedLocation.Bucket,
|
||||||
Path: remoteMountedLocation.Path,
|
Path: remoteMountedLocation.Path,
|
||||||
|
@ -34,11 +21,11 @@ func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMou
|
||||||
return remoteLocation
|
return remoteLocation
|
||||||
}
|
}
|
||||||
|
|
||||||
func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, remoteLocationPath string)(fp util.FullPath) {
|
func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, remoteLocationPath string)(fp util.FullPath) {
|
||||||
return localMountedDir.Child(remoteLocationPath[len(remoteMountedLocation.Path):])
|
return localMountedDir.Child(remoteLocationPath[len(remoteMountedLocation.Path):])
|
||||||
}
|
}
|
||||||
|
|
||||||
func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *filer_pb.RemoteConf, remoteLocation *filer_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
|
func DownloadToLocal(filerClient filer_pb.FilerClient, remoteConf *remote_pb.RemoteConf, remoteLocation *remote_pb.RemoteStorageLocation, parent util.FullPath, entry *filer_pb.Entry) error {
|
||||||
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||||
_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
|
_, err := client.DownloadToLocal(context.Background(), &filer_pb.DownloadToLocalRequest{
|
||||||
Directory: string(parent),
|
Directory: string(parent),
|
||||||
|
|
|
@ -7,6 +7,7 @@ import (
|
||||||
"math"
|
"math"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||||
|
@ -131,8 +132,8 @@ type ChunkStreamReader struct {
|
||||||
logicOffset int64
|
logicOffset int64
|
||||||
buffer []byte
|
buffer []byte
|
||||||
bufferOffset int64
|
bufferOffset int64
|
||||||
bufferPos int
|
bufferLock sync.Mutex
|
||||||
nextChunkViewIndex int
|
chunk string
|
||||||
lookupFileId wdclient.LookupFileIdFunctionType
|
lookupFileId wdclient.LookupFileIdFunctionType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -175,27 +176,29 @@ func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.F
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *ChunkStreamReader) ReadAt(p []byte, off int64) (n int, err error) {
|
func (c *ChunkStreamReader) ReadAt(p []byte, off int64) (n int, err error) {
|
||||||
|
c.bufferLock.Lock()
|
||||||
|
defer c.bufferLock.Unlock()
|
||||||
if err = c.prepareBufferFor(off); err != nil {
|
if err = c.prepareBufferFor(off); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
c.logicOffset = off
|
c.logicOffset = off
|
||||||
return c.Read(p)
|
return c.doRead(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
|
func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
|
||||||
|
c.bufferLock.Lock()
|
||||||
|
defer c.bufferLock.Unlock()
|
||||||
|
return c.doRead(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ChunkStreamReader) doRead(p []byte) (n int, err error) {
|
||||||
|
// fmt.Printf("do read [%d,%d) at %s[%d,%d)\n", c.logicOffset, c.logicOffset+int64(len(p)), c.chunk, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
|
||||||
for n < len(p) {
|
for n < len(p) {
|
||||||
if c.isBufferEmpty() {
|
// println("read", c.logicOffset)
|
||||||
if c.nextChunkViewIndex >= len(c.chunkViews) {
|
if err = c.prepareBufferFor(c.logicOffset); err != nil {
|
||||||
return n, io.EOF
|
return
|
||||||
}
|
|
||||||
chunkView := c.chunkViews[c.nextChunkViewIndex]
|
|
||||||
if err = c.fetchChunkToBuffer(chunkView); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.nextChunkViewIndex++
|
|
||||||
}
|
}
|
||||||
t := copy(p[n:], c.buffer[c.bufferPos:])
|
t := copy(p[n:], c.buffer[c.logicOffset-c.bufferOffset:])
|
||||||
c.bufferPos += t
|
|
||||||
n += t
|
n += t
|
||||||
c.logicOffset += int64(t)
|
c.logicOffset += int64(t)
|
||||||
}
|
}
|
||||||
|
@ -203,10 +206,12 @@ func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *ChunkStreamReader) isBufferEmpty() bool {
|
func (c *ChunkStreamReader) isBufferEmpty() bool {
|
||||||
return len(c.buffer) <= c.bufferPos
|
return len(c.buffer) <= int(c.logicOffset - c.bufferOffset)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
|
func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
c.bufferLock.Lock()
|
||||||
|
defer c.bufferLock.Unlock()
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
switch whence {
|
switch whence {
|
||||||
|
@ -226,48 +231,59 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func insideChunk(offset int64, chunk *ChunkView) bool {
|
||||||
|
return chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
|
func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
|
||||||
// stay in the same chunk
|
// stay in the same chunk
|
||||||
if !c.isBufferEmpty() {
|
if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) {
|
||||||
if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) {
|
return nil
|
||||||
c.bufferPos = int(offset - c.bufferOffset)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// fmt.Printf("fetch for offset %d\n", offset)
|
||||||
|
|
||||||
// need to seek to a different chunk
|
// need to seek to a different chunk
|
||||||
currentChunkIndex := sort.Search(len(c.chunkViews), func(i int) bool {
|
currentChunkIndex := sort.Search(len(c.chunkViews), func(i int) bool {
|
||||||
return offset < c.chunkViews[i].LogicOffset
|
return offset < c.chunkViews[i].LogicOffset
|
||||||
})
|
})
|
||||||
if currentChunkIndex == len(c.chunkViews) {
|
if currentChunkIndex == len(c.chunkViews) {
|
||||||
// not found
|
// not found
|
||||||
if c.chunkViews[0].LogicOffset <= offset {
|
if insideChunk(offset, c.chunkViews[0]) {
|
||||||
|
// fmt.Printf("select0 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId)
|
||||||
currentChunkIndex = 0
|
currentChunkIndex = 0
|
||||||
} else if c.chunkViews[len(c.chunkViews)-1].LogicOffset <= offset {
|
} else if insideChunk(offset, c.chunkViews[len(c.chunkViews)-1]) {
|
||||||
currentChunkIndex = len(c.chunkViews) -1
|
currentChunkIndex = len(c.chunkViews) - 1
|
||||||
|
// fmt.Printf("select last chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId)
|
||||||
} else {
|
} else {
|
||||||
return io.EOF
|
return io.EOF
|
||||||
}
|
}
|
||||||
} else if currentChunkIndex > 0 {
|
} else if currentChunkIndex > 0 {
|
||||||
if c.chunkViews[currentChunkIndex-1].LogicOffset <= offset {
|
if insideChunk(offset, c.chunkViews[currentChunkIndex]) {
|
||||||
|
// good hit
|
||||||
|
} else if insideChunk(offset, c.chunkViews[currentChunkIndex-1]){
|
||||||
currentChunkIndex -= 1
|
currentChunkIndex -= 1
|
||||||
|
// fmt.Printf("select -1 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId)
|
||||||
} else {
|
} else {
|
||||||
|
// glog.Fatalf("unexpected1 offset %d", offset)
|
||||||
return fmt.Errorf("unexpected1 offset %d", offset)
|
return fmt.Errorf("unexpected1 offset %d", offset)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
// glog.Fatalf("unexpected2 offset %d", offset)
|
||||||
return fmt.Errorf("unexpected2 offset %d", offset)
|
return fmt.Errorf("unexpected2 offset %d", offset)
|
||||||
}
|
}
|
||||||
|
|
||||||
// positioning within the new chunk
|
// positioning within the new chunk
|
||||||
chunk := c.chunkViews[currentChunkIndex]
|
chunk := c.chunkViews[currentChunkIndex]
|
||||||
if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
|
if insideChunk(offset, chunk) {
|
||||||
if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
|
if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset {
|
||||||
if err = c.fetchChunkToBuffer(chunk); err != nil {
|
if err = c.fetchChunkToBuffer(chunk); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
c.nextChunkViewIndex = currentChunkIndex + 1
|
|
||||||
}
|
}
|
||||||
c.bufferPos = int(offset - c.bufferOffset)
|
} else {
|
||||||
|
// glog.Fatalf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size))
|
||||||
|
return fmt.Errorf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size))
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -298,10 +314,10 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.buffer = buffer.Bytes()
|
c.buffer = buffer.Bytes()
|
||||||
c.bufferPos = 0
|
|
||||||
c.bufferOffset = chunkView.LogicOffset
|
c.bufferOffset = chunkView.LogicOffset
|
||||||
|
c.chunk = chunkView.FileId
|
||||||
|
|
||||||
// glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
|
// glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,11 +39,9 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
|
||||||
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
|
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if message.OldEntry != nil && message.NewEntry != nil {
|
if message.OldEntry != nil && message.NewEntry != nil {
|
||||||
if message.OldEntry.Name == message.NewEntry.Name {
|
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
|
||||||
// no need to invalidate
|
mc.invalidateFunc(oldKey)
|
||||||
} else {
|
if message.OldEntry.Name != message.NewEntry.Name {
|
||||||
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
|
|
||||||
mc.invalidateFunc(oldKey)
|
|
||||||
newKey := util.NewFullPath(dir, message.NewEntry.Name)
|
newKey := util.NewFullPath(dir, message.NewEntry.Name)
|
||||||
mc.invalidateFunc(newKey)
|
mc.invalidateFunc(newKey)
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,6 +6,7 @@ gen:
|
||||||
protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative
|
protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative
|
||||||
protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative
|
protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative
|
||||||
protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative
|
protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative
|
||||||
|
protoc remote.proto --go_out=plugins=grpc:./remote_pb --go_opt=paths=source_relative
|
||||||
protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative
|
protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative
|
||||||
protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative
|
protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative
|
||||||
# protoc filer.proto --java_out=../../other/java/client/src/main/java
|
# protoc filer.proto --java_out=../../other/java/client/src/main/java
|
||||||
|
|
|
@ -305,6 +305,7 @@ message GetFilerConfigurationResponse {
|
||||||
string metrics_address = 9;
|
string metrics_address = 9;
|
||||||
int32 metrics_interval_sec = 10;
|
int32 metrics_interval_sec = 10;
|
||||||
string version = 11;
|
string version = 11;
|
||||||
|
string cluster_id = 12;
|
||||||
}
|
}
|
||||||
|
|
||||||
message SubscribeMetadataRequest {
|
message SubscribeMetadataRequest {
|
||||||
|
@ -336,6 +337,7 @@ message KeepConnectedResponse {
|
||||||
message LocateBrokerRequest {
|
message LocateBrokerRequest {
|
||||||
string resource = 1;
|
string resource = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message LocateBrokerResponse {
|
message LocateBrokerResponse {
|
||||||
bool found = 1;
|
bool found = 1;
|
||||||
// if found, send the exact address
|
// if found, send the exact address
|
||||||
|
@ -386,49 +388,6 @@ message FilerConf {
|
||||||
/////////////////////////
|
/////////////////////////
|
||||||
// Remote Storage related
|
// Remote Storage related
|
||||||
/////////////////////////
|
/////////////////////////
|
||||||
message RemoteConf {
|
|
||||||
string type = 1;
|
|
||||||
string name = 2;
|
|
||||||
string s3_access_key = 4;
|
|
||||||
string s3_secret_key = 5;
|
|
||||||
string s3_region = 6;
|
|
||||||
string s3_endpoint = 7;
|
|
||||||
string s3_storage_class = 8;
|
|
||||||
bool s3_force_path_style = 9;
|
|
||||||
|
|
||||||
string gcs_google_application_credentials = 10;
|
|
||||||
|
|
||||||
string azure_account_name = 15;
|
|
||||||
string azure_account_key = 16;
|
|
||||||
|
|
||||||
string backblaze_key_id = 20;
|
|
||||||
string backblaze_application_key = 21;
|
|
||||||
string backblaze_endpoint = 22;
|
|
||||||
|
|
||||||
string aliyun_access_key = 25;
|
|
||||||
string aliyun_secret_key = 26;
|
|
||||||
string aliyun_endpoint = 27;
|
|
||||||
string aliyun_region = 28;
|
|
||||||
|
|
||||||
string tencent_secret_id = 30;
|
|
||||||
string tencent_secret_key = 31;
|
|
||||||
string tencent_endpoint = 32;
|
|
||||||
|
|
||||||
string baidu_access_key = 35;
|
|
||||||
string baidu_secret_key = 36;
|
|
||||||
string baidu_endpoint = 37;
|
|
||||||
string baidu_region = 38;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
message RemoteStorageMapping {
|
|
||||||
map<string,RemoteStorageLocation> mappings = 1;
|
|
||||||
}
|
|
||||||
message RemoteStorageLocation {
|
|
||||||
string name = 1;
|
|
||||||
string bucket = 2;
|
|
||||||
string path = 3;
|
|
||||||
}
|
|
||||||
message DownloadToLocalRequest {
|
message DownloadToLocalRequest {
|
||||||
string directory = 1;
|
string directory = 1;
|
||||||
string name = 2;
|
string name = 2;
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -151,7 +151,3 @@ func (fp *FilerConf_PathConf) Key() interface{} {
|
||||||
key, _ := proto.Marshal(fp)
|
key, _ := proto.Marshal(fp)
|
||||||
return string(key)
|
return string(key)
|
||||||
}
|
}
|
||||||
func (fp *RemoteStorageLocation) Key() interface{} {
|
|
||||||
key, _ := proto.Marshal(fp)
|
|
||||||
return string(key)
|
|
||||||
}
|
|
||||||
|
|
64
weed/pb/remote.proto
Normal file
64
weed/pb/remote.proto
Normal file
|
@ -0,0 +1,64 @@
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package remote_pb;
|
||||||
|
|
||||||
|
option go_package = "github.com/chrislusf/seaweedfs/weed/pb/remote_pb";
|
||||||
|
option java_package = "seaweedfs.client";
|
||||||
|
option java_outer_classname = "FilerProto";
|
||||||
|
|
||||||
|
/////////////////////////
|
||||||
|
// Remote Storage related
|
||||||
|
/////////////////////////
|
||||||
|
message RemoteConf {
|
||||||
|
string type = 1;
|
||||||
|
string name = 2;
|
||||||
|
string s3_access_key = 4;
|
||||||
|
string s3_secret_key = 5;
|
||||||
|
string s3_region = 6;
|
||||||
|
string s3_endpoint = 7;
|
||||||
|
string s3_storage_class = 8;
|
||||||
|
bool s3_force_path_style = 9;
|
||||||
|
|
||||||
|
string gcs_google_application_credentials = 10;
|
||||||
|
|
||||||
|
string azure_account_name = 15;
|
||||||
|
string azure_account_key = 16;
|
||||||
|
|
||||||
|
string backblaze_key_id = 20;
|
||||||
|
string backblaze_application_key = 21;
|
||||||
|
string backblaze_endpoint = 22;
|
||||||
|
|
||||||
|
string aliyun_access_key = 25;
|
||||||
|
string aliyun_secret_key = 26;
|
||||||
|
string aliyun_endpoint = 27;
|
||||||
|
string aliyun_region = 28;
|
||||||
|
|
||||||
|
string tencent_secret_id = 30;
|
||||||
|
string tencent_secret_key = 31;
|
||||||
|
string tencent_endpoint = 32;
|
||||||
|
|
||||||
|
string baidu_access_key = 35;
|
||||||
|
string baidu_secret_key = 36;
|
||||||
|
string baidu_endpoint = 37;
|
||||||
|
string baidu_region = 38;
|
||||||
|
|
||||||
|
string wasabi_access_key = 40;
|
||||||
|
string wasabi_secret_key = 41;
|
||||||
|
string wasabi_endpoint = 42;
|
||||||
|
string wasabi_region = 43;
|
||||||
|
|
||||||
|
repeated string hdfs_namenodes = 50;
|
||||||
|
string hdfs_username = 51;
|
||||||
|
string hdfs_service_principal_name = 52;
|
||||||
|
string hdfs_data_transfer_protection = 53;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
message RemoteStorageMapping {
|
||||||
|
map<string,RemoteStorageLocation> mappings = 1;
|
||||||
|
}
|
||||||
|
message RemoteStorageLocation {
|
||||||
|
string name = 1;
|
||||||
|
string bucket = 2;
|
||||||
|
string path = 3;
|
||||||
|
}
|
653
weed/pb/remote_pb/remote.pb.go
Normal file
653
weed/pb/remote_pb/remote.pb.go
Normal file
|
@ -0,0 +1,653 @@
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// protoc-gen-go v1.25.0
|
||||||
|
// protoc v3.12.3
|
||||||
|
// source: remote.proto
|
||||||
|
|
||||||
|
package remote_pb
|
||||||
|
|
||||||
|
import (
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
reflect "reflect"
|
||||||
|
sync "sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Verify that this generated code is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||||
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||||
|
// of the legacy proto package is being used.
|
||||||
|
const _ = proto.ProtoPackageIsVersion4
|
||||||
|
|
||||||
|
/////////////////////////
|
||||||
|
// Remote Storage related
|
||||||
|
/////////////////////////
|
||||||
|
type RemoteConf struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||||
|
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
S3AccessKey string `protobuf:"bytes,4,opt,name=s3_access_key,json=s3AccessKey,proto3" json:"s3_access_key,omitempty"`
|
||||||
|
S3SecretKey string `protobuf:"bytes,5,opt,name=s3_secret_key,json=s3SecretKey,proto3" json:"s3_secret_key,omitempty"`
|
||||||
|
S3Region string `protobuf:"bytes,6,opt,name=s3_region,json=s3Region,proto3" json:"s3_region,omitempty"`
|
||||||
|
S3Endpoint string `protobuf:"bytes,7,opt,name=s3_endpoint,json=s3Endpoint,proto3" json:"s3_endpoint,omitempty"`
|
||||||
|
S3StorageClass string `protobuf:"bytes,8,opt,name=s3_storage_class,json=s3StorageClass,proto3" json:"s3_storage_class,omitempty"`
|
||||||
|
S3ForcePathStyle bool `protobuf:"varint,9,opt,name=s3_force_path_style,json=s3ForcePathStyle,proto3" json:"s3_force_path_style,omitempty"`
|
||||||
|
GcsGoogleApplicationCredentials string `protobuf:"bytes,10,opt,name=gcs_google_application_credentials,json=gcsGoogleApplicationCredentials,proto3" json:"gcs_google_application_credentials,omitempty"`
|
||||||
|
AzureAccountName string `protobuf:"bytes,15,opt,name=azure_account_name,json=azureAccountName,proto3" json:"azure_account_name,omitempty"`
|
||||||
|
AzureAccountKey string `protobuf:"bytes,16,opt,name=azure_account_key,json=azureAccountKey,proto3" json:"azure_account_key,omitempty"`
|
||||||
|
BackblazeKeyId string `protobuf:"bytes,20,opt,name=backblaze_key_id,json=backblazeKeyId,proto3" json:"backblaze_key_id,omitempty"`
|
||||||
|
BackblazeApplicationKey string `protobuf:"bytes,21,opt,name=backblaze_application_key,json=backblazeApplicationKey,proto3" json:"backblaze_application_key,omitempty"`
|
||||||
|
BackblazeEndpoint string `protobuf:"bytes,22,opt,name=backblaze_endpoint,json=backblazeEndpoint,proto3" json:"backblaze_endpoint,omitempty"`
|
||||||
|
AliyunAccessKey string `protobuf:"bytes,25,opt,name=aliyun_access_key,json=aliyunAccessKey,proto3" json:"aliyun_access_key,omitempty"`
|
||||||
|
AliyunSecretKey string `protobuf:"bytes,26,opt,name=aliyun_secret_key,json=aliyunSecretKey,proto3" json:"aliyun_secret_key,omitempty"`
|
||||||
|
AliyunEndpoint string `protobuf:"bytes,27,opt,name=aliyun_endpoint,json=aliyunEndpoint,proto3" json:"aliyun_endpoint,omitempty"`
|
||||||
|
AliyunRegion string `protobuf:"bytes,28,opt,name=aliyun_region,json=aliyunRegion,proto3" json:"aliyun_region,omitempty"`
|
||||||
|
TencentSecretId string `protobuf:"bytes,30,opt,name=tencent_secret_id,json=tencentSecretId,proto3" json:"tencent_secret_id,omitempty"`
|
||||||
|
TencentSecretKey string `protobuf:"bytes,31,opt,name=tencent_secret_key,json=tencentSecretKey,proto3" json:"tencent_secret_key,omitempty"`
|
||||||
|
TencentEndpoint string `protobuf:"bytes,32,opt,name=tencent_endpoint,json=tencentEndpoint,proto3" json:"tencent_endpoint,omitempty"`
|
||||||
|
BaiduAccessKey string `protobuf:"bytes,35,opt,name=baidu_access_key,json=baiduAccessKey,proto3" json:"baidu_access_key,omitempty"`
|
||||||
|
BaiduSecretKey string `protobuf:"bytes,36,opt,name=baidu_secret_key,json=baiduSecretKey,proto3" json:"baidu_secret_key,omitempty"`
|
||||||
|
BaiduEndpoint string `protobuf:"bytes,37,opt,name=baidu_endpoint,json=baiduEndpoint,proto3" json:"baidu_endpoint,omitempty"`
|
||||||
|
BaiduRegion string `protobuf:"bytes,38,opt,name=baidu_region,json=baiduRegion,proto3" json:"baidu_region,omitempty"`
|
||||||
|
WasabiAccessKey string `protobuf:"bytes,40,opt,name=wasabi_access_key,json=wasabiAccessKey,proto3" json:"wasabi_access_key,omitempty"`
|
||||||
|
WasabiSecretKey string `protobuf:"bytes,41,opt,name=wasabi_secret_key,json=wasabiSecretKey,proto3" json:"wasabi_secret_key,omitempty"`
|
||||||
|
WasabiEndpoint string `protobuf:"bytes,42,opt,name=wasabi_endpoint,json=wasabiEndpoint,proto3" json:"wasabi_endpoint,omitempty"`
|
||||||
|
WasabiRegion string `protobuf:"bytes,43,opt,name=wasabi_region,json=wasabiRegion,proto3" json:"wasabi_region,omitempty"`
|
||||||
|
HdfsNamenodes []string `protobuf:"bytes,50,rep,name=hdfs_namenodes,json=hdfsNamenodes,proto3" json:"hdfs_namenodes,omitempty"`
|
||||||
|
HdfsUsername string `protobuf:"bytes,51,opt,name=hdfs_username,json=hdfsUsername,proto3" json:"hdfs_username,omitempty"`
|
||||||
|
HdfsServicePrincipalName string `protobuf:"bytes,52,opt,name=hdfs_service_principal_name,json=hdfsServicePrincipalName,proto3" json:"hdfs_service_principal_name,omitempty"`
|
||||||
|
HdfsDataTransferProtection string `protobuf:"bytes,53,opt,name=hdfs_data_transfer_protection,json=hdfsDataTransferProtection,proto3" json:"hdfs_data_transfer_protection,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) Reset() {
|
||||||
|
*x = RemoteConf{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_remote_proto_msgTypes[0]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RemoteConf) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RemoteConf) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_remote_proto_msgTypes[0]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RemoteConf.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RemoteConf) Descriptor() ([]byte, []int) {
|
||||||
|
return file_remote_proto_rawDescGZIP(), []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetType() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Type
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetName() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetS3AccessKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.S3AccessKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetS3SecretKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.S3SecretKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetS3Region() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.S3Region
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetS3Endpoint() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.S3Endpoint
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetS3StorageClass() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.S3StorageClass
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetS3ForcePathStyle() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.S3ForcePathStyle
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetGcsGoogleApplicationCredentials() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.GcsGoogleApplicationCredentials
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetAzureAccountName() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.AzureAccountName
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetAzureAccountKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.AzureAccountKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetBackblazeKeyId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.BackblazeKeyId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetBackblazeApplicationKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.BackblazeApplicationKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetBackblazeEndpoint() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.BackblazeEndpoint
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetAliyunAccessKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.AliyunAccessKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetAliyunSecretKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.AliyunSecretKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetAliyunEndpoint() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.AliyunEndpoint
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetAliyunRegion() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.AliyunRegion
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetTencentSecretId() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TencentSecretId
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetTencentSecretKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TencentSecretKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetTencentEndpoint() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.TencentEndpoint
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetBaiduAccessKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.BaiduAccessKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetBaiduSecretKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.BaiduSecretKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetBaiduEndpoint() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.BaiduEndpoint
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetBaiduRegion() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.BaiduRegion
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetWasabiAccessKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.WasabiAccessKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetWasabiSecretKey() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.WasabiSecretKey
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetWasabiEndpoint() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.WasabiEndpoint
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetWasabiRegion() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.WasabiRegion
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetHdfsNamenodes() []string {
|
||||||
|
if x != nil {
|
||||||
|
return x.HdfsNamenodes
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetHdfsUsername() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.HdfsUsername
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetHdfsServicePrincipalName() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.HdfsServicePrincipalName
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteConf) GetHdfsDataTransferProtection() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.HdfsDataTransferProtection
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type RemoteStorageMapping struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Mappings map[string]*RemoteStorageLocation `protobuf:"bytes,1,rep,name=mappings,proto3" json:"mappings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageMapping) Reset() {
|
||||||
|
*x = RemoteStorageMapping{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_remote_proto_msgTypes[1]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageMapping) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RemoteStorageMapping) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RemoteStorageMapping) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_remote_proto_msgTypes[1]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RemoteStorageMapping.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RemoteStorageMapping) Descriptor() ([]byte, []int) {
|
||||||
|
return file_remote_proto_rawDescGZIP(), []int{1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageMapping) GetMappings() map[string]*RemoteStorageLocation {
|
||||||
|
if x != nil {
|
||||||
|
return x.Mappings
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type RemoteStorageLocation struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
|
||||||
|
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageLocation) Reset() {
|
||||||
|
*x = RemoteStorageLocation{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_remote_proto_msgTypes[2]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageLocation) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*RemoteStorageLocation) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *RemoteStorageLocation) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_remote_proto_msgTypes[2]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use RemoteStorageLocation.ProtoReflect.Descriptor instead.
|
||||||
|
func (*RemoteStorageLocation) Descriptor() ([]byte, []int) {
|
||||||
|
return file_remote_proto_rawDescGZIP(), []int{2}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageLocation) GetName() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageLocation) GetBucket() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Bucket
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *RemoteStorageLocation) GetPath() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.Path
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var File_remote_proto protoreflect.FileDescriptor
|
||||||
|
|
||||||
|
var file_remote_proto_rawDesc = []byte{
|
||||||
|
0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09,
|
||||||
|
0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x22, 0x8c, 0x0b, 0x0a, 0x0a, 0x52, 0x65,
|
||||||
|
0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
|
||||||
|
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04,
|
||||||
|
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
|
||||||
|
0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65,
|
||||||
|
0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x41, 0x63, 0x63, 0x65, 0x73,
|
||||||
|
0x73, 0x4b, 0x65, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65,
|
||||||
|
0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x53,
|
||||||
|
0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x72,
|
||||||
|
0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x33, 0x52,
|
||||||
|
0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x33, 0x5f, 0x65, 0x6e, 0x64, 0x70,
|
||||||
|
0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x33, 0x45, 0x6e,
|
||||||
|
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x33, 0x5f, 0x73, 0x74, 0x6f,
|
||||||
|
0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
|
||||||
|
0x52, 0x0e, 0x73, 0x33, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73,
|
||||||
|
0x12, 0x2d, 0x0a, 0x13, 0x73, 0x33, 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74,
|
||||||
|
0x68, 0x5f, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73,
|
||||||
|
0x33, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12,
|
||||||
|
0x4b, 0x0a, 0x22, 0x67, 0x63, 0x73, 0x5f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x61, 0x70,
|
||||||
|
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
|
||||||
|
0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x67, 0x63, 0x73,
|
||||||
|
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
|
||||||
|
0x6e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12,
|
||||||
|
0x61, 0x7a, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61,
|
||||||
|
0x6d, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41,
|
||||||
|
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x7a,
|
||||||
|
0x75, 0x72, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18,
|
||||||
|
0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41, 0x63, 0x63, 0x6f,
|
||||||
|
0x75, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c,
|
||||||
|
0x61, 0x7a, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09,
|
||||||
|
0x52, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x4b, 0x65, 0x79, 0x49, 0x64,
|
||||||
|
0x12, 0x3a, 0x0a, 0x19, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x61, 0x70,
|
||||||
|
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x15, 0x20,
|
||||||
|
0x01, 0x28, 0x09, 0x52, 0x17, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x41, 0x70,
|
||||||
|
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x12,
|
||||||
|
0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
|
||||||
|
0x6e, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c,
|
||||||
|
0x61, 0x7a, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x61,
|
||||||
|
0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79,
|
||||||
|
0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x41, 0x63,
|
||||||
|
0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x69, 0x79, 0x75,
|
||||||
|
0x6e, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1a, 0x20, 0x01,
|
||||||
|
0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
|
||||||
|
0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x65, 0x6e,
|
||||||
|
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x6c,
|
||||||
|
0x69, 0x79, 0x75, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d,
|
||||||
|
0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x1c, 0x20,
|
||||||
|
0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x6f,
|
||||||
|
0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63,
|
||||||
|
0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65,
|
||||||
|
0x6e, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a,
|
||||||
|
0x12, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f,
|
||||||
|
0x6b, 0x65, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x6e, 0x63, 0x65,
|
||||||
|
0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x74,
|
||||||
|
0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
|
||||||
|
0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x45, 0x6e,
|
||||||
|
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f,
|
||||||
|
0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09,
|
||||||
|
0x52, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79,
|
||||||
|
0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74,
|
||||||
|
0x5f, 0x6b, 0x65, 0x79, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, 0x69, 0x64,
|
||||||
|
0x75, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61,
|
||||||
|
0x69, 0x64, 0x75, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x25, 0x20, 0x01,
|
||||||
|
0x28, 0x09, 0x52, 0x0d, 0x62, 0x61, 0x69, 0x64, 0x75, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
|
||||||
|
0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f,
|
||||||
|
0x6e, 0x18, 0x26, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x69, 0x64, 0x75, 0x52, 0x65,
|
||||||
|
0x67, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x61,
|
||||||
|
0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||||
|
0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79,
|
||||||
|
0x12, 0x2a, 0x0a, 0x11, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65,
|
||||||
|
0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x61, 0x73,
|
||||||
|
0x61, 0x62, 0x69, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f,
|
||||||
|
0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
|
||||||
|
0x2a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x45, 0x6e, 0x64,
|
||||||
|
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f,
|
||||||
|
0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61,
|
||||||
|
0x73, 0x61, 0x62, 0x69, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x68, 0x64,
|
||||||
|
0x66, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x32, 0x20, 0x03,
|
||||||
|
0x28, 0x09, 0x52, 0x0d, 0x68, 0x64, 0x66, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x6e, 0x6f, 0x64, 0x65,
|
||||||
|
0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x64, 0x66, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61,
|
||||||
|
0x6d, 0x65, 0x18, 0x33, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x64, 0x66, 0x73, 0x55, 0x73,
|
||||||
|
0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x68, 0x64, 0x66, 0x73, 0x5f, 0x73,
|
||||||
|
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c,
|
||||||
|
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x34, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x68, 0x64, 0x66,
|
||||||
|
0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61,
|
||||||
|
0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x1d, 0x68, 0x64, 0x66, 0x73, 0x5f, 0x64, 0x61,
|
||||||
|
0x74, 0x61, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74,
|
||||||
|
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x35, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x68, 0x64,
|
||||||
|
0x66, 0x73, 0x44, 0x61, 0x74, 0x61, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x50, 0x72,
|
||||||
|
0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc0, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x6d,
|
||||||
|
0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e,
|
||||||
|
0x67, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20,
|
||||||
|
0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e,
|
||||||
|
0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x70,
|
||||||
|
0x70, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74,
|
||||||
|
0x72, 0x79, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x5d, 0x0a, 0x0d,
|
||||||
|
0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
|
||||||
|
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
|
||||||
|
0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20,
|
||||||
|
0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74,
|
||||||
|
0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x15, 0x52,
|
||||||
|
0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61,
|
||||||
|
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||||
|
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
|
||||||
|
0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
|
||||||
|
0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||||
|
0x70, 0x61, 0x74, 0x68, 0x42, 0x50, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66,
|
||||||
|
0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50,
|
||||||
|
0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||||
|
0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65,
|
||||||
|
0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x72, 0x65, 0x6d,
|
||||||
|
0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
file_remote_proto_rawDescOnce sync.Once
|
||||||
|
file_remote_proto_rawDescData = file_remote_proto_rawDesc
|
||||||
|
)
|
||||||
|
|
||||||
|
func file_remote_proto_rawDescGZIP() []byte {
|
||||||
|
file_remote_proto_rawDescOnce.Do(func() {
|
||||||
|
file_remote_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_proto_rawDescData)
|
||||||
|
})
|
||||||
|
return file_remote_proto_rawDescData
|
||||||
|
}
|
||||||
|
|
||||||
|
var file_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||||
|
var file_remote_proto_goTypes = []interface{}{
|
||||||
|
(*RemoteConf)(nil), // 0: remote_pb.RemoteConf
|
||||||
|
(*RemoteStorageMapping)(nil), // 1: remote_pb.RemoteStorageMapping
|
||||||
|
(*RemoteStorageLocation)(nil), // 2: remote_pb.RemoteStorageLocation
|
||||||
|
nil, // 3: remote_pb.RemoteStorageMapping.MappingsEntry
|
||||||
|
}
|
||||||
|
var file_remote_proto_depIdxs = []int32{
|
||||||
|
3, // 0: remote_pb.RemoteStorageMapping.mappings:type_name -> remote_pb.RemoteStorageMapping.MappingsEntry
|
||||||
|
2, // 1: remote_pb.RemoteStorageMapping.MappingsEntry.value:type_name -> remote_pb.RemoteStorageLocation
|
||||||
|
2, // [2:2] is the sub-list for method output_type
|
||||||
|
2, // [2:2] is the sub-list for method input_type
|
||||||
|
2, // [2:2] is the sub-list for extension type_name
|
||||||
|
2, // [2:2] is the sub-list for extension extendee
|
||||||
|
0, // [0:2] is the sub-list for field type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { file_remote_proto_init() }
|
||||||
|
func file_remote_proto_init() {
|
||||||
|
if File_remote_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !protoimpl.UnsafeEnabled {
|
||||||
|
file_remote_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RemoteConf); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_remote_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RemoteStorageMapping); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_remote_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*RemoteStorageLocation); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_remote_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 4,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_remote_proto_goTypes,
|
||||||
|
DependencyIndexes: file_remote_proto_depIdxs,
|
||||||
|
MessageInfos: file_remote_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_remote_proto = out.File
|
||||||
|
file_remote_proto_rawDesc = nil
|
||||||
|
file_remote_proto_goTypes = nil
|
||||||
|
file_remote_proto_depIdxs = nil
|
||||||
|
}
|
8
weed/pb/remote_pb/remote_pb_helper.go
Normal file
8
weed/pb/remote_pb/remote_pb_helper.go
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
package remote_pb
|
||||||
|
|
||||||
|
import "github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
func (fp *RemoteStorageLocation) Key() interface{} {
|
||||||
|
key, _ := proto.Marshal(fp)
|
||||||
|
return string(key)
|
||||||
|
}
|
|
@ -3,6 +3,8 @@ syntax = "proto3";
|
||||||
package volume_server_pb;
|
package volume_server_pb;
|
||||||
option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb";
|
option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb";
|
||||||
|
|
||||||
|
import "remote.proto";
|
||||||
|
|
||||||
//////////////////////////////////////////////////
|
//////////////////////////////////////////////////
|
||||||
|
|
||||||
service VolumeServer {
|
service VolumeServer {
|
||||||
|
@ -473,14 +475,8 @@ message FetchAndWriteNeedleRequest {
|
||||||
int64 offset = 4;
|
int64 offset = 4;
|
||||||
int64 size = 5;
|
int64 size = 5;
|
||||||
// remote conf
|
// remote conf
|
||||||
string remote_type = 6;
|
remote_pb.RemoteConf remote_conf = 15;
|
||||||
string remote_name = 7;
|
remote_pb.RemoteStorageLocation remote_location = 16;
|
||||||
string s3_access_key = 8;
|
|
||||||
string s3_secret_key = 9;
|
|
||||||
string s3_region = 10;
|
|
||||||
string s3_endpoint = 11;
|
|
||||||
string remote_bucket = 12;
|
|
||||||
string remote_path = 13;
|
|
||||||
}
|
}
|
||||||
message FetchAndWriteNeedleResponse {
|
message FetchAndWriteNeedleResponse {
|
||||||
}
|
}
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -6,6 +6,7 @@ import (
|
||||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"io"
|
"io"
|
||||||
|
@ -21,7 +22,11 @@ func init() {
|
||||||
|
|
||||||
type azureRemoteStorageMaker struct{}
|
type azureRemoteStorageMaker struct{}
|
||||||
|
|
||||||
func (s azureRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
func (s azureRemoteStorageMaker) HasBucket() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s azureRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
|
|
||||||
client := &azureRemoteStorageClient{
|
client := &azureRemoteStorageClient{
|
||||||
conf: conf,
|
conf: conf,
|
||||||
|
@ -52,13 +57,13 @@ func (s azureRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage
|
||||||
}
|
}
|
||||||
|
|
||||||
type azureRemoteStorageClient struct {
|
type azureRemoteStorageClient struct {
|
||||||
conf *filer_pb.RemoteConf
|
conf *remote_pb.RemoteConf
|
||||||
serviceURL azblob.ServiceURL
|
serviceURL azblob.ServiceURL
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ = remote_storage.RemoteStorageClient(&azureRemoteStorageClient{})
|
var _ = remote_storage.RemoteStorageClient(&azureRemoteStorageClient{})
|
||||||
|
|
||||||
func (az *azureRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
|
func (az *azureRemoteStorageClient) Traverse(loc *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
|
||||||
|
|
||||||
pathKey := loc.Path[1:]
|
pathKey := loc.Path[1:]
|
||||||
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
||||||
|
@ -96,7 +101,7 @@ func (az *azureRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (az *azureRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
|
func (az *azureRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
|
||||||
|
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
||||||
|
@ -119,11 +124,15 @@ func (az *azureRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (az *azureRemoteStorageClient) WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
|
func (az *azureRemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (az *azureRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
|
func (az *azureRemoteStorageClient) RemoveDirectory(loc *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (az *azureRemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
|
||||||
|
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
||||||
|
@ -155,7 +164,7 @@ func (az *azureRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocatio
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (az *azureRemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
|
func (az *azureRemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
||||||
blobURL := containerURL.NewBlockBlobURL(key)
|
blobURL := containerURL.NewBlockBlobURL(key)
|
||||||
|
@ -183,7 +192,7 @@ func toMetadata(attributes map[string][]byte) map[string]string {
|
||||||
return metadata
|
return metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
func (az *azureRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
|
func (az *azureRemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
|
||||||
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
|
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -196,7 +205,7 @@ func (az *azureRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStora
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (az *azureRemoteStorageClient) DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error) {
|
func (az *azureRemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
containerURL := az.serviceURL.NewContainerURL(loc.Bucket)
|
||||||
if _, err = containerURL.NewBlobURL(key).Delete(context.Background(),
|
if _, err = containerURL.NewBlobURL(key).Delete(context.Background(),
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"google.golang.org/api/iterator"
|
"google.golang.org/api/iterator"
|
||||||
|
@ -21,7 +22,11 @@ func init() {
|
||||||
|
|
||||||
type gcsRemoteStorageMaker struct{}
|
type gcsRemoteStorageMaker struct{}
|
||||||
|
|
||||||
func (s gcsRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
func (s gcsRemoteStorageMaker) HasBucket() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
client := &gcsRemoteStorageClient{
|
client := &gcsRemoteStorageClient{
|
||||||
conf: conf,
|
conf: conf,
|
||||||
}
|
}
|
||||||
|
@ -48,13 +53,13 @@ func (s gcsRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.R
|
||||||
}
|
}
|
||||||
|
|
||||||
type gcsRemoteStorageClient struct {
|
type gcsRemoteStorageClient struct {
|
||||||
conf *filer_pb.RemoteConf
|
conf *remote_pb.RemoteConf
|
||||||
client *storage.Client
|
client *storage.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ = remote_storage.RemoteStorageClient(&gcsRemoteStorageClient{})
|
var _ = remote_storage.RemoteStorageClient(&gcsRemoteStorageClient{})
|
||||||
|
|
||||||
func (gcs *gcsRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
|
func (gcs *gcsRemoteStorageClient) Traverse(loc *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
|
||||||
|
|
||||||
pathKey := loc.Path[1:]
|
pathKey := loc.Path[1:]
|
||||||
|
|
||||||
|
@ -86,7 +91,7 @@ func (gcs *gcsRemoteStorageClient) Traverse(loc *filer_pb.RemoteStorageLocation,
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (gcs *gcsRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
|
func (gcs *gcsRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
|
||||||
|
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
rangeReader, readErr := gcs.client.Bucket(loc.Bucket).Object(key).NewRangeReader(context.Background(), offset, size)
|
rangeReader, readErr := gcs.client.Bucket(loc.Bucket).Object(key).NewRangeReader(context.Background(), offset, size)
|
||||||
|
@ -102,11 +107,15 @@ func (gcs *gcsRemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation,
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (gcs *gcsRemoteStorageClient) WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
|
func (gcs *gcsRemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (gcs *gcsRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
|
func (gcs *gcsRemoteStorageClient) RemoveDirectory(loc *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gcs *gcsRemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
|
||||||
|
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
|
|
||||||
|
@ -125,7 +134,7 @@ func (gcs *gcsRemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (gcs *gcsRemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
|
func (gcs *gcsRemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
attr, err := gcs.client.Bucket(loc.Bucket).Object(key).Attrs(context.Background())
|
attr, err := gcs.client.Bucket(loc.Bucket).Object(key).Attrs(context.Background())
|
||||||
|
|
||||||
|
@ -150,7 +159,7 @@ func toMetadata(attributes map[string][]byte) map[string]string {
|
||||||
return metadata
|
return metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
func (gcs *gcsRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
|
func (gcs *gcsRemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
|
||||||
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
|
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -168,7 +177,7 @@ func (gcs *gcsRemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorag
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (gcs *gcsRemoteStorageClient) DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error) {
|
func (gcs *gcsRemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
key := loc.Path[1:]
|
key := loc.Path[1:]
|
||||||
if err = gcs.client.Bucket(loc.Bucket).Object(key).Delete(context.Background()); err != nil {
|
if err = gcs.client.Bucket(loc.Bucket).Object(key).Delete(context.Background()); err != nil {
|
||||||
return fmt.Errorf("gcs delete %s%s: %v", loc.Bucket, key, err)
|
return fmt.Errorf("gcs delete %s%s: %v", loc.Bucket, key, err)
|
||||||
|
|
55
weed/remote_storage/hdfs/hdfs_kerberos.go
Normal file
55
weed/remote_storage/hdfs/hdfs_kerberos.go
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
package hdfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/user"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
krb "github.com/jcmturner/gokrb5/v8/client"
|
||||||
|
"github.com/jcmturner/gokrb5/v8/config"
|
||||||
|
"github.com/jcmturner/gokrb5/v8/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
|
||||||
|
func getKerberosClient() (*krb.Client, error) {
|
||||||
|
configPath := os.Getenv("KRB5_CONFIG")
|
||||||
|
if configPath == "" {
|
||||||
|
configPath = "/etc/krb5.conf"
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := config.Load(configPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the ccache location from the environment, falling back to the
|
||||||
|
// default location.
|
||||||
|
ccachePath := os.Getenv("KRB5CCNAME")
|
||||||
|
if strings.Contains(ccachePath, ":") {
|
||||||
|
if strings.HasPrefix(ccachePath, "FILE:") {
|
||||||
|
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
|
||||||
|
}
|
||||||
|
} else if ccachePath == "" {
|
||||||
|
u, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
|
||||||
|
}
|
||||||
|
|
||||||
|
ccache, err := credentials.LoadCCache(ccachePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := krb.NewFromCCache(ccache, cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
178
weed/remote_storage/hdfs/hdfs_storage_client.go
Normal file
178
weed/remote_storage/hdfs/hdfs_storage_client.go
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
package hdfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
|
"github.com/colinmarc/hdfs/v2"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the HDFS client maker under the "hdfs" remote storage type.
func init() {
	remote_storage.RemoteStorageClientMakers["hdfs"] = new(hdfsRemoteStorageMaker)
}
|
||||||
|
|
||||||
|
// hdfsRemoteStorageMaker builds RemoteStorageClient instances for HDFS.
type hdfsRemoteStorageMaker struct{}

// HasBucket reports false: HDFS remote locations are plain paths with no
// bucket component.
func (s hdfsRemoteStorageMaker) HasBucket() bool {
	return false
}
|
||||||
|
|
||||||
|
func (s hdfsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
|
client := &hdfsRemoteStorageClient{
|
||||||
|
conf: conf,
|
||||||
|
}
|
||||||
|
|
||||||
|
options := hdfs.ClientOptions{
|
||||||
|
Addresses: conf.HdfsNamenodes,
|
||||||
|
UseDatanodeHostname: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
if conf.HdfsServicePrincipalName != "" {
|
||||||
|
var err error
|
||||||
|
options.KerberosClient, err = getKerberosClient()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("get kerberos authentication: %s", err)
|
||||||
|
}
|
||||||
|
options.KerberosServicePrincipleName = conf.HdfsServicePrincipalName
|
||||||
|
|
||||||
|
if conf.HdfsDataTransferProtection != "" {
|
||||||
|
options.DataTransferProtection = conf.HdfsDataTransferProtection
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
options.User = conf.HdfsUsername
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := hdfs.NewClient(options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client.client = c
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// hdfsRemoteStorageClient performs remote-storage operations against an
// HDFS cluster on behalf of SeaweedFS.
type hdfsRemoteStorageClient struct {
	conf   *remote_pb.RemoteConf // configuration this client was built from
	client *hdfs.Client          // underlying HDFS connection
}

// compile-time check that hdfsRemoteStorageClient satisfies the interface
var _ = remote_storage.RemoteStorageClient(&hdfsRemoteStorageClient{})
|
||||||
|
|
||||||
|
func (c *hdfsRemoteStorageClient) Traverse(loc *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
|
||||||
|
|
||||||
|
return remote_storage.TraverseBfs(func(parentDir util.FullPath, visitFn remote_storage.VisitFunc) error {
|
||||||
|
children, err := c.client.ReadDir(string(parentDir))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, child := range children {
|
||||||
|
if err := visitFn(string(parentDir), child.Name(), child.IsDir(), &filer_pb.RemoteEntry{
|
||||||
|
StorageName: c.conf.Name,
|
||||||
|
LastLocalSyncTsNs: 0,
|
||||||
|
RemoteETag: "",
|
||||||
|
RemoteMtime: child.ModTime().Unix(),
|
||||||
|
RemoteSize: child.Size(),
|
||||||
|
}); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}, util.FullPath(loc.Path), visitFn)
|
||||||
|
|
||||||
|
}
|
||||||
|
func (c *hdfsRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
|
||||||
|
|
||||||
|
f, err := c.client.Open(loc.Path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
data = make([]byte, size)
|
||||||
|
_, err = f.ReadAt(data, offset)
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteDirectory creates the directory at loc.Path (and any missing
// parents) with the mode taken from the entry's attributes.
func (c *hdfsRemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
	return c.client.MkdirAll(loc.Path, os.FileMode(entry.Attributes.FileMode))
}
|
||||||
|
|
||||||
|
// RemoveDirectory recursively deletes the directory at loc.Path and
// everything under it.
func (c *hdfsRemoteStorageClient) RemoveDirectory(loc *remote_pb.RemoteStorageLocation) (err error) {
	return c.client.RemoveAll(loc.Path)
}
|
||||||
|
|
||||||
|
func (c *hdfsRemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
|
||||||
|
|
||||||
|
dirname := path.Dir(loc.Path)
|
||||||
|
|
||||||
|
// ensure parent directory
|
||||||
|
if err = c.client.MkdirAll(dirname, 0755); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove existing file
|
||||||
|
info, err := c.client.Stat(loc.Path)
|
||||||
|
if err == nil {
|
||||||
|
err = c.client.Remove(loc.Path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// create new file
|
||||||
|
out, err := c.client.Create(loc.Path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanup := func() {
|
||||||
|
if removeErr := c.client.Remove(loc.Path); removeErr != nil {
|
||||||
|
glog.Errorf("clean up %s%s: %v", loc.Name, loc.Path, removeErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = io.Copy(out, reader); err != nil {
|
||||||
|
cleanup()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = out.Close(); err != nil {
|
||||||
|
cleanup()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err = c.client.Stat(loc.Path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return &filer_pb.RemoteEntry{
|
||||||
|
RemoteMtime: info.ModTime().Unix(),
|
||||||
|
RemoteSize: info.Size(),
|
||||||
|
RemoteETag: "",
|
||||||
|
StorageName: c.conf.Name,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *hdfsRemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) error {
|
||||||
|
if oldEntry.Attributes.FileMode != newEntry.Attributes.FileMode {
|
||||||
|
if err := c.client.Chmod(loc.Path, os.FileMode(newEntry.Attributes.FileMode)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (c *hdfsRemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
|
if err = c.client.Remove(loc.Path); err != nil {
|
||||||
|
return fmt.Errorf("hdfs delete %s: %v", loc.Path, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
|
@ -3,13 +3,25 @@ package remote_storage
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
func ParseLocation(remote string) (loc *filer_pb.RemoteStorageLocation) {
|
// ParseLocationName returns the remote storage name — the first path
// segment of a "name/bucket/path" style location string. A single trailing
// slash is ignored.
func ParseLocationName(remote string) (locationName string) {
	// strings.SplitN always yields at least one element, so the original
	// `len(parts) >= 1` guard (and its unreachable fallback return) is dropped.
	remote = strings.TrimSuffix(remote, "/")
	return strings.SplitN(remote, "/", 2)[0]
}
|
||||||
|
|
||||||
|
func parseBucketLocation(remote string) (loc *remote_pb.RemoteStorageLocation) {
|
||||||
|
loc = &remote_pb.RemoteStorageLocation{}
|
||||||
if strings.HasSuffix(string(remote), "/") {
|
if strings.HasSuffix(string(remote), "/") {
|
||||||
remote = remote[:len(remote)-1]
|
remote = remote[:len(remote)-1]
|
||||||
}
|
}
|
||||||
|
@ -27,23 +39,41 @@ func ParseLocation(remote string) (loc *filer_pb.RemoteStorageLocation) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func FormatLocation(loc *filer_pb.RemoteStorageLocation) string {
|
func parseNoBucketLocation(remote string) (loc *remote_pb.RemoteStorageLocation) {
|
||||||
|
loc = &remote_pb.RemoteStorageLocation{}
|
||||||
|
if strings.HasSuffix(string(remote), "/") {
|
||||||
|
remote = remote[:len(remote)-1]
|
||||||
|
}
|
||||||
|
parts := strings.SplitN(string(remote), "/", 2)
|
||||||
|
if len(parts) >= 1 {
|
||||||
|
loc.Name = parts[0]
|
||||||
|
}
|
||||||
|
loc.Path = string(remote[len(loc.Name):])
|
||||||
|
if loc.Path == "" {
|
||||||
|
loc.Path = "/"
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func FormatLocation(loc *remote_pb.RemoteStorageLocation) string {
|
||||||
return fmt.Sprintf("%s/%s%s", loc.Name, loc.Bucket, loc.Path)
|
return fmt.Sprintf("%s/%s%s", loc.Name, loc.Bucket, loc.Path)
|
||||||
}
|
}
|
||||||
|
|
||||||
// VisitFunc is the callback invoked for each remote entry during traversal.
type VisitFunc func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error

// RemoteStorageClient abstracts the operations SeaweedFS performs against
// a remote storage backend (s3-compatible stores, HDFS, ...).
type RemoteStorageClient interface {
	Traverse(loc *remote_pb.RemoteStorageLocation, visitFn VisitFunc) error
	ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error)
	WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error)
	RemoveDirectory(loc *remote_pb.RemoteStorageLocation) (err error)
	WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error)
	UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error)
	DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error)
}

// RemoteStorageClientMaker constructs a RemoteStorageClient from a remote
// configuration; one maker is registered per storage type.
type RemoteStorageClientMaker interface {
	Make(remoteConf *remote_pb.RemoteConf) (RemoteStorageClient, error)
	// HasBucket reports whether this backend namespaces objects by bucket,
	// which determines how location strings are parsed.
	HasBucket() bool
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -52,7 +82,19 @@ var (
|
||||||
remoteStorageClientsLock sync.Mutex
|
remoteStorageClientsLock sync.Mutex
|
||||||
)
|
)
|
||||||
|
|
||||||
func makeRemoteStorageClient(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error) {
|
func ParseRemoteLocation(remoteConfType string, remote string) (remoteStorageLocation *remote_pb.RemoteStorageLocation, err error) {
|
||||||
|
maker, found := RemoteStorageClientMakers[remoteConfType]
|
||||||
|
if !found {
|
||||||
|
return nil, fmt.Errorf("remote storage type %s not found", remoteConfType)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !maker.HasBucket() {
|
||||||
|
return parseNoBucketLocation(remote), nil
|
||||||
|
}
|
||||||
|
return parseBucketLocation(remote), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeRemoteStorageClient(remoteConf *remote_pb.RemoteConf) (RemoteStorageClient, error) {
|
||||||
maker, found := RemoteStorageClientMakers[remoteConf.Type]
|
maker, found := RemoteStorageClientMakers[remoteConf.Type]
|
||||||
if !found {
|
if !found {
|
||||||
return nil, fmt.Errorf("remote storage type %s not found", remoteConf.Type)
|
return nil, fmt.Errorf("remote storage type %s not found", remoteConf.Type)
|
||||||
|
@ -60,7 +102,7 @@ func makeRemoteStorageClient(remoteConf *filer_pb.RemoteConf) (RemoteStorageClie
|
||||||
return maker.Make(remoteConf)
|
return maker.Make(remoteConf)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetRemoteStorage(remoteConf *filer_pb.RemoteConf) (RemoteStorageClient, error) {
|
func GetRemoteStorage(remoteConf *remote_pb.RemoteConf) (RemoteStorageClient, error) {
|
||||||
remoteStorageClientsLock.Lock()
|
remoteStorageClientsLock.Lock()
|
||||||
defer remoteStorageClientsLock.Unlock()
|
defer remoteStorageClientsLock.Unlock()
|
||||||
|
|
||||||
|
|
|
@ -6,7 +6,7 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"os"
|
"os"
|
||||||
|
@ -18,7 +18,11 @@ func init() {
|
||||||
|
|
||||||
type AliyunRemoteStorageMaker struct{}
|
type AliyunRemoteStorageMaker struct{}
|
||||||
|
|
||||||
func (s AliyunRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
// HasBucket reports true: Aliyun OSS locations include a bucket component.
func (s AliyunRemoteStorageMaker) HasBucket() bool {
	return true
}
|
||||||
|
|
||||||
|
func (s AliyunRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
client := &s3RemoteStorageClient{
|
client := &s3RemoteStorageClient{
|
||||||
conf: conf,
|
conf: conf,
|
||||||
}
|
}
|
||||||
|
@ -29,6 +33,7 @@ func (s AliyunRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storag
|
||||||
Endpoint: aws.String(conf.AliyunEndpoint),
|
Endpoint: aws.String(conf.AliyunEndpoint),
|
||||||
Region: aws.String(conf.AliyunRegion),
|
Region: aws.String(conf.AliyunRegion),
|
||||||
S3ForcePathStyle: aws.Bool(false),
|
S3ForcePathStyle: aws.Bool(false),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if accessKey != "" && secretKey != "" {
|
if accessKey != "" && secretKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
||||||
|
@ -38,6 +43,7 @@ func (s AliyunRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storag
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("create aliyun session: %v", err)
|
return nil, fmt.Errorf("create aliyun session: %v", err)
|
||||||
}
|
}
|
||||||
|
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
|
||||||
client.conn = s3.New(sess)
|
client.conn = s3.New(sess)
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,7 +6,7 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -16,7 +16,11 @@ func init() {
|
||||||
|
|
||||||
type BackBlazeRemoteStorageMaker struct{}
|
type BackBlazeRemoteStorageMaker struct{}
|
||||||
|
|
||||||
func (s BackBlazeRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
// HasBucket reports true: BackBlaze B2 locations include a bucket component.
func (s BackBlazeRemoteStorageMaker) HasBucket() bool {
	return true
}
|
||||||
|
|
||||||
|
func (s BackBlazeRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
client := &s3RemoteStorageClient{
|
client := &s3RemoteStorageClient{
|
||||||
conf: conf,
|
conf: conf,
|
||||||
}
|
}
|
||||||
|
@ -24,6 +28,7 @@ func (s BackBlazeRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_sto
|
||||||
Endpoint: aws.String(conf.BackblazeEndpoint),
|
Endpoint: aws.String(conf.BackblazeEndpoint),
|
||||||
Region: aws.String("us-west-002"),
|
Region: aws.String("us-west-002"),
|
||||||
S3ForcePathStyle: aws.Bool(true),
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if conf.BackblazeKeyId != "" && conf.BackblazeApplicationKey != "" {
|
if conf.BackblazeKeyId != "" && conf.BackblazeApplicationKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(conf.BackblazeKeyId, conf.BackblazeApplicationKey, "")
|
config.Credentials = credentials.NewStaticCredentials(conf.BackblazeKeyId, conf.BackblazeApplicationKey, "")
|
||||||
|
@ -33,6 +38,7 @@ func (s BackBlazeRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_sto
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("create backblaze session: %v", err)
|
return nil, fmt.Errorf("create backblaze session: %v", err)
|
||||||
}
|
}
|
||||||
|
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
|
||||||
client.conn = s3.New(sess)
|
client.conn = s3.New(sess)
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,7 +6,7 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"os"
|
"os"
|
||||||
|
@ -18,7 +18,11 @@ func init() {
|
||||||
|
|
||||||
type BaiduRemoteStorageMaker struct{}
|
type BaiduRemoteStorageMaker struct{}
|
||||||
|
|
||||||
func (s BaiduRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
// HasBucket reports true: Baidu BOS locations include a bucket component.
func (s BaiduRemoteStorageMaker) HasBucket() bool {
	return true
}
|
||||||
|
|
||||||
|
func (s BaiduRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
client := &s3RemoteStorageClient{
|
client := &s3RemoteStorageClient{
|
||||||
conf: conf,
|
conf: conf,
|
||||||
}
|
}
|
||||||
|
@ -29,6 +33,7 @@ func (s BaiduRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage
|
||||||
Endpoint: aws.String(conf.BaiduEndpoint),
|
Endpoint: aws.String(conf.BaiduEndpoint),
|
||||||
Region: aws.String(conf.BaiduRegion),
|
Region: aws.String(conf.BaiduRegion),
|
||||||
S3ForcePathStyle: aws.Bool(true),
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if accessKey != "" && secretKey != "" {
|
if accessKey != "" && secretKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
||||||
|
@ -38,6 +43,7 @@ func (s BaiduRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("create baidu session: %v", err)
|
return nil, fmt.Errorf("create baidu session: %v", err)
|
||||||
}
|
}
|
||||||
|
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
|
||||||
client.conn = s3.New(sess)
|
client.conn = s3.New(sess)
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -10,6 +10,7 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"io"
|
"io"
|
||||||
|
@ -22,14 +23,19 @@ func init() {
|
||||||
|
|
||||||
type s3RemoteStorageMaker struct{}
|
type s3RemoteStorageMaker struct{}
|
||||||
|
|
||||||
func (s s3RemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
// HasBucket reports true: s3 locations include a bucket component.
func (s s3RemoteStorageMaker) HasBucket() bool {
	return true
}
|
||||||
|
|
||||||
|
func (s s3RemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
client := &s3RemoteStorageClient{
|
client := &s3RemoteStorageClient{
|
||||||
conf: conf,
|
conf: conf,
|
||||||
}
|
}
|
||||||
config := &aws.Config{
|
config := &aws.Config{
|
||||||
Region: aws.String(conf.S3Region),
|
Region: aws.String(conf.S3Region),
|
||||||
Endpoint: aws.String(conf.S3Endpoint),
|
Endpoint: aws.String(conf.S3Endpoint),
|
||||||
S3ForcePathStyle: aws.Bool(conf.S3ForcePathStyle),
|
S3ForcePathStyle: aws.Bool(conf.S3ForcePathStyle),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if conf.S3AccessKey != "" && conf.S3SecretKey != "" {
|
if conf.S3AccessKey != "" && conf.S3SecretKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(conf.S3AccessKey, conf.S3SecretKey, "")
|
config.Credentials = credentials.NewStaticCredentials(conf.S3AccessKey, conf.S3SecretKey, "")
|
||||||
|
@ -39,18 +45,19 @@ func (s s3RemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.Re
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("create aws session: %v", err)
|
return nil, fmt.Errorf("create aws session: %v", err)
|
||||||
}
|
}
|
||||||
|
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
|
||||||
client.conn = s3.New(sess)
|
client.conn = s3.New(sess)
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type s3RemoteStorageClient struct {
|
type s3RemoteStorageClient struct {
|
||||||
conf *filer_pb.RemoteConf
|
conf *remote_pb.RemoteConf
|
||||||
conn s3iface.S3API
|
conn s3iface.S3API
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ = remote_storage.RemoteStorageClient(&s3RemoteStorageClient{})
|
var _ = remote_storage.RemoteStorageClient(&s3RemoteStorageClient{})
|
||||||
|
|
||||||
func (s *s3RemoteStorageClient) Traverse(remote *filer_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
|
func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
|
||||||
|
|
||||||
pathKey := remote.Path[1:]
|
pathKey := remote.Path[1:]
|
||||||
|
|
||||||
|
@ -92,7 +99,7 @@ func (s *s3RemoteStorageClient) Traverse(remote *filer_pb.RemoteStorageLocation,
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (s *s3RemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
|
func (s *s3RemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
|
||||||
downloader := s3manager.NewDownloaderWithClient(s.conn, func(u *s3manager.Downloader) {
|
downloader := s3manager.NewDownloaderWithClient(s.conn, func(u *s3manager.Downloader) {
|
||||||
u.PartSize = int64(4 * 1024 * 1024)
|
u.PartSize = int64(4 * 1024 * 1024)
|
||||||
u.Concurrency = 1
|
u.Concurrency = 1
|
||||||
|
@ -113,11 +120,15 @@ func (s *s3RemoteStorageClient) ReadFile(loc *filer_pb.RemoteStorageLocation, of
|
||||||
return writerAt.Bytes(), nil
|
return writerAt.Bytes(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *s3RemoteStorageClient) WriteDirectory(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
|
func (s *s3RemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *s3RemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
|
func (s *s3RemoteStorageClient) RemoveDirectory(loc *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *s3RemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {
|
||||||
|
|
||||||
fileSize := int64(filer.FileSize(entry))
|
fileSize := int64(filer.FileSize(entry))
|
||||||
|
|
||||||
|
@ -129,7 +140,7 @@ func (s *s3RemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, e
|
||||||
// Create an uploader with the session and custom options
|
// Create an uploader with the session and custom options
|
||||||
uploader := s3manager.NewUploaderWithClient(s.conn, func(u *s3manager.Uploader) {
|
uploader := s3manager.NewUploaderWithClient(s.conn, func(u *s3manager.Uploader) {
|
||||||
u.PartSize = partSize
|
u.PartSize = partSize
|
||||||
u.Concurrency = 5
|
u.Concurrency = 1
|
||||||
})
|
})
|
||||||
|
|
||||||
// process tagging
|
// process tagging
|
||||||
|
@ -152,7 +163,7 @@ func (s *s3RemoteStorageClient) WriteFile(loc *filer_pb.RemoteStorageLocation, e
|
||||||
|
|
||||||
//in case it fails to upload
|
//in case it fails to upload
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("upload to s3 %s/%s%s: %v", loc.Name, loc.Bucket, loc.Path, err)
|
return nil, fmt.Errorf("upload to %s/%s%s: %v", loc.Name, loc.Bucket, loc.Path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// read back the remote entry
|
// read back the remote entry
|
||||||
|
@ -171,7 +182,7 @@ func toTagging(attributes map[string][]byte) *s3.Tagging {
|
||||||
return tagging
|
return tagging
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
|
func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
|
||||||
resp, err := s.conn.HeadObject(&s3.HeadObjectInput{
|
resp, err := s.conn.HeadObject(&s3.HeadObjectInput{
|
||||||
Bucket: aws.String(loc.Bucket),
|
Bucket: aws.String(loc.Bucket),
|
||||||
Key: aws.String(loc.Path[1:]),
|
Key: aws.String(loc.Path[1:]),
|
||||||
|
@ -189,7 +200,7 @@ func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageL
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
|
func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
|
||||||
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
|
if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -208,7 +219,7 @@ func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLo
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (s *s3RemoteStorageClient) DeleteFile(loc *filer_pb.RemoteStorageLocation) (err error) {
|
func (s *s3RemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
_, err = s.conn.DeleteObject(&s3.DeleteObjectInput{
|
_, err = s.conn.DeleteObject(&s3.DeleteObjectInput{
|
||||||
Bucket: aws.String(loc.Bucket),
|
Bucket: aws.String(loc.Bucket),
|
||||||
Key: aws.String(loc.Path[1:]),
|
Key: aws.String(loc.Path[1:]),
|
||||||
|
|
|
@ -6,7 +6,7 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"os"
|
"os"
|
||||||
|
@ -18,7 +18,11 @@ func init() {
|
||||||
|
|
||||||
type TencentRemoteStorageMaker struct{}
|
type TencentRemoteStorageMaker struct{}
|
||||||
|
|
||||||
func (s TencentRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
// HasBucket reports true: Tencent COS locations include a bucket component.
func (s TencentRemoteStorageMaker) HasBucket() bool {
	return true
}
|
||||||
|
|
||||||
|
func (s TencentRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
client := &s3RemoteStorageClient{
|
client := &s3RemoteStorageClient{
|
||||||
conf: conf,
|
conf: conf,
|
||||||
}
|
}
|
||||||
|
@ -27,7 +31,9 @@ func (s TencentRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_stora
|
||||||
|
|
||||||
config := &aws.Config{
|
config := &aws.Config{
|
||||||
Endpoint: aws.String(conf.TencentEndpoint),
|
Endpoint: aws.String(conf.TencentEndpoint),
|
||||||
|
Region: aws.String("us-west-2"),
|
||||||
S3ForcePathStyle: aws.Bool(true),
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if accessKey != "" && secretKey != "" {
|
if accessKey != "" && secretKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
||||||
|
@ -37,6 +43,7 @@ func (s TencentRemoteStorageMaker) Make(conf *filer_pb.RemoteConf) (remote_stora
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("create tencent session: %v", err)
|
return nil, fmt.Errorf("create tencent session: %v", err)
|
||||||
}
|
}
|
||||||
|
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
|
||||||
client.conn = s3.New(sess)
|
client.conn = s3.New(sess)
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
61
weed/remote_storage/s3/wasabi.go
Normal file
61
weed/remote_storage/s3/wasabi.go
Normal file
|
@ -0,0 +1,61 @@
|
||||||
|
package s3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the Wasabi client maker under the "wasabi" remote storage type.
func init() {
	remote_storage.RemoteStorageClientMakers["wasabi"] = new(WasabiRemoteStorageMaker)
}
|
||||||
|
|
||||||
|
// WasabiRemoteStorageMaker builds s3-compatible clients for Wasabi.
type WasabiRemoteStorageMaker struct{}

// HasBucket reports true: Wasabi locations include a bucket component.
func (s WasabiRemoteStorageMaker) HasBucket() bool {
	return true
}
|
||||||
|
|
||||||
|
func (s WasabiRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
|
||||||
|
client := &s3RemoteStorageClient{
|
||||||
|
conf: conf,
|
||||||
|
}
|
||||||
|
accessKey := util.Nvl(conf.WasabiAccessKey)
|
||||||
|
secretKey := util.Nvl(conf.WasabiSecretKey)
|
||||||
|
|
||||||
|
config := &aws.Config{
|
||||||
|
Endpoint: aws.String(conf.WasabiEndpoint),
|
||||||
|
Region: aws.String(conf.WasabiRegion),
|
||||||
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
|
}
|
||||||
|
if accessKey != "" && secretKey != "" {
|
||||||
|
config.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
sess, err := session.NewSession(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create wasabi session: %v", err)
|
||||||
|
}
|
||||||
|
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
|
||||||
|
client.conn = s3.New(sess)
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var skipSha256PayloadSigning = func(r *request.Request) {
|
||||||
|
// see https://github.com/ceph/ceph/pull/15965/files
|
||||||
|
if r.ClientInfo.ServiceID != "S3" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if r.Operation.Name == "PutObject" || r.Operation.Name == "UploadPart" {
|
||||||
|
if len(r.HTTPRequest.Header.Get("X-Amz-Content-Sha256")) == 0 {
|
||||||
|
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
62
weed/remote_storage/traverse_bfs.go
Normal file
62
weed/remote_storage/traverse_bfs.go
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
package remote_storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ListDirectoryFunc func(parentDir util.FullPath, visitFn VisitFunc) error
|
||||||
|
|
||||||
|
func TraverseBfs(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc) (err error) {
|
||||||
|
K := 5
|
||||||
|
|
||||||
|
var dirQueueWg sync.WaitGroup
|
||||||
|
dirQueue := util.NewQueue()
|
||||||
|
dirQueueWg.Add(1)
|
||||||
|
dirQueue.Enqueue(parentPath)
|
||||||
|
var isTerminating bool
|
||||||
|
|
||||||
|
for i := 0; i < K; i++ {
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
if isTerminating {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
t := dirQueue.Dequeue()
|
||||||
|
if t == nil {
|
||||||
|
time.Sleep(329 * time.Millisecond)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dir := t.(util.FullPath)
|
||||||
|
processErr := processOneDirectory(listDirFn, dir, visitFn, dirQueue, &dirQueueWg)
|
||||||
|
if processErr != nil {
|
||||||
|
err = processErr
|
||||||
|
}
|
||||||
|
dirQueueWg.Done()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
dirQueueWg.Wait()
|
||||||
|
isTerminating = true
|
||||||
|
return
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func processOneDirectory(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc, dirQueue *util.Queue, dirQueueWg *sync.WaitGroup) (error) {
|
||||||
|
|
||||||
|
return listDirFn(parentPath, func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error {
|
||||||
|
if err := visitFn(dir, name, isDirectory, remoteEntry); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !isDirectory {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
dirQueueWg.Add(1)
|
||||||
|
dirQueue.Enqueue(parentPath.Child(name))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
|
@ -77,6 +77,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc
|
||||||
Region: aws.String(s3sink.region),
|
Region: aws.String(s3sink.region),
|
||||||
Endpoint: aws.String(s3sink.endpoint),
|
Endpoint: aws.String(s3sink.endpoint),
|
||||||
S3ForcePathStyle: aws.Bool(true),
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
|
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
|
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
|
||||||
|
|
|
@ -42,6 +42,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que
|
||||||
|
|
||||||
config := &aws.Config{
|
config := &aws.Config{
|
||||||
Region: aws.String(region),
|
Region: aws.String(region),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
|
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
|
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
|
||||||
|
|
|
@ -384,6 +384,8 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR
|
||||||
|
|
||||||
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {
|
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {
|
||||||
|
|
||||||
|
clusterId, _ := fs.filer.Store.KvGet(context.Background(), []byte("clusterId"))
|
||||||
|
|
||||||
t := &filer_pb.GetFilerConfigurationResponse{
|
t := &filer_pb.GetFilerConfigurationResponse{
|
||||||
Masters: fs.option.Masters,
|
Masters: fs.option.Masters,
|
||||||
Collection: fs.option.Collection,
|
Collection: fs.option.Collection,
|
||||||
|
@ -395,6 +397,7 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.
|
||||||
MetricsAddress: fs.metricsAddress,
|
MetricsAddress: fs.metricsAddress,
|
||||||
MetricsIntervalSec: int32(fs.metricsIntervalSec),
|
MetricsIntervalSec: int32(fs.metricsIntervalSec),
|
||||||
Version: util.Version(),
|
Version: util.Version(),
|
||||||
|
ClusterId: string(clusterId),
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(4).Infof("GetFilerConfiguration: %v", t)
|
glog.V(4).Infof("GetFilerConfiguration: %v", t)
|
||||||
|
|
|
@ -6,11 +6,13 @@ import (
|
||||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||||
"github.com/chrislusf/seaweedfs/weed/operation"
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -27,7 +29,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
|
||||||
}
|
}
|
||||||
|
|
||||||
// find mapping
|
// find mapping
|
||||||
var remoteStorageMountedLocation *filer_pb.RemoteStorageLocation
|
var remoteStorageMountedLocation *remote_pb.RemoteStorageLocation
|
||||||
var localMountedDir string
|
var localMountedDir string
|
||||||
for k, loc := range mappings.Mappings {
|
for k, loc := range mappings.Mappings {
|
||||||
if strings.HasPrefix(req.Directory, k) {
|
if strings.HasPrefix(req.Directory, k) {
|
||||||
|
@ -43,7 +45,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
storageConf := &filer_pb.RemoteConf{}
|
storageConf := &remote_pb.RemoteConf{}
|
||||||
if unMarshalErr := proto.Unmarshal(storageConfEntry.Content, storageConf); unMarshalErr != nil {
|
if unMarshalErr := proto.Unmarshal(storageConfEntry.Content, storageConf); unMarshalErr != nil {
|
||||||
return nil, fmt.Errorf("unmarshal remote storage conf %s/%s: %v", filer.DirectoryEtcRemote, remoteStorageMountedLocation.Name+filer.REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
|
return nil, fmt.Errorf("unmarshal remote storage conf %s/%s: %v", filer.DirectoryEtcRemote, remoteStorageMountedLocation.Name+filer.REMOTE_STORAGE_CONF_SUFFIX, unMarshalErr)
|
||||||
}
|
}
|
||||||
|
@ -79,12 +81,15 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
|
||||||
|
|
||||||
var chunks []*filer_pb.FileChunk
|
var chunks []*filer_pb.FileChunk
|
||||||
var fetchAndWriteErr error
|
var fetchAndWriteErr error
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
limitedConcurrentExecutor := util.NewLimitedConcurrentExecutor(8)
|
limitedConcurrentExecutor := util.NewLimitedConcurrentExecutor(8)
|
||||||
for offset := int64(0); offset < entry.Remote.RemoteSize; offset += chunkSize {
|
for offset := int64(0); offset < entry.Remote.RemoteSize; offset += chunkSize {
|
||||||
localOffset := offset
|
localOffset := offset
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
limitedConcurrentExecutor.Execute(func() {
|
limitedConcurrentExecutor.Execute(func() {
|
||||||
|
defer wg.Done()
|
||||||
size := chunkSize
|
size := chunkSize
|
||||||
if localOffset+chunkSize > entry.Remote.RemoteSize {
|
if localOffset+chunkSize > entry.Remote.RemoteSize {
|
||||||
size = entry.Remote.RemoteSize - localOffset
|
size = entry.Remote.RemoteSize - localOffset
|
||||||
|
@ -114,14 +119,12 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
|
||||||
Cookie: uint32(fileId.Cookie),
|
Cookie: uint32(fileId.Cookie),
|
||||||
Offset: localOffset,
|
Offset: localOffset,
|
||||||
Size: size,
|
Size: size,
|
||||||
RemoteType: storageConf.Type,
|
RemoteConf: storageConf,
|
||||||
RemoteName: storageConf.Name,
|
RemoteLocation: &remote_pb.RemoteStorageLocation{
|
||||||
S3AccessKey: storageConf.S3AccessKey,
|
Name: remoteStorageMountedLocation.Name,
|
||||||
S3SecretKey: storageConf.S3SecretKey,
|
Bucket: remoteStorageMountedLocation.Bucket,
|
||||||
S3Region: storageConf.S3Region,
|
Path: string(dest),
|
||||||
S3Endpoint: storageConf.S3Endpoint,
|
},
|
||||||
RemoteBucket: remoteStorageMountedLocation.Bucket,
|
|
||||||
RemotePath: string(dest),
|
|
||||||
})
|
})
|
||||||
if fetchAndWriteErr != nil {
|
if fetchAndWriteErr != nil {
|
||||||
return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr)
|
return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr)
|
||||||
|
@ -129,7 +132,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil && fetchAndWriteErr == nil {
|
||||||
fetchAndWriteErr = err
|
fetchAndWriteErr = err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -148,6 +151,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
if fetchAndWriteErr != nil {
|
if fetchAndWriteErr != nil {
|
||||||
return nil, fetchAndWriteErr
|
return nil, fetchAndWriteErr
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,6 +3,7 @@ package weed_server
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math"
|
"math"
|
||||||
|
@ -12,7 +13,6 @@ import (
|
||||||
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||||
"github.com/chrislusf/seaweedfs/weed/operation"
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage"
|
"github.com/chrislusf/seaweedfs/weed/storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||||
|
@ -60,7 +60,7 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_
|
||||||
}
|
}
|
||||||
|
|
||||||
// write .vif files
|
// write .vif files
|
||||||
if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil {
|
if err := volume_info.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil {
|
||||||
return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
|
return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,6 @@ package weed_server
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||||
|
@ -17,25 +16,15 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
|
||||||
return nil, fmt.Errorf("not found volume id %d", req.VolumeId)
|
return nil, fmt.Errorf("not found volume id %d", req.VolumeId)
|
||||||
}
|
}
|
||||||
|
|
||||||
remoteConf := &filer_pb.RemoteConf{
|
remoteConf := req.RemoteConf
|
||||||
Type: req.RemoteType,
|
|
||||||
Name: req.RemoteName,
|
|
||||||
S3AccessKey: req.S3AccessKey,
|
|
||||||
S3SecretKey: req.S3SecretKey,
|
|
||||||
S3Region: req.S3Region,
|
|
||||||
S3Endpoint: req.S3Endpoint,
|
|
||||||
}
|
|
||||||
|
|
||||||
client, getClientErr := remote_storage.GetRemoteStorage(remoteConf)
|
client, getClientErr := remote_storage.GetRemoteStorage(remoteConf)
|
||||||
if getClientErr != nil {
|
if getClientErr != nil {
|
||||||
return nil, fmt.Errorf("get remote client: %v", getClientErr)
|
return nil, fmt.Errorf("get remote client: %v", getClientErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
remoteStorageLocation := &filer_pb.RemoteStorageLocation{
|
remoteStorageLocation := req.RemoteLocation
|
||||||
Name: req.RemoteName,
|
|
||||||
Bucket: req.RemoteBucket,
|
|
||||||
Path: req.RemotePath,
|
|
||||||
}
|
|
||||||
data, ReadRemoteErr := client.ReadFile(remoteStorageLocation, req.Offset, req.Size)
|
data, ReadRemoteErr := client.ReadFile(remoteStorageLocation, req.Offset, req.Size)
|
||||||
if ReadRemoteErr != nil {
|
if ReadRemoteErr != nil {
|
||||||
return nil, fmt.Errorf("read from remote %+v: %v", remoteStorageLocation, ReadRemoteErr)
|
return nil, fmt.Errorf("read from remote %+v: %v", remoteStorageLocation, ReadRemoteErr)
|
||||||
|
|
|
@ -5,8 +5,10 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"io"
|
"io"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -49,6 +51,7 @@ func (c *commandRemoteCache) Do(args []string, commandEnv *CommandEnv, writer io
|
||||||
remoteMountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
remoteMountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
||||||
|
|
||||||
dir := remoteMountCommand.String("dir", "", "a directory in filer")
|
dir := remoteMountCommand.String("dir", "", "a directory in filer")
|
||||||
|
concurrency := remoteMountCommand.Int("concurrent", 32, "concurrent file downloading")
|
||||||
fileFiler := newFileFilter(remoteMountCommand)
|
fileFiler := newFileFilter(remoteMountCommand)
|
||||||
|
|
||||||
if err = remoteMountCommand.Parse(args); err != nil {
|
if err = remoteMountCommand.Parse(args); err != nil {
|
||||||
|
@ -62,7 +65,7 @@ func (c *commandRemoteCache) Do(args []string, commandEnv *CommandEnv, writer io
|
||||||
}
|
}
|
||||||
|
|
||||||
// pull content from remote
|
// pull content from remote
|
||||||
if err = c.cacheContentData(commandEnv, writer, util.FullPath(localMountedDir), remoteStorageMountedLocation, util.FullPath(*dir), fileFiler, remoteStorageConf); err != nil {
|
if err = c.cacheContentData(commandEnv, writer, util.FullPath(localMountedDir), remoteStorageMountedLocation, util.FullPath(*dir), fileFiler, remoteStorageConf, *concurrency); err != nil {
|
||||||
return fmt.Errorf("cache content data: %v", err)
|
return fmt.Errorf("cache content data: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -110,15 +113,19 @@ func mayHaveCachedToLocal(entry *filer_pb.Entry) bool {
|
||||||
if entry.RemoteEntry == nil {
|
if entry.RemoteEntry == nil {
|
||||||
return false // should not uncache an entry that is not in remote
|
return false // should not uncache an entry that is not in remote
|
||||||
}
|
}
|
||||||
if entry.RemoteEntry.LastLocalSyncTsNs > 0 && len(entry.Chunks) > 0 {
|
if entry.RemoteEntry.LastLocalSyncTsNs > 0 {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, dirToCache util.FullPath, fileFilter *FileFilter, remoteConf *filer_pb.RemoteConf) error {
|
func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, dirToCache util.FullPath, fileFilter *FileFilter, remoteConf *remote_pb.RemoteConf, concurrency int) error {
|
||||||
|
|
||||||
return recursivelyTraverseDirectory(commandEnv, dirToCache, func(dir util.FullPath, entry *filer_pb.Entry) bool {
|
var wg sync.WaitGroup
|
||||||
|
limitedConcurrentExecutor := util.NewLimitedConcurrentExecutor(concurrency)
|
||||||
|
var executionErr error
|
||||||
|
|
||||||
|
traverseErr := recursivelyTraverseDirectory(commandEnv, dirToCache, func(dir util.FullPath, entry *filer_pb.Entry) bool {
|
||||||
if !shouldCacheToLocal(entry) {
|
if !shouldCacheToLocal(entry) {
|
||||||
return true // true means recursive traversal should continue
|
return true // true means recursive traversal should continue
|
||||||
}
|
}
|
||||||
|
@ -127,15 +134,32 @@ func (c *commandRemoteCache) cacheContentData(commandEnv *CommandEnv, writer io.
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
println(dir, entry.Name)
|
wg.Add(1)
|
||||||
|
limitedConcurrentExecutor.Execute(func() {
|
||||||
|
defer wg.Done()
|
||||||
|
fmt.Fprintf(writer, "Cache %+v ...\n", dir.Child(entry.Name))
|
||||||
|
|
||||||
remoteLocation := filer.MapFullPathToRemoteStorageLocation(localMountedDir, remoteMountedLocation, dir.Child(entry.Name))
|
remoteLocation := filer.MapFullPathToRemoteStorageLocation(localMountedDir, remoteMountedLocation, dir.Child(entry.Name))
|
||||||
|
|
||||||
if err := filer.DownloadToLocal(commandEnv, remoteConf, remoteLocation, dir, entry); err != nil {
|
if err := filer.DownloadToLocal(commandEnv, remoteConf, remoteLocation, dir, entry); err != nil {
|
||||||
fmt.Fprintf(writer, "DownloadToLocal %+v: %v\n", remoteLocation, err)
|
fmt.Fprintf(writer, "DownloadToLocal %+v: %v\n", remoteLocation, err)
|
||||||
return false
|
if executionErr == nil {
|
||||||
}
|
executionErr = fmt.Errorf("DownloadToLocal %+v: %v\n", remoteLocation, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Fprintf(writer, "Cache %+v Done\n", dir.Child(entry.Name))
|
||||||
|
})
|
||||||
|
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if traverseErr != nil {
|
||||||
|
return traverseErr
|
||||||
|
}
|
||||||
|
if executionErr != nil {
|
||||||
|
return executionErr
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"github.com/golang/protobuf/jsonpb"
|
"github.com/golang/protobuf/jsonpb"
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
@ -35,6 +36,9 @@ func (c *commandRemoteConfigure) Help() string {
|
||||||
remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy
|
remote.configure -name=cloud1 -type=s3 -s3.access_key=xxx -s3.secret_key=yyy
|
||||||
remote.configure -name=cloud2 -type=gcs -gcs.appCredentialsFile=~/service-account-file.json
|
remote.configure -name=cloud2 -type=gcs -gcs.appCredentialsFile=~/service-account-file.json
|
||||||
remote.configure -name=cloud3 -type=azure -azure.account_name=xxx -azure.account_key=yyy
|
remote.configure -name=cloud3 -type=azure -azure.account_name=xxx -azure.account_key=yyy
|
||||||
|
remote.configure -name=cloud4 -type=aliyun -aliyun.access_key=xxx -aliyun.secret_key=yyy -aliyun.endpoint=oss-cn-shenzhen.aliyuncs.com -aliyun.region=cn-sehnzhen
|
||||||
|
remote.configure -name=cloud5 -type=tencent -tencent.secret_id=xxx -tencent.secret_key=yyy -tencent.endpoint=cos.ap-guangzhou.myqcloud.com
|
||||||
|
remote.configure -name=cloud6 -type=wasabi -wasabi.access_key=xxx -wasabi.secret_key=yyy -wasabi.endpoint=s3.us-west-1.wasabisys.com -wasabi.region=us-west-1
|
||||||
|
|
||||||
# delete one configuration
|
# delete one configuration
|
||||||
remote.configure -delete -name=cloud1
|
remote.configure -delete -name=cloud1
|
||||||
|
@ -48,13 +52,13 @@ var (
|
||||||
|
|
||||||
func (c *commandRemoteConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
|
func (c *commandRemoteConfigure) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
|
||||||
|
|
||||||
conf := &filer_pb.RemoteConf{}
|
conf := &remote_pb.RemoteConf{}
|
||||||
|
|
||||||
remoteConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
remoteConfigureCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
||||||
isDelete := remoteConfigureCommand.Bool("delete", false, "delete one remote storage by its name")
|
isDelete := remoteConfigureCommand.Bool("delete", false, "delete one remote storage by its name")
|
||||||
|
|
||||||
remoteConfigureCommand.StringVar(&conf.Name, "name", "", "a short name to identify the remote storage")
|
remoteConfigureCommand.StringVar(&conf.Name, "name", "", "a short name to identify the remote storage")
|
||||||
remoteConfigureCommand.StringVar(&conf.Type, "type", "s3", "[s3|gcs|azure|b2|aliyun|tencent] storage type")
|
remoteConfigureCommand.StringVar(&conf.Type, "type", "s3", "[s3|gcs|azure|b2|aliyun|tencent|baidu|wasabi|hdfs] storage type")
|
||||||
|
|
||||||
remoteConfigureCommand.StringVar(&conf.S3AccessKey, "s3.access_key", "", "s3 access key")
|
remoteConfigureCommand.StringVar(&conf.S3AccessKey, "s3.access_key", "", "s3 access key")
|
||||||
remoteConfigureCommand.StringVar(&conf.S3SecretKey, "s3.secret_key", "", "s3 secret key")
|
remoteConfigureCommand.StringVar(&conf.S3SecretKey, "s3.secret_key", "", "s3 secret key")
|
||||||
|
@ -86,10 +90,32 @@ func (c *commandRemoteConfigure) Do(args []string, commandEnv *CommandEnv, write
|
||||||
remoteConfigureCommand.StringVar(&conf.BaiduEndpoint, "baidu.endpoint", "", "Baidu endpoint")
|
remoteConfigureCommand.StringVar(&conf.BaiduEndpoint, "baidu.endpoint", "", "Baidu endpoint")
|
||||||
remoteConfigureCommand.StringVar(&conf.BaiduRegion, "baidu.region", "", "Baidu region")
|
remoteConfigureCommand.StringVar(&conf.BaiduRegion, "baidu.region", "", "Baidu region")
|
||||||
|
|
||||||
|
remoteConfigureCommand.StringVar(&conf.WasabiAccessKey, "wasabi.access_key", "", "Wasabi access key")
|
||||||
|
remoteConfigureCommand.StringVar(&conf.WasabiSecretKey, "wasabi.secret_key", "", "Wasabi secret key")
|
||||||
|
remoteConfigureCommand.StringVar(&conf.WasabiEndpoint, "wasabi.endpoint", "", "Wasabi endpoint, see https://wasabi.com/wp-content/themes/wasabi/docs/API_Guide/index.html#t=topics%2Fapidiff-intro.htm")
|
||||||
|
remoteConfigureCommand.StringVar(&conf.WasabiRegion, "wasabi.region", "", "Wasabi region")
|
||||||
|
|
||||||
|
var namenodes arrayFlags
|
||||||
|
remoteConfigureCommand.Var(&namenodes, "hdfs.namenodes", "hdfs name node and port, example: namenode1:8020,namenode2:8020")
|
||||||
|
remoteConfigureCommand.StringVar(&conf.HdfsUsername, "hdfs.username", "", "hdfs user name")
|
||||||
|
remoteConfigureCommand.StringVar(&conf.HdfsServicePrincipalName, "hdfs.servicePrincipalName", "", `Kerberos service principal name for the namenode
|
||||||
|
|
||||||
|
Example: hdfs/namenode.hadoop.docker
|
||||||
|
Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.
|
||||||
|
`)
|
||||||
|
remoteConfigureCommand.StringVar(&conf.HdfsDataTransferProtection, "hdfs.dataTransferProtection", "", "[authentication|integrity|privacy] Kerberos data transfer protection")
|
||||||
|
|
||||||
|
|
||||||
if err = remoteConfigureCommand.Parse(args); err != nil {
|
if err = remoteConfigureCommand.Parse(args); err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if conf.Type != "s3" {
|
||||||
|
// clear out the default values
|
||||||
|
conf.S3Region = ""
|
||||||
|
conf.S3ForcePathStyle = false
|
||||||
|
}
|
||||||
|
|
||||||
if conf.Name == "" {
|
if conf.Name == "" {
|
||||||
return c.listExistingRemoteStorages(commandEnv, writer)
|
return c.listExistingRemoteStorages(commandEnv, writer)
|
||||||
}
|
}
|
||||||
|
@ -116,7 +142,7 @@ func (c *commandRemoteConfigure) listExistingRemoteStorages(commandEnv *CommandE
|
||||||
if !strings.HasSuffix(entry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
|
if !strings.HasSuffix(entry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
conf := &filer_pb.RemoteConf{}
|
conf := &remote_pb.RemoteConf{}
|
||||||
|
|
||||||
if err := proto.Unmarshal(entry.Content, conf); err != nil {
|
if err := proto.Unmarshal(entry.Content, conf); err != nil {
|
||||||
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, entry.Name, err)
|
return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, entry.Name, err)
|
||||||
|
@ -162,7 +188,7 @@ func (c *commandRemoteConfigure) deleteRemoteStorage(commandEnv *CommandEnv, wri
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandRemoteConfigure) saveRemoteStorage(commandEnv *CommandEnv, writer io.Writer, conf *filer_pb.RemoteConf) error {
|
func (c *commandRemoteConfigure) saveRemoteStorage(commandEnv *CommandEnv, writer io.Writer, conf *remote_pb.RemoteConf) error {
|
||||||
|
|
||||||
data, err := proto.Marshal(conf)
|
data, err := proto.Marshal(conf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -178,3 +204,14 @@ func (c *commandRemoteConfigure) saveRemoteStorage(commandEnv *CommandEnv, write
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type arrayFlags []string
|
||||||
|
|
||||||
|
func (i *arrayFlags) String() string {
|
||||||
|
return "my string representation"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *arrayFlags) Set(value string) error {
|
||||||
|
*i = append(*i, value)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"io"
|
"io"
|
||||||
|
@ -54,7 +55,7 @@ func (c *commandRemoteMetaSync) Do(args []string, commandEnv *CommandEnv, writer
|
||||||
}
|
}
|
||||||
|
|
||||||
mappings, localMountedDir, remoteStorageMountedLocation, remoteStorageConf, detectErr := detectMountInfo(commandEnv, writer, *dir)
|
mappings, localMountedDir, remoteStorageMountedLocation, remoteStorageConf, detectErr := detectMountInfo(commandEnv, writer, *dir)
|
||||||
if detectErr != nil{
|
if detectErr != nil {
|
||||||
jsonPrintln(writer, mappings)
|
jsonPrintln(writer, mappings)
|
||||||
return detectErr
|
return detectErr
|
||||||
}
|
}
|
||||||
|
@ -67,7 +68,7 @@ func (c *commandRemoteMetaSync) Do(args []string, commandEnv *CommandEnv, writer
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func detectMountInfo(commandEnv *CommandEnv, writer io.Writer, dir string) (*filer_pb.RemoteStorageMapping, string, *filer_pb.RemoteStorageLocation, *filer_pb.RemoteConf, error) {
|
func detectMountInfo(commandEnv *CommandEnv, writer io.Writer, dir string) (*remote_pb.RemoteStorageMapping, string, *remote_pb.RemoteStorageLocation, *remote_pb.RemoteConf, error) {
|
||||||
return filer.DetectMountInfo(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, dir)
|
return filer.DetectMountInfo(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -105,8 +106,8 @@ func detectMountInfo(commandEnv *CommandEnv, writer io.Writer, dir string) (*fil
|
||||||
If entry.RemoteEntry.RemoteTag != remoteEntry.RemoteTag {
|
If entry.RemoteEntry.RemoteTag != remoteEntry.RemoteTag {
|
||||||
the remote version is updated, need to pull meta
|
the remote version is updated, need to pull meta
|
||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
func pullMetadata(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *filer_pb.RemoteStorageLocation, dirToCache util.FullPath, remoteConf *filer_pb.RemoteConf) error {
|
func pullMetadata(commandEnv *CommandEnv, writer io.Writer, localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, dirToCache util.FullPath, remoteConf *remote_pb.RemoteConf) error {
|
||||||
|
|
||||||
// visit remote storage
|
// visit remote storage
|
||||||
remoteStorage, err := remote_storage.GetRemoteStorage(remoteConf)
|
remoteStorage, err := remote_storage.GetRemoteStorage(remoteConf)
|
||||||
|
@ -157,7 +158,7 @@ func pullMetadata(commandEnv *CommandEnv, writer io.Writer, localMountedDir util
|
||||||
fmt.Fprintln(writer, " (skip)")
|
fmt.Fprintln(writer, " (skip)")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if existingEntry.RemoteEntry.RemoteETag != remoteEntry.RemoteETag {
|
if existingEntry.RemoteEntry.RemoteETag != remoteEntry.RemoteETag || existingEntry.RemoteEntry.RemoteMtime < remoteEntry.RemoteMtime {
|
||||||
// the remote version is updated, need to pull meta
|
// the remote version is updated, need to pull meta
|
||||||
fmt.Fprintln(writer, " (update)")
|
fmt.Fprintln(writer, " (update)")
|
||||||
return doSaveRemoteEntry(client, string(localDir), existingEntry, remoteEntry)
|
return doSaveRemoteEntry(client, string(localDir), existingEntry, remoteEntry)
|
||||||
|
|
|
@ -6,11 +6,13 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/chrislusf/seaweedfs/weed/filer"
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
"github.com/chrislusf/seaweedfs/weed/remote_storage"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"github.com/golang/protobuf/jsonpb"
|
"github.com/golang/protobuf/jsonpb"
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"io"
|
"io"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -58,15 +60,17 @@ func (c *commandRemoteMount) Do(args []string, commandEnv *CommandEnv, writer io
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
remoteStorageLocation := remote_storage.ParseLocation(*remote)
|
|
||||||
|
|
||||||
// find configuration for remote storage
|
// find configuration for remote storage
|
||||||
// remotePath is /<bucket>/path/to/dir
|
remoteConf, err := filer.ReadRemoteStorageConf(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, remote_storage.ParseLocationName(*remote))
|
||||||
remoteConf, err := c.findRemoteStorageConfiguration(commandEnv, writer, remoteStorageLocation)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("find configuration for %s: %v", *remote, err)
|
return fmt.Errorf("find configuration for %s: %v", *remote, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
remoteStorageLocation, err := remote_storage.ParseRemoteLocation(remoteConf.Type, *remote)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// sync metadata from remote
|
// sync metadata from remote
|
||||||
if err = c.syncMetadata(commandEnv, writer, *dir, *nonEmpty, remoteConf, remoteStorageLocation); err != nil {
|
if err = c.syncMetadata(commandEnv, writer, *dir, *nonEmpty, remoteConf, remoteStorageLocation); err != nil {
|
||||||
return fmt.Errorf("pull metadata: %v", err)
|
return fmt.Errorf("pull metadata: %v", err)
|
||||||
|
@ -80,7 +84,7 @@ func (c *commandRemoteMount) Do(args []string, commandEnv *CommandEnv, writer io
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func listExistingRemoteStorageMounts(commandEnv *CommandEnv, writer io.Writer) (mappings *filer_pb.RemoteStorageMapping, err error) {
|
func listExistingRemoteStorageMounts(commandEnv *CommandEnv, writer io.Writer) (mappings *remote_pb.RemoteStorageMapping, err error) {
|
||||||
|
|
||||||
// read current mapping
|
// read current mapping
|
||||||
mappings, err = filer.ReadMountMappings(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress)
|
mappings, err = filer.ReadMountMappings(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress)
|
||||||
|
@ -108,13 +112,13 @@ func jsonPrintln(writer io.Writer, message proto.Message) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandRemoteMount) findRemoteStorageConfiguration(commandEnv *CommandEnv, writer io.Writer, remote *filer_pb.RemoteStorageLocation) (conf *filer_pb.RemoteConf, err error) {
|
func (c *commandRemoteMount) findRemoteStorageConfiguration(commandEnv *CommandEnv, writer io.Writer, remote *remote_pb.RemoteStorageLocation) (conf *remote_pb.RemoteConf, err error) {
|
||||||
|
|
||||||
return filer.ReadRemoteStorageConf(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, remote.Name)
|
return filer.ReadRemoteStorageConf(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, remote.Name)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandRemoteMount) syncMetadata(commandEnv *CommandEnv, writer io.Writer, dir string, nonEmpty bool, remoteConf *filer_pb.RemoteConf, remote *filer_pb.RemoteStorageLocation) error {
|
func (c *commandRemoteMount) syncMetadata(commandEnv *CommandEnv, writer io.Writer, dir string, nonEmpty bool, remoteConf *remote_pb.RemoteConf, remote *remote_pb.RemoteStorageLocation) error {
|
||||||
|
|
||||||
// find existing directory, and ensure the directory is empty
|
// find existing directory, and ensure the directory is empty
|
||||||
err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||||
|
@ -124,7 +128,9 @@ func (c *commandRemoteMount) syncMetadata(commandEnv *CommandEnv, writer io.Writ
|
||||||
Name: name,
|
Name: name,
|
||||||
})
|
})
|
||||||
if lookupErr != nil {
|
if lookupErr != nil {
|
||||||
return fmt.Errorf("lookup %s: %v", dir, lookupErr)
|
if !strings.Contains(lookupErr.Error(), filer_pb.ErrNotFound.Error()) {
|
||||||
|
return fmt.Errorf("lookup %s: %v", dir, lookupErr)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mountToDirIsEmpty := true
|
mountToDirIsEmpty := true
|
||||||
|
@ -157,7 +163,7 @@ func (c *commandRemoteMount) syncMetadata(commandEnv *CommandEnv, writer io.Writ
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandRemoteMount) saveMountMapping(commandEnv *CommandEnv, writer io.Writer, dir string, remoteStorageLocation *filer_pb.RemoteStorageLocation) (err error) {
|
func (c *commandRemoteMount) saveMountMapping(commandEnv *CommandEnv, writer io.Writer, dir string, remoteStorageLocation *remote_pb.RemoteStorageLocation) (err error) {
|
||||||
|
|
||||||
// read current mapping
|
// read current mapping
|
||||||
var oldContent, newContent []byte
|
var oldContent, newContent []byte
|
||||||
|
|
|
@ -83,6 +83,7 @@ func (c *commandRemoteUncache) Do(args []string, commandEnv *CommandEnv, writer
|
||||||
func (c *commandRemoteUncache) uncacheContentData(commandEnv *CommandEnv, writer io.Writer, dirToCache util.FullPath, fileFilter *FileFilter) error {
|
func (c *commandRemoteUncache) uncacheContentData(commandEnv *CommandEnv, writer io.Writer, dirToCache util.FullPath, fileFilter *FileFilter) error {
|
||||||
|
|
||||||
return recursivelyTraverseDirectory(commandEnv, dirToCache, func(dir util.FullPath, entry *filer_pb.Entry) bool {
|
return recursivelyTraverseDirectory(commandEnv, dirToCache, func(dir util.FullPath, entry *filer_pb.Entry) bool {
|
||||||
|
|
||||||
if !mayHaveCachedToLocal(entry) {
|
if !mayHaveCachedToLocal(entry) {
|
||||||
return true // true means recursive traversal should continue
|
return true // true means recursive traversal should continue
|
||||||
}
|
}
|
||||||
|
@ -98,7 +99,7 @@ func (c *commandRemoteUncache) uncacheContentData(commandEnv *CommandEnv, writer
|
||||||
entry.RemoteEntry.LastLocalSyncTsNs = 0
|
entry.RemoteEntry.LastLocalSyncTsNs = 0
|
||||||
entry.Chunks = nil
|
entry.Chunks = nil
|
||||||
|
|
||||||
println(dir, entry.Name)
|
fmt.Fprintf(writer, "Uncache %+v ... ", dir.Child(entry.Name))
|
||||||
|
|
||||||
err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||||
_, updateErr := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
|
_, updateErr := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
|
||||||
|
@ -111,6 +112,7 @@ func (c *commandRemoteUncache) uncacheContentData(commandEnv *CommandEnv, writer
|
||||||
fmt.Fprintf(writer, "uncache %+v: %v\n", dir.Child(entry.Name), err)
|
fmt.Fprintf(writer, "uncache %+v: %v\n", dir.Child(entry.Name), err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
fmt.Fprintf(writer, "Done\n")
|
||||||
|
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
|
|
|
@ -30,7 +30,9 @@ func (c *commandRemoteUnmount) Help() string {
|
||||||
remote.mount -dir=/xxx -remote=s3_1/bucket
|
remote.mount -dir=/xxx -remote=s3_1/bucket
|
||||||
|
|
||||||
# unmount the mounted directory and remove its cache
|
# unmount the mounted directory and remove its cache
|
||||||
remote.unmount -dir=/xxx
|
# Make sure you have stopped "weed filer.remote.sync" first!
|
||||||
|
# Otherwise, the deletion will also be propagated to the remote storage!!!
|
||||||
|
remote.unmount -dir=/xxx -iHaveStoppedRemoteSync
|
||||||
|
|
||||||
`
|
`
|
||||||
}
|
}
|
||||||
|
@ -40,6 +42,7 @@ func (c *commandRemoteUnmount) Do(args []string, commandEnv *CommandEnv, writer
|
||||||
remoteMountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
remoteMountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
||||||
|
|
||||||
dir := remoteMountCommand.String("dir", "", "a directory in filer")
|
dir := remoteMountCommand.String("dir", "", "a directory in filer")
|
||||||
|
hasStoppedRemoteSync := remoteMountCommand.Bool("iHaveStoppedRemoteSync", false, "confirm to stop weed filer.remote.sync first")
|
||||||
|
|
||||||
if err = remoteMountCommand.Parse(args); err != nil {
|
if err = remoteMountCommand.Parse(args); err != nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -58,6 +61,9 @@ func (c *commandRemoteUnmount) Do(args []string, commandEnv *CommandEnv, writer
|
||||||
return fmt.Errorf("directory %s is not mounted", *dir)
|
return fmt.Errorf("directory %s is not mounted", *dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !*hasStoppedRemoteSync {
|
||||||
|
return fmt.Errorf("make sure \"weed filer.remote.sync\" is stopped to avoid data loss")
|
||||||
|
}
|
||||||
// purge mounted data
|
// purge mounted data
|
||||||
if err = c.purgeMountedData(commandEnv, *dir); err != nil {
|
if err = c.purgeMountedData(commandEnv, *dir); err != nil {
|
||||||
return fmt.Errorf("purge mounted data: %v", err)
|
return fmt.Errorf("purge mounted data: %v", err)
|
||||||
|
@ -71,12 +77,6 @@ func (c *commandRemoteUnmount) Do(args []string, commandEnv *CommandEnv, writer
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandRemoteUnmount) findRemoteStorageConfiguration(commandEnv *CommandEnv, writer io.Writer, remote *filer_pb.RemoteStorageLocation) (conf *filer_pb.RemoteConf, err error) {
|
|
||||||
|
|
||||||
return filer.ReadRemoteStorageConf(commandEnv.option.GrpcDialOption, commandEnv.option.FilerAddress, remote.Name)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *commandRemoteUnmount) purgeMountedData(commandEnv *CommandEnv, dir string) error {
|
func (c *commandRemoteUnmount) purgeMountedData(commandEnv *CommandEnv, dir string) error {
|
||||||
|
|
||||||
// find existing directory, and ensure the directory is empty
|
// find existing directory, and ensure the directory is empty
|
||||||
|
|
|
@ -29,7 +29,7 @@ func (c *commandVolumeFixReplication) Name() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandVolumeFixReplication) Help() string {
|
func (c *commandVolumeFixReplication) Help() string {
|
||||||
return `add replicas to volumes that are missing replicas
|
return `add or remove replicas to volumes that are missing replicas or over-replicated
|
||||||
|
|
||||||
This command finds all over-replicated volumes. If found, it will purge the oldest copies and stop.
|
This command finds all over-replicated volumes. If found, it will purge the oldest copies and stop.
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,9 @@
|
||||||
package shell
|
package shell
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util/grace"
|
"github.com/chrislusf/seaweedfs/weed/util/grace"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
@ -45,6 +47,24 @@ func RunShell(options ShellOptions) {
|
||||||
go commandEnv.MasterClient.KeepConnectedToMaster()
|
go commandEnv.MasterClient.KeepConnectedToMaster()
|
||||||
commandEnv.MasterClient.WaitUntilConnected()
|
commandEnv.MasterClient.WaitUntilConnected()
|
||||||
|
|
||||||
|
if commandEnv.option.FilerAddress != "" {
|
||||||
|
commandEnv.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
|
||||||
|
resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if resp.ClusterId != "" {
|
||||||
|
fmt.Printf(`
|
||||||
|
---
|
||||||
|
Free Monitoring Data URL:
|
||||||
|
https://cloud.seaweedfs.com/ui/%s
|
||||||
|
---
|
||||||
|
`, resp.ClusterId)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
cmd, err := line.Prompt("> ")
|
cmd, err := line.Prompt("> ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -37,6 +37,7 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string)
|
||||||
Region: aws.String(region),
|
Region: aws.String(region),
|
||||||
Endpoint: aws.String(endpoint),
|
Endpoint: aws.String(endpoint),
|
||||||
S3ForcePathStyle: aws.Bool(true),
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
S3DisableContentMD5Validation: aws.Bool(true),
|
||||||
}
|
}
|
||||||
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
|
if awsAccessKeyId != "" && awsSecretAccessKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
|
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")
|
||||||
|
|
|
@ -3,13 +3,13 @@ package erasure_coding
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/idx"
|
"github.com/chrislusf/seaweedfs/weed/storage/idx"
|
||||||
|
@ -63,10 +63,10 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection
|
||||||
|
|
||||||
// read volume info
|
// read volume info
|
||||||
ev.Version = needle.Version3
|
ev.Version = needle.Version3
|
||||||
if volumeInfo, _, found, _ := pb.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found {
|
if volumeInfo, _, found, _ := volume_info.MaybeLoadVolumeInfo(dataBaseFileName + ".vif"); found {
|
||||||
ev.Version = needle.Version(volumeInfo.Version)
|
ev.Version = needle.Version(volumeInfo.Version)
|
||||||
} else {
|
} else {
|
||||||
pb.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)})
|
volume_info.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)})
|
||||||
}
|
}
|
||||||
|
|
||||||
ev.ShardLocations = make(map[ShardId][]string)
|
ev.ShardLocations = make(map[ShardId][]string)
|
||||||
|
|
|
@ -7,7 +7,7 @@ import (
|
||||||
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/idx"
|
"github.com/chrislusf/seaweedfs/weed/storage/idx"
|
||||||
. "github.com/chrislusf/seaweedfs/weed/storage/types"
|
. "github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||||
"github.com/willf/bloom"
|
"github.com/tylertreat/BoomFilters"
|
||||||
)
|
)
|
||||||
|
|
||||||
type mapMetric struct {
|
type mapMetric struct {
|
||||||
|
@ -93,10 +93,10 @@ func (mm *mapMetric) MaybeSetMaxFileKey(key NeedleId) {
|
||||||
|
|
||||||
func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) {
|
func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) {
|
||||||
mm = &mapMetric{}
|
mm = &mapMetric{}
|
||||||
var bf *bloom.BloomFilter
|
var bf *boom.StableBloomFilter
|
||||||
buf := make([]byte, NeedleIdSize)
|
buf := make([]byte, NeedleIdSize)
|
||||||
err = reverseWalkIndexFile(r, func(entryCount int64) {
|
err = reverseWalkIndexFile(r, func(entryCount int64) {
|
||||||
bf = bloom.NewWithEstimates(uint(entryCount), 0.001)
|
bf = boom.NewDefaultStableBloomFilter(uint(entryCount), 0.001)
|
||||||
}, func(key NeedleId, offset Offset, size Size) error {
|
}, func(key NeedleId, offset Offset, size Size) error {
|
||||||
|
|
||||||
mm.MaybeSetMaxFileKey(key)
|
mm.MaybeSetMaxFileKey(key)
|
||||||
|
|
|
@ -2,6 +2,7 @@ package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util"
|
"github.com/chrislusf/seaweedfs/weed/util"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -10,7 +11,6 @@ import (
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/stats"
|
"github.com/chrislusf/seaweedfs/weed/stats"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||||
|
@ -474,12 +474,12 @@ func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error {
|
||||||
// load, modify, save
|
// load, modify, save
|
||||||
baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name()))
|
baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name()))
|
||||||
vifFile := filepath.Join(location.Directory, baseFileName+".vif")
|
vifFile := filepath.Join(location.Directory, baseFileName+".vif")
|
||||||
volumeInfo, _, _, err := pb.MaybeLoadVolumeInfo(vifFile)
|
volumeInfo, _, _, err := volume_info.MaybeLoadVolumeInfo(vifFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("volume %d fail to load vif", i)
|
return fmt.Errorf("volume %d fail to load vif", i)
|
||||||
}
|
}
|
||||||
volumeInfo.Replication = replication
|
volumeInfo.Replication = replication
|
||||||
err = pb.SaveVolumeInfo(vifFile, volumeInfo)
|
err = volume_info.SaveVolumeInfo(vifFile, volumeInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("volume %d fail to save vif", i)
|
return fmt.Errorf("volume %d fail to save vif", i)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package pb
|
package volume_info
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
|
@ -2,11 +2,11 @@ package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb"
|
|
||||||
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/backend"
|
"github.com/chrislusf/seaweedfs/weed/storage/backend"
|
||||||
_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
|
_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
|
||||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||||
|
volume_info "github.com/chrislusf/seaweedfs/weed/storage/volume_info"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo {
|
func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo {
|
||||||
|
@ -16,7 +16,7 @@ func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo {
|
||||||
func (v *Volume) maybeLoadVolumeInfo() (found bool) {
|
func (v *Volume) maybeLoadVolumeInfo() (found bool) {
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
v.volumeInfo, v.hasRemoteFile, found, err = pb.MaybeLoadVolumeInfo(v.FileName(".vif"))
|
v.volumeInfo, v.hasRemoteFile, found, err = volume_info.MaybeLoadVolumeInfo(v.FileName(".vif"))
|
||||||
|
|
||||||
if v.volumeInfo.Version == 0 {
|
if v.volumeInfo.Version == 0 {
|
||||||
v.volumeInfo.Version = uint32(needle.CurrentVersion)
|
v.volumeInfo.Version = uint32(needle.CurrentVersion)
|
||||||
|
@ -56,6 +56,6 @@ func (v *Volume) SaveVolumeInfo() error {
|
||||||
|
|
||||||
tierFileName := v.FileName(".vif")
|
tierFileName := v.FileName(".vif")
|
||||||
|
|
||||||
return pb.SaveVolumeInfo(tierFileName, v.volumeInfo)
|
return volume_info.SaveVolumeInfo(tierFileName, v.volumeInfo)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,7 +5,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
VERSION = fmt.Sprintf("%s %.02f", sizeLimit, 2.64)
|
VERSION = fmt.Sprintf("%s %.02f", sizeLimit, 2.65)
|
||||||
COMMIT = ""
|
COMMIT = ""
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue