Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00

Commit 0548ed3a1b
.github/workflows/binaries_dev.yml (vendored; 107 changes)

@@ -6,57 +6,66 @@ on:

 jobs:

-  build-latest-docker-image:
-    runs-on: [ubuntu-latest]
+  cleanup:
+    runs-on: ubuntu-latest

     steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-            ghcr.io/chrislusf/seaweedfs
-          tags: |
-            type=raw,value=latest
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          buildkitd-flags: "--debug"
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Login to GHCR
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          registry: ghcr.io
-          username: ${{ secrets.GHCR_USERNAME }}
-          password: ${{ secrets.GHCR_TOKEN }}
-      -
-        name: Build
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile
-          platforms: linux/amd64, linux/arm, linux/arm64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
+      - name: Delete old release assets
+        uses: mknejp/delete-release-assets@v1
+        with:
+          token: ${{ github.token }}
+          tag: dev
+          fail-if-no-assets: false
+          assets: |
+            weed-*
+
+  build_dev:
+    needs: cleanup
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        goos: [linux, windows, darwin, freebsd]
+        goarch: [amd64, arm64]
+        exclude:
+          - goarch: arm64
+            goos: windows
+
+    steps:
+
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v2
+
+      - name: Set BUILD_TIME env
+        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
+
+      - name: Go Release Binaries Large Disk
+        uses: wangyoucao577/go-release-action@v1.20
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: ${{ matrix.goos }}
+          goarch: ${{ matrix.goarch }}
+          release_tag: dev
+          overwrite: true
+          pre_command: export CGO_ENABLED=0
+          build_flags: -tags 5BytesOffset # optional, default is
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed-large-disk
+          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
+
+      - name: Go Release Binaries Normal Volume Size
+        uses: wangyoucao577/go-release-action@v1.20
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: ${{ matrix.goos }}
+          goarch: ${{ matrix.goarch }}
+          release_tag: dev
+          overwrite: true
+          pre_command: export CGO_ENABLED=0
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed-normal-disk
+          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
.github/workflows/binaries_release0.yml (vendored; new file, 54 lines)

@@ -0,0 +1,54 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for windows"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:

  build-release-binaries_windows:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [windows]
        goarch: [amd64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2

      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"

      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
.github/workflows/binaries_release1.yml (vendored)

@@ -1,6 +1,6 @@
 # This is a basic workflow to help you get started with Actions

-name: "go: build versioned binaries"
+name: "go: build versioned binaries for linux"

 on:
   push:
@@ -13,21 +13,12 @@ on:
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:

-  build-release-binaries:
+  build-release-binaries_linux:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        goos: [linux, windows, darwin, freebsd]
+        goos: [linux]
         goarch: [amd64, arm, arm64]
-        exclude:
-          - goarch: arm
-            goos: darwin
-          - goarch: 386
-            goos: darwin
-          - goarch: arm
-            goos: windows
-          - goarch: arm64
-            goos: windows

     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
.github/workflows/binaries_release2.yml (vendored; new file, 54 lines)

@@ -0,0 +1,54 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for darwin"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:

  build-release-binaries_darwin:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [darwin]
        goarch: [amd64, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2

      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"

      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
.github/workflows/binaries_release3.yml (vendored; new file, 54 lines)

@@ -0,0 +1,54 @@
# This is a basic workflow to help you get started with Actions

name: "go: build versioned binaries for freebsd"

on:
  push:
    tags:
      - '*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:

  build-release-binaries_freebsd:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [freebsd]
        goarch: [amd64, arm, arm64]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2

      - name: Go Release Binaries Normal Volume Size
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0
          # build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"

      - name: Go Release Large Disk Binaries
        uses: wangyoucao577/go-release-action@v1.20
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          overwrite: true
          pre_command: export CGO_ENABLED=0
          build_flags: -tags 5BytesOffset # optional, default is
          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
          # Where to run `go build .`
          project_path: weed
          binary_name: weed
          asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk"
.github/workflows/container_dev.yml (vendored; 2 changes)

@@ -58,6 +58,6 @@ jobs:
           context: ./docker
           push: ${{ github.event_name != 'pull_request' }}
           file: ./docker/Dockerfile.go_build
-          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
+          platforms: linux/amd64, linux/arm64
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/container_release.yml (vendored; deleted, 121 lines)

@@ -1,121 +0,0 @@
name: "docker: build release containers"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: []

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=ref,event=tag
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}

  build-large-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
            ghcr.io/chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Login to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build_large
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/container_release1.yml (vendored; new file, 54 lines)

@@ -0,0 +1,54 @@
name: "docker: build release containers for normal volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: []

jobs:
  build-default-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/container_release2.yml (vendored; new file, 55 lines)

@@ -0,0 +1,55 @@
name: "docker: build release containers for large volume"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: []

jobs:

  build-large-release-container:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.go_build_large
          platforms: linux/amd64, linux/arm, linux/arm64, linux/386
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
.github/workflows/container_release3.yml (vendored; new file, 55 lines)

@@ -0,0 +1,55 @@
name: "docker: build release containers for rocksdb"

on:
  push:
    tags:
      - '*'
  workflow_dispatch: []

jobs:

  build-large-release-container_rocksdb:
    runs-on: [ubuntu-latest]

    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Docker meta
        id: docker_meta
        uses: docker/metadata-action@v3
        with:
          images: |
            chrislusf/seaweedfs
          tags: |
            type=ref,event=tag,suffix=_large_disk_rocksdb
          flavor: |
            latest=false
          labels: |
            org.opencontainers.image.title=seaweedfs
            org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
            org.opencontainers.image.vendor=Chris Lu
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Login to Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      -
        name: Build
        uses: docker/build-push-action@v2
        with:
          context: ./docker
          push: ${{ github.event_name != 'pull_request' }}
          file: ./docker/Dockerfile.rocksdb_large
          platforms: linux/amd64
          tags: ${{ steps.docker_meta.outputs.tags }}
          labels: ${{ steps.docker_meta.outputs.labels }}
README.md

@@ -147,6 +147,7 @@ Faster and Cheaper than direct cloud storage!
 * [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
 * [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB.
 * [Cloud Drive][CloudDrive] mounts cloud storage to local cluster, cached for fast read and write with asynchronous write back.
+* [Gateway to Remote Object Store][GatewayToRemoteObjectStore] mirrors bucket operations to remote object storage, in addition to [Cloud Drive][CloudDrive]

 ## Kubernetes ##
 * [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/)

@@ -170,6 +171,7 @@ Faster and Cheaper than direct cloud storage!
 [FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication
 [KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store
 [CloudDrive]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Drive-Architecture
+[GatewayToRemoteObjectStore]: https://github.com/chrislusf/seaweedfs/wiki/Gateway-to-Remote-Object-Storage

 [Back to TOC](#table-of-contents)
docker/Dockerfile.rocksdb_large (new file, 59 lines)

@@ -0,0 +1,59 @@
FROM golang:1.17-buster as builder

RUN apt-get update
RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev

ENV ROCKSDB_VERSION v6.22.1

# build RocksDB
RUN cd /tmp && \
    git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \
    cd rocksdb && \
    make static_lib && \
    make install-static

ENV CGO_CFLAGS "-I/tmp/rocksdb/include"
ENV CGO_LDFLAGS "-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd"

# build SeaweedFS
RUN mkdir -p /go/src/github.com/chrislusf/
RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs
ARG BRANCH=${BRANCH:-master}
RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH
RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \
    && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
    && go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"


FROM alpine AS final
LABEL author="Chris Lu"
COPY --from=builder /go/bin/weed /usr/bin/
RUN mkdir -p /etc/seaweedfs
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml
COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh
RUN apk add fuse snappy gflags

# volume server grpc port
EXPOSE 18080
# volume server http port
EXPOSE 8080
# filer server grpc port
EXPOSE 18888
# filer server http port
EXPOSE 8888
# master server shared grpc port
EXPOSE 19333
# master server shared http port
EXPOSE 9333
# s3 server http port
EXPOSE 8333
# webdav server http port
EXPOSE 7333

RUN mkdir -p /data/filerldb2

VOLUME /data

RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
docker/Makefile

@@ -13,6 +13,12 @@ build: binary
 	docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
 	rm ./weed

+build_gorocksdb:
+	docker build --no-cache -t chrislusf/gorocksdb -f Dockerfile.go_rocksdb .
+
+build_rocksdb:
+	docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .
+
 s3tests_build:
 	docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .
go.mod (35 changes)

@@ -8,7 +8,6 @@ require (
 	cloud.google.com/go/storage v1.16.1
 	github.com/Azure/azure-pipeline-go v0.2.3
 	github.com/Azure/azure-storage-blob-go v0.14.0
-	github.com/BurntSushi/toml v0.3.1 // indirect
 	github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 // indirect
 	github.com/OneOfOne/xxhash v1.2.2
 	github.com/Shopify/sarama v1.23.1
@@ -39,6 +38,7 @@ require (
 	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/go-errors/errors v1.1.1 // indirect
 	github.com/go-redis/redis/v8 v8.4.4
+	github.com/go-redsync/redsync/v4 v4.4.1
 	github.com/go-sql-driver/mysql v1.5.0
 	github.com/go-stack/stack v1.8.0 // indirect
 	github.com/go-zookeeper/zk v1.0.2 // indirect
@@ -54,11 +54,10 @@ require (
 	github.com/googleapis/gax-go v2.0.2+incompatible // indirect
 	github.com/googleapis/gax-go/v2 v2.1.0 // indirect
 	github.com/gorilla/mux v1.7.4
-	github.com/gorilla/websocket v1.4.1 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
 	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
 	github.com/hashicorp/errwrap v1.0.0 // indirect
-	github.com/hashicorp/go-multierror v1.0.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.0 // indirect
 	github.com/hashicorp/go-uuid v1.0.2 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/jcmturner/gofork v1.0.0 // indirect
@@ -74,6 +73,7 @@ require (
 	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
 	github.com/kurin/blazer v0.5.3
 	github.com/lib/pq v1.10.0
+	github.com/linxGnu/grocksdb v1.6.38
 	github.com/magiconair/properties v1.8.1 // indirect
 	github.com/mailru/easyjson v0.7.1 // indirect
 	github.com/mattn/go-ieproxy v0.0.1 // indirect
@@ -110,12 +110,11 @@ require (
 	github.com/spf13/viper v1.4.0
 	github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71
 	github.com/stretchr/testify v1.7.0
+	github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
 	github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
-	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c
 	github.com/tidwall/gjson v1.8.1
 	github.com/tidwall/match v1.0.3
 	github.com/tidwall/pretty v1.1.0 // indirect
-	github.com/tikv/client-go/v2 v2.0.0-alpha.0.20210824090536-16d902a3c7e5
 	github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
 	github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
 	github.com/valyala/bytebufferpool v1.0.0
@@ -126,24 +125,24 @@ require (
 	github.com/xdg-go/scram v1.0.2 // indirect
 	github.com/xdg-go/stringprep v1.0.2 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
-	go.etcd.io/etcd v3.3.25+incompatible
+	go.etcd.io/etcd/client/v3 v3.5.0
 	go.mongodb.org/mongo-driver v1.7.0
 	go.opencensus.io v0.23.0 // indirect
 	go.opentelemetry.io/otel v0.15.0 // indirect
 	gocloud.dev v0.20.0
 	gocloud.dev/pubsub/natspubsub v0.20.0
 	gocloud.dev/pubsub/rabbitpubsub v0.20.0
-	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect
+	golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f // indirect
 	golang.org/x/image v0.0.0-20200119044424-58c23975cae1
 	golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d
 	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
-	golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf
+	golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365
 	golang.org/x/text v0.3.6 // indirect
 	golang.org/x/tools v0.1.5
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	google.golang.org/api v0.56.0
+	google.golang.org/api v0.57.0
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 // indirect
+	google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 // indirect
 	google.golang.org/grpc v1.40.0
 	google.golang.org/protobuf v1.27.1
 	gopkg.in/inf.v0 v0.9.1 // indirect
@@ -165,35 +164,23 @@ require (
 )

 require (
-	github.com/coreos/etcd v3.3.10+incompatible // indirect
-	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
+	cloud.google.com/go/kms v1.0.0 // indirect
 	github.com/d4l3k/messagediff v1.2.1 // indirect
-	github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
 	github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
 	github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
 	github.com/jcmturner/rpc/v2 v2.0.2 // indirect
 	github.com/mattn/go-runewidth v0.0.7 // indirect
-	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
-	github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd // indirect
-	github.com/pingcap/kvproto v0.0.0-20210806074406-317f69fb54b4 // indirect
-	github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 // indirect
-	github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307 // indirect
+	github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d // indirect
-	github.com/twmb/murmur3 v1.1.3 // indirect
 	go.etcd.io/etcd/api/v3 v3.5.0 // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.0 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.7.0 // indirect
 	go.uber.org/zap v1.17.0 // indirect
-	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
 	golang.org/x/mod v0.4.2 // indirect
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.67"
-version: "2.67"
+appVersion: "2.71"
+version: "2.71"
@@ -1,4 +1,5 @@
 {{- if .Values.filer.metricsPort }}
+{{- if .Values.global.monitoring.enabled }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
@@ -15,4 +16,5 @@ spec:
   selector:
     app: {{ template "seaweedfs.name" . }}
     component: filer
+{{- end }}
 {{- end }}
@@ -2,6 +2,7 @@ apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
   name: ingress-{{ template "seaweedfs.name" . }}-filer
+  namespace: {{ .Release.Namespace }}
   annotations:
     kubernetes.io/ingress.class: "nginx"
     nginx.ingress.kubernetes.io/auth-type: "basic"
@@ -32,6 +33,7 @@ apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
   name: ingress-{{ template "seaweedfs.name" . }}-master
+  namespace: {{ .Release.Namespace }}
   annotations:
     kubernetes.io/ingress.class: "nginx"
     nginx.ingress.kubernetes.io/auth-type: "basic"
@@ -1,4 +1,5 @@
 {{- if .Values.s3.metricsPort }}
+{{- if .Values.global.monitoring.enabled }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
@@ -15,4 +16,5 @@ spec:
   selector:
     app: {{ template "seaweedfs.name" . }}
     component: s3
+{{- end }}
 {{- end }}
@@ -1,4 +1,5 @@
 {{- if .Values.volume.metricsPort }}
+{{- if .Values.global.monitoring.enabled }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
@@ -15,4 +16,5 @@ spec:
   selector:
     app: {{ template "seaweedfs.name" . }}
     component: volume
+{{- end }}
 {{- end }}
@@ -161,7 +161,7 @@ volume:

   # Directories to store data files. dir[,dir]... (default "/tmp")
   dir: "/data"
-  # Directories to store index files. dir[,dir]... (default "/tmp")
+  # Directories to store index files. dir[,dir]... (default is the same as "dir")
   dir_idx: null

   # Maximum numbers of volumes, count[,count]...
note/SeaweedFS_Gateway_RemoteObjectStore.png (new binary file, 127 KiB; not shown)
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.7</version>
+    <version>1.6.8</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -135,7 +135,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.7</version>
+    <version>1.6.8</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -130,7 +130,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.7</version>
+    <version>1.6.8</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -15,7 +15,7 @@ public class ChunkCache {
         }
         this.cache = CacheBuilder.newBuilder()
                 .maximumSize(maxEntries)
-                .expireAfterAccess(1, TimeUnit.HOURS)
+                .expireAfterWrite(1, TimeUnit.HOURS)
                 .build();
     }
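For context, a minimal sketch of the behavioral difference (Guava's CacheBuilder, with placeholder sizes): expireAfterAccess restarts the clock on every read, so a constantly-read entry never leaves the cache, while expireAfterWrite bounds an entry's lifetime from the moment it is stored. The switch above therefore guarantees cached chunks are re-fetched at least hourly even under steady reads.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.concurrent.TimeUnit;

public class ExpiryDemo {
    public static void main(String[] args) {
        // Old policy: the one-hour timer resets on each get(),
        // so a hot entry can be served stale indefinitely.
        Cache<String, byte[]> byAccess = CacheBuilder.newBuilder()
                .maximumSize(1000)
                .expireAfterAccess(1, TimeUnit.HOURS)
                .build();

        // New policy: the entry is evicted one hour after put(),
        // no matter how often it is read in between.
        Cache<String, byte[]> byWrite = CacheBuilder.newBuilder()
                .maximumSize(1000)
                .expireAfterWrite(1, TimeUnit.HOURS)
                .build();
    }
}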
@@ -14,7 +14,11 @@ public class FilerClient extends FilerGrpcClient {
     private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class);

     public FilerClient(String host, int grpcPort) {
-        super(host, grpcPort);
+        super(host, grpcPort-10000, grpcPort);
+    }
+
+    public FilerClient(String host, int port, int grpcPort) {
+        super(host, port, grpcPort);
     }

     public static String toFileId(FilerProto.FileId fid) {
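A usage sketch of the two constructors (host and port numbers are placeholder values; the import path follows the Java client's seaweedfs.client package): the old single-port form stays source-compatible by assuming the filer's HTTP port is the gRPC port minus 10000, while the new three-argument form lets callers name both ports when that convention does not hold.

import seaweedfs.client.FilerClient;

public class FilerClientDemo {
    public static void main(String[] args) {
        // Legacy form: gRPC port only; the HTTP port is derived
        // as grpcPort - 10000 (18888 -> 8888 here).
        FilerClient derived = new FilerClient("localhost", 18888);

        // New form: HTTP and gRPC ports given explicitly, for
        // deployments that do not follow the +10000 convention.
        FilerClient explicit = new FilerClient("localhost", 8888, 18888);
    }
}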
@@ -40,11 +40,11 @@ public class FilerGrpcClient {
     private int volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT;
     private String filerAddress;

-    public FilerGrpcClient(String host, int grpcPort) {
-        this(host, grpcPort, sslContext);
+    public FilerGrpcClient(String host, int port, int grpcPort) {
+        this(host, port, grpcPort, sslContext);
     }

-    public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) {
+    public FilerGrpcClient(String host, int port, int grpcPort, SslContext sslContext) {

         this(sslContext == null ?
                 ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()
@@ -54,7 +54,7 @@ public class FilerGrpcClient {
                 .negotiationType(NegotiationType.TLS)
                 .sslContext(sslContext));

-        filerAddress = SeaweedUtil.joinHostPort(host, grpcPort - 10000);
+        filerAddress = SeaweedUtil.joinHostPort(host, port);

         FilerProto.GetFilerConfigurationResponse filerConfigurationResponse =
                 this.getBlockingStub().getFilerConfiguration(
@@ -59,7 +59,7 @@ public class SeaweedWrite {
         String fileId = response.getFileId();
         String auth = response.getAuth();

-        String targetUrl = filerClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl());
+        String targetUrl = filerClient.getChunkUrl(fileId, response.getLocation().getUrl(), response.getLocation().getPublicUrl());

         ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY;
         byte[] cipherKey = null;
@@ -15,7 +15,7 @@ public class VolumeIdCache {
         }
         this.cache = CacheBuilder.newBuilder()
                 .maximumSize(maxEntries)
-                .expireAfterAccess(5, TimeUnit.MINUTES)
+                .expireAfterWrite(5, TimeUnit.MINUTES)
                 .build();
     }
@@ -238,13 +238,12 @@ message AssignVolumeRequest {

 message AssignVolumeResponse {
   string file_id = 1;
-  string url = 2;
-  string public_url = 3;
   int32 count = 4;
   string auth = 5;
   string collection = 6;
   string replication = 7;
   string error = 8;
+  Location location = 9;
 }

 message LookupVolumeRequest {
@@ -258,6 +257,7 @@ message Locations {
 message Location {
   string url = 1;
   string public_url = 2;
+  uint32 grpc_port = 3;
 }
 message LookupVolumeResponse {
   map<string, Locations> locations_map = 1;
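A hedged sketch of how this new message shape reads from the Java client (the accessors are the standard protobuf-generated ones for the definitions above; the helper method itself is illustrative): the volume-server address now arrives inside the nested Location, which also carries the newly added grpc_port.

import seaweedfs.client.FilerProto;

public class AssignVolumeDemo {
    // Illustrative helper: read the relocated fields out of an
    // AssignVolumeResponse after this change.
    static String chunkTarget(FilerProto.AssignVolumeResponse response) {
        String fileId = response.getFileId();
        // url and public_url moved into the nested Location message:
        String url = response.getLocation().getUrl();
        String publicUrl = response.getLocation().getPublicUrl();
        // grpc_port is the field added in this commit:
        int grpcPort = response.getLocation().getGrpcPort();
        return "http://" + url + "/" + fileId;
    }
}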
@@ -11,13 +11,13 @@
         <dependency>
             <groupId>com.github.chrislusf</groupId>
             <artifactId>seaweedfs-client</artifactId>
-            <version>1.6.7</version>
+            <version>1.6.8</version>
             <scope>compile</scope>
         </dependency>
         <dependency>
             <groupId>com.github.chrislusf</groupId>
             <artifactId>seaweedfs-hadoop2-client</artifactId>
-            <version>1.6.7</version>
+            <version>1.6.8</version>
             <scope>compile</scope>
         </dependency>
         <dependency>
@@ -86,7 +86,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -301,7 +301,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 </project>
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<seaweedfs.client.version>1.6.7</seaweedfs.client.version>
|
<seaweedfs.client.version>1.6.8</seaweedfs.client.version>
|
||||||
<hadoop.version>2.9.2</hadoop.version>
|
<hadoop.version>2.9.2</hadoop.version>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@
|
||||||
<plugin>
|
<plugin>
|
||||||
<groupId>org.sonatype.plugins</groupId>
|
<groupId>org.sonatype.plugins</groupId>
|
||||||
<artifactId>nexus-staging-maven-plugin</artifactId>
|
<artifactId>nexus-staging-maven-plugin</artifactId>
|
||||||
<version>1.6.7</version>
|
<version>1.6.8</version>
|
||||||
<extensions>true</extensions>
|
<extensions>true</extensions>
|
||||||
<configuration>
|
<configuration>
|
||||||
<serverId>ossrh</serverId>
|
<serverId>ossrh</serverId>
|
||||||
|
|
|
@@ -23,6 +23,7 @@ public class SeaweedFileSystem extends FileSystem {

     public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
     public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+    public static final String FS_SEAWEED_FILER_PORT_GRPC = "fs.seaweed.filer.port.grpc";
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
     public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
@@ -50,9 +51,6 @@ public class SeaweedFileSystem extends FileSystem {
         // get host information from uri (overrides info in conf)
         String host = uri.getHost();
         host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
-        if (host == null) {
-            throw new IOException("Invalid host specified");
-        }
         conf.set(FS_SEAWEED_FILER_HOST, host);

         // get port information from uri, (overrides info in conf)
@@ -60,10 +58,12 @@ public class SeaweedFileSystem extends FileSystem {
         port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
         conf.setInt(FS_SEAWEED_FILER_PORT, port);

+        int grpcPort = conf.getInt(FS_SEAWEED_FILER_PORT_GRPC, port+10000);
+
         setConf(conf);
         this.uri = uri;

-        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);
+        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf);

     }
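A configuration sketch for the new knob (host and port values are examples): fs.seaweed.filer.port.grpc only needs to be set when the filer's gRPC port is not the HTTP port plus 10000, since that offset remains the default.

import org.apache.hadoop.conf.Configuration;

public class SeaweedConfDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.seaweed.filer.host", "filer.example.com"); // example host
        conf.setInt("fs.seaweed.filer.port", 8888);
        // Optional override added in this commit; when absent, the
        // client falls back to port + 10000 (18888 here).
        conf.setInt("fs.seaweed.filer.port.grpc", 28888);
    }
}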
@@ -27,9 +27,8 @@ public class SeaweedFileSystemStore {
     private FilerClient filerClient;
     private Configuration conf;

-    public SeaweedFileSystemStore(String host, int port, Configuration conf) {
-        int grpcPort = 10000 + port;
-        filerClient = new FilerClient(host, grpcPort);
+    public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) {
+        filerClient = new FilerClient(host, port, grpcPort);
         this.conf = conf;
         String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
         if (volumeServerAccessMode.equals("publicUrl")) {
@@ -86,7 +86,7 @@
             <plugin>
                 <groupId>org.sonatype.plugins</groupId>
                 <artifactId>nexus-staging-maven-plugin</artifactId>
-                <version>1.6.7</version>
+                <version>1.6.8</version>
                 <extensions>true</extensions>
                 <configuration>
                     <serverId>ossrh</serverId>
@@ -309,7 +309,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.6.7</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 </project>
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<seaweedfs.client.version>1.6.7</seaweedfs.client.version>
|
<seaweedfs.client.version>1.6.8</seaweedfs.client.version>
|
||||||
<hadoop.version>3.1.1</hadoop.version>
|
<hadoop.version>3.1.1</hadoop.version>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@
|
||||||
<plugin>
|
<plugin>
|
||||||
<groupId>org.sonatype.plugins</groupId>
|
<groupId>org.sonatype.plugins</groupId>
|
||||||
<artifactId>nexus-staging-maven-plugin</artifactId>
|
<artifactId>nexus-staging-maven-plugin</artifactId>
|
||||||
<version>1.6.7</version>
|
<version>1.6.8</version>
|
||||||
<extensions>true</extensions>
|
<extensions>true</extensions>
|
||||||
<configuration>
|
<configuration>
|
||||||
<serverId>ossrh</serverId>
|
<serverId>ossrh</serverId>
|
||||||
|
|
|
@@ -23,6 +23,7 @@ public class SeaweedFileSystem extends FileSystem {

     public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
     public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+    public static final String FS_SEAWEED_FILER_PORT_GRPC = "fs.seaweed.filer.port.grpc";
     public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
     public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size";
     public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication";
@@ -50,9 +51,6 @@ public class SeaweedFileSystem extends FileSystem {
         // get host information from uri (overrides info in conf)
         String host = uri.getHost();
         host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host;
-        if (host == null) {
-            throw new IOException("Invalid host specified");
-        }
         conf.set(FS_SEAWEED_FILER_HOST, host);

         // get port information from uri, (overrides info in conf)
@@ -60,10 +58,12 @@ public class SeaweedFileSystem extends FileSystem {
         port = (port == -1) ? FS_SEAWEED_DEFAULT_PORT : port;
         conf.setInt(FS_SEAWEED_FILER_PORT, port);

+        int grpcPort = conf.getInt(FS_SEAWEED_FILER_PORT_GRPC, port+10000);
+
         setConf(conf);
         this.uri = uri;

-        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf);
+        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf);

     }

@@ -27,9 +27,8 @@ public class SeaweedFileSystemStore {
     private FilerClient filerClient;
     private Configuration conf;

-    public SeaweedFileSystemStore(String host, int port, Configuration conf) {
-        int grpcPort = 10000 + port;
-        filerClient = new FilerClient(host, grpcPort);
+    public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) {
+        filerClient = new FilerClient(host, port, grpcPort);
         this.conf = conf;
         String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct");
         if (volumeServerAccessMode.equals("publicUrl")) {
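The same pair of hunks appears twice above, presumably once per bundled HDFS client version. The change introduces an explicit `fs.seaweed.filer.port.grpc` setting while keeping the long-standing fallback of HTTP port + 10000, the same default the filer itself applies later in this commit. A minimal Go sketch of that shared convention; the helper name is hypothetical, only the +10000 default comes from the hunks above:

```go
// A minimal sketch of the gRPC-port convention these changes rely on;
// resolveGrpcPort is a hypothetical helper, not part of the commit.
package main

import "fmt"

// resolveGrpcPort returns the configured gRPC port, or derives it from the
// HTTP port when the setting is left at zero.
func resolveGrpcPort(httpPort, configuredGrpcPort int) int {
	if configuredGrpcPort != 0 {
		return configuredGrpcPort
	}
	return httpPort + 10000
}

func main() {
	fmt.Println(resolveGrpcPort(8888, 0))     // 18888, the default pairing
	fmt.Println(resolveGrpcPort(8888, 18889)) // 18889, explicit override
}
```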
@@ -19,10 +19,10 @@ import (
 const (
 	maxPartSize        = int64(5 * 1024 * 1024)
 	maxRetries         = 3
-	awsAccessKeyID     = "Your access key"
-	awsSecretAccessKey = "Your secret key"
-	awsBucketRegion    = "S3 bucket region"
-	awsBucketName      = "newBucket"
+	awsAccessKeyID     = "any"
+	awsSecretAccessKey = "any"
+	awsBucketRegion    = "us-west-1"
+	awsBucketName      = "bucket1"
 )

 var (
@@ -37,7 +37,7 @@ func main() {
 	if err != nil {
 		fmt.Printf("bad credentials: %s", err)
 	}
-	cfg := aws.NewConfig().WithRegion(awsBucketRegion).WithCredentials(creds).WithDisableSSL(true).WithEndpoint("localhost:8333")
+	cfg := aws.NewConfig().WithRegion(awsBucketRegion).WithCredentials(creds).WithDisableSSL(true).WithEndpoint("localhost:8333").WithS3ForcePathStyle(true)
 	svc := s3.New(session.New(), cfg)

 	file, err := os.Open(*filename)
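The `WithS3ForcePathStyle(true)` addition matters because SeaweedFS's S3 gateway serves buckets under the request path (`localhost:8333/bucket1/key`) rather than as virtual-host subdomains; without it the AWS SDK would attempt `bucket1.localhost:8333`. A hedged sketch of pointing aws-sdk-go at a local gateway; endpoint, region, and bucket are assumptions, not part of the commit:

```go
// Sketch: configure aws-sdk-go (v1) for a local SeaweedFS S3 gateway.
// The endpoint, region, and credentials here are assumed example values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	creds := credentials.NewStaticCredentials("any", "any", "")
	cfg := aws.NewConfig().
		WithRegion("us-east-1"). // region is nominal for a local gateway
		WithCredentials(creds).
		WithDisableSSL(true).
		WithEndpoint("localhost:8333").
		WithS3ForcePathStyle(true) // bucket goes in the path, not the host name

	svc := s3.New(session.Must(session.NewSession()), cfg)
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		fmt.Println("list buckets:", err)
		return
	}
	for _, b := range out.Buckets {
		fmt.Println(*b.Name)
	}
}
```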
@@ -6,13 +6,9 @@ import (
 	"errors"
 	"flag"
 	"fmt"
-	"io"
-	"math"
-	"os"
-	"strings"
-
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/storage/idx"
@@ -20,6 +16,9 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"google.golang.org/grpc"
+	"io"
+	"math"
+	"os"
 )

 var (
@@ -45,13 +44,13 @@ func main() {
 	grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

 	vid := uint32(*volumeId)
-	servers := strings.Split(*serversStr, ",")
+	servers := pb.ServerAddresses(*serversStr).ToAddresses()
 	if len(servers) < 2 {
 		glog.Fatalf("You must specify more than 1 server\n")
 	}
-	var referenceServer string
+	var referenceServer pb.ServerAddress
 	var maxOffset int64
-	allFiles := map[string]map[types.NeedleId]needleState{}
+	allFiles := map[pb.ServerAddress]map[types.NeedleId]needleState{}
 	for _, addr := range servers {
 		files, offset, err := getVolumeFiles(vid, addr)
 		if err != nil {
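This is the recurring pattern of the whole commit: bare `string` host:port values become `pb.ServerAddress`, and comma-separated flag values become `pb.ServerAddresses(...).ToAddresses()`. A rough sketch of the semantics this implies; the real type lives in `weed/pb`, and the gRPC-port derivation shown here is an assumption based on the +10000 convention used elsewhere in the diff:

```go
// Illustration only: an assumed approximation of pb.ServerAddress, not the
// real weed/pb implementation.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type ServerAddress string

// ToHttpAddress: the address as given, e.g. "localhost:8080".
func (sa ServerAddress) ToHttpAddress() string { return string(sa) }

// ToGrpcAddress: assumed to derive the paired gRPC port (+10000 by convention).
func (sa ServerAddress) ToGrpcAddress() string {
	host, portStr, _ := strings.Cut(string(sa), ":")
	port, _ := strconv.Atoi(portStr)
	return fmt.Sprintf("%s:%d", host, port+10000)
}

type ServerAddresses string

// ToAddresses splits a comma-separated flag value into typed addresses.
func (sas ServerAddresses) ToAddresses() []ServerAddress {
	var out []ServerAddress
	for _, one := range strings.Split(string(sas), ",") {
		out = append(out, ServerAddress(one))
	}
	return out
}

func main() {
	for _, addr := range ServerAddresses("localhost:8080,localhost:8081").ToAddresses() {
		fmt.Println(addr.ToHttpAddress(), "->", addr.ToGrpcAddress())
	}
}
```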
@@ -121,7 +120,7 @@ type needleState struct {
 	size types.Size
 }

-func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) {
+func getVolumeFiles(v uint32, addr pb.ServerAddress) (map[types.NeedleId]needleState, int64, error) {
 	var idxFile *bytes.Reader
 	err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
 		ctx, cancel := context.WithCancel(context.Background())
@@ -179,7 +178,7 @@ func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int6
 	return files, maxOffset, nil
 }

-func getNeedleFileId(v uint32, nid types.NeedleId, addr string) (string, error) {
+func getNeedleFileId(v uint32, nid types.NeedleId, addr pb.ServerAddress) (string, error) {
 	var id string
 	err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
 		resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
@@ -51,7 +51,7 @@ func main() {
 }

 func startGenerateMetadata() {
-	pb.WithFilerClient(*tailFiler, grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error {
+	pb.WithFilerClient(pb.ServerAddress(*tailFiler), grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error {

 		for i := 0; i < *n; i++ {
 			name := fmt.Sprintf("file%d", i)
@@ -77,7 +77,7 @@ func startGenerateMetadata() {

 func startSubscribeMetadata(eachEntryFunc func(event *filer_pb.SubscribeMetadataResponse) error) {

-	tailErr := pb.FollowMetadata(*tailFiler, grpc.WithInsecure(), "tail", *dir, nil, 0, 0, eachEntryFunc, false)
+	tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpc.WithInsecure(), "tail", *dir, nil, 0, 0, eachEntryFunc, false)

 	if tailErr != nil {
 		fmt.Printf("tail %s: %v\n", *tailFiler, tailErr)
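Both call sites now coerce the `-filer` flag into `pb.ServerAddress` at the point of use. For reference, a trimmed sketch of tailing filer metadata with the updated signature; it mirrors the call shape above exactly, with the filer address and watched path as assumed example values:

```go
// Sketch: tail filer metadata using the signature shown in the hunk above.
// Compiles only inside the seaweedfs module at this commit; the address and
// path are assumptions.
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	filer := pb.ServerAddress("localhost:8888") // assumed local filer
	err := pb.FollowMetadata(filer, grpc.WithInsecure(), "example-tail",
		"/", nil, 0, 0, func(resp *filer_pb.SubscribeMetadataResponse) error {
			// print each metadata event as it arrives
			fmt.Println(resp.Directory, resp.EventNotification)
			return nil
		}, false)
	if err != nil {
		fmt.Println("tail:", err)
	}
}
```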
@@ -3,6 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"log"
 	"math/rand"
 	"time"
@@ -32,7 +33,7 @@ func main() {
 	go func() {
 		for {
 			println("vacuum threshold", *garbageThreshold)
-			_, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
+			_, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", pb.ServerAddress(*master).ToHttpAddress(), *garbageThreshold))
 			if err != nil {
 				log.Fatalf("vacuum: %v", err)
 			}
@@ -52,7 +53,7 @@ func main() {
 }

 func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
-	assignResult, err := operation.Assign(func() string { return *master }, grpcDialOption, &operation.VolumeAssignRequest{
+	assignResult, err := operation.Assign(func() pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{
 		Count:       1,
 		Replication: *replication,
 	})
64 unmaintained/stream_read_volume/stream_read_volume.go Normal file
@@ -0,0 +1,64 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"google.golang.org/grpc"
+	"io"
+)
+
+var (
+	volumeServer   = flag.String("volumeServer", "localhost:8080", "a volume server")
+	volumeId       = flag.Int("volumeId", -1, "a volume id to stream read")
+	grpcDialOption grpc.DialOption
+)
+
+func main() {
+	flag.Parse()
+
+	util.LoadConfiguration("security", false)
+	grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
+
+	vid := uint32(*volumeId)
+
+	eachNeedleFunc := func(resp *volume_server_pb.ReadAllNeedlesResponse) error {
+		fmt.Printf("%d,%x%08x %d\n", resp.VolumeId, resp.NeedleId, resp.Cookie, len(resp.NeedleBlob))
+		return nil
+	}
+
+	err := operation.WithVolumeServerClient(pb.ServerAddress(*volumeServer), grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		copyFileClient, err := vs.ReadAllNeedles(ctx, &volume_server_pb.ReadAllNeedlesRequest{
+			VolumeIds: []uint32{vid},
+		})
+		if err != nil {
+			return err
+		}
+		for {
+			resp, err := copyFileClient.Recv()
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			if err != nil {
+				return err
+			}
+			if err = eachNeedleFunc(resp); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		fmt.Printf("read %s: %v\n", *volumeServer, err)
+	}
+
+}
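The new tool prints one line per needle: volume id, then the needle id in hex fused with the cookie zero-padded to 8 hex digits, then the blob size. A standalone illustration of that key format; the ids and cookie below are made-up values:

```go
// Illustrates the key format printed by stream_read_volume above;
// volumeId, needleId, and cookie are invented example values.
package main

import "fmt"

func main() {
	volumeId := uint32(3)
	needleId := uint64(0x2f)
	cookie := uint32(0x8a1b2c3d)
	blobLen := 1024
	// Same verb string as the tool: volume id, needle id in hex,
	// cookie zero-padded to 8 hex digits, then the needle blob size.
	fmt.Printf("%d,%x%08x %d\n", volumeId, needleId, cookie, blobLen) // 3,2f8a1b2c3d 1024
}
```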
@@ -2,6 +2,7 @@ package main

 import (
 	"flag"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"log"
 	"time"

@@ -37,7 +38,7 @@ func main() {
 		sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano()
 	}

-	err := operation.TailVolume(func()string{return *master}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
+	err := operation.TailVolume(func()pb.ServerAddress{return pb.ServerAddress(*master)}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) {
 		if n.Size == 0 {
 			println("-", n.String())
 			return nil
@@ -2,6 +2,7 @@ package command

 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb"

 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -72,12 +73,12 @@ func runBackup(cmd *Command, args []string) bool {
 	vid := needle.VolumeId(*s.volumeId)

 	// find volume location, replication, ttl info
-	lookup, err := operation.LookupVolumeId(func() string { return *s.master }, grpcDialOption, vid.String())
+	lookup, err := operation.LookupVolumeId(func() pb.ServerAddress { return pb.ServerAddress(*s.master) }, grpcDialOption, vid.String())
 	if err != nil {
 		fmt.Printf("Error looking up volume %d: %v\n", vid, err)
 		return true
 	}
-	volumeServer := lookup.Locations[0].Url
+	volumeServer := lookup.Locations[0].ServerAddress()

 	stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
 	if err != nil {
@@ -3,6 +3,7 @@ package command
 import (
 	"bufio"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"io"
 	"math"
 	"math/rand"
@@ -10,7 +11,6 @@ import (
 	"runtime"
 	"runtime/pprof"
 	"sort"
-	"strings"
 	"sync"
 	"time"

@@ -129,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 		defer pprof.StopCPUProfile()
 	}

-	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, "", strings.Split(*b.masters, ","))
+	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", "", pb.ServerAddresses(*b.masters).ToAddresses())
 	go b.masterClient.KeepConnectedToMaster()
 	b.masterClient.WaitUntilConnected()

@@ -21,6 +21,7 @@ var Commands = []*Command{
 	cmdFilerCopy,
 	cmdFilerMetaBackup,
 	cmdFilerMetaTail,
+	cmdFilerRemoteGateway,
 	cmdFilerRemoteSynchronize,
 	cmdFilerReplicate,
 	cmdFilerSynchronize,
@@ -2,6 +2,7 @@ package command

 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"google.golang.org/grpc"
 	"io"
@@ -49,7 +50,7 @@ func runDownload(cmd *Command, args []string) bool {
 	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

 	for _, fid := range args {
-		if e := downloadToFile(func() string { return *d.server }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil {
+		if e := downloadToFile(func() pb.ServerAddress { return pb.ServerAddress(*d.server) }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil {
 			fmt.Println("Download Error: ", fid, e)
 		}
 	}
@@ -29,10 +29,12 @@ var (
 )

 type FilerOptions struct {
-	masters                 *string
+	masters                 []pb.ServerAddress
+	mastersString           *string
 	ip                      *string
 	bindIp                  *string
 	port                    *int
+	portGrpc                *int
 	publicPort              *int
 	collection              *string
 	defaultReplicaPlacement *string
@@ -55,11 +57,12 @@ type FilerOptions struct {

 func init() {
 	cmdFiler.Run = runFiler // break init cycle
-	f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
+	f.mastersString = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
 	f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
 	f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
 	f.bindIp = cmdFiler.Flag.String("ip.bind", "", "ip address to bind to")
 	f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
+	f.portGrpc = cmdFiler.Flag.Int("port.grpc", 0, "filer server grpc listen port")
 	f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public")
 	f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
 	f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing")
@@ -84,7 +87,7 @@ func init() {
 	filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file")
 	filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
 	filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file")
-	filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
+	filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", true, "allow empty folders")

 	// start webdav on filer
 	filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
@@ -155,13 +158,15 @@ func runFiler(cmd *Command, args []string) bool {

 	if *filerStartIam {
 		filerIamOptions.filer = &filerAddress
-		filerIamOptions.masters = f.masters
+		filerIamOptions.masters = f.mastersString
 		go func() {
 			time.Sleep(startDelay * time.Second)
 			filerIamOptions.startIamServer()
 		}()
 	}

+	f.masters = pb.ServerAddresses(*f.mastersString).ToAddresses()
+
 	f.startFiler()

 	return true
@@ -175,6 +180,9 @@ func (fo *FilerOptions) startFiler() {
 	if *fo.publicPort != 0 {
 		publicVolumeMux = http.NewServeMux()
 	}
+	if *fo.portGrpc == 0 {
+		*fo.portGrpc = 10000 + *fo.port
+	}

 	defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2")
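With `-port.grpc` defaulting to 0, the filer keeps the historical behavior (HTTP port + 10000) unless the operator overrides it; the resolved pair is then packaged by `pb.NewServerAddress` in the next hunk. A small sketch of that resolution step in isolation; `pb.NewServerAddress` is taken from this diff, everything else is illustrative:

```go
// Sketch of the gRPC-port fallback the filer now applies on startup.
package main

import "fmt"

func main() {
	ip := "127.0.0.1"
	port := 8888
	portGrpc := 0 // the new -port.grpc flag; 0 means "derive it"

	if portGrpc == 0 {
		portGrpc = 10000 + port // same rule as the hunk above
	}
	// The filer then advertises one address carrying both ports,
	// via pb.NewServerAddress(ip, port, portGrpc) in the hunk below.
	fmt.Printf("http=%s:%d grpc=%s:%d\n", ip, port, ip, portGrpc)
}
```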
@@ -183,8 +191,10 @@ func (fo *FilerOptions) startFiler() {
 		peers = strings.Split(*fo.peers, ",")
 	}

+	filerAddress := pb.NewServerAddress(*fo.ip, *fo.port, *fo.portGrpc)
+
 	fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{
-		Masters:            strings.Split(*fo.masters, ","),
+		Masters:            fo.masters,
 		Collection:         *fo.collection,
 		DefaultReplication: *fo.defaultReplicaPlacement,
 		DisableDirListing:  *fo.disableDirListing,
@@ -194,11 +204,10 @@ func (fo *FilerOptions) startFiler() {
 		Rack:                  *fo.rack,
 		DefaultLevelDbDir:     defaultLevelDbDirectory,
 		DisableHttp:           *fo.disableHttp,
-		Host:                  *fo.ip,
-		Port:                  uint32(*fo.port),
+		Host:                  filerAddress,
 		Cipher:                *fo.cipher,
 		SaveToFilerLimit:      int64(*fo.saveToFilerLimit),
-		Filers:                peers,
+		Filers:                pb.FromAddressStrings(peers),
 		ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
 	})
 	if nfs_err != nil {
@@ -229,7 +238,7 @@ func (fo *FilerOptions) startFiler() {
 	}

 	// starting grpc server
-	grpcPort := *fo.port + 10000
+	grpcPort := *fo.portGrpc
 	grpcL, err := util.NewListener(util.JoinHostPort(*fo.bindIp, grpcPort), 0)
 	if err != nil {
 		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
@@ -78,7 +78,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 		return fmt.Errorf("no data sink configured in replication.toml")
 	}

-	sourceFiler := *backupOption.filer
+	sourceFiler := pb.ServerAddress(*backupOption.filer)
 	sourcePath := *backupOption.path
 	timeAgo := *backupOption.timeAgo
 	targetPath := dataSink.GetSinkToDirectory()
@@ -102,7 +102,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti

 	// create filer sink
 	filerSource := &source.FilerSource{}
-	filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, *backupOption.proxyByFiler)
+	filerSource.DoInitialize(sourceFiler.ToHttpAddress(), sourceFiler.ToGrpcAddress(), sourcePath, *backupOption.proxyByFiler)
 	dataSink.SetSourceFiler(filerSource)

 	processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)
@@ -23,7 +23,7 @@ var (

 type FilerCatOptions struct {
 	grpcDialOption grpc.DialOption
-	filerAddress   string
+	filerAddress   pb.ServerAddress
 	filerClient    filer_pb.SeaweedFilerClient
 	output         *string
 }
@@ -78,7 +78,7 @@ func runFilerCat(cmd *Command, args []string) bool {
 		return false
 	}

-	filerCat.filerAddress = filerUrl.Host
+	filerCat.filerAddress = pb.ServerAddress(filerUrl.Host)
 	filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

 	dir, name := util.FullPath(urlPath).DirAndName()
@@ -7,7 +7,6 @@ import (
 	"io"
 	"io/ioutil"
 	"net/http"
-	"net/url"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -92,35 +91,21 @@ func runCopy(cmd *Command, args []string) bool {
 	filerDestination := args[len(args)-1]
 	fileOrDirs := args[0 : len(args)-1]

-	filerUrl, err := url.Parse(filerDestination)
+	filerAddress, urlPath, err := pb.ParseUrl(filerDestination)
 	if err != nil {
 		fmt.Printf("The last argument should be a URL on filer: %v\n", err)
 		return false
 	}
-	urlPath := filerUrl.Path
 	if !strings.HasSuffix(urlPath, "/") {
 		fmt.Printf("The last argument should be a folder and end with \"/\"\n")
 		return false
 	}

-	if filerUrl.Port() == "" {
-		fmt.Printf("The filer port should be specified.\n")
-		return false
-	}
-
-	filerPort, parseErr := strconv.ParseUint(filerUrl.Port(), 10, 64)
-	if parseErr != nil {
-		fmt.Printf("The filer port parse error: %v\n", parseErr)
-		return false
-	}
-
-	filerGrpcPort := filerPort + 10000
-	filerGrpcAddress := util.JoinHostPort(filerUrl.Hostname(), int(filerGrpcPort))
 	copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

-	masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress)
+	masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerAddress)
 	if err != nil {
-		fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err)
+		fmt.Printf("read from filer %s: %v\n", filerAddress, err)
 		return false
 	}
 	if strings.HasPrefix(urlPath, dirBuckets+"/") {
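`pb.ParseUrl` collapses the old hand-rolled URL parsing, port validation, and +10000 derivation into one call that returns a typed filer address plus the path. A sketch of how a caller consumes it, mirroring the hunk above; the destination URL is an assumed example, and nothing beyond what this hunk shows about `ParseUrl` is guaranteed:

```go
// Sketch of the new parse step; compiles only inside the seaweedfs module at
// this commit, and the destination URL is a hypothetical example.
package main

import (
	"fmt"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/pb"
)

func main() {
	filerDestination := "http://localhost:8888/buckets/bucket1/" // hypothetical
	filerAddress, urlPath, err := pb.ParseUrl(filerDestination)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	if !strings.HasSuffix(urlPath, "/") {
		fmt.Println("destination should be a folder ending with /")
		return
	}
	fmt.Println("filer grpc:", filerAddress.ToGrpcAddress(), "path:", urlPath)
}
```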
@@ -174,9 +159,8 @@ func runCopy(cmd *Command, args []string) bool {
 		go func() {
 			defer waitGroup.Done()
 			worker := FileCopyWorker{
-				options:          &copy,
-				filerHost:        filerUrl.Host,
-				filerGrpcAddress: filerGrpcAddress,
+				options:      &copy,
+				filerAddress: filerAddress,
 			}
 			if err := worker.copyFiles(fileCopyTaskChan); err != nil {
 				fmt.Fprintf(os.Stderr, "copy file error: %v\n", err)
@@ -189,7 +173,7 @@ func runCopy(cmd *Command, args []string) bool {
 	return true
 }

-func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
+func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress pb.ServerAddress) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) {
 	err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 		if err != nil {
@@ -241,9 +225,8 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
 }

 type FileCopyWorker struct {
-	options          *CopyOptions
-	filerHost        string
-	filerGrpcAddress string
+	options      *CopyOptions
+	filerAddress pb.ServerAddress
 }

 func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error {
@@ -321,7 +304,7 @@ func (worker *FileCopyWorker) checkExistingFileFirst(task FileCopyTask, f *os.Fi
 		return
 	}

-	err = pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	err = pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

 		request := &filer_pb.LookupDirectoryEntryRequest{
 			Directory: task.destinationUrlPath,
@@ -361,9 +344,9 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
 		return err
 	}

-	// assign a volume
-	err = util.Retry("assignVolume", func() error {
-		return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	err = util.Retry("upload", func() error {
+		// assign a volume
+		assignErr := pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

 			request := &filer_pb.AssignVolumeRequest{
 				Count:       1,
@@ -381,43 +364,49 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
 			if assignResult.Error != "" {
 				return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
 			}
-			if assignResult.Url == "" {
+			if assignResult.Location.Url == "" {
 				return fmt.Errorf("assign volume failure %v: %v", request, assignResult)
 			}
 			return nil
 		})
+		if assignErr != nil {
+			return assignErr
+		}
+
+		// upload data
+		targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId
+		uploadOption := &operation.UploadOption{
+			UploadUrl:         targetUrl,
+			Filename:          fileName,
+			Cipher:            worker.options.cipher,
+			IsInputCompressed: false,
+			MimeType:          mimeType,
+			PairMap:           nil,
+			Jwt:               security.EncodedJwt(assignResult.Auth),
+		}
+		uploadResult, err := operation.UploadData(data, uploadOption)
+		if err != nil {
+			return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
+		}
+		if uploadResult.Error != "" {
+			return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
+		}
+		if *worker.options.verbose {
+			fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
+		}
+
+		fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName)
+		chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
+
+		return nil
 	})
 	if err != nil {
-		return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
+		return fmt.Errorf("upload %v: %v\n", fileName, err)
 	}
-
-	targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
-	uploadOption := &operation.UploadOption{
-		UploadUrl:         targetUrl,
-		Filename:          fileName,
-		Cipher:            worker.options.cipher,
-		IsInputCompressed: false,
-		MimeType:          mimeType,
-		PairMap:           nil,
-		Jwt:               security.EncodedJwt(assignResult.Auth),
-	}
-	uploadResult, err := operation.UploadData(data, uploadOption)
-	if err != nil {
-		return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err)
-	}
-	if uploadResult.Error != "" {
-		return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
-	}
-	if *worker.options.verbose {
-		fmt.Printf("uploaded %s to %s\n", fileName, targetUrl)
-	}
-
-	chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0))
-
-	fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerHost, task.destinationUrlPath, fileName)
 	}

-	if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	if err := pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: task.destinationUrlPath,
 			Entry: &filer_pb.Entry{
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
|
return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
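The `uploadFileAsOne` restructuring above is behavioral, not cosmetic: previously only volume assignment sat inside `util.Retry`, so a failed upload was never retried; now assignment and upload run as one retryable unit, and a retry always gets a fresh assignment. A generic sketch of the pattern; the `retry` helper here is a hypothetical stand-in for `util.Retry`:

```go
// Generic sketch of "retry the whole assign+upload unit"; retry() is a
// hypothetical stand-in for util.Retry.
package main

import (
	"errors"
	"fmt"
)

func retry(name string, attempts int, fn func() error) (err error) {
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		fmt.Printf("%s attempt %d failed: %v\n", name, i+1, err)
	}
	return err
}

func main() {
	calls := 0
	err := retry("upload", 3, func() error {
		// step 1: assign a volume (can fail transiently)
		calls++
		if calls < 2 {
			return errors.New("assign: temporarily unavailable")
		}
		// step 2: upload data; a failure here now re-runs the assignment too,
		// so a retry never reuses a stale or missing assignment
		return nil
	})
	fmt.Println("result:", err)
}
```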
@@ -474,7 +463,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 		var assignResult *filer_pb.AssignVolumeResponse
 		var assignError error
 		err := util.Retry("assignVolume", func() error {
-			return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+			return pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 				request := &filer_pb.AssignVolumeRequest{
 					Count:       1,
 					Replication: *worker.options.replication,
@@ -498,7 +487,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 			fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
 		}

-		targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
+		targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId
 		if collection == "" {
 			collection = assignResult.Collection
 		}
@@ -508,7 +497,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,

 		uploadOption := &operation.UploadOption{
 			UploadUrl:         targetUrl,
-			Filename:          fileName+"-"+strconv.FormatInt(i+1, 10),
+			Filename:          fileName + "-" + strconv.FormatInt(i+1, 10),
 			Cipher:            worker.options.cipher,
 			IsInputCompressed: false,
 			MimeType:          "",
@@ -542,8 +531,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 	for _, chunk := range chunks {
 		fileIds = append(fileIds, chunk.FileId)
 	}
-	operation.DeleteFiles(func() string {
-		return copy.masters[0]
+	operation.DeleteFiles(func() pb.ServerAddress {
+		return pb.ServerAddress(copy.masters[0])
 	}, false, worker.options.grpcDialOption, fileIds)
 	return uploadError
 }
@@ -553,7 +542,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 		return fmt.Errorf("create manifest: %v", manifestErr)
 	}

-	if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	if err := pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: task.destinationUrlPath,
 			Entry: &filer_pb.Entry{
@@ -579,10 +568,10 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 		}
 		return nil
 	}); err != nil {
-		return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err)
+		return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName, err)
 	}

-	fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerHost, task.destinationUrlPath, fileName)
+	fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName)

 	return nil
 }
@@ -611,7 +600,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
 	var fileId, host string
 	var auth security.EncodedJwt

-	if flushErr := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	if flushErr := pb.WithGrpcFilerClient(worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

 		ctx := context.Background()

@@ -633,7 +622,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
 			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
 		}

-		fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
+		fileId, host, auth = resp.FileId, resp.Location.Url, security.EncodedJwt(resp.Auth)
 		collection, replication = resp.Collection, resp.Replication

 		return nil
@@ -195,7 +195,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
 		return metaBackup.setOffset(lastTime)
 	})

-	return pb.FollowMetadata(*metaBackup.filerAddress, metaBackup.grpcDialOption, "meta_backup",
+	return pb.FollowMetadata(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, "meta_backup",
 		*metaBackup.filerDirectory, nil, startTime.UnixNano(), 0, processEventFnWithOffset, false)

 }
@@ -224,7 +224,7 @@ var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{})

 func (metaBackup *FilerMetaBackupOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {

-	return pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	return pb.WithFilerClient(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		return fn(client)
 	})
@@ -71,12 +71,12 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
 	}

 	shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool {
-		if filterFunc == nil {
-			return true
-		}
 		if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil {
 			return false
 		}
+		if filterFunc == nil {
+			return true
+		}
 		if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) {
 			return true
 		}
@@ -103,7 +103,7 @@ func runFilerMetaTail(cmd *Command, args []string) bool {
 		}
 	}

-	tailErr := pb.FollowMetadata(*tailFiler, grpcDialOption, "tail",
+	tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpcDialOption, "tail",
 		*tailTarget, nil, time.Now().Add(-*tailStart).UnixNano(), 0,
 		func(resp *filer_pb.SubscribeMetadataResponse) error {
 			if !shouldPrint(resp) {
117 weed/command/filer_remote_gateway.go Normal file
@@ -0,0 +1,117 @@
+package command
+
+import (
+	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
+	"github.com/chrislusf/seaweedfs/weed/replication/source"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"google.golang.org/grpc"
+	"os"
+	"time"
+)
+
+type RemoteGatewayOptions struct {
+	filerAddress             *string
+	grpcDialOption           grpc.DialOption
+	readChunkFromFiler       *bool
+	timeAgo                  *time.Duration
+	createBucketAt           *string
+	createBucketRandomSuffix *bool
+	include                  *string
+	exclude                  *string
+
+	mappings    *remote_pb.RemoteStorageMapping
+	remoteConfs map[string]*remote_pb.RemoteConf
+	bucketsDir  string
+}
+
+var _ = filer_pb.FilerClient(&RemoteGatewayOptions{})
+
+func (option *RemoteGatewayOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+	return pb.WithFilerClient(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		return fn(client)
+	})
+}
+func (option *RemoteGatewayOptions) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
+}
+
+var (
+	remoteGatewayOptions RemoteGatewayOptions
+)
+
+func init() {
+	cmdFilerRemoteGateway.Run = runFilerRemoteGateway // break init cycle
+	remoteGatewayOptions.filerAddress = cmdFilerRemoteGateway.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
+	remoteGatewayOptions.createBucketAt = cmdFilerRemoteGateway.Flag.String("createBucketAt", "", "one remote storage name to create new buckets in")
+	remoteGatewayOptions.createBucketRandomSuffix = cmdFilerRemoteGateway.Flag.Bool("createBucketWithRandomSuffix", true, "add randomized suffix to bucket name to avoid conflicts")
+	remoteGatewayOptions.readChunkFromFiler = cmdFilerRemoteGateway.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
+	remoteGatewayOptions.timeAgo = cmdFilerRemoteGateway.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
+	remoteGatewayOptions.include = cmdFilerRemoteGateway.Flag.String("include", "", "pattens of new bucket names, e.g., s3*")
+	remoteGatewayOptions.exclude = cmdFilerRemoteGateway.Flag.String("exclude", "", "pattens of new bucket names, e.g., local*")
+}
+
+var cmdFilerRemoteGateway = &Command{
+	UsageLine: "filer.remote.gateway",
+	Short:     "resumable continuously write back bucket creation, deletion, and other local updates to remote object store",
+	Long: `resumable continuously write back bucket creation, deletion, and other local updates to remote object store
+
+	filer.remote.gateway listens on filer local buckets update events.
+	If any bucket is created, deleted, or updated, it will mirror the changes to remote object store.
+
+		weed filer.remote.sync -createBucketAt=cloud1
+
+`,
+}
+
+func runFilerRemoteGateway(cmd *Command, args []string) bool {
+
+	util.LoadConfiguration("security", false)
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+	remoteGatewayOptions.grpcDialOption = grpcDialOption
+
+	filerAddress := pb.ServerAddress(*remoteGatewayOptions.filerAddress)
+
+	filerSource := &source.FilerSource{}
+	filerSource.DoInitialize(
+		filerAddress.ToHttpAddress(),
+		filerAddress.ToGrpcAddress(),
+		"/", // does not matter
+		*remoteGatewayOptions.readChunkFromFiler,
+	)
+
+	remoteGatewayOptions.bucketsDir = "/buckets"
+	// check buckets again
+	remoteGatewayOptions.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
+		resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+		if err != nil {
+			return err
+		}
+		remoteGatewayOptions.bucketsDir = resp.DirBuckets
+		return nil
+	})
+
+	// read filer remote storage mount mappings
+	if detectErr := remoteGatewayOptions.collectRemoteStorageConf(); detectErr != nil {
+		fmt.Fprintf(os.Stderr, "read mount info: %v\n", detectErr)
+		return true
+	}
+
+	// synchronize /buckets folder
+	fmt.Printf("synchronize buckets in %s ...\n", remoteGatewayOptions.bucketsDir)
+	util.RetryForever("filer.remote.sync buckets", func() error {
+		return remoteGatewayOptions.followBucketUpdatesAndUploadToRemote(filerSource)
+	}, func(err error) bool {
+		if err != nil {
+			glog.Errorf("synchronize %s: %v", remoteGatewayOptions.bucketsDir, err)
+		}
+		return true
+	})
+	return true
+
+}
@@ -13,11 +13,12 @@ import (
 	"github.com/golang/protobuf/proto"
 	"math"
 	"math/rand"
+	"path/filepath"
 	"strings"
 	"time"
 )

-func (option *RemoteSyncOptions) followBucketUpdatesAndUploadToRemote(filerSource *source.FilerSource) error {
+func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSource *source.FilerSource) error {

 	// read filer remote storage mount mappings
 	if detectErr := option.collectRemoteStorageConf(); detectErr != nil {
@@ -32,16 +33,16 @@ func (option *RemoteSyncOptions) followBucketUpdatesAndUploadToRemote(filerSourc
 	processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
 		lastTime := time.Unix(0, lastTsNs)
 		glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
-		return remote_storage.SetSyncOffset(option.grpcDialOption, *option.filerAddress, option.bucketsDir, lastTsNs)
+		return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, lastTsNs)
 	})

-	lastOffsetTs := collectLastSyncOffset(option, option.bucketsDir)
+	lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, *option.timeAgo)

-	return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption, "filer.remote.sync",
+	return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, "filer.remote.sync",
 		option.bucketsDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
 }

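`pb.AddOffsetFunc` is what makes the gateway resumable: it wraps the per-event handler so the subscriber's progress (the last seen timestamp) is flushed every 3 seconds, and `collectLastSyncOffset` picks that checkpoint back up on restart. A self-contained sketch of the wrapping idea; all names here are hypothetical stand-ins:

```go
// Sketch of the periodic-offset-checkpoint idea behind pb.AddOffsetFunc;
// all identifiers here are hypothetical.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type eventFunc func(tsNs int64) error

// withOffsetCheckpoint wraps fn so the last processed timestamp is persisted
// every interval. The ticker goroutine runs for the life of the program.
func withOffsetCheckpoint(fn eventFunc, interval time.Duration, save func(lastTsNs int64) error) eventFunc {
	var lastTsNs int64
	go func() {
		for range time.Tick(interval) {
			if ts := atomic.LoadInt64(&lastTsNs); ts != 0 {
				_ = save(ts)
			}
		}
	}()
	return func(tsNs int64) error {
		if err := fn(tsNs); err != nil {
			return err
		}
		atomic.StoreInt64(&lastTsNs, tsNs)
		return nil
	}
}

func main() {
	handler := withOffsetCheckpoint(func(tsNs int64) error {
		return nil // process one metadata event
	}, 3*time.Second, func(lastTsNs int64) error {
		fmt.Println("checkpoint", time.Unix(0, lastTsNs))
		return nil
	})
	_ = handler(time.Now().UnixNano())
	time.Sleep(4 * time.Second) // let one checkpoint fire in this demo
}
```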
-func (option *RemoteSyncOptions) makeBucketedEventProcessor(filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {
+func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {

 	handleCreateBucket := func(entry *filer_pb.Entry) error {
 		if !entry.IsDirectory {
@@ -75,6 +76,16 @@ func (option *RemoteSyncOptions) makeBucketedEventProcessor(filerSource *source.
 		}

 		bucketName := strings.ToLower(entry.Name)
+		if *option.include != "" {
+			if ok, _ := filepath.Match(*option.include, entry.Name); !ok {
+				return nil
+			}
+		}
+		if *option.exclude != "" {
+			if ok, _ := filepath.Match(*option.exclude, entry.Name); ok {
+				return nil
+			}
+		}
 		if *option.createBucketRandomSuffix {
 			// https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
 			if len(bucketName)+5 > 63 {
@ -307,7 +318,7 @@ func (option *RemoteSyncOptions) makeBucketedEventProcessor(filerSource *source.
|
||||||
return eachEntryFunc, nil
|
return eachEntryFunc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (option *RemoteSyncOptions) findRemoteStorageClient(bucketName string) (client remote_storage.RemoteStorageClient, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, err error) {
|
func (option *RemoteGatewayOptions) findRemoteStorageClient(bucketName string) (client remote_storage.RemoteStorageClient, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, err error) {
|
||||||
bucket := util.FullPath(option.bucketsDir).Child(bucketName)
|
bucket := util.FullPath(option.bucketsDir).Child(bucketName)
|
||||||
|
|
||||||
var isMounted bool
|
var isMounted bool
|
||||||
|
@ -327,7 +338,7 @@ func (option *RemoteSyncOptions) findRemoteStorageClient(bucketName string) (cli
|
||||||
return client, remoteStorageMountLocation, nil
|
return client, remoteStorageMountLocation, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (option *RemoteSyncOptions) detectBucketInfo(actualDir string) (bucket util.FullPath, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, remoteConf *remote_pb.RemoteConf, ok bool) {
|
func (option *RemoteGatewayOptions) detectBucketInfo(actualDir string) (bucket util.FullPath, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, remoteConf *remote_pb.RemoteConf, ok bool) {
|
||||||
bucket, ok = extractBucketPath(option.bucketsDir, actualDir)
|
bucket, ok = extractBucketPath(option.bucketsDir, actualDir)
|
||||||
if !ok {
|
if !ok {
|
||||||
return "", nil, nil, false
|
return "", nil, nil, false
|
||||||
|
@ -355,9 +366,9 @@ func extractBucketPath(bucketsDir, dir string) (util.FullPath, bool) {
|
||||||
return util.FullPath(bucketsDir).Child(parts[0]), true
|
return util.FullPath(bucketsDir).Child(parts[0]), true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (option *RemoteSyncOptions) collectRemoteStorageConf() (err error) {
|
func (option *RemoteGatewayOptions) collectRemoteStorageConf() (err error) {
|
||||||
|
|
||||||
if mappings, err := filer.ReadMountMappings(option.grpcDialOption, *option.filerAddress); err != nil {
|
if mappings, err := filer.ReadMountMappings(option.grpcDialOption, pb.ServerAddress(*option.filerAddress)); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
option.mappings = mappings
|
option.mappings = mappings
|
|
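The @@ -75 hunk above adds -include/-exclude filtering of bucket names with shell-style patterns via filepath.Match: a non-empty include pattern must match, and a matching exclude pattern rejects. A self-contained sketch of the same accept/reject logic (shouldHandleBucket is a hypothetical helper, not a function in the tree):

package main

import (
	"fmt"
	"path/filepath"
)

// shouldHandleBucket applies the include/exclude rules from the hunk above.
func shouldHandleBucket(name, include, exclude string) bool {
	if include != "" {
		if ok, _ := filepath.Match(include, name); !ok {
			return false
		}
	}
	if exclude != "" {
		if ok, _ := filepath.Match(exclude, name); ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(shouldHandleBucket("logs-2021", "logs-*", "")) // true
	fmt.Println(shouldHandleBucket("logs-2021", "", "logs-*")) // false
	fmt.Println(shouldHandleBucket("images", "logs-*", ""))    // false
}
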
@@ -1,39 +1,29 @@
 package command
 
 import (
-	"context"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication/source"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"google.golang.org/grpc"
-	"os"
 	"time"
 )
 
 type RemoteSyncOptions struct {
 	filerAddress       *string
 	grpcDialOption     grpc.DialOption
 	readChunkFromFiler *bool
-	debug              *bool
 	timeAgo            *time.Duration
 	dir                *string
-	createBucketAt           *string
-	createBucketRandomSuffix *bool
-
-	mappings    *remote_pb.RemoteStorageMapping
-	remoteConfs map[string]*remote_pb.RemoteConf
-	bucketsDir  string
 }
 
 var _ = filer_pb.FilerClient(&RemoteSyncOptions{})
 
 func (option *RemoteSyncOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-	return pb.WithFilerClient(*option.filerAddress, option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+	return pb.WithFilerClient(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		return fn(client)
 	})
 }
@@ -49,10 +39,7 @@ func init() {
 	cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle
 	remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster")
 	remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "", "a mounted directory on filer")
-	remoteSyncOptions.createBucketAt = cmdFilerRemoteSynchronize.Flag.String("createBucketAt", "", "one remote storage name to create new buckets in")
-	remoteSyncOptions.createBucketRandomSuffix = cmdFilerRemoteSynchronize.Flag.Bool("createBucketWithRandomSuffix", true, "add randomized suffix to bucket name to avoid conflicts")
 	remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers")
-	remoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool("debug", false, "debug mode to print out filer updated remote files")
 	remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"")
 }
 
@@ -65,21 +52,8 @@ var cmdFilerRemoteSynchronize = &Command{
 	If any mounted remote file is updated, it will fetch the updated content,
 	and write to the remote storage.
 
-	There are two modes:
-
-	1) By default, watch /buckets folder and write back all changes.
-
-		# if there is only one remote storage configured
-		weed filer.remote.sync
-		# if there are multiple remote storages configured
-		# specify a remote storage to create new buckets.
-		weed filer.remote.sync -createBucketAt=cloud1
-
-	2) Write back one mounted folder to remote storage
-
 		weed filer.remote.sync -dir=/mount/s3_on_cloud
 
 
 `,
 }
 
@@ -90,28 +64,17 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
 	remoteSyncOptions.grpcDialOption = grpcDialOption
 
 	dir := *remoteSyncOptions.dir
-	filerAddress := *remoteSyncOptions.filerAddress
+	filerAddress := pb.ServerAddress(*remoteSyncOptions.filerAddress)
 
 	filerSource := &source.FilerSource{}
 	filerSource.DoInitialize(
-		filerAddress,
-		pb.ServerToGrpcAddress(filerAddress),
+		filerAddress.ToHttpAddress(),
+		filerAddress.ToGrpcAddress(),
 		"/", // does not matter
 		*remoteSyncOptions.readChunkFromFiler,
 	)
 
-	remoteSyncOptions.bucketsDir = "/buckets"
-	// check buckets again
-	remoteSyncOptions.WithFilerClient(func(filerClient filer_pb.SeaweedFilerClient) error {
-		resp, err := filerClient.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
-		if err != nil {
-			return err
-		}
-		remoteSyncOptions.bucketsDir = resp.DirBuckets
-		return nil
-	})
-
-	if dir != "" && dir != remoteSyncOptions.bucketsDir {
+	if dir != "" {
 		fmt.Printf("synchronize %s to remote storage...\n", dir)
 		util.RetryForever("filer.remote.sync "+dir, func() error {
 			return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
@@ -124,22 +87,6 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
 		return true
 	}
 
-	// read filer remote storage mount mappings
-	if detectErr := remoteSyncOptions.collectRemoteStorageConf(); detectErr != nil {
-		fmt.Fprintf(os.Stderr, "read mount info: %v\n", detectErr)
-		return true
-	}
-
-	// synchronize /buckets folder
-	fmt.Printf("synchronize buckets in %s ...\n", remoteSyncOptions.bucketsDir)
-	util.RetryForever("filer.remote.sync buckets", func() error {
-		return remoteSyncOptions.followBucketUpdatesAndUploadToRemote(filerSource)
-	}, func(err error) bool {
-		if err != nil {
-			glog.Errorf("synchronize %s: %v", remoteSyncOptions.bucketsDir, err)
-		}
-		return true
-	})
 	return true
 
 }
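The `var _ = filer_pb.FilerClient(&RemoteSyncOptions{})` line kept by this hunk is a compile-time assertion that the options struct still satisfies the filer client interface after the refactor: if a required method is lost, the build fails here rather than at a distant call site. A standalone illustration of the idiom with invented stand-in types (Greeter and Options are not names from the tree):

package main

import "fmt"

// Greeter stands in for filer_pb.FilerClient in this sketch.
type Greeter interface {
	Greet() string
}

type Options struct{ name string }

// Greet makes *Options satisfy Greeter.
func (o *Options) Greet() string { return "hello, " + o.name }

// Compile-time assertion: breaks the build if *Options
// ever stops implementing Greeter.
var _ Greeter = (*Options)(nil)

func main() {
	fmt.Println((&Options{name: "filer"}).Greet())
}
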
@@ -12,6 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/replication/source"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc"
 	"os"
 	"strings"
 	"time"
@@ -20,7 +21,7 @@ import (
 func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error {
 
 	// read filer remote storage mount mappings
-	_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, *option.filerAddress, mountedDir)
+	_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir)
 	if detectErr != nil {
 		return fmt.Errorf("read mount info: %v", detectErr)
 	}
@@ -33,12 +34,12 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
 	processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
 		lastTime := time.Unix(0, lastTsNs)
 		glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3))
-		return remote_storage.SetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir, lastTsNs)
+		return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, lastTsNs)
 	})
 
-	lastOffsetTs := collectLastSyncOffset(option, mountedDir)
+	lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, *option.timeAgo)
 
-	return pb.FollowMetadata(*option.filerAddress, option.grpcDialOption, "filer.remote.sync",
+	return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, "filer.remote.sync",
 		mountedDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)
 }
 
@@ -159,19 +160,19 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
 	return eachEntryFunc, nil
 }
 
-func collectLastSyncOffset(option *RemoteSyncOptions, mountedDir string) time.Time {
+func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, mountedDir string, timeAgo time.Duration) time.Time {
 	// 1. specified by timeAgo
 	// 2. last offset timestamp for this directory
 	// 3. directory creation time
 	var lastOffsetTs time.Time
-	if *option.timeAgo == 0 {
-		mountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))
+	if timeAgo == 0 {
+		mountedDirEntry, err := filer_pb.GetEntry(filerClient, util.FullPath(mountedDir))
 		if err != nil {
 			glog.V(0).Infof("get mounted directory %s: %v", mountedDir, err)
 			return time.Now()
 		}
 
-		lastOffsetTsNs, err := remote_storage.GetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir)
+		lastOffsetTsNs, err := remote_storage.GetSyncOffset(grpcDialOption, filerAddress, mountedDir)
 		if mountedDirEntry != nil {
 			if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
 				lastOffsetTs = time.Unix(0, lastOffsetTsNs)
@@ -183,7 +184,7 @@ func collectLastSyncOffset(option *RemoteSyncOptions, mountedDir string) time.Ti
 				lastOffsetTs = time.Now()
 			}
 	} else {
-		lastOffsetTs = time.Now().Add(-*option.timeAgo)
+		lastOffsetTs = time.Now().Add(-timeAgo)
 	}
 	return lastOffsetTs
 }
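collectLastSyncOffset now receives its dependencies explicitly and, per its own comments, resolves the starting timestamp in order: an explicit timeAgo, then the stored per-directory offset when it is newer than the directory creation time, then the current time. A sketch of that precedence with plain values (resolveStartTime and its parameters are stand-ins for the real lookups, not names from the tree):

package main

import (
	"fmt"
	"time"
)

// resolveStartTime mirrors the precedence in collectLastSyncOffset above:
// 1. a non-zero timeAgo wins; 2. else use the stored offset when it is
// newer than the directory creation time; 3. else fall back to now.
func resolveStartTime(timeAgo time.Duration, storedOffsetNs int64, haveOffset bool, dirCreated time.Time) time.Time {
	if timeAgo != 0 {
		return time.Now().Add(-timeAgo)
	}
	if haveOffset && dirCreated.Before(time.Unix(0, storedOffsetNs)) {
		return time.Unix(0, storedOffsetNs)
	}
	return time.Now()
}

func main() {
	created := time.Now().Add(-24 * time.Hour)
	stored := time.Now().Add(-1 * time.Hour).UnixNano()
	fmt.Println(resolveStartTime(0, stored, true, created))              // stored offset
	fmt.Println(resolveStartTime(30*time.Minute, stored, true, created)) // timeAgo
	fmt.Println(resolveStartTime(0, 0, false, created))                  // now
}
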
@@ -93,9 +93,11 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 
 	grace.SetupProfiling(*syncCpuProfile, *syncMemProfile)
 
+	filerA := pb.ServerAddress(*syncOptions.filerA)
+	filerB := pb.ServerAddress(*syncOptions.filerB)
 	go func() {
 		for {
-			err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB,
+			err := doSubscribeFilerMetaChanges(grpcDialOption, filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, filerB,
 				*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug)
 			if err != nil {
 				glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
@@ -107,7 +109,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 	if !*syncOptions.isActivePassive {
 		go func() {
 			for {
-				err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA,
+				err := doSubscribeFilerMetaChanges(grpcDialOption, filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, filerA,
 					*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug)
 				if err != nil {
 					glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
@@ -122,7 +124,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 	return true
 }
 
-func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string,
+func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler pb.ServerAddress, sourcePath string, sourceReadChunkFromFiler bool, targetFiler pb.ServerAddress, targetPath string,
 	replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error {
 
 	// read source filer signature
@@ -147,9 +149,9 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
 
 	// create filer sink
 	filerSource := &source.FilerSource{}
-	filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler)
+	filerSource.DoInitialize(sourceFiler.ToHttpAddress(), sourceFiler.ToGrpcAddress(), sourcePath, sourceReadChunkFromFiler)
 	filerSink := &filersink.FilerSink{}
-	filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
+	filerSink.DoInitialize(targetFiler.ToHttpAddress(), targetFiler.ToGrpcAddress(), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler)
 	filerSink.SetSourceFiler(filerSource)
 
 	persistEventFn := genProcessFunction(sourcePath, targetPath, filerSink, debug)
@@ -170,7 +172,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
 		return setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, lastTsNs)
 	})
 
-	return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+targetFiler,
+	return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+string(targetFiler),
 		sourcePath, nil, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
 
 }
@@ -179,7 +181,7 @@ const (
 	SyncKeyPrefix = "sync."
 )
 
-func getOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) {
+func getOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) {
 
 	readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		syncKey := []byte(signaturePrefix + "____")
@@ -206,7 +208,7 @@ func getOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix str
 
 }
 
-func setOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32, offsetTsNs int64) error {
+func setOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32, offsetTsNs int64) error {
 	return pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 
 		syncKey := []byte(signaturePrefix + "____")
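The common thread of these hunks is replacing bare host:port strings with the typed pb.ServerAddress, which can hand out both the HTTP and the gRPC endpoint. A self-contained sketch of the idea follows; the +10000 gRPC offset matches the default this commit applies elsewhere, but the real pb.ServerAddress may resolve it differently (for example, honoring an explicitly configured gRPC port):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// ServerAddress is a host:port string with typed helpers,
// loosely modeled on pb.ServerAddress from this commit.
type ServerAddress string

func (a ServerAddress) ToHttpAddress() string { return string(a) }

// ToGrpcAddress derives the gRPC endpoint as httpPort+10000,
// the default convention applied when no explicit grpc port is set.
func (a ServerAddress) ToGrpcAddress() string {
	host, port, err := net.SplitHostPort(string(a))
	if err != nil {
		return string(a)
	}
	p, err := strconv.Atoi(port)
	if err != nil {
		return string(a)
	}
	return net.JoinHostPort(host, strconv.Itoa(p+10000))
}

func main() {
	addr := ServerAddress("localhost:8888")
	fmt.Println(addr.ToHttpAddress()) // localhost:8888
	fmt.Println(addr.ToGrpcAddress()) // localhost:18888
}

Because the type is just a string underneath, converting a flag is a cast, which is why so many call sites above shrink to pb.ServerAddress(*option.filerAddress).
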
@@ -43,38 +43,35 @@ func runIam(cmd *Command, args []string) bool {
 }
 
 func (iamopt *IamOptions) startIamServer() bool {
-	filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer)
-	if err != nil {
-		glog.Fatal(err)
-		return false
-	}
+	filerAddress := pb.ServerAddress(*iamopt.filer)
 
 	util.LoadConfiguration("security", false)
 	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
 	for {
-		err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 			if err != nil {
-				return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+				return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
 			}
 			glog.V(0).Infof("IAM read filer configuration: %s", resp)
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
 			time.Sleep(time.Second)
 		} else {
-			glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+			glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress())
 			break
 		}
 	}
 
+	masters := pb.ServerAddresses(*iamopt.masters).ToAddresses()
 	router := mux.NewRouter().SkipClean(true)
 	_, iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{
-		Filer:            *iamopt.filer,
-		Port:             *iamopt.port,
-		FilerGrpcAddress: filerGrpcAddress,
+		Masters:        masters,
+		Filer:          filerAddress,
+		Port:           *iamopt.port,
 		GrpcDialOption: grpcDialOption,
 	})
 	glog.V(0).Info("NewIamApiServer created")
 	if iamApiServer_err != nil {
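The startup loop above polls GetFilerConfiguration once per second until the filer answers; the same shape recurs in the s3 and msg.broker commands below. A standalone sketch with a hypothetical probe function in place of the gRPC call:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForFiler polls probe once per second until it succeeds,
// the same shape as the startup loops in iam/s3/msg_broker.
func waitForFiler(addr string, probe func(addr string) error) {
	for {
		if err := probe(addr); err != nil {
			fmt.Printf("wait to connect to filer %s: %v\n", addr, err)
			time.Sleep(time.Second)
			continue
		}
		fmt.Printf("connected to filer %s\n", addr)
		return
	}
}

func main() {
	tries := 0
	waitForFiler("localhost:8888", func(addr string) error {
		tries++
		if tries < 3 {
			return errors.New("connection refused")
		}
		return nil // stands in for a successful GetFilerConfiguration call
	})
}
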
@@ -29,6 +29,6 @@ import (
 	_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
 	_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
 	_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/redis3"
 	_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
-	_ "github.com/chrislusf/seaweedfs/weed/filer/tikv"
 )
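Filer store backends are wired in purely through blank imports: each package registers itself in an init function, so adding redis3 and dropping tikv is a one-line change each. A toy version of that registration pattern (the registry and store names here are invented for illustration, not the tree's actual mechanism):

package main

import "fmt"

// registry mimics how side-effect imports contribute backends: each
// package adds itself in init(), so importing it with `_` is enough.
var registry = map[string]func() string{}

func register(name string, factory func() string) { registry[name] = factory }

// In the real tree an init like this would live in e.g. weed/filer/redis3
// and run because of the blank import added above (an assumption here).
func init() { register("redis3", func() string { return "redis3 store" }) }

func main() {
	for name, factory := range registry {
		fmt.Println(name, "->", factory())
	}
}
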
@@ -27,6 +27,7 @@ var (
 
 type MasterOptions struct {
 	port       *int
+	portGrpc   *int
 	ip         *string
 	ipBind     *string
 	metaFolder *string
@@ -46,6 +47,7 @@ type MasterOptions struct {
 func init() {
 	cmdMaster.Run = runMaster // break init cycle
 	m.port = cmdMaster.Flag.Int("port", 9333, "http listen port")
+	m.portGrpc = cmdMaster.Flag.Int("port.grpc", 0, "grpc listen port")
 	m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master <ip>|<server> address, also used as identifier")
 	m.ipBind = cmdMaster.Flag.String("ip.bind", "", "ip address to bind to")
 	m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data")
@@ -111,7 +113,11 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 
 	backend.LoadConfiguration(util.GetViper())
 
-	myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers)
+	if *masterOption.portGrpc == 0 {
+		*masterOption.portGrpc = 10000 + *masterOption.port
+	}
+
+	myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.portGrpc, *masterOption.peers)
 
 	r := mux.NewRouter()
 	ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
@@ -130,7 +136,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 	ms.SetRaftServer(raftServer)
 	r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
 	// starting grpc server
-	grpcPort := *masterOption.port + 10000
+	grpcPort := *masterOption.portGrpc
 	grpcL, err := util.NewListener(util.JoinHostPort(*masterOption.ipBind, grpcPort), 0)
 	if err != nil {
 		glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
@@ -160,16 +166,14 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 	select {}
 }
 
-func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
+func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers string) (masterAddress pb.ServerAddress, cleanedPeers []pb.ServerAddress) {
 	glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
-	masterAddress = util.JoinHostPort(masterIp, masterPort)
-	if peers != "" {
-		cleanedPeers = strings.Split(peers, ",")
-	}
+	masterAddress = pb.NewServerAddress(masterIp, masterPort, masterGrpcPort)
+	cleanedPeers = pb.ServerAddresses(peers).ToAddresses()
 
 	hasSelf := false
 	for _, peer := range cleanedPeers {
-		if peer == masterAddress {
+		if peer.ToHttpAddress() == masterAddress.ToHttpAddress() {
 			hasSelf = true
 			break
 		}
@@ -179,13 +183,15 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
 		cleanedPeers = append(cleanedPeers, masterAddress)
 	}
 	if len(cleanedPeers)%2 == 0 {
-		glog.Fatalf("Only odd number of masters are supported!")
+		glog.Fatalf("Only odd number of masters are supported: %+v", cleanedPeers)
 	}
 	return
 }
 
-func isTheFirstOne(self string, peers []string) bool {
-	sort.Strings(peers)
+func isTheFirstOne(self pb.ServerAddress, peers []pb.ServerAddress) bool {
+	sort.Slice(peers, func(i, j int) bool {
+		return strings.Compare(string(peers[i]), string(peers[j])) < 0
+	})
 	if len(peers) <= 0 {
 		return true
 	}
@@ -193,9 +199,9 @@ func isTheFirstOne(self string, peers []string) bool {
 }
 
 func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption {
+	masterAddress := pb.NewServerAddress(*m.ip, *m.port, *m.portGrpc)
 	return &weed_server.MasterOption{
-		Host: *m.ip,
-		Port: *m.port,
+		Master:            masterAddress,
 		MetaFolder:        *m.metaFolder,
 		VolumeSizeLimitMB: uint32(*m.volumeSizeLimitMB),
 		VolumePreallocate: *m.volumePreallocate,
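Two small invariants are worth noting in the master changes above: when -port.grpc is 0, the gRPC port defaults to 10000 plus the HTTP port, and checkPeers still rejects an even number of masters, since an even-sized cluster cannot form a strict Raft majority. Both rules, extracted into a runnable sketch (the function names are mine, not the tree's):

package main

import "fmt"

// defaultGrpcPort applies the fallback added in startMaster above.
func defaultGrpcPort(httpPort, grpcPort int) int {
	if grpcPort == 0 {
		return 10000 + httpPort
	}
	return grpcPort
}

// checkOddPeers mirrors the peer-count guard: an even-sized cluster
// cannot reach a strict majority, so it is rejected.
func checkOddPeers(peers []string) error {
	if len(peers)%2 == 0 {
		return fmt.Errorf("only odd number of masters are supported: %+v", peers)
	}
	return nil
}

func main() {
	fmt.Println(defaultGrpcPort(9333, 0))     // 19333
	fmt.Println(defaultGrpcPort(9333, 19444)) // 19444
	fmt.Println(checkOddPeers([]string{"m1:9333", "m2:9333", "m3:9333"})) // <nil>
	fmt.Println(checkOddPeers([]string{"m1:9333", "m2:9333"}))            // error
}
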
@@ -13,7 +13,6 @@ import (
 	"github.com/gorilla/mux"
 	"google.golang.org/grpc/reflection"
 	"net/http"
-	"strings"
 	"time"
 )
 
@@ -24,6 +23,7 @@ var (
 func init() {
 	cmdMasterFollower.Run = runMasterFollower // break init cycle
 	mf.port = cmdMasterFollower.Flag.Int("port", 9334, "http listen port")
+	mf.portGrpc = cmdMasterFollower.Flag.Int("port.grpc", 0, "grpc listen port")
 	mf.ipBind = cmdMasterFollower.Flag.String("ip.bind", "", "ip address to bind to")
 	mf.peers = cmdMasterFollower.Flag.String("masters", "localhost:9333", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095")
 
@@ -70,6 +70,10 @@ func runMasterFollower(cmd *Command, args []string) bool {
 	util.LoadConfiguration("security", false)
 	util.LoadConfiguration("master", false)
 
+	if *mf.portGrpc == 0 {
+		*mf.portGrpc = 10000 + *mf.port
+	}
+
 	startMasterFollower(mf)
 
 	return true
@@ -78,19 +82,15 @@ func runMasterFollower(cmd *Command, args []string) bool {
 func startMasterFollower(masterOptions MasterOptions) {
 
 	// collect settings from main masters
-	masters := strings.Split(*mf.peers, ",")
-	masterGrpcAddresses, err := pb.ParseServersToGrpcAddresses(masters)
-	if err != nil {
-		glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
-		return
-	}
+	masters := pb.ServerAddresses(*mf.peers).ToAddresses()
 
+	var err error
 	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.master")
 	for i := 0; i < 10; i++ {
-		err = pb.WithOneOfGrpcMasterClients(masterGrpcAddresses, grpcDialOption, func(client master_pb.SeaweedClient) error {
+		err = pb.WithOneOfGrpcMasterClients(masters, grpcDialOption, func(client master_pb.SeaweedClient) error {
			resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
 			if err != nil {
-				return fmt.Errorf("get master grpc address %v configuration: %v", masterGrpcAddresses, err)
+				return fmt.Errorf("get master grpc address %v configuration: %v", masters, err)
 			}
 			masterOptions.defaultReplication = &resp.DefaultReplication
 			masterOptions.volumeSizeLimitMB = aws.Uint(uint(resp.VolumeSizeLimitMB))
@@ -98,13 +98,13 @@ func startMasterFollower(masterOptions MasterOptions) {
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("failed to talk to filer %v: %v", masterGrpcAddresses, err)
+			glog.V(0).Infof("failed to talk to filer %v: %v", masters, err)
 			glog.V(0).Infof("wait for %d seconds ...", i+1)
 			time.Sleep(time.Duration(i+1) * time.Second)
 		}
 	}
 	if err != nil {
-		glog.Errorf("failed to talk to filer %v: %v", masterGrpcAddresses, err)
+		glog.Errorf("failed to talk to filer %v: %v", masters, err)
 		return
 	}
 
@@ -121,7 +121,7 @@ func startMasterFollower(masterOptions MasterOptions) {
 	}
 
 	// starting grpc server
-	grpcPort := *masterOptions.port + 10000
+	grpcPort := *masterOptions.portGrpc
 	grpcL, err := util.NewListener(util.JoinHostPort(*masterOptions.ipBind, grpcPort), 0)
 	if err != nil {
 		glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
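The follower's bootstrap differs from the infinite wait loops elsewhere in this commit: it gives up after ten rounds, sleeping i+1 seconds between attempts. The same bounded linear backoff as a standalone helper (retryWithBackoff is a stand-in name):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff mirrors the follower's loop: up to maxTries attempts,
// sleeping (attempt+1) seconds after each failure.
func retryWithBackoff(maxTries int, job func() error) error {
	var err error
	for i := 0; i < maxTries; i++ {
		if err = job(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v, wait for %d seconds ...\n", i+1, err, i+1)
		time.Sleep(time.Duration(i+1) * time.Second)
	}
	return err
}

func main() {
	n := 0
	err := retryWithBackoff(10, func() error {
		n++
		if n < 2 {
			return errors.New("master not ready")
		}
		return nil
	})
	fmt.Println("result:", err)
}
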
@@ -70,35 +70,30 @@ func getParentInode(mountDir string) (uint64, error) {
 
 func RunMount(option *MountOptions, umask os.FileMode) bool {
 
-	filers := strings.Split(*option.filer, ",")
-	// parse filer grpc address
-	filerGrpcAddresses, err := pb.ParseServersToGrpcAddresses(filers)
-	if err != nil {
-		glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
-		return true
-	}
+	filerAddresses := pb.ServerAddresses(*option.filer).ToAddresses()
 
 	util.LoadConfiguration("security", false)
 	// try to connect to filer, filerBucketsPath may be useful later
 	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
 	var cipher bool
+	var err error
 	for i := 0; i < 10; i++ {
-		err = pb.WithOneOfGrpcFilerClients(filerGrpcAddresses, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		err = pb.WithOneOfGrpcFilerClients(filerAddresses, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 			if err != nil {
-				return fmt.Errorf("get filer grpc address %v configuration: %v", filerGrpcAddresses, err)
+				return fmt.Errorf("get filer grpc address %v configuration: %v", filerAddresses, err)
 			}
 			cipher = resp.Cipher
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("failed to talk to filer %v: %v", filerGrpcAddresses, err)
+			glog.V(0).Infof("failed to talk to filer %v: %v", filerAddresses, err)
 			glog.V(0).Infof("wait for %d seconds ...", i+1)
 			time.Sleep(time.Duration(i+1) * time.Second)
 		}
 	}
 	if err != nil {
-		glog.Errorf("failed to talk to filer %v: %v", filerGrpcAddresses, err)
+		glog.Errorf("failed to talk to filer %v: %v", filerAddresses, err)
 		return true
 	}
 
@@ -206,8 +201,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 
 	seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
 		MountDirectory:     dir,
-		FilerAddresses:     filers,
-		FilerGrpcAddresses: filerGrpcAddresses,
+		FilerAddresses:     filerAddresses,
 		GrpcDialOption:     grpcDialOption,
 		FilerMountRootPath: mountRoot,
 		Collection:         *option.collection,
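RunMount now feeds the whole comma-separated -filer list, as typed addresses, to pb.WithOneOfGrpcFilerClients, which tries each filer until one responds. A sketch of that first-success failover shape (withOneOf is a local stand-in; the real helper may iterate or pick candidates differently):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// withOneOf tries fn against each address and stops at the first success,
// loosely following pb.WithOneOfGrpcFilerClients in the hunk above.
func withOneOf(addresses []string, fn func(addr string) error) error {
	var lastErr error
	for _, addr := range addresses {
		if lastErr = fn(addr); lastErr == nil {
			return nil
		}
	}
	return lastErr
}

func main() {
	filers := strings.Split("filer1:8888,filer2:8888", ",")
	err := withOneOf(filers, func(addr string) error {
		if addr == "filer2:8888" {
			return nil // pretend only the second filer is up
		}
		return errors.New("unreachable: " + addr)
	})
	fmt.Println("connected:", err == nil)
}
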
@@ -62,35 +62,31 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
 
 	grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile)
 
-	filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*msgBrokerOpt.filer)
-	if err != nil {
-		glog.Fatal(err)
-		return false
-	}
+	filerAddress := pb.ServerAddress(*msgBrokerOpt.filer)
 
 	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker")
 	cipher := false
 
 	for {
-		err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 			if err != nil {
-				return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+				return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
 			}
 			cipher = resp.Cipher
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerAddress.ToGrpcAddress())
 			time.Sleep(time.Second)
 		} else {
-			glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+			glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerAddress.ToGrpcAddress())
 			break
 		}
 	}
 
 	qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{
-		Filers:             []string{*msgBrokerOpt.filer},
+		Filers:             []pb.ServerAddress{filerAddress},
 		DefaultReplication: "",
 		MaxMB:              0,
 		Ip:                 *msgBrokerOpt.ip,
@@ -42,7 +42,7 @@ func init() {
 	s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
 	s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
 	s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
-	s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders")
+	s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", true, "allow empty folders")
 }
 
 var cmdS3 = &Command{
@@ -137,11 +137,7 @@ func runS3(cmd *Command, args []string) bool {
 
 func (s3opt *S3Options) startS3Server() bool {
 
-	filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*s3opt.filer)
-	if err != nil {
-		glog.Fatal(err)
-		return false
-	}
+	filerAddress := pb.ServerAddress(*s3opt.filer)
 
 	filerBucketsPath := "/buckets"
 
@@ -152,10 +148,10 @@ func (s3opt *S3Options) startS3Server() bool {
 	var metricsIntervalSec int
 
 	for {
-		err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
 			if err != nil {
-				return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+				return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
 			}
 			filerBucketsPath = resp.DirBuckets
 			metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
@@ -163,10 +159,10 @@ func (s3opt *S3Options) startS3Server() bool {
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
 			time.Sleep(time.Second)
 		} else {
-			glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+			glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress())
 			break
 		}
 	}
@@ -176,9 +172,8 @@ func (s3opt *S3Options) startS3Server() bool {
 	router := mux.NewRouter().SkipClean(true)
 
 	_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
-		Filer:            *s3opt.filer,
+		Filer:       filerAddress,
 		Port:        *s3opt.port,
-		FilerGrpcAddress: filerGrpcAddress,
 		Config:      *s3opt.config,
 		DomainName:  *s3opt.domainName,
 		BucketsPath: filerBucketsPath,
@@ -185,6 +185,28 @@ routeByLatency = false
 # This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
 superLargeDirectories = []
 
+[redis3] # beta
+enabled = false
+address = "localhost:6379"
+password = ""
+database = 0
+
+[redis_cluster3] # beta
+enabled = false
+addresses = [
+    "localhost:30001",
+    "localhost:30002",
+    "localhost:30003",
+    "localhost:30004",
+    "localhost:30005",
+    "localhost:30006",
+]
+password = ""
+# allows reads from slave servers or the master, but all writes still go to the master
+readOnly = false
+# automatically use the closest Redis server for reads
+routeByLatency = false
+
 [etcd]
 enabled = false
 servers = "localhost:2379"
@@ -230,11 +252,3 @@ location = "/tmp/"
 address = "localhost:6379"
 password = ""
 database = 1
-
-[tikv]
-enabled = false
-# If you have many pd address, use ',' split then:
-#   pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
-pdaddrs = "localhost:2379"
-# Concurrency for TiKV delete range
-deleterange_concurrency = 1
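A filer would consume the new [redis3]/[redis_cluster3] sections through its configuration loader, and only one store should be enabled at a time. A hedged sketch of structs mirroring the two sections and the single-node/cluster choice (the field names follow the TOML keys above and are assumptions, not the store's actual types):

package main

import "fmt"

// redis3Config mirrors the [redis3] section; cluster3Config mirrors
// [redis_cluster3]. Both are illustrative shapes only.
type redis3Config struct {
	Enabled  bool
	Address  string
	Password string
	Database int
}

type cluster3Config struct {
	Enabled        bool
	Addresses      []string
	Password       string
	ReadOnly       bool
	RouteByLatency bool
}

// describe shows which backend a filer would pick from the two sections.
func describe(single redis3Config, cluster cluster3Config) string {
	switch {
	case single.Enabled && cluster.Enabled:
		return "invalid: enable only one of redis3 / redis_cluster3"
	case single.Enabled:
		return fmt.Sprintf("redis3 at %s (db %d)", single.Address, single.Database)
	case cluster.Enabled:
		return fmt.Sprintf("redis_cluster3 with %d nodes", len(cluster.Addresses))
	default:
		return "redis3 disabled"
	}
}

func main() {
	fmt.Println(describe(
		redis3Config{Enabled: true, Address: "localhost:6379"},
		cluster3Config{},
	))
}
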
@@ -23,10 +23,7 @@ default = "localhost:8888"    # used by maintenance scripts if the scripts needs
 
 
 [master.sequencer]
-type = "raft"     # Choose [raft|etcd|snowflake] type for storing the file id sequence
-# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
-# example : http://127.0.0.1:2379,http://127.0.0.1:2389
-sequencer_etcd_urls = "http://127.0.0.1:2379"
+type = "raft"     # Choose [raft|snowflake] type for storing the file id sequence
 # when sequencer.type = snowflake, the snowflake id must be different from other masters
 sequencer_snowflake_id = 0     # any number between 1~1023
 
@ -2,6 +2,7 @@ package command
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/pb"
|
||||||
"github.com/chrislusf/seaweedfs/weed/util/grace"
|
"github.com/chrislusf/seaweedfs/weed/util/grace"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
@ -85,6 +86,7 @@ func init() {
|
||||||
serverOptions.debugPort = cmdServer.Flag.Int("debug.port", 6060, "http port for debugging")
|
serverOptions.debugPort = cmdServer.Flag.Int("debug.port", 6060, "http port for debugging")
|
||||||
|
|
||||||
masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
|
masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port")
|
||||||
|
masterOptions.portGrpc = cmdServer.Flag.Int("master.port.grpc", 0, "master server grpc listen port")
|
||||||
masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
|
masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified")
|
||||||
masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list")
|
masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list")
|
||||||
masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
|
masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.")
|
||||||
|
@ -97,6 +99,7 @@ func init() {
|
||||||
|
|
||||||
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
|
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
|
||||||
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
|
filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port")
|
||||||
|
filerOptions.portGrpc = cmdServer.Flag.Int("filer.port.grpc", 0, "filer server grpc listen port")
|
||||||
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
|
filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port")
|
||||||
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
|
filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.")
|
||||||
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
|
filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing")
|
||||||
|
@ -108,6 +111,7 @@ func init() {
|
||||||
filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
|
filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
|
||||||
|
|
||||||
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
|
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
|
||||||
|
serverOptions.v.portGrpc = cmdServer.Flag.Int("volume.port.grpc", 0, "volume server grpc listen port")
|
||||||
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
|
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
|
||||||
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
|
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
|
||||||
serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|<tag>] hard drive or solid state drive or any tag")
|
||||||
|
@ -128,7 +132,7 @@ func init() {
|
||||||
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
|
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
|
||||||
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
|
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
|
||||||
s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
|
s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
|
||||||
s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders")
|
s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", true, "allow empty folders")
|
||||||
|
|
||||||
webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
|
webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
|
||||||
webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
|
webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
|
||||||
@@ -165,20 +169,20 @@ func runServer(cmd *Command, args []string) bool {
 }

 if *isStartingMasterServer {
-    _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)
-    peers := strings.Join(peerList, ",")
+    _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.portGrpc, *masterOptions.peers)
+    peers := strings.Join(pb.ToAddressStrings(peerList), ",")
     masterOptions.peers = &peers
 }

 // ip address
 masterOptions.ip = serverIp
 masterOptions.ipBind = serverBindIp
-filerOptions.masters = masterOptions.peers
+filerOptions.masters = pb.ServerAddresses(*masterOptions.peers).ToAddresses()
 filerOptions.ip = serverIp
 filerOptions.bindIp = serverBindIp
 serverOptions.v.ip = serverIp
 serverOptions.v.bindIp = serverBindIp
-serverOptions.v.masters = masterOptions.peers
+serverOptions.v.masters = pb.ServerAddresses(*masterOptions.peers).ToAddresses()
 serverOptions.v.idleConnectionTimeout = serverTimeout
 serverOptions.v.dataCenter = serverDataCenter
 serverOptions.v.rack = serverRack
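
Note: the master peer list now travels as typed addresses. pb.ServerAddresses(*masterOptions.peers).ToAddresses() converts the comma-separated peer string into []pb.ServerAddress once at startup, so downstream components stop re-splitting raw strings. A minimal standalone sketch of that conversion, assuming pb.ServerAddress is a string-derived "host:port" type (names mirror the diff; the implementation is illustrative):

package main

import (
	"fmt"
	"strings"
)

// ServerAddress stands in for pb.ServerAddress, treated throughout this
// commit as a string-derived "host:port" value. Illustrative only.
type ServerAddress string

// ServerAddresses stands in for pb.ServerAddresses, a comma-separated list.
type ServerAddresses string

// ToAddresses splits the list into typed addresses, as the call sites above assume.
func (sa ServerAddresses) ToAddresses() (addresses []ServerAddress) {
	for _, part := range strings.Split(string(sa), ",") {
		addresses = append(addresses, ServerAddress(strings.TrimSpace(part)))
	}
	return
}

func main() {
	fmt.Println(ServerAddresses("localhost:9333,localhost:9334").ToAddresses())
	// [localhost:9333 localhost:9334]
}
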
@@ -194,7 +198,7 @@ func runServer(cmd *Command, args []string) bool {
 filerOptions.disableHttp = serverDisableHttp
 masterOptions.disableHttp = serverDisableHttp

-filerAddress := util.JoinHostPort(*serverIp, *filerOptions.port)
+filerAddress := string(pb.NewServerAddress(*serverIp, *filerOptions.port, *filerOptions.portGrpc))
 s3Options.filer = &filerAddress
 webdavOptions.filer = &filerAddress
 msgBrokerOptions.filer = &filerAddress
@@ -2,6 +2,7 @@ package command

 import (
     "fmt"
+    "github.com/chrislusf/seaweedfs/weed/pb"

     "github.com/chrislusf/seaweedfs/weed/security"
     "github.com/chrislusf/seaweedfs/weed/shell"
@@ -53,13 +54,7 @@ func runShell(command *Command, args []string) bool {

 fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler)

-var err error
-shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
-shellOptions.FilerAddress = *shellInitialFiler
-if err != nil {
-    fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
-    return false
-}
+shellOptions.FilerAddress = pb.ServerAddress(*shellInitialFiler)
 shellOptions.Directory = "/"

 shell.RunShell(shellOptions)
@@ -71,7 +71,7 @@ func runUpload(cmd *Command, args []string) bool {
 util.LoadConfiguration("security", false)
 grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

-defaultReplication, err := readMasterConfiguration(grpcDialOption, *upload.master)
+defaultReplication, err := readMasterConfiguration(grpcDialOption, pb.ServerAddress(*upload.master))
 if err != nil {
     fmt.Printf("upload: %v", err)
     return false
@@ -96,7 +96,7 @@ func runUpload(cmd *Command, args []string) bool {
 if e != nil {
     return e
 }
-results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
+results, e := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
 bytes, _ := json.Marshal(results)
 fmt.Println(string(bytes))
 if e != nil {
@@ -118,7 +118,7 @@ func runUpload(cmd *Command, args []string) bool {
 fmt.Println(e.Error())
 return false
 }
-results, err := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
+results, err := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl)
 if err != nil {
     fmt.Println(err.Error())
     return false
@@ -129,7 +129,7 @@ func runUpload(cmd *Command, args []string) bool {
 return true
 }

-func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) {
+func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress pb.ServerAddress) (replication string, err error) {
 err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error {
     resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
     if err != nil {
@@ -36,6 +36,7 @@ var (

 type VolumeServerOptions struct {
     port *int
+    portGrpc *int
     publicPort *int
     folders []string
     folderMaxLimits []int
@@ -43,7 +44,8 @@ type VolumeServerOptions struct {
     ip *string
     publicUrl *string
     bindIp *string
-    masters *string
+    mastersString *string
+    masters []pb.ServerAddress
     idleConnectionTimeout *int
     dataCenter *string
     rack *string
@@ -68,11 +70,12 @@ type VolumeServerOptions struct {
 func init() {
     cmdVolume.Run = runVolume // break init cycle
     v.port = cmdVolume.Flag.Int("port", 8080, "http listen port")
+    v.portGrpc = cmdVolume.Flag.Int("port.grpc", 0, "grpc listen port")
     v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public")
     v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier")
     v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address")
     v.bindIp = cmdVolume.Flag.String("ip.bind", "", "ip address to bind to")
-    v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
+    v.mastersString = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers")
     v.preStopSeconds = cmdVolume.Flag.Int("preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
     // v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting")
     v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds")
@@ -123,6 +126,7 @@ func runVolume(cmd *Command, args []string) bool {
 go stats_collect.StartMetricsServer(*v.metricsHttpPort)

 minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
+v.masters = pb.ServerAddresses(*v.mastersString).ToAddresses()
 v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, minFreeSpaces)

 return true
@@ -193,6 +197,9 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 if *v.publicPort == 0 {
     *v.publicPort = *v.port
 }
+if *v.portGrpc == 0 {
+    *v.portGrpc = 10000 + *v.port
+}
 if *v.publicUrl == "" {
     *v.publicUrl = util.JoinHostPort(*v.ip, *v.publicPort)
 }
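
The three added lines give the gRPC port a defaulting rule: an explicitly configured port wins, otherwise the volume server keeps the long-standing convention of serving gRPC on the HTTP port plus 10000 (the same constant that startGrpcService used to hard-code, as the hunk further below shows). A sketch of the rule:

// Sketch of the defaulting added above; mirrors the diff, not a real API.
func effectiveGrpcPort(httpPort, grpcPortFlag int) int {
	if grpcPortFlag != 0 {
		return grpcPortFlag // explicitly set via -port.grpc / -volume.port.grpc
	}
	return 10000 + httpPort // legacy convention, preserved as the default
}
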
@@ -221,14 +228,12 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
     volumeNeedleMapKind = storage.NeedleMapLevelDbLarge
 }

-masters := *v.masters
-
 volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
-    *v.ip, *v.port, *v.publicUrl,
+    *v.ip, *v.port, *v.portGrpc, *v.publicUrl,
     v.folders, v.folderMaxLimits, minFreeSpaces, diskTypes,
     *v.idxFolder,
     volumeNeedleMapKind,
-    strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
+    v.masters, 5, *v.dataCenter, *v.rack,
     v.whiteList,
     *v.fixJpgOrientation, *v.readMode,
     *v.compactionMBPerSecond,
@@ -307,7 +312,7 @@ func (v VolumeServerOptions) isSeparatedPublicPort() bool {
 }

 func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server {
-    grpcPort := *v.port + 10000
+    grpcPort := *v.portGrpc
     grpcL, err := util.NewListener(util.JoinHostPort(*v.bindIp, grpcPort), 0)
     if err != nil {
         glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
@@ -373,7 +378,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
 }

 func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) {
-    listeningAddress := util.JoinHostPort(*v.bindIp,*v.port+20000)
+    listeningAddress := util.JoinHostPort(*v.bindIp, *v.port+20000)
     glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress)
     listener, e := util.NewListener(listeningAddress, 0)
     if e != nil {
@@ -78,46 +78,41 @@ func (wo *WebDavOption) startWebDav() bool {
 }

 // parse filer grpc address
-filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*wo.filer)
-if err != nil {
-    glog.Fatal(err)
-    return false
-}
+filerAddress := pb.ServerAddress(*wo.filer)

 grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

 var cipher bool
 // connect to filer
 for {
-    err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+    err := pb.WithGrpcFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
         resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
         if err != nil {
-            return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+            return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
         }
         cipher = resp.Cipher
         return nil
     })
     if err != nil {
-        glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+        glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
         time.Sleep(time.Second)
     } else {
-        glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+        glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress())
         break
     }
 }

 ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
-    Filer:            *wo.filer,
-    FilerGrpcAddress: filerGrpcAddress,
+    Filer:          filerAddress,
     GrpcDialOption: grpcDialOption,
     Collection:     *wo.collection,
     Replication:    *wo.replication,
     DiskType:       *wo.disk,
     Uid:            uid,
     Gid:            gid,
     Cipher:         cipher,
     CacheDir:       util.ResolvePath(*wo.cacheDir),
     CacheSizeMB:    *wo.cacheSizeMB,
 })
 if webdavServer_err != nil {
     glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
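
With FilerGrpcAddress dropped from WebDavOption, a single pb.ServerAddress now carries enough information to derive the gRPC endpoint on demand via ToGrpcAddress(). A standalone sketch of what that derivation plausibly looks like, using the port+10000 convention this commit applies elsewhere (the helper below is illustrative, not the real pb package):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// toGrpcAddress keeps the host and derives the gRPC port from the HTTP port.
// The +10000 offset is the default convention seen elsewhere in this commit;
// an address created with an explicit grpc port would carry that instead.
func toGrpcAddress(addr string) (string, error) {
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return "", err
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return "", err
	}
	return net.JoinHostPort(host, strconv.Itoa(port+10000)), nil
}

func main() {
	grpcAddr, _ := toGrpcAddress("localhost:8888")
	fmt.Println(grpcAddr) // localhost:18888
}
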
weed/filer.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
+[redis3]
+enabled = true
+address = "localhost:6379"
+password = ""
+database = 0
@@ -3,6 +3,7 @@ package filer
 import (
     "context"
     "fmt"
+    "github.com/chrislusf/seaweedfs/weed/pb"
     "os"
     "strings"
     "time"
@@ -43,16 +44,18 @@ type Filer struct {
     Signature int32
     FilerConf *FilerConf
     RemoteStorage *FilerRemoteStorage
+    UniqueFileId uint32
 }

-func NewFiler(masters []string, grpcDialOption grpc.DialOption,
-    filerHost string, filerGrpcPort uint32, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
+func NewFiler(masters []pb.ServerAddress, grpcDialOption grpc.DialOption,
+    filerHost pb.ServerAddress, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
     f := &Filer{
-        MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, dataCenter, masters),
+        MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, dataCenter, masters),
         fileIdDeletionQueue: util.NewUnboundedQueue(),
         GrpcDialOption: grpcDialOption,
         FilerConf: NewFilerConf(),
         RemoteStorage: NewFilerRemoteStorage(),
+        UniqueFileId: uint32(util.RandomInt32()),
     }
     f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, notifyFn)
     f.metaLogCollection = collection
@@ -63,7 +66,7 @@ func NewFiler(masters []string, grpcDialOption grpc.DialOption,
     return f
 }

-func (f *Filer) AggregateFromPeers(self string, filers []string) {
+func (f *Filer) AggregateFromPeers(self pb.ServerAddress, filers []pb.ServerAddress) {

     // set peers
     found := false
@@ -110,7 +113,7 @@ func (f *Filer) GetStore() (store FilerStore) {
     return f.Store
 }

-func (fs *Filer) GetMaster() string {
+func (fs *Filer) GetMaster() pb.ServerAddress {
     return fs.MasterClient.GetMaster()
 }

@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "io"
+    "math"
     "strings"
     "time"

@@ -92,8 +93,8 @@ func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {

 startTime, stopTime = startTime.UTC(), stopTime.UTC()

-targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
-    startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x", SystemLogDir,
+    startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFileId,
     // startTime.Second(), startTime.Nanosecond(),
 )

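
Suffixing the segment name with the filer's random UniqueFileId means two filers flushing the same minute write distinct files instead of appending to one shared segment. An example of the resulting layout (the SystemLogDir value shown is illustrative):

package main

import "fmt"

func main() {
	// Layout after the change: <SystemLogDir>/<yyyy-mm-dd>/<hh-mm>.<UniqueFileId as %08x>
	systemLogDir := "/topics/.system/log" // illustrative value
	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x",
		systemLogDir, 2021, 9, 13, 5, 7, uint32(0xc0de))
	fmt.Println(targetFile) // /topics/.system/log/2021-09-13/05-07.0000c0de
}
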
@@ -111,7 +112,7 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(

 startTime = startTime.UTC()
 startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
-startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute())

 sizeBuf := make([]byte, 4)
 startTsNs := startTime.UnixNano()
@@ -122,14 +123,15 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
 }
 for _, dayEntry := range dayEntries {
     // println("checking day", dayEntry.FullPath)
-    hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "", "")
+    hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
     if listHourMinuteErr != nil {
         return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
     }
     for _, hourMinuteEntry := range hourMinuteEntries {
         // println("checking hh-mm", hourMinuteEntry.FullPath)
         if dayEntry.Name() == startDate {
-            if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
+            hourMinute := util.FileNameBase(hourMinuteEntry.Name())
+            if strings.Compare(hourMinute, startHourMinute) < 0 {
                 continue
             }
         }
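
Because segment names now end in the per-filer suffix, the comparison first trims it with util.FileNameBase so the lexicographic check against the plain "hh-mm" startHourMinute still holds. A sketch of what FileNameBase presumably does (drop everything from the last dot):

package main

import (
	"fmt"
	"strings"
)

// fileNameBase sketches the util.FileNameBase behavior the code above relies
// on: "05-07.0000c0de" -> "05-07"; names without a dot pass through unchanged.
func fileNameBase(name string) string {
	if idx := strings.LastIndexByte(name, '.'); idx > 0 {
		return name[:idx]
	}
	return name
}

func main() {
	fmt.Println(fileNameBase("05-07.0000c0de")) // 05-07
	fmt.Println(fileNameBase("05-07"))          // 05-07
}
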
@@ -33,6 +33,8 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {
         Gid: OS_GID,
     },
 }
+} else if err != nil {
+    return fmt.Errorf("find %s: %v", fullpath, err)
 } else {
     offset = int64(TotalSize(entry.Chunks))
 }
@@ -23,15 +23,15 @@ func splitPattern(pattern string) (prefix string, restPattern string) {
 // For now, prefix and namePattern are mutually exclusive
 func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string) (entries []*Entry, hasMore bool, err error) {

+    if limit > math.MaxInt32-1 {
+        limit = math.MaxInt32 - 1
+    }
+
     _, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, namePatternExclude, func(entry *Entry) bool {
         entries = append(entries, entry)
         return true
     })

-    if limit == math.MaxInt64 {
-        limit = math.MaxInt64 - 1
-    }
-
     hasMore = int64(len(entries)) >= limit+1
     if hasMore {
         entries = entries[:limit]
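
The clamp now runs before limit+1 is handed to the stores, replacing the old after-the-fact MaxInt64 guard; several store backends narrow list counts to 32 bits, so keeping limit+1 inside int32 range avoids a wraparound. A small demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	limit := int64(math.MaxInt64) // an effectively unbounded listing request
	if limit > math.MaxInt32-1 {  // the clamp added in this commit
		limit = math.MaxInt32 - 1
	}
	fmt.Println(int32(limit + 1)) // 2147483647: still representable, no wrap
}
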
@@ -3,6 +3,7 @@ package filer
 import (
     "context"
     "github.com/chrislusf/seaweedfs/weed/util"
+    "math"
     "strings"
 )

@@ -120,6 +121,10 @@ func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Conte

 newFullPath := t.translatePath(dirPath)

+if limit > math.MaxInt32-1 {
+    limit = math.MaxInt32 - 1
+}
+
 return t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
     entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath
     return eachEntryFunc(entry)
@@ -4,6 +4,7 @@ import (
     "context"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/viant/ptrie"
+    "math"
     "strings"
     "time"

@@ -248,6 +249,9 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
 defer func() {
     stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds())
 }()
+if limit > math.MaxInt32-1 {
+    limit = math.MaxInt32 - 1
+}
 glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
 lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, eachEntryFunc)
 if err == ErrUnsupportedListDirectoryPrefixed {
@@ -13,7 +13,7 @@ import (
 )

 func TestCreateAndFind(t *testing.T) {
-    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
     defer os.RemoveAll(dir)
     store := &LevelDBStore{}
@@ -67,7 +67,7 @@ func TestCreateAndFind(t *testing.T) {
 }

 func TestEmptyRoot(t *testing.T) {
-    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
     defer os.RemoveAll(dir)
     store := &LevelDBStore{}
@@ -90,7 +90,7 @@ func TestEmptyRoot(t *testing.T) {
 }

 func BenchmarkInsertEntry(b *testing.B) {
-    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
     defer os.RemoveAll(dir)
     store := &LevelDBStore{}
@@ -11,7 +11,7 @@ import (
 )

 func TestCreateAndFind(t *testing.T) {
-    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
     defer os.RemoveAll(dir)
     store := &LevelDB2Store{}
@@ -65,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {
 }

 func TestEmptyRoot(t *testing.T) {
-    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
     defer os.RemoveAll(dir)
     store := &LevelDB2Store{}
@@ -11,7 +11,7 @@ import (
 )

 func TestCreateAndFind(t *testing.T) {
-    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
     defer os.RemoveAll(dir)
     store := &LevelDB3Store{}
@@ -65,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {
 }

 func TestEmptyRoot(t *testing.T) {
-    testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
+    testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
     dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
     defer os.RemoveAll(dir)
     store := &LevelDB3Store{}
@@ -18,7 +18,7 @@ import (
 )

 type MetaAggregator struct {
-    filers []string
+    filers []pb.ServerAddress
     grpcDialOption grpc.DialOption
     MetaLogBuffer *log_buffer.LogBuffer
     // notifying clients
@@ -28,7 +28,7 @@ type MetaAggregator struct {

 // MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk.
 // The old data comes from what each LocalMetadata persisted on disk.
-func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator {
+func NewMetaAggregator(filers []pb.ServerAddress, grpcDialOption grpc.DialOption) *MetaAggregator {
     t := &MetaAggregator{
         filers: filers,
         grpcDialOption: grpcDialOption,
@@ -40,13 +40,13 @@ func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAgg
     return t
 }

-func (ma *MetaAggregator) StartLoopSubscribe(f *Filer, self string) {
+func (ma *MetaAggregator) StartLoopSubscribe(f *Filer, self pb.ServerAddress) {
     for _, filer := range ma.filers {
         go ma.subscribeToOneFiler(f, self, filer)
     }
 }

-func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string) {
+func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress) {

     /*
     Each filer reads the "filer.store.id", which is the store's signature when filer starts.
@@ -110,7 +110,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
 }
 dir := event.Directory
 // println("received meta change", dir, "size", len(data))
-ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, 0)
+ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
 if maybeReplicateMetadataChange != nil {
     maybeReplicateMetadataChange(event)
 }
@@ -123,7 +123,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 stream, err := client.SubscribeLocalMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
-    ClientName: "filer:" + self,
+    ClientName: "filer:" + string(self),
     PathPrefix: "/",
     SinceNs: lastTsNs,
 })
@@ -156,7 +156,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
     }
 }

-func (ma *MetaAggregator) readFilerStoreSignature(peer string) (sig int32, err error) {
+func (ma *MetaAggregator) readFilerStoreSignature(peer pb.ServerAddress) (sig int32, err error) {
     err = pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
         resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
         if err != nil {
@@ -172,7 +172,7 @@ const (
     MetaOffsetPrefix = "Meta"
 )

-func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32) (lastTsNs int64, err error) {
+func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignature int32) (lastTsNs int64, err error) {

     key := []byte(MetaOffsetPrefix + "xxxx")
     util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
@@ -195,7 +195,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
     return
 }

-func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int32, lastTsNs int64) (err error) {
+func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSignature int32, lastTsNs int64) (err error) {

     key := []byte(MetaOffsetPrefix + "xxxx")
     util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature))
@@ -120,6 +120,8 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full
 if err != nil {
     return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
 }
+// not efficient, but need to remove if it is a directory
+store.Client.Del(ctx, genDirectoryListKey(string(path)))
 }

 return nil
@@ -144,6 +144,8 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
 if err != nil {
     return fmt.Errorf("DeleteFolderChildren %s in parent dir: %v", fullpath, err)
 }
+// not efficient, but need to remove if it is a directory
+store.Client.Del(ctx, genDirectoryListKey(string(path)))
 }

 return nil
weed/filer/redis3/ItemList.go (new file, 507 lines)
@@ -0,0 +1,507 @@
+package redis3
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "github.com/chrislusf/seaweedfs/weed/util/skiplist"
+    "github.com/go-redis/redis/v8"
+)
+
+type ItemList struct {
+    skipList  *skiplist.SkipList
+    batchSize int
+    client    redis.UniversalClient
+    prefix    string
+}
+
+func newItemList(client redis.UniversalClient, prefix string, store skiplist.ListStore, batchSize int) *ItemList {
+    return &ItemList{
+        skipList:  skiplist.New(store),
+        batchSize: batchSize,
+        client:    client,
+        prefix:    prefix,
+    }
+}
+
+/*
+Be reluctant to create new nodes. Try to fit into either previous node or next node.
+Prefer to add to previous node.
+
+There are multiple cases after finding the name for greater or equal node
+1. found and node.Key == name
+   The node contains a batch with leading key the same as the name
+   nothing to do
+2. no such node found or node.Key > name
+
+   if no such node found
+     prevNode = list.LargestNode
+
+   // case 2.1
+   if previousNode contains name
+     nothing to do
+
+   // prefer to add to previous node
+   if prevNode != nil {
+     // case 2.2
+     if prevNode has capacity
+       prevNode.add name, and save
+       return
+     // case 2.3
+     split prevNode by name
+   }
+
+   // case 2.4
+   // merge into next node. Avoid too many nodes if adding data in reverse order.
+   if nextNode is not nil and nextNode has capacity
+     delete nextNode.Key
+     nextNode.Key = name
+     nextNode.batch.add name
+     insert nodeNode.Key
+     return
+
+   // case 2.5
+   if prevNode is nil
+     insert new node with key = name, value = batch{name}
+     return
+*/
+
+func (nl *ItemList) canAddMember(node *skiplist.SkipListElementReference, name string) (alreadyContains bool, nodeSize int, err error) {
+    ctx := context.Background()
+    pipe := nl.client.TxPipeline()
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    countOperation := pipe.ZLexCount(ctx, key, "-", "+")
+    scoreOperationt := pipe.ZScore(ctx, key, name)
+    if _, err = pipe.Exec(ctx); err != nil && err != redis.Nil {
+        return false, 0, err
+    }
+    if err == redis.Nil {
+        err = nil
+    }
+    alreadyContains = scoreOperationt.Err() == nil
+    nodeSize = int(countOperation.Val())
+    return
+}
+
+func (nl *ItemList) WriteName(name string) error {
+
+    lookupKey := []byte(name)
+    prevNode, nextNode, found, err := nl.skipList.FindGreaterOrEqual(lookupKey)
+    if err != nil {
+        return err
+    }
+    // case 1: the name already exists as one leading key in the batch
+    if found && bytes.Compare(nextNode.Key, lookupKey) == 0 {
+        return nil
+    }
+
+    var prevNodeReference *skiplist.SkipListElementReference
+    if !found {
+        prevNodeReference = nl.skipList.GetLargestNodeReference()
+    }
+
+    if nextNode != nil && prevNode == nil {
+        prevNodeReference = nextNode.Prev
+    }
+
+    if prevNodeReference != nil {
+        alreadyContains, nodeSize, err := nl.canAddMember(prevNodeReference, name)
+        if err != nil {
+            return err
+        }
+        if alreadyContains {
+            // case 2.1
+            return nil
+        }
+
+        // case 2.2
+        if nodeSize < nl.batchSize {
+            return nl.NodeAddMember(prevNodeReference, name)
+        }
+
+        // case 2.3
+        x := nl.NodeInnerPosition(prevNodeReference, name)
+        y := nodeSize - x
+        addToX := x <= y
+        // add to a new node
+        if x == 0 || y == 0 {
+            if err := nl.ItemAdd(lookupKey, 0, name); err != nil {
+                return err
+            }
+            return nil
+        }
+        if addToX {
+            // collect names before name, add them to X
+            namesToX, err := nl.NodeRangeBeforeExclusive(prevNodeReference, name)
+            if err != nil {
+                return nil
+            }
+            // delete skiplist reference to old node
+            if _, err := nl.skipList.DeleteByKey(prevNodeReference.Key); err != nil {
+                return err
+            }
+            // add namesToY and name to a new X
+            namesToX = append(namesToX, name)
+            if err := nl.ItemAdd([]byte(namesToX[0]), 0, namesToX...); err != nil {
+                return nil
+            }
+            // remove names less than name from current Y
+            if err := nl.NodeDeleteBeforeExclusive(prevNodeReference, name); err != nil {
+                return nil
+            }
+
+            // point skip list to current Y
+            if err := nl.ItemAdd(lookupKey, prevNodeReference.ElementPointer); err != nil {
+                return nil
+            }
+            return nil
+        } else {
+            // collect names after name, add them to Y
+            namesToY, err := nl.NodeRangeAfterExclusive(prevNodeReference, name)
+            if err != nil {
+                return nil
+            }
+            // add namesToY and name to a new Y
+            namesToY = append(namesToY, name)
+            if err := nl.ItemAdd(lookupKey, 0, namesToY...); err != nil {
+                return nil
+            }
+            // remove names after name from current X
+            if err := nl.NodeDeleteAfterExclusive(prevNodeReference, name); err != nil {
+                return nil
+            }
+            return nil
+        }
+
+    }
+
+    // case 2.4
+    if nextNode != nil {
+        nodeSize := nl.NodeSize(nextNode.Reference())
+        if nodeSize < nl.batchSize {
+            if id, err := nl.skipList.DeleteByKey(nextNode.Key); err != nil {
+                return err
+            } else {
+                if err := nl.ItemAdd(lookupKey, id, name); err != nil {
+                    return err
+                }
+            }
+            return nil
+        }
+    }
+
+    // case 2.5
+    // now prevNode is nil
+    return nl.ItemAdd(lookupKey, 0, name)
+}
+
+/*
+// case 1: exists in nextNode
+if nextNode != nil && nextNode.Key == name {
+    remove from nextNode, update nextNode
+    // TODO: merge with prevNode if possible?
+    return
+}
+if nextNode is nil
+    prevNode = list.Largestnode
+if prevNode == nil and nextNode.Prev != nil
+    prevNode = load(nextNode.Prev)
+
+// case 2: does not exist
+// case 2.1
+if prevNode == nil {
+    return
+}
+// case 2.2
+if prevNameBatch does not contain name {
+    return
+}
+
+// case 3
+delete from prevNameBatch
+if prevNameBatch + nextNode < capacityList
+    // case 3.1
+    merge
+else
+    // case 3.2
+    update prevNode
+*/
+func (nl *ItemList) DeleteName(name string) error {
+    lookupKey := []byte(name)
+    prevNode, nextNode, found, err := nl.skipList.FindGreaterOrEqual(lookupKey)
+    if err != nil {
+        return err
+    }
+
+    // case 1
+    if found && bytes.Compare(nextNode.Key, lookupKey) == 0 {
+        if _, err := nl.skipList.DeleteByKey(nextNode.Key); err != nil {
+            return err
+        }
+        if err := nl.NodeDeleteMember(nextNode.Reference(), name); err != nil {
+            return err
+        }
+        minName := nl.NodeMin(nextNode.Reference())
+        if minName == "" {
+            return nl.NodeDelete(nextNode.Reference())
+        }
+        return nl.ItemAdd([]byte(minName), nextNode.Id)
+    }
+
+    if !found {
+        prevNode, err = nl.skipList.GetLargestNode()
+        if err != nil {
+            return err
+        }
+    }
+
+    if nextNode != nil && prevNode == nil {
+        prevNode, err = nl.skipList.LoadElement(nextNode.Prev)
+        if err != nil {
+            return err
+        }
+    }
+
+    // case 2
+    if prevNode == nil {
+        // case 2.1
+        return nil
+    }
+    if !nl.NodeContainsItem(prevNode.Reference(), name) {
+        return nil
+    }
+
+    // case 3
+    if err := nl.NodeDeleteMember(prevNode.Reference(), name); err != nil {
+        return err
+    }
+    prevSize := nl.NodeSize(prevNode.Reference())
+    if prevSize == 0 {
+        if _, err := nl.skipList.DeleteByKey(prevNode.Key); err != nil {
+            return err
+        }
+        return nil
+    }
+    nextSize := nl.NodeSize(nextNode.Reference())
+    if nextSize > 0 && prevSize+nextSize < nl.batchSize {
+        // case 3.1 merge nextNode and prevNode
+        if _, err := nl.skipList.DeleteByKey(nextNode.Key); err != nil {
+            return err
+        }
+        nextNames, err := nl.NodeRangeBeforeExclusive(nextNode.Reference(), "")
+        if err != nil {
+            return err
+        }
+        if err := nl.NodeAddMember(prevNode.Reference(), nextNames...); err != nil {
+            return err
+        }
+        return nl.NodeDelete(nextNode.Reference())
+    } else {
+        // case 3.2 update prevNode
+        // no action to take
+        return nil
+    }
+
+    return nil
+}
+
+func (nl *ItemList) ListNames(startFrom string, visitNamesFn func(name string) bool) error {
+    lookupKey := []byte(startFrom)
+    prevNode, nextNode, found, err := nl.skipList.FindGreaterOrEqual(lookupKey)
+    if err != nil {
+        return err
+    }
+    if found && bytes.Compare(nextNode.Key, lookupKey) == 0 {
+        prevNode = nil
+    }
+    if !found {
+        prevNode, err = nl.skipList.GetLargestNode()
+        if err != nil {
+            return err
+        }
+    }
+
+    if prevNode != nil {
+        if !nl.NodeScanIncluseiveAfter(prevNode.Reference(), startFrom, visitNamesFn) {
+            return nil
+        }
+    }
+
+    for nextNode != nil {
+        if !nl.NodeScanIncluseiveAfter(nextNode.Reference(), startFrom, visitNamesFn) {
+            return nil
+        }
+        nextNode, err = nl.skipList.LoadElement(nextNode.Next[0])
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func (nl *ItemList) RemoteAllListElement() error {
+
+    t := nl.skipList
+
+    nodeRef := t.StartLevels[0]
+    for nodeRef != nil {
+        node, err := t.LoadElement(nodeRef)
+        if err != nil {
+            return err
+        }
+        if node == nil {
+            return nil
+        }
+        if err := t.DeleteElement(node); err != nil {
+            return err
+        }
+        if err := nl.NodeDelete(node.Reference()); err != nil {
+            return err
+        }
+        nodeRef = node.Next[0]
+    }
+    return nil
+
+}
+
+func (nl *ItemList) NodeContainsItem(node *skiplist.SkipListElementReference, item string) bool {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    _, err := nl.client.ZScore(context.Background(), key, item).Result()
+    if err == redis.Nil {
+        return false
+    }
+    if err == nil {
+        return true
+    }
+    return false
+}
+
+func (nl *ItemList) NodeSize(node *skiplist.SkipListElementReference) int {
+    if node == nil {
+        return 0
+    }
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    return int(nl.client.ZLexCount(context.Background(), key, "-", "+").Val())
+}
+
+func (nl *ItemList) NodeAddMember(node *skiplist.SkipListElementReference, names ...string) error {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    var members []*redis.Z
+    for _, name := range names {
+        members = append(members, &redis.Z{
+            Score:  0,
+            Member: name,
+        })
+    }
+    return nl.client.ZAddNX(context.Background(), key, members...).Err()
+}
+func (nl *ItemList) NodeDeleteMember(node *skiplist.SkipListElementReference, name string) error {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    return nl.client.ZRem(context.Background(), key, name).Err()
+}
+
+func (nl *ItemList) NodeDelete(node *skiplist.SkipListElementReference) error {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    return nl.client.Del(context.Background(), key).Err()
+}
+
+func (nl *ItemList) NodeInnerPosition(node *skiplist.SkipListElementReference, name string) int {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    return int(nl.client.ZLexCount(context.Background(), key, "-", "("+name).Val())
+}
+
+func (nl *ItemList) NodeMin(node *skiplist.SkipListElementReference) string {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    slice := nl.client.ZRangeByLex(context.Background(), key, &redis.ZRangeBy{
+        Min:    "-",
+        Max:    "+",
+        Offset: 0,
+        Count:  1,
+    }).Val()
+    if len(slice) > 0 {
+        s := slice[0]
+        return s
+    }
+    return ""
+}
+
+func (nl *ItemList) NodeScanIncluseiveAfter(node *skiplist.SkipListElementReference, startFrom string, visitNamesFn func(name string) bool) bool {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    if startFrom == "" {
+        startFrom = "-"
+    } else {
+        startFrom = "[" + startFrom
+    }
+    names := nl.client.ZRangeByLex(context.Background(), key, &redis.ZRangeBy{
+        Min: startFrom,
+        Max: "+",
+    }).Val()
+    for _, n := range names {
+        if !visitNamesFn(n) {
+            return false
+        }
+    }
+    return true
+}
+
+func (nl *ItemList) NodeRangeBeforeExclusive(node *skiplist.SkipListElementReference, stopAt string) ([]string, error) {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    if stopAt == "" {
+        stopAt = "+"
+    } else {
+        stopAt = "(" + stopAt
+    }
+    return nl.client.ZRangeByLex(context.Background(), key, &redis.ZRangeBy{
+        Min: "-",
+        Max: stopAt,
+    }).Result()
+}
+func (nl *ItemList) NodeRangeAfterExclusive(node *skiplist.SkipListElementReference, startFrom string) ([]string, error) {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    if startFrom == "" {
+        startFrom = "-"
+    } else {
+        startFrom = "(" + startFrom
+    }
+    return nl.client.ZRangeByLex(context.Background(), key, &redis.ZRangeBy{
+        Min: startFrom,
+        Max: "+",
+    }).Result()
+}
+
+func (nl *ItemList) NodeDeleteBeforeExclusive(node *skiplist.SkipListElementReference, stopAt string) error {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    if stopAt == "" {
+        stopAt = "+"
+    } else {
+        stopAt = "(" + stopAt
+    }
+    return nl.client.ZRemRangeByLex(context.Background(), key, "-", stopAt).Err()
+}
+func (nl *ItemList) NodeDeleteAfterExclusive(node *skiplist.SkipListElementReference, startFrom string) error {
+    key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer)
+    if startFrom == "" {
+        startFrom = "-"
+    } else {
+        startFrom = "(" + startFrom
+    }
+    return nl.client.ZRemRangeByLex(context.Background(), key, startFrom, "+").Err()
+}
+
+func (nl *ItemList) ItemAdd(lookupKey []byte, idIfKnown int64, names ...string) error {
+    if id, err := nl.skipList.InsertByKey(lookupKey, idIfKnown, nil); err != nil {
+        return err
+    } else {
+        if len(names) > 0 {
+            return nl.NodeAddMember(&skiplist.SkipListElementReference{
+                ElementPointer: id,
+                Key:            lookupKey,
+            }, names...)
+        }
+    }
+    return nil
+}
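
All ItemList members above are stored with score 0, so the ZRangeByLex/ZRemRangeByLex calls see pure lexicographic order: "-" and "+" are the open ends, "[x" is an inclusive bound, and "(x" an exclusive one. A standalone go-redis v8 example of the same range syntax (assumes a local Redis):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local Redis

	// All members share score 0, exactly like the ItemList node batches.
	rdb.ZAddNX(ctx, "demo", &redis.Z{Member: "apple"}, &redis.Z{Member: "banana"}, &redis.Z{Member: "cherry"})

	// "(banana" is exclusive: members strictly after "banana", as NodeRangeAfterExclusive uses.
	after, _ := rdb.ZRangeByLex(ctx, "demo", &redis.ZRangeBy{Min: "(banana", Max: "+"}).Result()
	fmt.Println(after) // [cherry]

	// "[banana" is inclusive, as NodeScanIncluseiveAfter uses.
	from, _ := rdb.ZRangeByLex(ctx, "demo", &redis.ZRangeBy{Min: "[banana", Max: "+"}).Result()
	fmt.Println(from) // [banana cherry]
}
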
weed/filer/redis3/item_list_serde.go (new file, 75 lines)
@@ -0,0 +1,75 @@
|
package redis3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||||
|
"github.com/chrislusf/seaweedfs/weed/util/skiplist"
|
||||||
|
"github.com/go-redis/redis/v8"
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
func LoadItemList(data []byte, prefix string, client redis.UniversalClient, store skiplist.ListStore, batchSize int) *ItemList {
|
||||||
|
|
||||||
|
nl := &ItemList{
|
||||||
|
skipList: skiplist.New(store),
|
||||||
|
batchSize: batchSize,
|
||||||
|
client: client,
|
||||||
|
prefix: prefix,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nl
|
||||||
|
}
|
||||||
|
|
||||||
|
message := &skiplist.SkipListProto{}
|
||||||
|
if err := proto.Unmarshal(data, message); err != nil {
|
||||||
|
glog.Errorf("loading skiplist: %v", err)
|
||||||
|
}
|
||||||
|
nl.skipList.MaxNewLevel = int(message.MaxNewLevel)
|
||||||
|
nl.skipList.MaxLevel = int(message.MaxLevel)
|
||||||
|
for i, ref := range message.StartLevels {
|
||||||
|
nl.skipList.StartLevels[i] = &skiplist.SkipListElementReference{
|
||||||
|
ElementPointer: ref.ElementPointer,
|
||||||
|
Key: ref.Key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i, ref := range message.EndLevels {
|
||||||
|
nl.skipList.EndLevels[i] = &skiplist.SkipListElementReference{
|
||||||
|
ElementPointer: ref.ElementPointer,
|
||||||
|
Key: ref.Key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nl
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nl *ItemList) HasChanges() bool {
|
||||||
|
return nl.skipList.HasChanges
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nl *ItemList) ToBytes() []byte {
|
||||||
|
message := &skiplist.SkipListProto{}
|
||||||
|
message.MaxNewLevel = int32(nl.skipList.MaxNewLevel)
|
||||||
|
message.MaxLevel = int32(nl.skipList.MaxLevel)
|
||||||
|
for _, ref := range nl.skipList.StartLevels {
|
||||||
|
if ref == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
message.StartLevels = append(message.StartLevels, &skiplist.SkipListElementReference{
|
||||||
|
ElementPointer: ref.ElementPointer,
|
||||||
|
Key: ref.Key,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, ref := range nl.skipList.EndLevels {
|
||||||
|
if ref == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
message.EndLevels = append(message.EndLevels, &skiplist.SkipListElementReference{
|
||||||
|
ElementPointer: ref.ElementPointer,
|
||||||
|
Key: ref.Key,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
data, err := proto.Marshal(message)
|
||||||
|
if err != nil {
|
||||||
|
glog.Errorf("marshal skiplist: %v", err)
|
||||||
|
}
|
||||||
|
return data
|
||||||
|
}
|
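Taken together, LoadItemList and ToBytes mean only the skip-list skeleton is serialized; element bodies stay in Redis. A minimal sketch of the intended persist-and-reload cycle (the key name "/demo" and batch size are illustrative, not from this diff):

	store := newSkipListElementStore("/demo", client) // element bodies in Redis
	nameList := LoadItemList(nil, "/demo", client, store, 1000)

	if err := nameList.WriteName("a.txt"); err != nil {
		// handle write error
	}
	if nameList.HasChanges() {
		serialized := nameList.ToBytes() // protobuf skeleton only
		// ... store `serialized` wherever the directory record lives ...
		reloaded := LoadItemList(serialized, "/demo", client, store, 1000)
		_ = reloaded
	}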
138 weed/filer/redis3/kv_directory_children.go Normal file
@@ -0,0 +1,138 @@
package redis3

import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/go-redis/redis/v8"
)

const maxNameBatchSizeLimit = 1000000

func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key string, name string) error {

	// serialize writers to this directory with a distributed lock
	mutex := redisStore.redsync.NewMutex(key + "lock")
	if err := mutex.Lock(); err != nil {
		return fmt.Errorf("lock %s: %v", key, err)
	}
	defer func() {
		mutex.Unlock()
	}()

	client := redisStore.Client
	data, err := client.Get(ctx, key).Result()
	if err != nil {
		if err != redis.Nil {
			return fmt.Errorf("read %s: %v", key, err)
		}
	}
	store := newSkipListElementStore(key, client)
	nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)

	if err := nameList.WriteName(name); err != nil {
		glog.Errorf("add %s %s: %v", key, name, err)
		return err
	}

	if !nameList.HasChanges() {
		return nil
	}

	if err := client.Set(ctx, key, nameList.ToBytes(), 0).Err(); err != nil {
		return err
	}

	return nil
}

func removeChild(ctx context.Context, redisStore *UniversalRedis3Store, key string, name string) error {

	// serialize writers to this directory with a distributed lock
	mutex := redisStore.redsync.NewMutex(key + "lock")
	if err := mutex.Lock(); err != nil {
		return fmt.Errorf("lock %s: %v", key, err)
	}
	defer mutex.Unlock()

	client := redisStore.Client
	data, err := client.Get(ctx, key).Result()
	if err != nil {
		if err != redis.Nil {
			return fmt.Errorf("read %s: %v", key, err)
		}
	}
	store := newSkipListElementStore(key, client)
	nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)

	if err := nameList.DeleteName(name); err != nil {
		return err
	}
	if !nameList.HasChanges() {
		return nil
	}

	if err := client.Set(ctx, key, nameList.ToBytes(), 0).Err(); err != nil {
		return err
	}

	return nil
}

func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key string, onDeleteFn func(name string) error) error {

	// serialize writers to this directory with a distributed lock
	mutex := redisStore.redsync.NewMutex(key + "lock")
	if err := mutex.Lock(); err != nil {
		return fmt.Errorf("lock %s: %v", key, err)
	}
	defer mutex.Unlock()

	client := redisStore.Client
	data, err := client.Get(ctx, key).Result()
	if err != nil {
		if err != redis.Nil {
			return fmt.Errorf("read %s: %v", key, err)
		}
	}
	store := newSkipListElementStore(key, client)
	nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)

	if err = nameList.ListNames("", func(name string) bool {
		if err := onDeleteFn(name); err != nil {
			glog.Errorf("delete %s child %s: %v", key, name, err)
			return false
		}
		return true
	}); err != nil {
		return err
	}

	if err = nameList.RemoteAllListElement(); err != nil {
		return err
	}

	return nil

}

func listChildren(ctx context.Context, redisStore *UniversalRedis3Store, key string, startFileName string, eachFn func(name string) bool) error {
	client := redisStore.Client
	data, err := client.Get(ctx, key).Result()
	if err != nil {
		if err != redis.Nil {
			return fmt.Errorf("read %s: %v", key, err)
		}
	}
	store := newSkipListElementStore(key, client)
	nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)

	if err = nameList.ListNames(startFileName, func(name string) bool {
		return eachFn(name)
	}); err != nil {
		return err
	}

	return nil

}
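All three mutators above follow the same lock / read / mutate / write-back shape, guarded by a redsync distributed mutex so concurrent filers cannot clobber each other's serialized skeletons. The pattern in isolation (the directory key is illustrative):

	mutex := redisStore.redsync.NewMutex("/mydir\x00" + "lock") // one mutex per directory key
	if err := mutex.Lock(); err != nil {
		return fmt.Errorf("lock: %v", err)
	}
	defer mutex.Unlock()
	// read the serialized list -> apply the change -> write back if HasChanges()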
210 weed/filer/redis3/kv_directory_children_test.go Normal file
@@ -0,0 +1,210 @@
package redis3

import (
	"context"
	"fmt"
	"github.com/go-redis/redis/v8"
	"github.com/stvp/tempredis"
	"strconv"
	"testing"
	"time"
)

var names = []string{
	"cassandra.in.sh",
	"cassandra",
	"debug-cql.bat",
	"nodetool",
	"nodetool.bat",
	"source-conf.ps1",
	"sstableloader",
	"sstableloader.bat",
	"sstablescrub",
	"sstablescrub.bat",
	"sstableupgrade",
	"sstableupgrade.bat",
	"sstableutil",
	"sstableutil.bat",
	"sstableverify",
	"sstableverify.bat",
	"stop-server",
	"stop-server.bat",
	"stop-server.ps1",
	"cassandra.in.bat",
	"cqlsh.py",
	"cqlsh",
	"cassandra.ps1",
	"cqlsh.bat",
	"debug-cql",
	"cassandra.bat",
}

// The "y" prefix hides this from `go test`'s Test* matching; rename to
// TestNameList to run it against a throwaway tempredis instance.
func yTestNameList(t *testing.T) {
	server, err := tempredis.Start(tempredis.Config{})
	if err != nil {
		panic(err)
	}
	defer server.Term()

	client := redis.NewClient(&redis.Options{
		Network: "unix",
		Addr:    server.Socket(),
	})

	store := newSkipListElementStore("/yyy/bin", client)
	var data []byte
	for _, name := range names {
		nameList := LoadItemList(data, "/yyy/bin", client, store, maxNameBatchSizeLimit)
		nameList.WriteName(name)

		nameList.ListNames("", func(name string) bool {
			println(name)
			return true
		})

		if nameList.HasChanges() {
			data = nameList.ToBytes()
		}
		println()
	}

	nameList := LoadItemList(data, "/yyy/bin", client, store, maxNameBatchSizeLimit)
	nameList.ListNames("", func(name string) bool {
		println(name)
		return true
	})

}

// Hidden from `go test -bench` by the "y" prefix.
func yBenchmarkNameList(b *testing.B) {

	server, err := tempredis.Start(tempredis.Config{})
	if err != nil {
		panic(err)
	}
	defer server.Term()

	client := redis.NewClient(&redis.Options{
		Network: "unix",
		Addr:    server.Socket(),
	})

	store := newSkipListElementStore("/yyy/bin", client)
	var data []byte
	for i := 0; i < b.N; i++ {
		nameList := LoadItemList(data, "/yyy/bin", client, store, maxNameBatchSizeLimit)

		nameList.WriteName(strconv.Itoa(i) + "namexxxxxxxxxxxxxxxxxxx")

		if nameList.HasChanges() {
			data = nameList.ToBytes()
		}
	}
}

func BenchmarkRedis(b *testing.B) {

	server, err := tempredis.Start(tempredis.Config{})
	if err != nil {
		panic(err)
	}
	defer server.Term()

	client := redis.NewClient(&redis.Options{
		Network: "unix",
		Addr:    server.Socket(),
	})

	for i := 0; i < b.N; i++ {
		client.ZAddNX(context.Background(), "/yyy/bin", &redis.Z{Score: 0, Member: strconv.Itoa(i) + "namexxxxxxxxxxxxxxxxxxx"})
	}
}

// The "x"-prefixed functions below target a live Redis on localhost:6379
// and are likewise hidden from `go test` by their names.
func xTestNameListAdd(t *testing.T) {

	server, err := tempredis.Start(tempredis.Config{})
	if err != nil {
		panic(err)
	}
	defer server.Term()

	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})

	client.FlushAll(context.Background())

	N := 364800

	ts0 := time.Now()
	store := newSkipListElementStore("/y", client)
	var data []byte
	nameList := LoadItemList(data, "/y", client, store, 100000)
	for i := 0; i < N; i++ {
		nameList.WriteName(fmt.Sprintf("%8d", i))
	}

	ts1 := time.Now()

	for i := 0; i < N; i++ {
		client.ZAddNX(context.Background(), "/x", &redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)})
	}
	ts2 := time.Now()

	fmt.Printf("%v %v", ts1.Sub(ts0), ts2.Sub(ts1))

	/*
		keys := client.Keys(context.Background(), "/*m").Val()
		for _, k := range keys {
			println("key", k)
			for i, v := range client.ZRangeByLex(context.Background(), k, &redis.ZRangeBy{
				Min: "-",
				Max: "+",
			}).Val() {
				println(" ", i, v)
			}
		}
	*/
}

func xBenchmarkNameList(b *testing.B) {

	server, err := tempredis.Start(tempredis.Config{})
	if err != nil {
		panic(err)
	}
	defer server.Term()

	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})

	store := newSkipListElementStore("/yyy/bin", client)
	var data []byte
	for i := 0; i < b.N; i++ {
		nameList := LoadItemList(data, "/yyy/bin", client, store, maxNameBatchSizeLimit)

		nameList.WriteName(fmt.Sprintf("name %8d", i))

		if nameList.HasChanges() {
			data = nameList.ToBytes()
		}
	}
}

func xBenchmarkRedis(b *testing.B) {

	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})

	for i := 0; i < b.N; i++ {
		client.ZAddNX(context.Background(), "/xxx/bin", &redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)})
	}
}
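A note on the naming above: functions prefixed with x or y do not match go test's Test*/Benchmark* patterns, so only BenchmarkRedis is picked up by default; the prefixed ones appear intended for manual runs against a live Redis. To run the one active benchmark alone, something like:

	go test -run=NONE -bench=BenchmarkRedis ./weed/filer/redis3/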
45 weed/filer/redis3/redis_cluster_store.go Normal file
@@ -0,0 +1,45 @@
package redis3

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/go-redis/redis/v8"
	"github.com/go-redsync/redsync/v4"
	"github.com/go-redsync/redsync/v4/redis/goredis/v8"
)

func init() {
	filer.Stores = append(filer.Stores, &RedisCluster3Store{})
}

type RedisCluster3Store struct {
	UniversalRedis3Store
}

func (store *RedisCluster3Store) GetName() string {
	return "redis_cluster3"
}

func (store *RedisCluster3Store) Initialize(configuration util.Configuration, prefix string) (err error) {

	configuration.SetDefault(prefix+"useReadOnly", false)
	configuration.SetDefault(prefix+"routeByLatency", false)

	return store.initialize(
		configuration.GetStringSlice(prefix+"addresses"),
		configuration.GetString(prefix+"password"),
		configuration.GetBool(prefix+"useReadOnly"),
		configuration.GetBool(prefix+"routeByLatency"),
	)
}

func (store *RedisCluster3Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
	store.Client = redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:          addresses,
		Password:       password,
		ReadOnly:       readOnly,
		RouteByLatency: routeByLatency,
	})
	store.redsync = redsync.New(goredis.NewPool(store.Client))
	return
}
39 weed/filer/redis3/redis_store.go Normal file
@@ -0,0 +1,39 @@
package redis3

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/go-redis/redis/v8"
	"github.com/go-redsync/redsync/v4"
	"github.com/go-redsync/redsync/v4/redis/goredis/v8"
)

func init() {
	filer.Stores = append(filer.Stores, &Redis3Store{})
}

type Redis3Store struct {
	UniversalRedis3Store
}

func (store *Redis3Store) GetName() string {
	return "redis3"
}

func (store *Redis3Store) Initialize(configuration util.Configuration, prefix string) (err error) {
	return store.initialize(
		configuration.GetString(prefix+"address"),
		configuration.GetString(prefix+"password"),
		configuration.GetInt(prefix+"database"),
	)
}

func (store *Redis3Store) initialize(hostPort string, password string, database int) (err error) {
	store.Client = redis.NewClient(&redis.Options{
		Addr:     hostPort,
		Password: password,
		DB:       database,
	})
	store.redsync = redsync.New(goredis.NewPool(store.Client))
	return
}
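Both new stores read their settings through util.Configuration with the store name as the key prefix, which corresponds to a filer.toml section. A plausible stanza for the single-node store, inferred from the keys read in Initialize (the section name and the enabled flag are assumptions following the usual filer.toml convention, not taken from this diff):

	[redis3]
	enabled = true
	address = "localhost:6379"
	password = ""
	database = 0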
62 weed/filer/redis3/skiplist_element_store.go Normal file
@@ -0,0 +1,62 @@
package redis3

import (
	"context"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/util/skiplist"
	"github.com/go-redis/redis/v8"
	"github.com/golang/protobuf/proto"
)

type SkipListElementStore struct {
	Prefix string
	client redis.UniversalClient
}

// compile-time check that *SkipListElementStore satisfies skiplist.ListStore
var _ = skiplist.ListStore(&SkipListElementStore{})

func newSkipListElementStore(prefix string, client redis.UniversalClient) *SkipListElementStore {
	return &SkipListElementStore{
		Prefix: prefix,
		client: client,
	}
}

func (m *SkipListElementStore) SaveElement(id int64, element *skiplist.SkipListElement) error {
	key := fmt.Sprintf("%s%d", m.Prefix, id) // one Redis key per element: "<prefix><id>"
	data, err := proto.Marshal(element)
	if err != nil {
		glog.Errorf("marshal %s: %v", key, err)
	}
	return m.client.Set(context.Background(), key, data, 0).Err()
}

func (m *SkipListElementStore) DeleteElement(id int64) error {
	key := fmt.Sprintf("%s%d", m.Prefix, id)
	return m.client.Del(context.Background(), key).Err()
}

func (m *SkipListElementStore) LoadElement(id int64) (*skiplist.SkipListElement, error) {
	key := fmt.Sprintf("%s%d", m.Prefix, id)
	data, err := m.client.Get(context.Background(), key).Result()
	if err != nil {
		if err == redis.Nil {
			return nil, nil
		}
		return nil, err
	}
	t := &skiplist.SkipListElement{}
	err = proto.Unmarshal([]byte(data), t)
	if err == nil {
		// normalize protobuf zero-value references back to nil pointers
		for i := 0; i < len(t.Next); i++ {
			if t.Next[i].IsNil() {
				t.Next[i] = nil
			}
		}
		if t.Prev.IsNil() {
			t.Prev = nil
		}
	}
	return t, err
}
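The `var _ = skiplist.ListStore(&SkipListElementStore{})` line is Go's compile-time interface-conformance idiom: the conversion fails to build if the type ever drifts from the interface. The same idiom in miniature (names are made up for illustration):

	type Greeter interface{ Greet() string }

	type English struct{}

	func (English) Greet() string { return "hello" }

	// Breaks the build if English ever stops satisfying Greeter.
	var _ Greeter = English{}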
179 weed/filer/redis3/universal_redis_store.go Normal file
@@ -0,0 +1,179 @@
package redis3

import (
	"context"
	"fmt"
	"github.com/go-redsync/redsync/v4"
	"time"

	"github.com/go-redis/redis/v8"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

const (
	DIR_LIST_MARKER = "\x00"
)

type UniversalRedis3Store struct {
	Client  redis.UniversalClient
	redsync *redsync.Redsync
}

// Redis has no multi-key transactions in this model; these are no-ops.
func (store *UniversalRedis3Store) BeginTransaction(ctx context.Context) (context.Context, error) {
	return ctx, nil
}
func (store *UniversalRedis3Store) CommitTransaction(ctx context.Context) error {
	return nil
}
func (store *UniversalRedis3Store) RollbackTransaction(ctx context.Context) error {
	return nil
}

func (store *UniversalRedis3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

	value, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
	}

	if len(entry.Chunks) > 50 {
		value = util.MaybeGzipData(value)
	}

	if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
	}

	dir, name := entry.FullPath.DirAndName()

	if name != "" {
		if err = insertChild(ctx, store, genDirectoryListKey(dir), name); err != nil {
			return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
		}
	}

	return nil
}

func (store *UniversalRedis3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
	return store.InsertEntry(ctx, entry)
}

func (store *UniversalRedis3Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {

	data, err := store.Client.Get(ctx, string(fullpath)).Result()
	if err == redis.Nil {
		return nil, filer_pb.ErrNotFound
	}

	if err != nil {
		return nil, fmt.Errorf("get %s : %v", fullpath, err)
	}

	entry = &filer.Entry{
		FullPath: fullpath,
	}
	err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data)))
	if err != nil {
		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
	}

	return entry, nil
}

func (store *UniversalRedis3Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {

	_, err = store.Client.Del(ctx, genDirectoryListKey(string(fullpath))).Result()
	if err != nil {
		return fmt.Errorf("delete dir list %s : %v", fullpath, err)
	}

	_, err = store.Client.Del(ctx, string(fullpath)).Result()
	if err != nil {
		return fmt.Errorf("delete %s : %v", fullpath, err)
	}

	dir, name := fullpath.DirAndName()

	if name != "" {
		if err = removeChild(ctx, store, genDirectoryListKey(dir), name); err != nil {
			return fmt.Errorf("DeleteEntry %s in parent dir: %v", fullpath, err)
		}
	}

	return nil
}

func (store *UniversalRedis3Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {

	return removeChildren(ctx, store, genDirectoryListKey(string(fullpath)), func(name string) error {
		path := util.NewFullPath(string(fullpath), name)
		_, err = store.Client.Del(ctx, string(path)).Result()
		if err != nil {
			return fmt.Errorf("DeleteFolderChildren %s in parent dir: %v", fullpath, err)
		}
		// not efficient, but need to remove if it is a directory
		store.Client.Del(ctx, genDirectoryListKey(string(path)))
		return nil
	})

}

func (store *UniversalRedis3Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
	return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed
}

func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

	dirListKey := genDirectoryListKey(string(dirPath))
	counter := int64(0)

	err = listChildren(ctx, store, dirListKey, startFileName, func(fileName string) bool {
		if startFileName != "" {
			if !includeStartFile && startFileName == fileName {
				return true
			}
		}

		path := util.NewFullPath(string(dirPath), fileName)
		entry, err := store.FindEntry(ctx, path)
		lastFileName = fileName
		if err != nil {
			glog.V(0).Infof("list %s : %v", path, err)
			if err == filer_pb.ErrNotFound {
				return true
			}
		} else {
			if entry.TtlSec > 0 {
				if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
					// expired entry: clean it up lazily during listing
					store.Client.Del(ctx, string(path)).Result()
					store.Client.ZRem(ctx, dirListKey, fileName).Result()
					return true
				}
			}
			counter++
			if !eachEntryFunc(entry) {
				return false
			}
			if counter >= limit {
				return false
			}
		}
		return true
	})

	return lastFileName, err
}

func genDirectoryListKey(dir string) (dirList string) {
	return dir + DIR_LIST_MARKER
}

func (store *UniversalRedis3Store) Shutdown() {
	store.Client.Close()
}
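The key layout this store implies: an entry is stored under its full path, and a directory's child-name skip list lives under the path suffixed with the NUL DIR_LIST_MARKER, which keeps the two key spaces from colliding. A sketch (paths are illustrative):

	dir := "/docs"
	entryKey := dir + "/readme.md"           // value: serialized filer.Entry
	childListKey := genDirectoryListKey(dir) // "/docs\x00": skip-list skeleton of child names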
42 weed/filer/redis3/universal_redis_store_kv.go Normal file
@@ -0,0 +1,42 @@
package redis3

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/go-redis/redis/v8"
)

func (store *UniversalRedis3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) {

	_, err = store.Client.Set(ctx, string(key), value, 0).Result()

	if err != nil {
		return fmt.Errorf("kv put: %v", err)
	}

	return nil
}

func (store *UniversalRedis3Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) {

	data, err := store.Client.Get(ctx, string(key)).Result()

	if err == redis.Nil {
		return nil, filer.ErrKvNotFound
	}

	return []byte(data), err
}

func (store *UniversalRedis3Store) KvDelete(ctx context.Context, key []byte) (err error) {

	_, err = store.Client.Del(ctx, string(key)).Result()

	if err != nil {
		return fmt.Errorf("kv delete: %v", err)
	}

	return nil
}
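The KV methods reuse the same Redis client as the entry store, mapping raw byte keys straight onto Redis string keys. A brief usage sketch (the key and value are made up):

	if err := store.KvPut(ctx, []byte("sample-key"), []byte("sample-value")); err != nil {
		// handle write error
	}
	value, err := store.KvGet(ctx, []byte("sample-key"))
	if err == filer.ErrKvNotFound {
		// key absent
	}
	_ = value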
@@ -9,7 +9,7 @@ import (
 	"google.golang.org/grpc"
 )
 
-func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *remote_pb.RemoteStorageMapping, readErr error) {
+func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress) (mappings *remote_pb.RemoteStorageMapping, readErr error) {
 	var oldContent []byte
 	if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE)

@@ -131,7 +131,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.Remo
 	return
 }
 
-func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string, storageName string) (conf *remote_pb.RemoteConf, readErr error) {
+func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, storageName string) (conf *remote_pb.RemoteConf, readErr error) {
 	var oldContent []byte
 	if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX)

@@ -150,7 +150,7 @@ func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress string,
 	return
 }
 
-func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress string, dir string) (*remote_pb.RemoteStorageMapping, string, *remote_pb.RemoteStorageLocation, *remote_pb.RemoteConf, error) {
+func DetectMountInfo(grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, dir string) (*remote_pb.RemoteStorageMapping, string, *remote_pb.RemoteStorageLocation, *remote_pb.RemoteConf, error) {
 
 	mappings, listErr := ReadMountMappings(grpcDialOption, filerAddress)
 	if listErr != nil {

@@ -10,7 +10,7 @@ import (
 	"io"
 	"os"
 
-	"github.com/tecbot/gorocksdb"
+	gorocksdb "github.com/linxGnu/grocksdb"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
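The first three hunks swap the filer address parameter from a bare string to the typed pb.ServerAddress; assuming ServerAddress stays convertible from a string literal (it reads as a named string type), callers would adapt roughly like this:

	addr := pb.ServerAddress("localhost:8888") // assumption: named string type
	mappings, err := ReadMountMappings(grpcDialOption, addr)
	_ = mappings
	_ = err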
Some files were not shown because too many files have changed in this diff.