Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

Merge remote-tracking branch 'origin/master'

This commit is contained in: commit ec03f22cc3
.github/workflows/binaries_dev.yml (21 changes)

@@ -3,8 +3,6 @@ name: "go: build dev binaries"
 on:
   push:
     branches: [ master ]
-  pull_request:
-    branches: [ master ]
 
 jobs:
 
@@ -38,18 +36,15 @@ jobs:
       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
 
-      - name: disable http2 env
-        run: export GODEBUG=http2client=0
-
       - name: Go Release Binaries Large Disk
-        uses: wangyoucao577/go-release-action@bugfix/upload-fail
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
@@ -58,14 +53,14 @@ jobs:
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
 
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@bugfix/upload-fail
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
           project_path: weed
@@ -89,14 +84,14 @@ jobs:
         run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
 
       - name: Go Release Binaries Large Disk
-        uses: wangyoucao577/go-release-action@bugfix/upload-fail
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
@@ -105,14 +100,14 @@ jobs:
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
 
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@bugfix/upload-fail
+        uses: wangyoucao577/go-release-action@v1.22
        with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
           project_path: weed
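Note on the workflow change above: in GitHub Actions each `run:` step gets its own shell, so the separate "disable http2 env" step could not affect the release action that follows it; chaining GODEBUG=http2client=0 into `pre_command` sets it in the same shell that runs the build, presumably to keep Go's own HTTP traffic on HTTP/1.1. For reference, GODEBUG=http2client=0 disables HTTP/2 in Go's default HTTP client; a minimal check (the target URL is only an example of an HTTP/2-capable server):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Run with:  GODEBUG=http2client=0 go run main.go
	// and again without the variable to compare the negotiated protocol.
	resp, err := http.Get("https://www.google.com")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/1.1" with http2client=0, usually "HTTP/2.0" otherwise
}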
.github/workflows/binaries_release0.yml (8 changes)

@@ -25,13 +25,13 @@ jobs:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           # build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
@@ -39,13 +39,13 @@ jobs:
           binary_name: weed
           asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
       - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
.github/workflows/binaries_release1.yml (8 changes)

@@ -25,13 +25,13 @@ jobs:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           # build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
@@ -39,13 +39,13 @@ jobs:
           binary_name: weed
           asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
       - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
.github/workflows/binaries_release2.yml (8 changes)

@@ -25,13 +25,13 @@ jobs:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           # build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
@@ -39,13 +39,13 @@ jobs:
           binary_name: weed
           asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
       - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
.github/workflows/binaries_release3.yml (8 changes)

@@ -25,13 +25,13 @@ jobs:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
       - name: Go Release Binaries Normal Volume Size
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           # build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
@@ -39,13 +39,13 @@ jobs:
           binary_name: weed
           asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
       - name: Go Release Large Disk Binaries
-        uses: wangyoucao577/go-release-action@v1.20
+        uses: wangyoucao577/go-release-action@v1.22
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           overwrite: true
-          pre_command: export CGO_ENABLED=0
+          pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
Dockerfiles:

@@ -48,6 +48,7 @@ EXPOSE 7333
 RUN mkdir -p /data/filerldb2
 
 VOLUME /data
+WORKDIR /data
 
 COPY filer.toml /etc/seaweedfs/filer.toml
 COPY entrypoint.sh /entrypoint.sh

@@ -37,6 +37,7 @@ EXPOSE 7333
 RUN mkdir -p /data/filerldb2
 
 VOLUME /data
+WORKDIR /data
 
 RUN chmod +x /entrypoint.sh
 

@@ -36,6 +36,7 @@ EXPOSE 7333
 RUN mkdir -p /data/filerldb2
 
 VOLUME /data
+WORKDIR /data
 
 RUN chmod +x /entrypoint.sh
 

@@ -36,6 +36,7 @@ EXPOSE 7333
 RUN mkdir -p /data/filerldb2
 
 VOLUME /data
+WORKDIR /data
 
 RUN chmod +x /entrypoint.sh
 

@@ -26,6 +26,7 @@ EXPOSE 7333
 RUN mkdir -p /data/filerldb2
 
 VOLUME /data
+WORKDIR /data
 
 RUN chmod +x /entrypoint.sh
 

@@ -54,6 +54,8 @@ RUN mkdir -p /data/filerldb2
 
 VOLUME /data
+
+WORKDIR /data
 
 RUN chmod +x /entrypoint.sh
 
 ENTRYPOINT ["/entrypoint.sh"]
Helm chart Chart.yaml:

@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.77"
-version: "2.77"
+appVersion: "2.79"
+version: "2.79"
Helm chart ingress template:

@@ -1,61 +1,67 @@
+{{- if .Values.filer.ingress.enabled }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
+apiVersion: networking.k8s.io/v1beta1
+{{- else }}
 apiVersion: extensions/v1beta1
+{{- end }}
 kind: Ingress
 metadata:
   name: ingress-{{ template "seaweedfs.name" . }}-filer
   namespace: {{ .Release.Namespace }}
   annotations:
-    kubernetes.io/ingress.class: "nginx"
-    nginx.ingress.kubernetes.io/auth-type: "basic"
-    nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
-    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
-    nginx.ingress.kubernetes.io/service-upstream: "true"
-    nginx.ingress.kubernetes.io/rewrite-target: /$1
-    nginx.ingress.kubernetes.io/use-regex: "true"
-    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
-    nginx.ingress.kubernetes.io/ssl-redirect: "false"
-    nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
-    nginx.ingress.kubernetes.io/configuration-snippet: |
-      sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
-      sub_filter '="/' '="./'; #make absolute paths to relative
-      sub_filter '=/' '=./';
-      sub_filter '/seaweedfsstatic' './seaweedfsstatic';
-      sub_filter_once off;
+    {{ omit .Values.filer.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
 spec:
+  ingressClassName: {{ .Values.filer.ingress.className | quote }}
   rules:
   - http:
       paths:
       - path: /sw-filer/?(.*)
+        pathType: ImplementationSpecific
         backend:
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
+          service:
+            name: {{ template "seaweedfs.name" . }}-filer
+            port:
+              number: {{ .Values.filer.port }}
+              #name:
+{{- else }}
           serviceName: {{ template "seaweedfs.name" . }}-filer
           servicePort: {{ .Values.filer.port }}
+{{- end }}
+{{- end }}
 ---
+{{- if .Values.master.ingress.enabled }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
+apiVersion: networking.k8s.io/v1beta1
+{{- else }}
 apiVersion: extensions/v1beta1
+{{- end }}
 kind: Ingress
 metadata:
   name: ingress-{{ template "seaweedfs.name" . }}-master
   namespace: {{ .Release.Namespace }}
   annotations:
-    kubernetes.io/ingress.class: "nginx"
-    nginx.ingress.kubernetes.io/auth-type: "basic"
-    nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
-    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
-    nginx.ingress.kubernetes.io/service-upstream: "true"
-    nginx.ingress.kubernetes.io/rewrite-target: /$1
-    nginx.ingress.kubernetes.io/use-regex: "true"
-    nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
-    nginx.ingress.kubernetes.io/ssl-redirect: "false"
-    nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
-    nginx.ingress.kubernetes.io/configuration-snippet: |
-      sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
-      sub_filter '="/' '="./'; #make absolute paths to relative
-      sub_filter '=/' '=./';
-      sub_filter '/seaweedfsstatic' './seaweedfsstatic';
-      sub_filter_once off;
+    {{ omit .Values.master.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
 spec:
+  ingressClassName: {{ .Values.master.ingress.className | quote }}
   rules:
   - http:
       paths:
       - path: /sw-master/?(.*)
+        pathType: ImplementationSpecific
         backend:
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
+          service:
+            name: {{ template "seaweedfs.name" . }}-master
+            port:
+              number: {{ .Values.master.port }}
+              #name:
+{{- else }}
           serviceName: {{ template "seaweedfs.name" . }}-master
           servicePort: {{ .Values.master.port }}
+{{- end }}
+{{- end }}
Helm chart RBAC template:

@@ -1,7 +1,7 @@
 #hack for delete pod master after migration
 ---
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: seaweefds-rw-cr
 rules:
@@ -16,7 +16,7 @@ metadata:
   namespace: {{ .Release.Namespace }}
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: system:serviceaccount:seaweefds-rw-sa:default
 subjects:
@@ -26,4 +26,4 @@ subjects:
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: seaweefds-rw-cr
Helm chart values:

@@ -109,6 +109,26 @@ master:
   # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
   priorityClassName: ""
 
+  ingress:
+    enabled: false
+    className: "nginx"
+    annotations:
+      nginx.ingress.kubernetes.io/auth-type: "basic"
+      nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
+      nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
+      nginx.ingress.kubernetes.io/service-upstream: "true"
+      nginx.ingress.kubernetes.io/rewrite-target: /$1
+      nginx.ingress.kubernetes.io/use-regex: "true"
+      nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
+      nginx.ingress.kubernetes.io/ssl-redirect: "false"
+      nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
+      nginx.ingress.kubernetes.io/configuration-snippet: |
+        sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
+        sub_filter '="/' '="./'; #make absolute paths to relative
+        sub_filter '=/' '=./';
+        sub_filter '/seaweedfsstatic' './seaweedfsstatic';
+        sub_filter_once off;
+
   extraEnvironmentVars:
     WEED_MASTER_VOLUME_GROWTH_COPY_1: 7
     WEED_MASTER_VOLUME_GROWTH_COPY_2: 6
@@ -309,6 +329,26 @@ filer:
   # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
   priorityClassName: ""
 
+  ingress:
+    enabled: false
+    className: "nginx"
+    annotations:
+      nginx.ingress.kubernetes.io/auth-type: "basic"
+      nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
+      nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
+      nginx.ingress.kubernetes.io/service-upstream: "true"
+      nginx.ingress.kubernetes.io/rewrite-target: /$1
+      nginx.ingress.kubernetes.io/use-regex: "true"
+      nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
+      nginx.ingress.kubernetes.io/ssl-redirect: "false"
+      nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
+      nginx.ingress.kubernetes.io/configuration-snippet: |
+        sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
+        sub_filter '="/' '="./'; #make absolute paths to relative
+        sub_filter '=/' '=./';
+        sub_filter '/seaweedfsstatic' './seaweedfsstatic';
+        sub_filter_once off;
+
   # extraEnvVars is a list of extra enviroment variables to set with the stateful set.
   extraEnvironmentVars:
     WEED_MYSQL_ENABLED: "true"
@@ -199,8 +199,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			return client.WriteDirectory(dest, message.NewEntry)
 		}
 		glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
-		reader := filer.NewFileReader(filerSource, message.NewEntry)
-		remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
+		remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
 		if writeErr != nil {
 			return writeErr
 		}
@@ -264,9 +263,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			return client.UpdateFileMetadata(oldDest, message.OldEntry, message.NewEntry)
 		} else {
 			newDest := toRemoteStorageLocation(newBucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), newRemoteStorageMountLocation)
-			reader := filer.NewFileReader(filerSource, message.NewEntry)
-			glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
-			remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
+			remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, newDest)
 			if writeErr != nil {
 				return writeErr
 			}
@@ -303,9 +300,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 		if message.NewEntry.IsDirectory {
 			return client.WriteDirectory(newDest, message.NewEntry)
 		}
-		reader := filer.NewFileReader(filerSource, message.NewEntry)
-		glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
-		remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
+		remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, newDest)
 		if writeErr != nil {
 			return writeErr
 		}
@@ -108,8 +108,7 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
 			return client.WriteDirectory(dest, message.NewEntry)
 		}
 		glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
-		reader := filer.NewFileReader(filerSource, message.NewEntry)
-		remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
+		remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
 		if writeErr != nil {
 			return writeErr
 		}
@@ -146,9 +145,7 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
 		if err := client.DeleteFile(oldDest); err != nil {
 			return err
 		}
-		reader := filer.NewFileReader(filerSource, message.NewEntry)
-		glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
-		remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
+		remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
 		if writeErr != nil {
 			return writeErr
 		}
@@ -160,6 +157,20 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
 	return eachEntryFunc, nil
 }
 
+func retriedWriteFile(client remote_storage.RemoteStorageClient, filerSource *source.FilerSource, newEntry *filer_pb.Entry, dest *remote_pb.RemoteStorageLocation) (remoteEntry *filer_pb.RemoteEntry, err error) {
+	var writeErr error
+	err = util.Retry("writeFile", func() error {
+		reader := filer.NewFileReader(filerSource, newEntry)
+		glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
+		remoteEntry, writeErr = client.WriteFile(dest, newEntry, reader)
+		if writeErr != nil {
+			return writeErr
+		}
+		return nil
+	})
+	return
+}
+
 func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, mountedDir string, timeAgo time.Duration) time.Time {
 	// 1. specified by timeAgo
 	// 2. last offset timestamp for this directory
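Note on retriedWriteFile above: the filer.NewFileReader call moves inside the retried closure, so every attempt opens a fresh reader over the entry instead of resuming a half-consumed one. The sketch below illustrates the kind of retry loop the helper delegates to; it is an illustrative stand-in, not the actual weed/util.Retry implementation, whose attempt count, backoff policy, and retry condition may differ.

package main

import (
	"fmt"
	"time"
)

// retry runs job until it succeeds or the attempts are exhausted.
// The attempt count and backoff growth here are assumptions for the sketch.
func retry(name string, job func() error) (err error) {
	waitTime := time.Second
	for attempt := 0; attempt < 3; attempt++ {
		if err = job(); err == nil {
			return nil
		}
		fmt.Printf("%s: %v, retrying in %v\n", name, err, waitTime)
		time.Sleep(waitTime)
		waitTime += waitTime / 2
	}
	return err
}

func main() {
	calls := 0
	_ = retry("writeFile", func() error {
		calls++
		if calls < 2 {
			return fmt.Errorf("transient upload error")
		}
		return nil
	})
	fmt.Println("succeeded after", calls, "attempts")
}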
Filer configuration scaffold:

@@ -61,12 +61,12 @@ connection_max_lifetime_seconds = 0
 interpolateParams = false
 # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
 enableUpsert = true
-upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
+upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
 
 [mysql2] # or memsql, tidb
 enabled = false
 createTable = """
-  CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
+  CREATE TABLE IF NOT EXISTS `%s` (
     dirhash BIGINT,
     name VARCHAR(1000) BINARY,
     directory TEXT BINARY,
@@ -85,7 +85,7 @@ connection_max_lifetime_seconds = 0
 interpolateParams = false
 # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
 enableUpsert = true
-upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
+upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
 
 [postgres] # or cockroachdb, YugabyteDB
 # CREATE TABLE IF NOT EXISTS filemeta (
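Note on the scaffold change above: a Go raw string literal (backtick-delimited) cannot contain a backtick, so a template kept inside Go source has to splice identifier quoting in via string concatenation; the old lines show that splice appearing literally in the template text, and the change replaces it with plainly backtick-quoted `%s`, which is valid once the template is a standalone file rather than a Go raw string. A minimal illustration of the two forms (the query text is just the example from the diff):

package main

import "fmt"

func main() {
	// Inside Go source, backticks cannot appear in a raw string literal,
	// so the identifier quoting has to be concatenated in:
	fromGoSource := `INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?)`

	// In a standalone template file the backticks can simply be literal;
	// rendering with a table name gives the same statement either way.
	fmt.Println(fmt.Sprintf(fromGoSource, "filemeta"))
}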
@@ -6,7 +6,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"io"
 	"math"
-	"math/rand"
 	"net/url"
 	"strings"
 	"time"
@@ -143,9 +142,6 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKe
 	var shouldRetry bool
 	var totalWritten int
 
-	rand.Shuffle(len(urlStrings), func(i, j int) {
-		urlStrings[i], urlStrings[j] = urlStrings[j], urlStrings[i]
-	})
 	for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
 		for _, urlString := range urlStrings {
 			var localProcesed int
@@ -177,8 +177,8 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
 	bucket, _ := getBucketAndObject(r)
 	glog.V(3).Infof("HeadBucketHandler %s", bucket)
 
-	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
-		s3err.WriteErrorResponse(w, r, err)
+	if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); entry == nil || err == filer_pb.ErrNotFound {
+		s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
 		return
 	}
 
@@ -226,7 +226,7 @@ func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName s
 		if receiveErr == io.EOF {
 			break
 		}
-		if resp.ModifiedTsNs != 0 {
+		if resp!=nil && resp.ModifiedTsNs != 0 {
 			modifiedTsNs = resp.ModifiedTsNs
 		}
 		if receiveErr != nil {
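Note on the change above: the stream's error is only examined after ModifiedTsNs is read, and a gRPC Recv that fails with a non-EOF error returns a nil message, so the field access needed the nil guard. A compilable sketch of the receive-loop shape (the chunk type and stream interface are stand-ins, not the actual volume_server_pb types):

package copydemo

import (
	"fmt"
	"io"
)

// chunk stands in for the real protobuf response message.
type chunk struct {
	ModifiedTsNs int64
	FileContent  []byte
}

// receiver stands in for the generated gRPC client stream.
type receiver interface {
	Recv() (*chunk, error)
}

// drain mirrors the guard order in the patched writeToFile: stop on EOF,
// read fields only when the message is non-nil, then surface the error.
func drain(stream receiver, w io.Writer) (modifiedTsNs int64, err error) {
	for {
		resp, receiveErr := stream.Recv()
		if receiveErr == io.EOF {
			break
		}
		if resp != nil && resp.ModifiedTsNs != 0 {
			modifiedTsNs = resp.ModifiedTsNs
		}
		if receiveErr != nil {
			return modifiedTsNs, fmt.Errorf("receiving: %v", receiveErr)
		}
		if _, err = w.Write(resp.FileContent); err != nil {
			return modifiedTsNs, err
		}
	}
	return modifiedTsNs, nil
}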
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"io"
+	"path/filepath"
 
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -34,11 +35,12 @@ func (c *commandVolumeConfigureReplication) Help() string {
 `
 }
 
-func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, _ io.Writer) (err error) {
 
 	configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 	volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id")
 	replicationString := configureReplicationCommand.String("replication", "", "the intended replication value")
+	collectionPattern := configureReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
 	if err = configureReplicationCommand.Parse(args); err != nil {
 		return nil
 	}
@@ -55,7 +57,6 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
 	if err != nil {
 		return fmt.Errorf("replication format: %v", err)
 	}
-	replicaPlacementInt32 := uint32(replicaPlacement.Byte())
 
 	// collect topology information
 	topologyInfo, _, err := collectTopologyInfo(commandEnv)
@@ -64,6 +65,7 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
 	}
 
 	vid := needle.VolumeId(*volumeIdInt)
+	volumeFilter := getVolumeFilter(replicaPlacement, uint32(vid), *collectionPattern)
 
 	// find all data nodes with volumes that needs replication change
 	var allLocations []location
@@ -71,7 +73,7 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
 		loc := newLocation(dc, string(rack), dn)
 		for _, diskInfo := range dn.DiskInfos {
 			for _, v := range diskInfo.VolumeInfos {
-				if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
+				if volumeFilter(v) {
 					allLocations = append(allLocations, loc)
 					continue
 				}
@@ -106,3 +108,19 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
 
 	return nil
 }
+
+func getVolumeFilter(replicaPlacement *super_block.ReplicaPlacement, volumeId uint32, collectionPattern string) func(message *master_pb.VolumeInformationMessage) bool {
+	replicaPlacementInt32 := uint32(replicaPlacement.Byte())
+	if volumeId > 0 {
+		return func(v *master_pb.VolumeInformationMessage) bool {
+			return v.Id == volumeId && v.ReplicaPlacement != replicaPlacementInt32
+		}
+	}
+	return func(v *master_pb.VolumeInformationMessage) bool {
+		matched, err := filepath.Match(collectionPattern, v.Collection)
+		if err != nil {
+			return false
+		}
+		return matched
+	}
+}
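Note on the new -collectionPattern flag above: getVolumeFilter matches collection names with filepath.Match, so the pattern uses shell-style '*' and '?' wildcards rather than regular expressions. A minimal illustration (the collection names are made up):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	pattern := "pic*" // e.g. passed as -collectionPattern "pic*"
	for _, collection := range []string{"pictures", "pic2021", "logs"} {
		matched, err := filepath.Match(pattern, collection)
		fmt.Println(collection, matched, err)
	}
}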
@@ -5,7 +5,7 @@ import (
 )
 
 var (
-	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.77)
+	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.79)
 	VERSION        = sizeLimit + " " + VERSION_NUMBER
 	COMMIT         = ""
 )
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/pb"
+	"math/rand"
 	"strconv"
 	"strings"
 	"sync"
@@ -69,13 +70,21 @@ func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err er
 	if !found {
 		return nil, fmt.Errorf("volume %d not found", id)
 	}
+	var sameDcServers, otherDcServers []string
 	for _, loc := range locations {
 		if vc.DataCenter == "" || loc.DataCenter == "" || vc.DataCenter != loc.DataCenter {
-			serverUrls = append(serverUrls, loc.Url)
+			otherDcServers = append(otherDcServers, loc.Url)
 		} else {
-			serverUrls = append([]string{loc.Url}, serverUrls...)
+			sameDcServers = append(sameDcServers, loc.Url)
 		}
 	}
+	rand.Shuffle(len(sameDcServers), func(i, j int) {
+		sameDcServers[i], sameDcServers[j] = sameDcServers[j], sameDcServers[i]
+	})
+	rand.Shuffle(len(otherDcServers), func(i, j int) {
+		otherDcServers[i], otherDcServers[j] = otherDcServers[j], otherDcServers[i]
+	})
+	serverUrls = append(sameDcServers, otherDcServers...)
 	return
 }
 
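Note on the lookup change above: replica URLs are now grouped by data center, each group is shuffled, and same-DC servers are returned ahead of the rest; together with the removal of the per-read shuffle in retriedStreamFetchChunkData earlier in this commit, the randomization appears to move from the read path into the lookup. A standalone sketch of the resulting ordering (the Location type and addresses are illustrative, not the actual wdclient types):

package main

import (
	"fmt"
	"math/rand"
)

// Location mirrors the fields the diff relies on.
type Location struct {
	Url        string
	DataCenter string
}

// orderServers returns same-data-center replicas first, each group shuffled.
func orderServers(myDC string, locations []Location) []string {
	var sameDc, otherDc []string
	for _, loc := range locations {
		if myDC == "" || loc.DataCenter == "" || myDC != loc.DataCenter {
			otherDc = append(otherDc, loc.Url)
		} else {
			sameDc = append(sameDc, loc.Url)
		}
	}
	rand.Shuffle(len(sameDc), func(i, j int) { sameDc[i], sameDc[j] = sameDc[j], sameDc[i] })
	rand.Shuffle(len(otherDc), func(i, j int) { otherDc[i], otherDc[j] = otherDc[j], otherDc[i] })
	return append(sameDc, otherDc...)
}

func main() {
	fmt.Println(orderServers("dc1", []Location{
		{"10.0.0.1:8080", "dc1"}, {"10.0.0.2:8080", "dc2"}, {"10.0.0.3:8080", "dc1"},
	}))
}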