Merge pull request #88 from chrislusf/master

sync
hilimd 2021-11-25 15:53:35 +08:00 committed by GitHub
commit aa1ef02984
24 changed files with 182 additions and 105 deletions


@ -3,8 +3,6 @@ name: "go: build dev binaries"
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
@ -38,18 +36,15 @@ jobs:
- name: Set BUILD_TIME env
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: disable http2 env
run: export GODEBUG=http2client=0
- name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@bugfix/upload-fail
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
@ -58,14 +53,14 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@bugfix/upload-fail
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed
@ -89,14 +84,14 @@ jobs:
run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
- name: Go Release Binaries Large Disk
uses: wangyoucao577/go-release-action@bugfix/upload-fail
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
@ -105,14 +100,14 @@ jobs:
asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@bugfix/upload-fail
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
release_tag: dev
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
project_path: weed


@ -25,13 +25,13 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
@ -39,13 +39,13 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`


@ -25,13 +25,13 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
@ -39,13 +39,13 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`


@ -25,13 +25,13 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
@ -39,13 +39,13 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`


@ -25,13 +25,13 @@ jobs:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- name: Go Release Binaries Normal Volume Size
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
# build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`
@ -39,13 +39,13 @@ jobs:
binary_name: weed
asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}"
- name: Go Release Large Disk Binaries
uses: wangyoucao577/go-release-action@v1.20
uses: wangyoucao577/go-release-action@v1.22
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
goos: ${{ matrix.goos }}
goarch: ${{ matrix.goarch }}
overwrite: true
pre_command: export CGO_ENABLED=0
pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
build_flags: -tags 5BytesOffset # optional, default is
ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
# Where to run `go build .`


@ -48,6 +48,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
COPY filer.toml /etc/seaweedfs/filer.toml
COPY entrypoint.sh /entrypoint.sh


@ -37,6 +37,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh


@ -36,6 +36,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh


@ -36,6 +36,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh


@ -26,6 +26,7 @@ EXPOSE 7333
RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh


@ -54,6 +54,8 @@ RUN mkdir -p /data/filerldb2
VOLUME /data
WORKDIR /data
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]


@ -1,5 +1,5 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "2.77"
version: "2.77"
appVersion: "2.79"
version: "2.79"


@ -1,61 +1,67 @@
{{- if .Values.filer.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
name: ingress-{{ template "seaweedfs.name" . }}-filer
namespace: {{ .Release.Namespace }}
annotations:
{{ omit .Values.filer.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
spec:
rules:
- http:
paths:
- path: /sw-filer/?(.*)
backend:
serviceName: {{ template "seaweedfs.name" . }}-filer
servicePort: {{ .Values.filer.port }}
ingressClassName: {{ .Values.filer.ingress.className | quote }}
rules:
- http:
paths:
- path: /sw-filer/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-filer
port:
number: {{ .Values.filer.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-filer
servicePort: {{ .Values.filer.port }}
{{- end }}
{{- end }}
---
{{- if .Values.master.ingress.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: ingress-{{ template "seaweedfs.name" . }}-master
namespace: {{ .Release.Namespace }}
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
{{ omit .Values.master.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }}
spec:
ingressClassName: {{ .Values.master.ingress.className | quote }}
rules:
- http:
paths:
- path: /sw-master/?(.*)
pathType: ImplementationSpecific
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-master
port:
number: {{ .Values.master.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-master
servicePort: {{ .Values.master.port }}
{{- end }}
{{- end }}


@ -1,7 +1,7 @@
#hack for delete pod master after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: seaweefds-rw-cr
rules:
@ -16,7 +16,7 @@ metadata:
namespace: {{ .Release.Namespace }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:serviceaccount:seaweefds-rw-sa:default
subjects:
@ -26,4 +26,4 @@ subjects:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: seaweefds-rw-cr
name: seaweefds-rw-cr


@ -109,6 +109,26 @@ master:
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
ingress:
enabled: false
className: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
extraEnvironmentVars:
WEED_MASTER_VOLUME_GROWTH_COPY_1: 7
WEED_MASTER_VOLUME_GROWTH_COPY_2: 6
@ -309,6 +329,26 @@ filer:
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
ingress:
enabled: false
className: "nginx"
annotations:
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
# extraEnvVars is a list of extra environment variables to set with the stateful set.
extraEnvironmentVars:
WEED_MYSQL_ENABLED: "true"


@ -199,8 +199,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
return client.WriteDirectory(dest, message.NewEntry)
}
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
reader := filer.NewFileReader(filerSource, message.NewEntry)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
if writeErr != nil {
return writeErr
}
@ -264,9 +263,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
return client.UpdateFileMetadata(oldDest, message.OldEntry, message.NewEntry)
} else {
newDest := toRemoteStorageLocation(newBucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), newRemoteStorageMountLocation)
reader := filer.NewFileReader(filerSource, message.NewEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, newDest)
if writeErr != nil {
return writeErr
}
@ -303,9 +300,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
if message.NewEntry.IsDirectory {
return client.WriteDirectory(newDest, message.NewEntry)
}
reader := filer.NewFileReader(filerSource, message.NewEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(newDest))
remoteEntry, writeErr := client.WriteFile(newDest, message.NewEntry, reader)
remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, newDest)
if writeErr != nil {
return writeErr
}


@ -108,8 +108,7 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
return client.WriteDirectory(dest, message.NewEntry)
}
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
reader := filer.NewFileReader(filerSource, message.NewEntry)
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
if writeErr != nil {
return writeErr
}
@ -146,9 +145,7 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
if err := client.DeleteFile(oldDest); err != nil {
return err
}
reader := filer.NewFileReader(filerSource, message.NewEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
remoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)
remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
if writeErr != nil {
return writeErr
}
@ -160,6 +157,20 @@ func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string,
return eachEntryFunc, nil
}
func retriedWriteFile(client remote_storage.RemoteStorageClient, filerSource *source.FilerSource, newEntry *filer_pb.Entry, dest *remote_pb.RemoteStorageLocation) (remoteEntry *filer_pb.RemoteEntry, err error) {
var writeErr error
err = util.Retry("writeFile", func() error {
reader := filer.NewFileReader(filerSource, newEntry)
glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
remoteEntry, writeErr = client.WriteFile(dest, newEntry, reader)
if writeErr != nil {
return writeErr
}
return nil
})
return
}
func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, mountedDir string, timeAgo time.Duration) time.Time {
// 1. specified by timeAgo
// 2. last offset timestamp for this directory
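
The new retriedWriteFile helper wraps each remote write in weed/util's Retry and rebuilds the FileReader on every attempt, since a reader consumed by a failed upload cannot be replayed. Below is a minimal, self-contained sketch of the retry behavior it relies on; the real util.Retry and util.RetryWaitTime may differ in backoff and cap, so the constants here are assumptions, not the project's implementation.

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"time"
)

// retry is a hedged stand-in for weed/util.Retry as called by retriedWriteFile:
// re-run job with a growing wait until it succeeds or the wait cap is reached.
func retry(name string, job func() error) (err error) {
	for waitTime := time.Second; waitTime < 30*time.Second; waitTime += waitTime / 2 {
		if err = job(); err == nil {
			return nil
		}
		log.Printf("retry %s: %v", name, err)
		time.Sleep(waitTime)
	}
	return err
}

func main() {
	attempts := 0
	err := retry("writeFile", func() error {
		attempts++ // in the diff, filer.NewFileReader(...) is recreated here on each attempt
		if attempts < 3 {
			return errors.New("transient upload failure")
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```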


@ -61,12 +61,12 @@ connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
CREATE TABLE IF NOT EXISTS `%s` (
dirhash BIGINT,
name VARCHAR(1000) BINARY,
directory TEXT BINARY,
@ -85,7 +85,7 @@ connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (


@ -6,7 +6,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/wdclient"
"io"
"math"
"math/rand"
"net/url"
"strings"
"time"
@ -143,9 +142,6 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKe
var shouldRetry bool
var totalWritten int
rand.Shuffle(len(urlStrings), func(i, j int) {
urlStrings[i], urlStrings[j] = urlStrings[j], urlStrings[i]
})
for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
for _, urlString := range urlStrings {
var localProcesed int


@ -177,8 +177,8 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
bucket, _ := getBucketAndObject(r)
glog.V(3).Infof("HeadBucketHandler %s", bucket)
if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
s3err.WriteErrorResponse(w, r, err)
if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); entry == nil || err == filer_pb.ErrNotFound {
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket)
return
}


@ -226,7 +226,7 @@ func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName s
if receiveErr == io.EOF {
break
}
if resp.ModifiedTsNs != 0 {
if resp != nil && resp.ModifiedTsNs != 0 {
modifiedTsNs = resp.ModifiedTsNs
}
if receiveErr != nil {
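
The added resp != nil guard matters because a streaming Recv that fails returns a nil message alongside its error, so reading resp.ModifiedTsNs unconditionally could panic. A self-contained sketch of the pattern, using stand-in types rather than the real volume_server_pb client:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// copyFileResp and fakeStream are stand-ins for the volume_server_pb types,
// just enough to show why the nil check is needed.
type copyFileResp struct{ ModifiedTsNs int64 }

type fakeStream struct{ msgs []*copyFileResp }

// Recv mimics a gRPC stream: on failure it returns a nil message plus an error.
func (s *fakeStream) Recv() (*copyFileResp, error) {
	if len(s.msgs) == 0 {
		return nil, io.EOF
	}
	m := s.msgs[0]
	s.msgs = s.msgs[1:]
	if m == nil {
		return nil, errors.New("transport error")
	}
	return m, nil
}

func main() {
	stream := &fakeStream{msgs: []*copyFileResp{{ModifiedTsNs: 42}, nil}}
	var modifiedTsNs int64
	for {
		resp, receiveErr := stream.Recv()
		if receiveErr == io.EOF {
			break
		}
		if resp != nil && resp.ModifiedTsNs != 0 { // resp is nil when receiveErr is set
			modifiedTsNs = resp.ModifiedTsNs
		}
		if receiveErr != nil {
			fmt.Println("receive error:", receiveErr)
			break
		}
	}
	fmt.Println("last modified ts:", modifiedTsNs)
}
```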


@ -7,6 +7,7 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"io"
"path/filepath"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@ -34,11 +35,12 @@ func (c *commandVolumeConfigureReplication) Help() string {
`
}
func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, _ io.Writer) (err error) {
configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id")
replicationString := configureReplicationCommand.String("replication", "", "the intended replication value")
collectionPattern := configureReplicationCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
if err = configureReplicationCommand.Parse(args); err != nil {
return nil
}
@ -55,7 +57,6 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
if err != nil {
return fmt.Errorf("replication format: %v", err)
}
replicaPlacementInt32 := uint32(replicaPlacement.Byte())
// collect topology information
topologyInfo, _, err := collectTopologyInfo(commandEnv)
@ -64,6 +65,7 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
}
vid := needle.VolumeId(*volumeIdInt)
volumeFilter := getVolumeFilter(replicaPlacement, uint32(vid), *collectionPattern)
// find all data nodes with volumes that needs replication change
var allLocations []location
@ -71,7 +73,7 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
loc := newLocation(dc, string(rack), dn)
for _, diskInfo := range dn.DiskInfos {
for _, v := range diskInfo.VolumeInfos {
if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
if volumeFilter(v) {
allLocations = append(allLocations, loc)
continue
}
@ -106,3 +108,19 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
return nil
}
func getVolumeFilter(replicaPlacement *super_block.ReplicaPlacement, volumeId uint32, collectionPattern string) func(message *master_pb.VolumeInformationMessage) bool {
replicaPlacementInt32 := uint32(replicaPlacement.Byte())
if volumeId > 0 {
return func(v *master_pb.VolumeInformationMessage) bool {
return v.Id == volumeId && v.ReplicaPlacement != replicaPlacementInt32
}
}
return func(v *master_pb.VolumeInformationMessage) bool {
matched, err := filepath.Match(collectionPattern, v.Collection)
if err != nil {
return false
}
return matched
}
}
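
getVolumeFilter now selects volumes either by exact id with a differing replica placement or, when no volume id is given, by matching the collection name against -collectionPattern using filepath.Match, where '*' spans any run of characters and '?' matches exactly one. A quick standalone illustration of that wildcard behavior:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// The same matching semantics getVolumeFilter applies to collection names.
	for _, collection := range []string{"pictures", "pictures2021", "pix", "docs"} {
		matched, _ := filepath.Match("pic*", collection)
		fmt.Printf("%-14s matched=%v\n", collection, matched)
	}
	// '?' matches exactly one character.
	ok, _ := filepath.Match("doc?", "docs")
	fmt.Println("doc? vs docs:", ok) // true
}
```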


@ -5,7 +5,7 @@ import (
)
var (
VERSION_NUMBER = fmt.Sprintf("%.02f", 2.77)
VERSION_NUMBER = fmt.Sprintf("%.02f", 2.79)
VERSION = sizeLimit + " " + VERSION_NUMBER
COMMIT = ""
)


@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"math/rand"
"strconv"
"strings"
"sync"
@ -69,13 +70,21 @@ func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err er
if !found {
return nil, fmt.Errorf("volume %d not found", id)
}
var sameDcServers, otherDcServers []string
for _, loc := range locations {
if vc.DataCenter == "" || loc.DataCenter == "" || vc.DataCenter != loc.DataCenter {
serverUrls = append(serverUrls, loc.Url)
otherDcServers = append(otherDcServers, loc.Url)
} else {
serverUrls = append([]string{loc.Url}, serverUrls...)
sameDcServers = append(sameDcServers, loc.Url)
}
}
rand.Shuffle(len(sameDcServers), func(i, j int) {
sameDcServers[i], sameDcServers[j] = sameDcServers[j], sameDcServers[i]
})
rand.Shuffle(len(otherDcServers), func(i, j int) {
otherDcServers[i], otherDcServers[j] = otherDcServers[j], otherDcServers[i]
})
serverUrls = append(sameDcServers, otherDcServers...)
return
}
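
With this change, LookupVolumeServerUrl groups replica locations by data center, shuffles within each group, and always lists same-DC servers first, so reads prefer local replicas while still spreading load; it replaces the per-request shuffle removed from retriedStreamFetchChunkData above. A standalone sketch of that ordering, with Location re-declared here purely for illustration:

```go
package main

import (
	"fmt"
	"math/rand"
)

// Location mirrors the fields the diff uses; re-declared so the sketch runs on its own.
type Location struct {
	Url        string
	DataCenter string
}

// orderedUrls reproduces the new ordering: same-DC replicas first, each tier shuffled.
func orderedUrls(selfDC string, locations []Location) []string {
	var sameDc, otherDc []string
	for _, loc := range locations {
		if selfDC != "" && loc.DataCenter == selfDC {
			sameDc = append(sameDc, loc.Url)
		} else {
			otherDc = append(otherDc, loc.Url)
		}
	}
	rand.Shuffle(len(sameDc), func(i, j int) { sameDc[i], sameDc[j] = sameDc[j], sameDc[i] })
	rand.Shuffle(len(otherDc), func(i, j int) { otherDc[i], otherDc[j] = otherDc[j], otherDc[i] })
	return append(sameDc, otherDc...)
}

func main() {
	urls := orderedUrls("dc1", []Location{
		{Url: "10.0.0.1:8080", DataCenter: "dc1"},
		{Url: "10.0.1.1:8080", DataCenter: "dc2"},
		{Url: "10.0.0.2:8080", DataCenter: "dc1"},
	})
	fmt.Println(urls) // the two dc1 servers always precede the dc2 server
}
```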