Merge branch 'master' into bptree

Chris Lu 2021-09-19 23:56:59 -07:00
commit b3d88180ca
16 changed files with 152 additions and 67 deletions


@@ -6,57 +6,72 @@ on:
 jobs:
-  build-latest-docker-image:
-    runs-on: [ubuntu-latest]
-    steps:
-      -
-        name: Checkout
-        uses: actions/checkout@v2
-      -
-        name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@v3
-        with:
-          images: |
-            chrislusf/seaweedfs
-            ghcr.io/chrislusf/seaweedfs
-          tags: |
-            type=raw,value=latest
-          labels: |
-            org.opencontainers.image.title=seaweedfs
-            org.opencontainers.image.vendor=Chris Lu
-      -
-        name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      -
-        name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          buildkitd-flags: "--debug"
-      -
-        name: Login to Docker Hub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      -
-        name: Login to GHCR
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
-        with:
-          registry: ghcr.io
-          username: ${{ secrets.GHCR_USERNAME }}
-          password: ${{ secrets.GHCR_TOKEN }}
-      -
-        name: Build
-        uses: docker/build-push-action@v2
-        with:
-          context: ./docker
-          push: ${{ github.event_name != 'pull_request' }}
-          file: ./docker/Dockerfile
-          platforms: linux/amd64, linux/arm, linux/arm64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
+  cleanup:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Delete old release assets
+        uses: mknejp/delete-release-assets@v1
+        with:
+          token: ${{ github.token }}
+          tag: dev
+          fail-if-no-assets: false
+          assets: |
+            weed-*
+
+  build_dev:
+    needs: cleanup
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        goos: [linux, windows, darwin, freebsd]
+        goarch: [amd64, arm, arm64]
+        exclude:
+          - goarch: arm
+            goos: darwin
+          - goarch: 386
+            goos: darwin
+          - goarch: arm
+            goos: windows
+          - goarch: arm64
+            goos: windows
+    steps:
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v2
+
+      - name: Set BUILD_TIME env
+        run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
+
+      - name: Go Release Binaries Large Disk
+        uses: wangyoucao577/go-release-action@v1.20
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: ${{ matrix.goos }}
+          goarch: ${{ matrix.goarch }}
+          release_tag: dev
+          overwrite: true
+          pre_command: export CGO_ENABLED=0
+          build_flags: -tags 5BytesOffset # optional, default is
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed-large-disk
+          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
+
+      - name: Go Release Binaries Normal Volume Size
+        uses: wangyoucao577/go-release-action@v1.20
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: ${{ matrix.goos }}
+          goarch: ${{ matrix.goarch }}
+          release_tag: dev
+          overwrite: true
+          pre_command: export CGO_ENABLED=0
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed-normal-disk
+          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"


@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.68"
-version: "2.68"
+appVersion: "2.69"
+version: "2.69"


@@ -22,8 +22,8 @@ type RemoteGatewayOptions struct {
 	timeAgo                  *time.Duration
 	createBucketAt           *string
 	createBucketRandomSuffix *bool
 	include                  *string
 	exclude                  *string
 	mappings                 *remote_pb.RemoteStorageMapping
 	remoteConfs              map[string]*remote_pb.RemoteConf


@@ -13,12 +13,11 @@ import (
 )

 type RemoteSyncOptions struct {
 	filerAddress       *string
 	grpcDialOption     grpc.DialOption
 	readChunkFromFiler *bool
 	timeAgo            *time.Duration
 	dir                *string
 }

 var _ = filer_pb.FilerClient(&RemoteSyncOptions{})


@@ -27,6 +27,8 @@ type ListAllMyBucketsResult struct {
 func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+	glog.V(3).Infof("ListBucketsHandler")
+
 	var identity *Identity
 	var s3Err s3err.ErrorCode
 	if s3a.iam.isEnabled() {
@@ -75,6 +77,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
 func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("PutBucketHandler %s", bucket)

 	// avoid duplicated buckets
 	errCode := s3err.ErrNone
@@ -128,6 +131,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
 func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("DeleteBucketHandler %s", bucket)

 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, err, r)
@@ -162,6 +166,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
 func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("HeadBucketHandler %s", bucket)

 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, err, r)


@@ -27,6 +27,8 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
 	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
+	glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject)
+
 	if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && isReplace(r) {
 		fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject))
 		dir, name := fullPath.DirAndName()
@@ -139,6 +141,8 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 		return
 	}
+	glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d", srcBucket, srcObject, dstBucket, partID)
+
 	// check partID with maximum part ID for multipart objects
 	if partID > globalMaxPartID {
 		s3err.WriteErrorResponse(w, s3err.ErrInvalidMaxParts, r)


@@ -41,6 +41,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
 	bucket, object := getBucketAndObject(r)
+	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)

 	_, err := validateContentMd5(r.Header)
 	if err != nil {
@@ -118,6 +119,7 @@ func urlPathEscape(object string) string {
 func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, object := getBucketAndObject(r)
+	glog.V(3).Infof("GetObjectHandler %s %s", bucket, object)

 	if strings.HasSuffix(r.URL.Path, "/") {
 		s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
@@ -134,6 +136,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
 func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, object := getBucketAndObject(r)
+	glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object)

 	destUrl := fmt.Sprintf("http://%s%s/%s%s",
 		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))
@@ -145,6 +148,7 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request
 func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, object := getBucketAndObject(r)
+	glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object)

 	destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
 		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))
@@ -192,6 +196,7 @@ type DeleteObjectsResponse struct {
 func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)

 	deleteXMLBytes, err := ioutil.ReadAll(r.Body)
 	if err != nil {
@@ -291,7 +296,7 @@ var passThroughHeaders = []string{
 func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
-	glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
+	glog.V(3).Infof("s3 proxying %s to %s", r.Method, destUrl)

 	proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body)


@@ -5,6 +5,7 @@ import (
 	"encoding/base64"
 	"errors"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/s3api/policy"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"github.com/dustin/go-humanize"
@@ -24,6 +25,8 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
 	bucket := mux.Vars(r)["bucket"]
+	glog.V(3).Infof("PostPolicyBucketHandler %s", bucket)
+
 	reader, err := r.MultipartReader()
 	if err != nil {
 		s3err.WriteErrorResponse(w, s3err.ErrMalformedPOSTRequest, r)


@@ -0,0 +1,37 @@
+package s3api
+
+import (
+	"net/http"
+)
+
+// PutObjectAclHandler Put object ACL
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html
+func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Request) {
+
+	w.WriteHeader(http.StatusNoContent)
+
+}
+
+// PutObjectRetentionHandler Put object Retention
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html
+func (s3a *S3ApiServer) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
+
+	w.WriteHeader(http.StatusNoContent)
+
+}
+
+// PutObjectLegalHoldHandler Put object Legal Hold
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html
+func (s3a *S3ApiServer) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
+
+	w.WriteHeader(http.StatusNoContent)
+
+}
+
+// PutObjectLockConfigurationHandler Put object Lock configuration
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html
+func (s3a *S3ApiServer) PutObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) {
+
+	w.WriteHeader(http.StatusNoContent)
+
+}
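The four handlers in this new file are deliberate no-ops: they acknowledge the request with 204 No Content and persist nothing, so clients that insist on setting ACL, retention, legal-hold, or object-lock settings can proceed instead of failing. A minimal standalone sketch of that behaviour (not the gateway's own wiring; the stub function below only mirrors the handler bodies shown above and assumes nothing beyond the standard library):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// aclStub mirrors the new stub handlers: acknowledge the request with
// 204 No Content and do nothing else.
func aclStub(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	// Simulate the kind of request the S3 gateway would route to these stubs.
	req := httptest.NewRequest(http.MethodPut, "/bucket/object?acl=", nil)
	rec := httptest.NewRecorder()

	aclStub(rec, req)
	fmt.Println(rec.Code) // 204
}

Since 204 is a success status, a client that only checks the response code will treat the call as having succeeded.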


@@ -17,6 +17,7 @@ import (
 func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, object := getBucketAndObject(r)
+	glog.V(3).Infof("GetObjectTaggingHandler %s %s", bucket, object)

 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
 	dir, name := target.DirAndName()
@@ -42,6 +43,7 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R
 func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, object := getBucketAndObject(r)
+	glog.V(3).Infof("PutObjectTaggingHandler %s %s", bucket, object)

 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
 	dir, name := target.DirAndName()
@@ -97,6 +99,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
 func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 	bucket, object := getBucketAndObject(r)
+	glog.V(3).Infof("DeleteObjectTaggingHandler %s %s", bucket, object)

 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
 	dir, name := target.DirAndName()


@@ -40,6 +40,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 	// collect parameters
 	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("ListObjectsV2Handler %s", bucket)

 	originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
@@ -95,6 +96,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 	// collect parameters
 	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("ListObjectsV1Handler %s", bucket)

 	originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())


@@ -90,6 +90,15 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 		// DeleteObjectTagging
 		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "")

+		// PutObjectACL
+		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectAclHandler, ACTION_WRITE), "PUT")).Queries("acl", "")
+		// PutObjectRetention
+		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectRetentionHandler, ACTION_WRITE), "PUT")).Queries("retention", "")
+		// PutObjectLegalHold
+		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLegalHoldHandler, ACTION_WRITE), "PUT")).Queries("legal-hold", "")
+		// PutObjectLockConfiguration
+		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE), "PUT")).Queries("object-lock", "")
+
 		// CopyObject
 		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY"))
 		// PutObject


@@ -177,6 +177,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 			Directory: dir,
 			Name:      name,
 		}); err != nil {
+			glog.Errorf("DownloadToLocal %s: %v", entry.FullPath, err)
 			return fmt.Errorf("cache %s: %v", entry.FullPath, err)
 		} else {
 			chunks = resp.Entry.Chunks


@@ -49,7 +49,7 @@ func NewCommandEnv(options ShellOptions) *CommandEnv {
 		MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, "", "", pb.ServerAddresses(*options.Masters).ToAddresses()),
 		option:       options,
 	}
-	ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient)
+	ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient, "admin")
 	return ce
 }


@@ -5,7 +5,7 @@ import (
 )

 var (
-	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.68)
+	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.69)
 	VERSION        = sizeLimit + " " + VERSION_NUMBER
 	COMMIT         = ""
 )


@@ -14,7 +14,6 @@ const (
 	RenewInteval     = 4 * time.Second
 	SafeRenewInteval = 3 * time.Second
 	InitLockInteval  = 1 * time.Second
-	AdminLockName    = "admin"
 )

 type ExclusiveLocker struct {
@@ -22,13 +21,16 @@ type ExclusiveLocker struct {
 	lockTsNs     int64
 	isLocking    bool
 	masterClient *wdclient.MasterClient
+	lockName     string
 }

-func NewExclusiveLocker(masterClient *wdclient.MasterClient) *ExclusiveLocker {
+func NewExclusiveLocker(masterClient *wdclient.MasterClient, lockName string) *ExclusiveLocker {
 	return &ExclusiveLocker{
 		masterClient: masterClient,
+		lockName:     lockName,
 	}
 }

 func (l *ExclusiveLocker) IsLocking() bool {
 	return l.isLocking
 }
@@ -55,7 +57,7 @@ func (l *ExclusiveLocker) RequestLock(clientName string) {
 		resp, err := client.LeaseAdminToken(ctx, &master_pb.LeaseAdminTokenRequest{
 			PreviousToken:    atomic.LoadInt64(&l.token),
 			PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
-			LockName:         AdminLockName,
+			LockName:         l.lockName,
 			ClientName:       clientName,
 		})
 		if err == nil {
@@ -83,7 +85,7 @@ func (l *ExclusiveLocker) RequestLock(clientName string) {
 		resp, err := client.LeaseAdminToken(ctx2, &master_pb.LeaseAdminTokenRequest{
 			PreviousToken:    atomic.LoadInt64(&l.token),
 			PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
-			LockName:         AdminLockName,
+			LockName:         l.lockName,
 			ClientName:       clientName,
 		})
 		if err == nil {
@@ -114,7 +116,7 @@ func (l *ExclusiveLocker) ReleaseLock() {
 		client.ReleaseAdminToken(ctx, &master_pb.ReleaseAdminTokenRequest{
 			PreviousToken:    atomic.LoadInt64(&l.token),
 			PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
-			LockName:         AdminLockName,
+			LockName:         l.lockName,
 		})
 		return nil
 	})
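With the lock name now supplied by the caller instead of the removed AdminLockName constant, components other than the admin shell can lease independently named locks from the master. A minimal sketch, assuming a connected *wdclient.MasterClient and the exclusive_locks import path used in this repository; the "maintenance" lock name is hypothetical and only illustrates the new parameter:

package locks_example

import (
	"github.com/chrislusf/seaweedfs/weed/wdclient"
	"github.com/chrislusf/seaweedfs/weed/wdclient/exclusive_locks"
)

// leaseLocks shows the updated constructor: the lock name that used to be the
// package-level AdminLockName constant is now passed in by each caller.
func leaseLocks(masterClient *wdclient.MasterClient) {
	// Same lease the admin shell takes (see the NewCommandEnv change above).
	adminLocker := exclusive_locks.NewExclusiveLocker(masterClient, "admin")
	adminLocker.RequestLock("shell-client-1") // lease the lock under this client name
	defer adminLocker.ReleaseLock()

	// A second, independently named lock (hypothetical name) no longer
	// competes with the admin shell for the same "admin" lease.
	maintenanceLocker := exclusive_locks.NewExclusiveLocker(masterClient, "maintenance")
	_ = maintenanceLocker
}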