mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00

Merge branch 'master' into bufix/validate-tags-on-copy

commit d7c3493d15

go.mod (2 lines changed)
@@ -45,7 +45,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/btree v1.1.1
+	github.com/google/btree v1.0.1
 	github.com/google/go-cmp v0.5.8 // indirect
 	github.com/google/uuid v1.3.0
 	github.com/google/wire v0.5.0 // indirect

go.sum (5 lines changed)

@@ -401,8 +401,8 @@ github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k
 github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.1 h1:OMJCfqwmbcwNihVCadalGMZiHclz5T0mRv12gnIaV0Q=
-github.com/google/btree v1.1.1/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=

@@ -1520,7 +1520,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -2,9 +2,10 @@ package command

 import (
 	"fmt"
-	flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
 	"os"
 	"strings"
+
+	flag "github.com/chrislusf/seaweedfs/weed/util/fla9"
 )

 var Commands = []*Command{

@@ -36,6 +37,7 @@ var Commands = []*Command{
 	cmdScaffold,
 	cmdServer,
 	cmdShell,
+	cmdUpdate,
 	cmdUpload,
 	cmdVersion,
 	cmdVolume,

weed/command/update.go (new file, 382 lines)

@@ -0,0 +1,382 @@
package command

import (
	"archive/tar"
	"archive/zip"
	"bytes"
	"compress/gzip"
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/util"
	"golang.org/x/net/context/ctxhttp"
)

// copied from https://github.com/restic/restic/tree/master/internal/selfupdate

// Release collects data about a single release on GitHub.
type Release struct {
	Name        string    `json:"name"`
	TagName     string    `json:"tag_name"`
	Draft       bool      `json:"draft"`
	PreRelease  bool      `json:"prerelease"`
	PublishedAt time.Time `json:"published_at"`
	Assets      []Asset   `json:"assets"`

	Version string `json:"-"` // set manually in the code
}

// Asset is a file uploaded and attached to a release.
type Asset struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
	URL  string `json:"url"`
}

const githubAPITimeout = 30 * time.Second

// githubError is returned by the GitHub API, e.g. for rate-limiting.
type githubError struct {
	Message string
}

// the default build is not the full version
var isFullVersion = false

var (
	updateOpt UpdateOptions
)

type UpdateOptions struct {
	dir     *string
	name    *string
	Version *string
}

func init() {
	path, _ := os.Executable()
	_, name := filepath.Split(path)
	updateOpt.dir = cmdUpdate.Flag.String("dir", filepath.Dir(path), "directory to save the new weed.")
	updateOpt.name = cmdUpdate.Flag.String("name", name, "name of the new weed. On windows, the name shouldn't be the same as the original name.")
	updateOpt.Version = cmdUpdate.Flag.String("version", "0", "specific version of weed you want to download. If not specified, get the latest version.")
	cmdUpdate.Run = runUpdate
}

var cmdUpdate = &Command{
	UsageLine: "update [-dir=/path/to/dir] [-name=name] [-version=x.xx]",
	Short:     "get latest or specific version from https://github.com/chrislusf/seaweedfs",
	Long:      `get latest or specific version from https://github.com/chrislusf/seaweedfs`,
}

func runUpdate(cmd *Command, args []string) bool {
	path, _ := os.Executable()
	_, name := filepath.Split(path)

	if *updateOpt.dir != "" {
		if err := util.TestFolderWritable(util.ResolvePath(*updateOpt.dir)); err != nil {
			glog.Fatalf("Check Folder(-dir) Writable %s : %s", *updateOpt.dir, err)
			return false
		}
	} else {
		*updateOpt.dir = filepath.Dir(path)
	}

	if *updateOpt.name == "" {
		*updateOpt.name = name
	}

	target := filepath.Join(*updateOpt.dir, *updateOpt.name)

	if runtime.GOOS == "windows" {
		if target == path {
			glog.Fatalf("On windows, the name of the new weed shouldn't be the same as the original name.")
			return false
		}
	}

	glog.V(0).Infof("new weed will be saved to %s", target)

	_, err := downloadRelease(context.Background(), target, *updateOpt.Version)
	if err != nil {
		glog.Errorf("unable to download weed: %v", err)
		return false
	}
	return true
}

func downloadRelease(ctx context.Context, target string, ver string) (version string, err error) {
	currentVersion := util.VERSION_NUMBER
	rel, err := GitHubLatestRelease(ctx, ver, "chrislusf", "seaweedfs")
	if err != nil {
		return "", err
	}

	if rel.Version == currentVersion {
		if ver == "0" {
			glog.V(0).Infof("weed is up to date")
		} else {
			glog.V(0).Infof("no need to download the same version of weed")
		}
		return currentVersion, nil
	}

	glog.V(0).Infof("download version: %s", rel.Version)

	largeDiskSuffix := ""
	if util.VolumeSizeLimitGB == 8000 {
		largeDiskSuffix = "_large_disk"
	}

	fullSuffix := ""
	if isFullVersion {
		fullSuffix = "_full"
	}

	ext := "tar.gz"
	if runtime.GOOS == "windows" {
		ext = "zip"
	}

	suffix := fmt.Sprintf("%s_%s%s%s.%s", runtime.GOOS, runtime.GOARCH, fullSuffix, largeDiskSuffix, ext)
	md5Filename := fmt.Sprintf("%s.md5", suffix)
	_, md5Val, err := getGithubDataFile(ctx, rel.Assets, md5Filename)
	if err != nil {
		return "", err
	}

	downloadFilename, buf, err := getGithubDataFile(ctx, rel.Assets, suffix)
	if err != nil {
		return "", err
	}

	md5Ctx := md5.New()
	md5Ctx.Write(buf)
	binaryMd5 := md5Ctx.Sum(nil)
	if hex.EncodeToString(binaryMd5) != string(md5Val[0:32]) {
		glog.Errorf("md5:'%s' '%s'", hex.EncodeToString(binaryMd5), string(md5Val[0:32]))
		err = fmt.Errorf("binary md5sum doesn't match")
		return "", err
	}

	err = extractToFile(buf, downloadFilename, target)
	if err != nil {
		return "", err
	} else {
		glog.V(0).Infof("successfully updated weed to version %v\n", rel.Version)
	}

	return rel.Version, nil
}

// GitHubLatestRelease uses the GitHub API to get information about the latest
// or a specific release of a repository.
func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (Release, error) {
	ctx, cancel := context.WithTimeout(ctx, githubAPITimeout)
	defer cancel()

	url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases", owner, repo)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return Release{}, err
	}

	// pin API version 3
	req.Header.Set("Accept", "application/vnd.github.v3+json")

	res, err := ctxhttp.Do(ctx, http.DefaultClient, req)
	if err != nil {
		return Release{}, err
	}

	if res.StatusCode != http.StatusOK {
		content := res.Header.Get("Content-Type")
		if strings.Contains(content, "application/json") {
			// try to decode error message
			var msg githubError
			jerr := json.NewDecoder(res.Body).Decode(&msg)
			if jerr == nil {
				return Release{}, fmt.Errorf("unexpected status %v (%v) returned, message:\n %v", res.StatusCode, res.Status, msg.Message)
			}
		}

		_ = res.Body.Close()
		return Release{}, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status)
	}

	buf, err := ioutil.ReadAll(res.Body)
	if err != nil {
		_ = res.Body.Close()
		return Release{}, err
	}

	err = res.Body.Close()
	if err != nil {
		return Release{}, err
	}

	var release Release
	var releaseList []Release
	err = json.Unmarshal(buf, &releaseList)
	if err != nil {
		return Release{}, err
	}
	if ver == "0" {
		release = releaseList[0]
		glog.V(0).Infof("latest version is %v\n", release.TagName)
	} else {
		for _, r := range releaseList {
			if r.TagName == ver {
				release = r
				break
			}
		}
	}

	if release.TagName == "" {
		return Release{}, fmt.Errorf("can not find the specific version")
	}

	release.Version = release.TagName
	return release, nil
}

func getGithubData(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}

	// request binary data
	req.Header.Set("Accept", "application/octet-stream")

	res, err := ctxhttp.Do(ctx, http.DefaultClient, req)
	if err != nil {
		return nil, err
	}

	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status)
	}

	buf, err := ioutil.ReadAll(res.Body)
	if err != nil {
		_ = res.Body.Close()
		return nil, err
	}

	err = res.Body.Close()
	if err != nil {
		return nil, err
	}

	return buf, nil
}

func getGithubDataFile(ctx context.Context, assets []Asset, suffix string) (filename string, data []byte, err error) {
	var url string
	for _, a := range assets {
		if strings.HasSuffix(a.Name, suffix) {
			url = a.URL
			filename = a.Name
			break
		}
	}

	if url == "" {
		return "", nil, fmt.Errorf("unable to find file with suffix %v", suffix)
	}

	glog.V(0).Infof("download %v\n", filename)
	data, err = getGithubData(ctx, url)
	if err != nil {
		return "", nil, err
	}

	return filename, data, nil
}

func extractToFile(buf []byte, filename, target string) error {
	var rd io.Reader = bytes.NewReader(buf)

	switch filepath.Ext(filename) {
	case ".gz":
		gr, err := gzip.NewReader(rd)
		if err != nil {
			return err
		}
		defer gr.Close()
		trd := tar.NewReader(gr)
		// note: on error the returned header is nil, so log the archive name
		// instead of dereferencing it
		if _, terr := trd.Next(); terr != nil {
			glog.Errorf("uncompress file(%s) failed: %s", filename, terr)
			return terr
		}
		rd = trd
	case ".zip":
		zrd, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
		if err != nil {
			return err
		}

		if len(zrd.File) != 1 {
			return fmt.Errorf("ZIP archive contains more than one file")
		}

		file, err := zrd.File[0].Open()
		if err != nil {
			return err
		}

		defer func() {
			_ = file.Close()
		}()

		rd = file
	}

	// Write everything to a temp file
	dir := filepath.Dir(target)
	new, err := ioutil.TempFile(dir, "weed")
	if err != nil {
		return err
	}

	n, err := io.Copy(new, rd)
	if err != nil {
		_ = new.Close()
		_ = os.Remove(new.Name())
		return err
	}
	if err = new.Sync(); err != nil {
		return err
	}
	if err = new.Close(); err != nil {
		return err
	}

	mode := os.FileMode(0755)
	// attempt to find the original mode
	if fi, err := os.Lstat(target); err == nil {
		mode = fi.Mode()
	}

	// Rename the temp file to the final location atomically.
	if err := os.Rename(new.Name(), target); err != nil {
		return err
	}

	glog.V(0).Infof("saved %d bytes in %v\n", n, target)
	return os.Chmod(target, mode)
}

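A note on the verification step above: the .md5 release asset is expected to carry the hex digest in its first 32 characters (the conventional "<digest>  <filename>" layout), which is why only md5Val[0:32] is compared. A minimal standalone sketch of the same verify-then-atomic-rename pattern (hypothetical helper, not part of the commit):

package selfupdate

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// verifyAndInstall checks buf against an expected hex md5 digest, writes it to
// a temp file in the target's directory, and renames it into place; the rename
// is atomic because source and destination sit on the same filesystem.
func verifyAndInstall(buf []byte, wantHexMd5, target string) error {
	sum := md5.Sum(buf)
	if hex.EncodeToString(sum[:]) != wantHexMd5 {
		return fmt.Errorf("md5 mismatch")
	}
	tmp, err := ioutil.TempFile(filepath.Dir(target), "weed")
	if err != nil {
		return err
	}
	if _, err := tmp.Write(buf); err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), target)
}
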
weed/command/update_full.go (new file, 9 lines)

@@ -0,0 +1,9 @@
//go:build elastic && ydb && gocdk && hdfs
// +build elastic,ydb,gocdk,hdfs

package command

// set to true when the full set of build tags is enabled
func init() {
	isFullVersion = true
}

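This is the tag-gated-init pattern: update.go declares the default (isFullVersion = false), and this file, compiled only when all four tags are given (for example with `go build -tags "elastic ydb gocdk hdfs"` — the command line is illustrative, the tags are the ones in the header), flips it in init(). The second `// +build` line is the pre-Go-1.17 spelling of the same constraint. The pattern in isolation (hypothetical file and tag names):

// defaults.go — always compiled
package demo

var featureEnabled = false

// feature_on.go — compiled only with: go build -tags feature
//go:build feature

package demo

func init() { featureEnabled = true }
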
@@ -3,7 +3,6 @@ package filer
 import (
 	"bytes"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"io"
 	"math"
 	"net/url"

@@ -11,6 +10,8 @@ import (
 	"sync"
 	"time"

+	"github.com/chrislusf/seaweedfs/weed/wdclient"
+
 	"github.com/golang/protobuf/proto"

 	"github.com/chrislusf/seaweedfs/weed/glog"

@@ -63,14 +64,14 @@ func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chun

 	resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk)
 	if err != nil {
-		return chunks, nil, err
+		return dataChunks, nil, err
 	}

 	manifestChunks = append(manifestChunks, chunk)
 	// recursive
 	subDataChunks, subManifestChunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks, startOffset, stopOffset)
 	if subErr != nil {
-		return chunks, nil, subErr
+		return dataChunks, nil, subErr
 	}
 	dataChunks = append(dataChunks, subDataChunks...)
 	manifestChunks = append(manifestChunks, subManifestChunks...)

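The two one-line changes above fix which slice is returned on error: the function's named result dataChunks, not the chunks parameter it was handed. A compact illustration of the pitfall (hypothetical function, not from the commit):

package demo

import "fmt"

// partition splits in into evens and odds; on error it must return the named
// results — returning the input parameter would hand back unprocessed data.
func partition(in []int) (evens []int, odds []int, err error) {
	for _, v := range in {
		if v < 0 {
			// buggy variant: "return in, nil, err" compiles, but returns the
			// raw input instead of what was accumulated so far
			return evens, odds, fmt.Errorf("negative value %d", v)
		}
		if v%2 == 0 {
			evens = append(evens, v)
		} else {
			odds = append(odds, v)
		}
	}
	return evens, odds, nil
}
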
@@ -3,11 +3,12 @@ package filer
 import (
 	"bytes"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/wdclient"
-	"golang.org/x/exp/slices"
 	"math"
 	"sync"

+	"github.com/chrislusf/seaweedfs/weed/wdclient"
+	"golang.org/x/exp/slices"
+
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

@@ -248,6 +249,9 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
 func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) {

+	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
+	if err != nil {
+		return
+	}
+
 	visibles2 := readResolvedChunks(chunks)

@@ -2,13 +2,12 @@ package filer

 import (
 	"bytes"
-	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
 	"github.com/golang/protobuf/jsonpb"
 	"github.com/golang/protobuf/proto"
 	"io"
 )

-func ParseS3ConfigurationFromBytes(content []byte, config *iam_pb.S3ApiConfiguration) error {
+func ParseS3ConfigurationFromBytes[T proto.Message](content []byte, config T) error {
 	if err := jsonpb.Unmarshal(bytes.NewBuffer(content), config); err != nil {
 		return err
 	}

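With the type parameter, the same helper now decodes any generated message (this requires Go 1.18 or newer for generics). A sketch of both call shapes, mirroring this commit's own call sites in the iam code and the circuit breaker below; the wrapper function itself is hypothetical:

package demo

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
)

func loadConfigs(iamBytes, cbBytes []byte) error {
	// the type parameter T is inferred from the second argument in each call
	iamCfg := &iam_pb.S3ApiConfiguration{}
	if err := filer.ParseS3ConfigurationFromBytes(iamBytes, iamCfg); err != nil {
		return err
	}
	cbCfg := &s3_pb.S3CircuitBreakerConfig{}
	if err := filer.ParseS3ConfigurationFromBytes(cbBytes, cbCfg); err != nil {
		return err
	}
	return nil
}
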
@@ -23,3 +23,13 @@ message S3ConfigureRequest {

 message S3ConfigureResponse {
 }
+
+message S3CircuitBreakerConfig {
+    S3CircuitBreakerOptions global = 1;
+    map<string, S3CircuitBreakerOptions> buckets = 2;
+}
+
+message S3CircuitBreakerOptions {
+    bool enabled = 1;
+    map<string, int64> actions = 2;
+}

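The server reads this configuration as JSON through jsonpb (ParseS3ConfigurationFromBytes above), from /etc/s3/circuit_breaker.json in the filer (paths per s3_config.go below). A circuit_breaker.json of roughly this shape should therefore round-trip into these two messages; the action keys follow the "<action>:<limit type>" Concat convention below, and the names and numbers are illustrative, assuming ACTION_READ resolves to "Read":

{
  "global": {
    "enabled": true,
    "actions": {
      "Read:Count": 500,
      "Write:MB": 1024
    }
  },
  "buckets": {
    "mybucket": {
      "enabled": true,
      "actions": {
        "Read:Count": 100
      }
    }
  }
}
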
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.26.0
-// 	protoc        v3.17.3
+// 	protoc-gen-go v1.28.0
+// 	protoc        v3.21.1
 // source: s3.proto

 package s3_pb

@@ -105,6 +105,116 @@ func (*S3ConfigureResponse) Descriptor() ([]byte, []int) {
 	return file_s3_proto_rawDescGZIP(), []int{1}
 }

+type S3CircuitBreakerConfig struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Global  *S3CircuitBreakerOptions            `protobuf:"bytes,1,opt,name=global,proto3" json:"global,omitempty"`
+	Buckets map[string]*S3CircuitBreakerOptions `protobuf:"bytes,2,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *S3CircuitBreakerConfig) Reset() {
+	*x = S3CircuitBreakerConfig{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_s3_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *S3CircuitBreakerConfig) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*S3CircuitBreakerConfig) ProtoMessage() {}
+
+func (x *S3CircuitBreakerConfig) ProtoReflect() protoreflect.Message {
+	mi := &file_s3_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use S3CircuitBreakerConfig.ProtoReflect.Descriptor instead.
+func (*S3CircuitBreakerConfig) Descriptor() ([]byte, []int) {
+	return file_s3_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *S3CircuitBreakerConfig) GetGlobal() *S3CircuitBreakerOptions {
+	if x != nil {
+		return x.Global
+	}
+	return nil
+}
+
+func (x *S3CircuitBreakerConfig) GetBuckets() map[string]*S3CircuitBreakerOptions {
+	if x != nil {
+		return x.Buckets
+	}
+	return nil
+}
+
+type S3CircuitBreakerOptions struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Enabled bool             `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	Actions map[string]int64 `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+}
+
+func (x *S3CircuitBreakerOptions) Reset() {
+	*x = S3CircuitBreakerOptions{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_s3_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *S3CircuitBreakerOptions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*S3CircuitBreakerOptions) ProtoMessage() {}
+
+func (x *S3CircuitBreakerOptions) ProtoReflect() protoreflect.Message {
+	mi := &file_s3_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use S3CircuitBreakerOptions.ProtoReflect.Descriptor instead.
+func (*S3CircuitBreakerOptions) Descriptor() ([]byte, []int) {
+	return file_s3_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *S3CircuitBreakerOptions) GetEnabled() bool {
+	if x != nil {
+		return x.Enabled
+	}
+	return false
+}
+
+func (x *S3CircuitBreakerOptions) GetActions() map[string]int64 {
+	if x != nil {
+		return x.Actions
+	}
+	return nil
+}
+
 var File_s3_proto protoreflect.FileDescriptor

 var file_s3_proto_rawDesc = []byte{

@@ -116,18 +226,47 @@ var file_s3_proto_rawDesc = []byte{
 	0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1a, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
 	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
 	0x74, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65,
-	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5f, 0x0a, 0x09, 0x53, 0x65, 0x61, 0x77,
-	0x65, 0x65, 0x64, 0x53, 0x33, 0x12, 0x52, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
-	0x72, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
-	0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71,
-	0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67,
-	0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x61,
-	0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x07, 0x53,
-	0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
-	0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
-	0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x73,
-	0x33, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x87, 0x02, 0x0a, 0x16, 0x53, 0x33, 0x43,
+	0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+	0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
+	0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61,
+	0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62,
+	0x61, 0x6c, 0x12, 0x4b, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
+	0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61,
+	0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x1a,
+	0x61, 0x0a, 0x0c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+	0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
+	0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+	0x38, 0x01, 0x22, 0xbd, 0x01, 0x0a, 0x17, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74,
+	0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18,
+	0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
+	0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6d, 0x65, 0x73, 0x73,
+	0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75,
+	0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61,
+	0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+	0x38, 0x01, 0x32, 0x5f, 0x0a, 0x09, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x53, 0x33, 0x12,
+	0x52, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x20, 0x2e, 0x6d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f,
+	0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
+	0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33,
+	0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+	0x65, 0x22, 0x00, 0x42, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
+	0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x07, 0x53, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+	0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72,
+	0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
+	0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x33, 0x5f, 0x70, 0x62, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }

@@ -142,19 +281,27 @@ func file_s3_proto_rawDescGZIP() []byte {
 	return file_s3_proto_rawDescData
 }

-var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
 var file_s3_proto_goTypes = []interface{}{
-	(*S3ConfigureRequest)(nil),  // 0: messaging_pb.S3ConfigureRequest
-	(*S3ConfigureResponse)(nil), // 1: messaging_pb.S3ConfigureResponse
+	(*S3ConfigureRequest)(nil),      // 0: messaging_pb.S3ConfigureRequest
+	(*S3ConfigureResponse)(nil),     // 1: messaging_pb.S3ConfigureResponse
+	(*S3CircuitBreakerConfig)(nil),  // 2: messaging_pb.S3CircuitBreakerConfig
+	(*S3CircuitBreakerOptions)(nil), // 3: messaging_pb.S3CircuitBreakerOptions
+	nil,                             // 4: messaging_pb.S3CircuitBreakerConfig.BucketsEntry
+	nil,                             // 5: messaging_pb.S3CircuitBreakerOptions.ActionsEntry
 }
 var file_s3_proto_depIdxs = []int32{
-	0, // 0: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest
-	1, // 1: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse
-	1, // [1:2] is the sub-list for method output_type
-	0, // [0:1] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
+	3, // 0: messaging_pb.S3CircuitBreakerConfig.global:type_name -> messaging_pb.S3CircuitBreakerOptions
+	4, // 1: messaging_pb.S3CircuitBreakerConfig.buckets:type_name -> messaging_pb.S3CircuitBreakerConfig.BucketsEntry
+	5, // 2: messaging_pb.S3CircuitBreakerOptions.actions:type_name -> messaging_pb.S3CircuitBreakerOptions.ActionsEntry
+	3, // 3: messaging_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> messaging_pb.S3CircuitBreakerOptions
+	0, // 4: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest
+	1, // 5: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse
+	5, // [5:6] is the sub-list for method output_type
+	4, // [4:5] is the sub-list for method input_type
+	4, // [4:4] is the sub-list for extension type_name
+	4, // [4:4] is the sub-list for extension extendee
+	0, // [0:4] is the sub-list for field type_name
 }

 func init() { file_s3_proto_init() }

@@ -187,6 +334,30 @@ func file_s3_proto_init() {
 				return nil
 			}
 		}
+		file_s3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*S3CircuitBreakerConfig); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_s3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*S3CircuitBreakerOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
 	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{

@@ -194,7 +365,7 @@ func file_s3_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_s3_proto_rawDesc,
 			NumEnums:      0,
-			NumMessages:   2,
+			NumMessages:   6,
 			NumExtensions: 0,
 			NumServices:   1,
 		},

@@ -5,6 +5,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

@@ -22,12 +23,11 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
 		if message.NewParentPath != "" {
 			dir = message.NewParentPath
 		}
-		if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile {
-			if err := s3a.iam.LoadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil {
-				return err
-			}
-			glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile)
-		}
+		fileName := message.NewEntry.Name
+		content := message.NewEntry.Content
+
+		_ = s3a.onIamConfigUpdate(dir, fileName, content)
+		_ = s3a.onCircuitBreakerConfigUpdate(dir, fileName, content)

 		return nil
 	}

@@ -38,5 +38,26 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
 		glog.V(0).Infof("iam follow metadata changes: %v", err)
 		return true
 	})
 }
+
+//reload iam config
+func (s3a *S3ApiServer) onIamConfigUpdate(dir, filename string, content []byte) error {
+	if dir == filer.IamConfigDirecotry && filename == filer.IamIdentityFile {
+		if err := s3a.iam.LoadS3ApiConfigurationFromBytes(content); err != nil {
+			return err
+		}
+		glog.V(0).Infof("updated %s/%s", dir, filename)
+	}
+	return nil
+}
+
+//reload circuit breaker config
+func (s3a *S3ApiServer) onCircuitBreakerConfigUpdate(dir, filename string, content []byte) error {
+	if dir == s3_constants.CircuitBreakerConfigDir && filename == s3_constants.CircuitBreakerConfigFile {
+		if err := s3a.cb.LoadS3ApiConfigurationFromBytes(content); err != nil {
+			return err
+		}
+		glog.V(0).Infof("updated %s/%s", dir, filename)
+	}
+	return nil
+}

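Each hook above checks the directory and file name itself, so the event callback can hand every metadata event to both unconditionally; the returned errors are discarded (`_ =`), presumably so that one malformed config write cannot terminate the long-running metadata subscription.
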
weed/s3api/s3_constants/s3_config.go (new file, 18 lines)

@@ -0,0 +1,18 @@
package s3_constants

import (
	"strings"
)

var (
	CircuitBreakerConfigDir  = "/etc/s3"
	CircuitBreakerConfigFile = "circuit_breaker.json"
	AllowedActions           = []string{ACTION_READ, ACTION_WRITE, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN}
	LimitTypeCount           = "Count"
	LimitTypeBytes           = "MB"
	Separator                = ":"
)

func Concat(elements ...string) string {
	return strings.Join(elements, Separator)
}

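Concat builds every limiter lookup key. For example (the output strings assume ACTION_READ resolves to "Read"):

	Concat(ACTION_READ, LimitTypeCount)             // "Read:Count" — an action key inside the config
	Concat("mybucket", ACTION_READ, LimitTypeCount) // "mybucket:Read:Count" — the per-bucket counter key
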
weed/s3api/s3api_circuit_breaker.go (new file, 182 lines)

@@ -0,0 +1,182 @@
package s3api

import (
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
	"github.com/gorilla/mux"
	"net/http"
	"sync"
	"sync/atomic"
)

type CircuitBreaker struct {
	sync.Mutex
	Enabled     bool
	counters    map[string]*int64
	limitations map[string]int64
}

func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {
	cb := &CircuitBreaker{
		counters:    make(map[string]*int64),
		limitations: make(map[string]int64),
	}

	err := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
		content, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile)
		if err != nil {
			return fmt.Errorf("read S3 circuit breaker config: %v", err)
		}
		return cb.LoadS3ApiConfigurationFromBytes(content)
	})

	if err != nil {
		glog.Warningf("fail to load config: %v", err)
	}

	return cb
}

func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error {
	cbCfg := &s3_pb.S3CircuitBreakerConfig{}
	if err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil {
		glog.Warningf("unmarshal error: %v", err)
		return fmt.Errorf("unmarshal error: %v", err)
	}
	if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil {
		return err
	}
	return nil
}

func (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerConfig) error {

	//global
	globalEnabled := false
	globalOptions := cfg.Global
	limitations := make(map[string]int64)
	if globalOptions != nil && globalOptions.Enabled && len(globalOptions.Actions) > 0 {
		globalEnabled = globalOptions.Enabled
		for action, limit := range globalOptions.Actions {
			limitations[action] = limit
		}
	}
	cb.Enabled = globalEnabled

	//buckets
	for bucket, cbOptions := range cfg.Buckets {
		if cbOptions.Enabled {
			for action, limit := range cbOptions.Actions {
				limitations[s3_constants.Concat(bucket, action)] = limit
			}
		}
	}

	cb.limitations = limitations
	return nil
}

func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {
	return func(w http.ResponseWriter, r *http.Request) {
		if !cb.Enabled {
			f(w, r)
			return
		}

		vars := mux.Vars(r)
		bucket := vars["bucket"]

		rollback, errCode := cb.limit(r, bucket, action)
		defer func() {
			for _, rf := range rollback {
				rf()
			}
		}()

		if errCode == s3err.ErrNone {
			f(w, r)
			return
		}
		s3err.WriteErrorResponse(w, r, errCode)
	}, Action(action)
}

func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) {

	//bucket simultaneous request count
	bucketCountRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest)
	if bucketCountRollBack != nil {
		rollback = append(rollback, bucketCountRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}

	//bucket simultaneous request content bytes
	bucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
	if bucketContentLengthRollBack != nil {
		rollback = append(rollback, bucketContentLengthRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}

	//global simultaneous request count
	globalCountRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest)
	if globalCountRollBack != nil {
		rollback = append(rollback, globalCountRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}

	//global simultaneous request content bytes
	globalContentLengthRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
	if globalContentLengthRollBack != nil {
		rollback = append(rollback, globalContentLengthRollBack)
	}
	if errCode != s3err.ErrNone {
		return
	}
	return
}

func (cb *CircuitBreaker) loadCounterAndCompare(bucket, action, limitType string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {
	key := s3_constants.Concat(bucket, action, limitType)
	e = s3err.ErrNone
	if max, ok := cb.limitations[key]; ok {
		counter, exists := cb.counters[key]
		if !exists {
			// double-checked locking: only take the mutex to create a counter
			cb.Lock()
			counter, exists = cb.counters[key]
			if !exists {
				var newCounter int64
				counter = &newCounter
				cb.counters[key] = counter
			}
			cb.Unlock()
		}
		current := atomic.LoadInt64(counter)
		if current+inc > max {
			e = errCode
			return
		} else {
			current := atomic.AddInt64(counter, inc)
			f = func() {
				atomic.AddInt64(counter, -inc)
			}
			// re-check after the increment in case concurrent requests raced past
			// the first check together
			current = atomic.LoadInt64(counter)
			if current > max {
				e = errCode
				return
			}
		}
	}
	return
}

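A minimal standalone sketch (not part of the commit) of how the pieces fit together: decode a JSON config through the same path NewCircuitBreaker uses, then gate a handler. Identifiers are from this diff; the config literal and the surrounding snippet are illustrative, and s3a stands in for a *S3ApiServer:

	cb := &CircuitBreaker{
		counters:    make(map[string]*int64),
		limitations: make(map[string]int64),
	}
	conf := []byte(`{"global":{"enabled":true,"actions":{"Read:Count":100}}}`)
	if err := cb.LoadS3ApiConfigurationFromBytes(conf); err != nil {
		glog.Fatalf("load circuit breaker config: %v", err)
	}
	// Limit returns the wrapped handler together with the Action, so outer
	// wrappers like iam.Auth keep receiving the (handler, action) pair.
	handler, action := cb.Limit(s3a.GetObjectHandler, s3_constants.ACTION_READ)
	_, _ = handler, action
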
weed/s3api/s3api_circuit_breaker_test.go (new file, 97 lines)

@@ -0,0 +1,97 @@
package s3api

import (
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
	"net/http"
	"sync"
	"sync/atomic"
	"testing"
)

type TestLimitCase struct {
	actionName       string
	limitType        string
	bucketLimitValue int64
	globalLimitValue int64

	routineCount int
	reqBytes     int64

	successCount int64
}

var (
	bucket         = "/test"
	action         = s3_constants.ACTION_READ
	TestLimitCases = []*TestLimitCase{
		{action, s3_constants.LimitTypeCount, 5, 5, 6, 1024, 5},
		{action, s3_constants.LimitTypeCount, 6, 6, 6, 1024, 6},
		{action, s3_constants.LimitTypeCount, 5, 6, 6, 1024, 5},
		{action, s3_constants.LimitTypeBytes, 1024, 1024, 6, 200, 5},
		{action, s3_constants.LimitTypeBytes, 1200, 1200, 6, 200, 6},
		{action, s3_constants.LimitTypeBytes, 11990, 11990, 60, 200, 59},
		{action, s3_constants.LimitTypeBytes, 11790, 11990, 70, 200, 58},
	}
)

func TestLimit(t *testing.T) {
	for _, tc := range TestLimitCases {
		circuitBreakerConfig := &s3_pb.S3CircuitBreakerConfig{
			Global: &s3_pb.S3CircuitBreakerOptions{
				Enabled: true,
				Actions: map[string]int64{
					s3_constants.Concat(tc.actionName, tc.limitType): tc.globalLimitValue,
				},
			},
			Buckets: map[string]*s3_pb.S3CircuitBreakerOptions{
				bucket: {
					Enabled: true,
					Actions: map[string]int64{
						s3_constants.Concat(tc.actionName, tc.limitType): tc.bucketLimitValue,
					},
				},
			},
		}
		circuitBreaker := &CircuitBreaker{
			counters:    make(map[string]*int64),
			limitations: make(map[string]int64),
		}
		err := circuitBreaker.loadCircuitBreakerConfig(circuitBreakerConfig)
		if err != nil {
			t.Fatal(err)
		}

		successCount := doLimit(circuitBreaker, tc.routineCount, &http.Request{ContentLength: tc.reqBytes})
		if successCount != tc.successCount {
			t.Errorf("successCount not equal, expect=%d, actual=%d", tc.successCount, successCount)
		}
	}
}

func doLimit(circuitBreaker *CircuitBreaker, routineCount int, r *http.Request) int64 {
	var successCounter int64
	resultCh := make(chan []func(), routineCount)
	var wg sync.WaitGroup
	for i := 0; i < routineCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rollbackFn, errCode := circuitBreaker.limit(r, bucket, action)
			if errCode == s3err.ErrNone {
				atomic.AddInt64(&successCounter, 1)
			}
			resultCh <- rollbackFn
		}()
	}
	wg.Wait()
	close(resultCh)
	for fns := range resultCh {
		for _, fn := range fns {
			fn()
		}
	}
	return successCounter
}

@@ -4,16 +4,17 @@ import (
 	"crypto/sha1"
 	"encoding/xml"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
-	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
-	weed_server "github.com/chrislusf/seaweedfs/weed/server"
 	"io"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+	weed_server "github.com/chrislusf/seaweedfs/weed/server"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/s3"
 )

@@ -119,7 +120,9 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht

 	glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)))

-	writeSuccessResponseXML(w, r, response)
+	//https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+	s3err.WriteXMLResponse(w, r, http.StatusNoContent, response)
+	s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone)

 }

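Per the AWS reference linked in the new comment, a successful AbortMultipartUpload returns HTTP 204 (No Content), so the handler switches from writeSuccessResponseXML, which replies 200, to an explicit 204 response followed by the access-log call.
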
@@ -3,13 +3,13 @@ package s3api
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
 	"net"
 	"net/http"
 	"strings"
 	"time"

+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	. "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"

@@ -35,6 +35,7 @@ type S3ApiServer struct {
 	s3_pb.UnimplementedSeaweedS3Server
 	option         *S3ApiServerOption
 	iam            *IdentityAccessManagement
+	cb             *CircuitBreaker
 	randomClientId int32
 	filerGuard     *security.Guard
 	client         *http.Client

@@ -55,6 +56,7 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer
 		iam:            NewIdentityAccessManagement(option),
 		randomClientId: util.RandomInt32(),
 		filerGuard:     security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec),
+		cb:             NewCircuitBreaker(option),
 	}
 	if option.LocalFilerSocket == nil || *option.LocalFilerSocket == "" {
 		s3ApiServer.client = &http.Client{Transport: &http.Transport{

@@ -73,7 +75,7 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer

 	s3ApiServer.registerRouter(router)

-	go s3ApiServer.subscribeMetaEvents("s3", filer.IamConfigDirecotry+"/"+filer.IamIdentityFile, time.Now().UnixNano())
+	go s3ApiServer.subscribeMetaEvents("s3", filer.DirectoryEtcRoot, time.Now().UnixNano())
 	return s3ApiServer, nil
 }

@@ -107,115 +109,115 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	// objects with query

 	// CopyObjectPart
-	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 	// PutObjectPart
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
 	// CompleteMultipartUpload
-	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}")
+	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
 	// NewMultipartUpload
-	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "")
+	bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
 	// AbortMultipartUpload
-	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}")
+	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
 	// ListObjectParts
-	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_READ), "GET")).Queries("uploadId", "{uploadId:.*}")
+	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
 	// ListMultipartUploads
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_READ), "GET")).Queries("uploads", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")

 	// GetObjectTagging
-	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectTaggingHandler, ACTION_READ), "GET")).Queries("tagging", "")
+	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "")
 	// PutObjectTagging
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectTaggingHandler, ACTION_TAGGING), "PUT")).Queries("tagging", "")
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "")
 	// DeleteObjectTagging
-	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "")
+	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "")

 	// PutObjectACL
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectAclHandler, ACTION_WRITE), "PUT")).Queries("acl", "")
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
 	// PutObjectRetention
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectRetentionHandler, ACTION_WRITE), "PUT")).Queries("retention", "")
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
 	// PutObjectLegalHold
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLegalHoldHandler, ACTION_WRITE), "PUT")).Queries("legal-hold", "")
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "")
 	// PutObjectLockConfiguration
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE), "PUT")).Queries("object-lock", "")
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")

 	// GetObjectACL
-	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectAclHandler, ACTION_READ), "GET")).Queries("acl", "")
+	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ)), "GET")).Queries("acl", "")

 	// objects with query

 	// raw objects

 	// HeadObject
-	bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET"))
+	bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ)), "GET"))

 	// GetObject, but directory listing is not supported
-	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
+	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET"))

 	// CopyObject
-	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY"))
+	bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY"))
 	// PutObject
-	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT"))
+	bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT"))
 	// DeleteObject
-	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE"))
+	bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE"))

 	// raw objects

 	// buckets with query

 	// DeleteMultipleObjects
-	bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "")
+	bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "")

 	// GetBucketACL
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketAclHandler, ACTION_READ), "GET")).Queries("acl", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
 	// PutBucketACL
-	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketAclHandler, ACTION_WRITE), "PUT")).Queries("acl", "")
+	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")

 	// GetBucketPolicy
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketPolicyHandler, ACTION_READ), "GET")).Queries("policy", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
 	// PutBucketPolicy
-	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketPolicyHandler, ACTION_WRITE), "PUT")).Queries("policy", "")
+	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
 	// DeleteBucketPolicy
-	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketPolicyHandler, ACTION_WRITE), "DELETE")).Queries("policy", "")
+	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")

 	// GetBucketCors
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketCorsHandler, ACTION_READ), "GET")).Queries("cors", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "")
 	// PutBucketCors
-	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketCorsHandler, ACTION_WRITE), "PUT")).Queries("cors", "")
+	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "")
 	// DeleteBucketCors
-	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketCorsHandler, ACTION_WRITE), "DELETE")).Queries("cors", "")
+	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "")

 	// GetBucketLifecycleConfiguration
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ), "GET")).Queries("lifecycle", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "")
 	// PutBucketLifecycleConfiguration
-	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE), "PUT")).Queries("lifecycle", "")
+	bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "")
 	// DeleteBucketLifecycleConfiguration
-	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE), "DELETE")).Queries("lifecycle", "")
+	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "")

 	// GetBucketLocation
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketLocationHandler, ACTION_READ), "GET")).Queries("location", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "")

 	// GetBucketRequestPayment
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketRequestPaymentHandler, ACTION_READ), "GET")).Queries("requestPayment", "")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "")

 	// ListObjectsV2
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2")
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2")

 	// buckets with query

 	// raw buckets

 	// PostPolicy
-	bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST"))
+	bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST"))

 	// HeadBucket
-	bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_READ), "GET"))
+	bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ)), "GET"))

 	// PutBucket
 	bucket.Methods("PUT").HandlerFunc(track(s3a.PutBucketHandler, "PUT"))
 	// DeleteBucket
-	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))
+	bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_WRITE)), "DELETE"))

 	// ListObjectsV1 (Legacy)
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST"))

 	// raw buckets

|
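The repeated pattern above wraps each existing handler in s3a.cb.Limit before it is handed to Auth and track. As a rough sketch of what such a limiter can look like (the limiter type, its fields, and the port below are illustrative stand-ins, not the actual SeaweedFS implementation), an in-flight counter is enough to reject requests with HTTP 429 once a threshold is crossed:

	package main

	import (
		"fmt"
		"net/http"
		"sync/atomic"
	)

	// limiter caps the number of simultaneous requests; a hypothetical
	// stand-in for the s3a.cb circuit breaker referenced in the routes.
	type limiter struct {
		inFlight int64
		max      int64
	}

	// Limit mirrors the wrapping shape used above: it takes a handler and
	// an action tag and returns a guarded handler.
	func (l *limiter) Limit(next http.HandlerFunc, action string) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			if atomic.AddInt64(&l.inFlight, 1) > l.max {
				atomic.AddInt64(&l.inFlight, -1)
				http.Error(w, "too many simultaneous requests: "+action, http.StatusTooManyRequests)
				return
			}
			defer atomic.AddInt64(&l.inFlight, -1)
			next(w, r)
		}
	}

	func main() {
		cb := &limiter{max: 100}
		http.HandleFunc("/", cb.Limit(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "ok")
		}, "Read"))
		_ = http.ListenAndServe(":8333", nil)
	}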
@@ -104,6 +104,9 @@ const (
	ErrExistingObjectIsDirectory
	ErrExistingObjectIsFile

	ErrTooManyRequest
	ErrRequestBytesExceed
)

// error code to APIError structure, these fields carry respective descriptions for all the error responses.

@@ -401,6 +404,16 @@ var errorCodeResponse = map[ErrorCode]APIError{
		Description:    "Existing Object is a file.",
		HTTPStatusCode: http.StatusConflict,
	},
	ErrTooManyRequest: {
		Code:           "ErrTooManyRequest",
		Description:    "Too many simultaneous requests",
		HTTPStatusCode: http.StatusTooManyRequests,
	},
	ErrRequestBytesExceed: {
		Code:           "ErrRequestBytesExceed",
		Description:    "Simultaneous request bytes exceed the configured limit",
		HTTPStatusCode: http.StatusTooManyRequests,
	},
}

// GetAPIError provides API Error for input API error code.
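For context, both new codes map onto HTTP 429 responses. A minimal sketch of how such an entry could be rendered as an S3-style XML error body (APIError is re-declared here for illustration; the real struct lives alongside the map above, and writeErrorResponse is a hypothetical helper):

	package main

	import (
		"fmt"
		"net/http"
	)

	// APIError mirrors the fields used in the errorCodeResponse map above.
	type APIError struct {
		Code           string
		Description    string
		HTTPStatusCode int
	}

	var errTooManyRequest = APIError{
		Code:           "ErrTooManyRequest",
		Description:    "Too many simultaneous requests",
		HTTPStatusCode: http.StatusTooManyRequests,
	}

	// writeErrorResponse writes the error in the usual S3 XML shape.
	func writeErrorResponse(w http.ResponseWriter, apiErr APIError) {
		w.Header().Set("Content-Type", "application/xml")
		w.WriteHeader(apiErr.HTTPStatusCode)
		fmt.Fprintf(w, "<Error><Code>%s</Code><Message>%s</Message></Error>",
			apiErr.Code, apiErr.Description)
	}

	func main() {
		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			writeErrorResponse(w, errTooManyRequest)
		})
		_ = http.ListenAndServe(":8334", nil)
	}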
358
weed/shell/command_s3_circuitbreaker.go
Normal file
@@ -0,0 +1,358 @@
package shell

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
)

var LoadConfig = loadConfig

func init() {
	Commands = append(Commands, &commandS3CircuitBreaker{})
}

type commandS3CircuitBreaker struct {
}

func (c *commandS3CircuitBreaker) Name() string {
	return "s3.circuitBreaker"
}

func (c *commandS3CircuitBreaker) Help() string {
	return `configure and apply s3 circuit breaker options for each bucket

	# examples
	# add circuit breaker config for global
	s3.circuitBreaker -global -type Count -actions Read,Write -values 500,200 -apply

	# disable global config
	s3.circuitBreaker -global -disable -apply

	# add circuit breaker config for buckets x,y,z
	s3.circuitBreaker -buckets x,y,z -type Count -actions Read,Write -values 200,100 -apply

	# disable circuit breaker config of x
	s3.circuitBreaker -buckets x -disable -apply

	# delete circuit breaker config of x
	s3.circuitBreaker -buckets x -delete -apply

	# clear all circuit breaker config
	s3.circuitBreaker -delete -apply
	`
}

func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
	dir := s3_constants.CircuitBreakerConfigDir
	file := s3_constants.CircuitBreakerConfigFile

	s3CircuitBreakerCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	buckets := s3CircuitBreakerCommand.String("buckets", "", "the bucket name(s) to configure, e.g.: -buckets x,y,z")
	global := s3CircuitBreakerCommand.Bool("global", false, "configure global circuit breaker")

	actions := s3CircuitBreakerCommand.String("actions", "", "comma separated action names: Read,Write,List,Tagging,Admin")
	limitType := s3CircuitBreakerCommand.String("type", "", "'Count' or 'MB'; Count limits the number of simultaneous requests, and MB limits the total content size of all simultaneous requests")
	values := s3CircuitBreakerCommand.String("values", "", "comma separated values")

	disabled := s3CircuitBreakerCommand.Bool("disable", false, "disable the global or per-bucket circuit breaker")
	deleted := s3CircuitBreakerCommand.Bool("delete", false, "delete circuit breaker config")

	apply := s3CircuitBreakerCommand.Bool("apply", false, "update and apply current configuration")

	if err = s3CircuitBreakerCommand.Parse(args); err != nil {
		return nil
	}

	var buf bytes.Buffer
	err = LoadConfig(commandEnv, dir, file, &buf)
	if err != nil {
		return err
	}

	cbCfg := &s3_pb.S3CircuitBreakerConfig{
		Buckets: make(map[string]*s3_pb.S3CircuitBreakerOptions),
	}
	if buf.Len() > 0 {
		if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), cbCfg); err != nil {
			return err
		}
	}

	if *deleted {
		cmdBuckets, cmdActions, _, err := c.initActionsAndValues(buckets, actions, limitType, values, true)
		if err != nil {
			return err
		}

		if len(cmdBuckets) <= 0 && !*global {
			if len(cmdActions) > 0 {
				deleteGlobalActions(cbCfg, cmdActions, limitType)
				if cbCfg.Buckets != nil {
					var allBuckets []string
					for bucket := range cbCfg.Buckets {
						allBuckets = append(allBuckets, bucket)
					}
					deleteBucketsActions(allBuckets, cbCfg, cmdActions, limitType)
				}
			} else {
				cbCfg.Global = nil
				cbCfg.Buckets = nil
			}
		} else {
			if len(cmdBuckets) > 0 {
				deleteBucketsActions(cmdBuckets, cbCfg, cmdActions, limitType)
			}
			if *global {
				deleteGlobalActions(cbCfg, cmdActions, nil)
			}
		}
	} else {
		cmdBuckets, cmdActions, cmdValues, err := c.initActionsAndValues(buckets, actions, limitType, values, *disabled)
		if err != nil {
			return err
		}

		if len(cmdActions) > 0 && len(*buckets) <= 0 && !*global {
			return fmt.Errorf("one of -global and -buckets must be specified")
		}

		if len(*buckets) > 0 {
			for _, bucket := range cmdBuckets {
				var cbOptions *s3_pb.S3CircuitBreakerOptions
				var exists bool
				if cbOptions, exists = cbCfg.Buckets[bucket]; !exists {
					cbOptions = &s3_pb.S3CircuitBreakerOptions{}
					cbCfg.Buckets[bucket] = cbOptions
				}
				cbOptions.Enabled = !*disabled

				if len(cmdActions) > 0 {
					err = insertOrUpdateValues(cbOptions, cmdActions, cmdValues, limitType)
					if err != nil {
						return err
					}
				}

				if len(cbOptions.Actions) <= 0 && !cbOptions.Enabled {
					delete(cbCfg.Buckets, bucket)
				}
			}
		}

		if *global {
			globalOptions := cbCfg.Global
			if globalOptions == nil {
				globalOptions = &s3_pb.S3CircuitBreakerOptions{Actions: make(map[string]int64, len(cmdActions))}
				cbCfg.Global = globalOptions
			}
			globalOptions.Enabled = !*disabled

			if len(cmdActions) > 0 {
				err = insertOrUpdateValues(globalOptions, cmdActions, cmdValues, limitType)
				if err != nil {
					return err
				}
			}

			if len(globalOptions.Actions) <= 0 && !globalOptions.Enabled {
				cbCfg.Global = nil
			}
		}
	}

	buf.Reset()
	err = filer.ProtoToText(&buf, cbCfg)
	if err != nil {
		return err
	}

	_, _ = fmt.Fprint(writer, buf.String())
	_, _ = fmt.Fprintln(writer)

	if *apply {
		if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
			return filer.SaveInsideFiler(client, dir, file, buf.Bytes())
		}); err != nil {
			return err
		}
	}

	return nil
}
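Note: LoadConfig is deliberately exported as a package-level variable (var LoadConfig = loadConfig above) rather than called directly, so that the unit test below can swap in a stub that replays the previous command's output instead of reading from a live filer.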
func loadConfig(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error {
	if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
		return filer.ReadEntry(commandEnv.MasterClient, client, dir, file, buf)
	}); err != nil && err != filer_pb.ErrNotFound {
		return err
	}
	return nil
}

func insertOrUpdateValues(cbOptions *s3_pb.S3CircuitBreakerOptions, cmdActions []string, cmdValues []int64, limitType *string) error {
	if len(*limitType) == 0 {
		return fmt.Errorf("type not valid, only 'Count' and 'MB' are allowed")
	}

	if cbOptions.Actions == nil {
		cbOptions.Actions = make(map[string]int64, len(cmdActions))
	}

	if len(cmdValues) > 0 {
		for i, action := range cmdActions {
			cbOptions.Actions[s3_constants.Concat(action, *limitType)] = cmdValues[i]
		}
	}
	return nil
}

func deleteBucketsActions(cmdBuckets []string, cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []string, limitType *string) {
	if cbCfg.Buckets == nil {
		return
	}

	if len(cmdActions) == 0 {
		for _, bucket := range cmdBuckets {
			delete(cbCfg.Buckets, bucket)
		}
	} else {
		for _, bucket := range cmdBuckets {
			if cbOption, ok := cbCfg.Buckets[bucket]; ok {
				if cbOption.Actions != nil {
					for _, action := range cmdActions {
						delete(cbOption.Actions, s3_constants.Concat(action, *limitType))
					}
				}

				if len(cbOption.Actions) == 0 && !cbOption.Enabled {
					delete(cbCfg.Buckets, bucket)
				}
			}
		}
	}

	if len(cbCfg.Buckets) == 0 {
		cbCfg.Buckets = nil
	}
}
func deleteGlobalActions(cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []string, limitType *string) {
	globalOptions := cbCfg.Global
	if globalOptions == nil {
		return
	}

	if len(cmdActions) == 0 {
		globalOptions.Actions = nil
	} else {
		// limitType may be nil when invoked from the delete path; guard the dereference
		lt := ""
		if limitType != nil {
			lt = *limitType
		}
		for _, action := range cmdActions {
			delete(globalOptions.Actions, s3_constants.Concat(action, lt))
		}
	}

	if len(globalOptions.Actions) == 0 && !globalOptions.Enabled {
		cbCfg.Global = nil
	}
}
// initActionsAndValues parses -buckets, -actions, -type and -values. Note the
// inverted sense of parseValues: when it is true (the delete and disable paths)
// value parsing is skipped.
func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitType, values *string, parseValues bool) (cmdBuckets, cmdActions []string, cmdValues []int64, err error) {
	if len(*buckets) > 0 {
		cmdBuckets = strings.Split(*buckets, ",")
	}

	if len(*actions) > 0 {
		cmdActions = strings.Split(*actions, ",")

		// check actions are valid
		for _, action := range cmdActions {
			var found bool
			for _, allowedAction := range s3_constants.AllowedActions {
				if allowedAction == action {
					found = true
				}
			}
			if !found {
				return nil, nil, nil, fmt.Errorf("value(%s) of flag[-actions] not valid, allowed actions: %v", *actions, s3_constants.AllowedActions)
			}
		}
	}

	if !parseValues {
		if len(cmdActions) == 0 {
			for _, action := range s3_constants.AllowedActions {
				cmdActions = append(cmdActions, action)
			}
		}

		if len(*limitType) > 0 {
			switch *limitType {
			case s3_constants.LimitTypeCount:
				elements := strings.Split(*values, ",")
				if len(cmdActions) != len(elements) {
					if len(elements) != 1 {
						return nil, nil, nil, fmt.Errorf("count of flag[-actions] and flag[-values] not equal")
					}
					v, err := strconv.Atoi(elements[0])
					if err != nil {
						return nil, nil, nil, fmt.Errorf("values of -values must be legal numbers")
					}
					for range cmdActions {
						cmdValues = append(cmdValues, int64(v))
					}
				} else {
					for _, value := range elements {
						v, err := strconv.Atoi(value)
						if err != nil {
							return nil, nil, nil, fmt.Errorf("values of -values must be legal numbers")
						}
						cmdValues = append(cmdValues, int64(v))
					}
				}
			case s3_constants.LimitTypeBytes:
				elements := strings.Split(*values, ",")
				if len(cmdActions) != len(elements) {
					if len(elements) != 1 {
						return nil, nil, nil, fmt.Errorf("count of flag[-actions] and flag[-values] not equal")
					}
					v, err := parseMBToBytes(elements[0])
					if err != nil {
						return nil, nil, nil, fmt.Errorf("values of -values must be legal numbers")
					}
					for range cmdActions {
						cmdValues = append(cmdValues, v)
					}
				} else {
					for _, value := range elements {
						v, err := parseMBToBytes(value)
						if err != nil {
							return nil, nil, nil, fmt.Errorf("values of -values must be legal numbers")
						}
						cmdValues = append(cmdValues, v)
					}
				}
			default:
				return nil, nil, nil, fmt.Errorf("type not valid, only 'Count' and 'MB' are allowed")
			}
		} else {
			*limitType = ""
		}
	}
	return cmdBuckets, cmdActions, cmdValues, nil
}
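Note on value parsing: when several -actions are given but only one -values entry, that single value is broadcast to every action (the `for range cmdActions` loops above); otherwise the number of values must match the number of actions.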
func parseMBToBytes(valStr string) (int64, error) {
	v, err := strconv.Atoi(valStr)
	if err != nil {
		return 0, err
	}
	// widen before multiplying so large MB values do not overflow on 32-bit ints
	return int64(v) * 1024 * 1024, nil
}
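For reference, a typical session with the new command inside `weed shell` might look like this (bucket names are illustrative):

	> s3.circuitBreaker -global -type Count -actions Read,Write -values 500,200 -apply
	> s3.circuitBreaker -buckets x,y -type MB -actions Write -values 1024 -apply
	> s3.circuitBreaker -buckets x -delete -apply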
292
weed/shell/command_s3_circuitbreaker_test.go
Normal file
@@ -0,0 +1,292 @@
package shell

import (
	"bytes"
	"encoding/json"
	"reflect"
	"strings"
	"testing"
)

type Case struct {
	args   []string
	result string
}

var (
	TestCases = []*Case{
		//add circuit breaker config for global
		{
			args: strings.Split("-global -type Count -actions Read,Write -values 500,200", " "),
			result: `{
				"global": {
					"enabled": true,
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				}
			}`,
		},

		//disable global config
		{
			args: strings.Split("-global -disable", " "),
			result: `{
				"global": {
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				}
			}`,
		},

		//add circuit breaker config for buckets x,y,z
		{
			args: strings.Split("-buckets x,y,z -type Count -actions Read,Write -values 200,100", " "),
			result: `{
				"global": {
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				},
				"buckets": {
					"x": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					},
					"y": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					},
					"z": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					}
				}
			}`,
		},

		//disable circuit breaker config of x
		{
			args: strings.Split("-buckets x -disable", " "),
			result: `{
				"global": {
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				},
				"buckets": {
					"x": {
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					},
					"y": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					},
					"z": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					}
				}
			}`,
		},

		//delete circuit breaker config of x
		{
			args: strings.Split("-buckets x -delete", " "),
			result: `{
				"global": {
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				},
				"buckets": {
					"y": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					},
					"z": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					}
				}
			}`,
		},

		//configure the circuit breaker for the size of the uploaded file for buckets x,y
		{
			args: strings.Split("-buckets x,y -type MB -actions Write -values 1024", " "),
			result: `{
				"global": {
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				},
				"buckets": {
					"x": {
						"enabled": true,
						"actions": {
							"Write:MB": "1073741824"
						}
					},
					"y": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100",
							"Write:MB": "1073741824"
						}
					},
					"z": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					}
				}
			}`,
		},

		//delete the circuit breaker configuration for the size of the uploaded file of buckets x,y
		{
			args: strings.Split("-buckets x,y -type MB -actions Write -delete", " "),
			result: `{
				"global": {
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				},
				"buckets": {
					"x": {
						"enabled": true
					},
					"y": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					},
					"z": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					}
				}
			}`,
		},

		//enable global circuit breaker config (without -disable flag)
		{
			args: strings.Split("-global", " "),
			result: `{
				"global": {
					"enabled": true,
					"actions": {
						"Read:Count": "500",
						"Write:Count": "200"
					}
				},
				"buckets": {
					"x": {
						"enabled": true
					},
					"y": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					},
					"z": {
						"enabled": true,
						"actions": {
							"Read:Count": "200",
							"Write:Count": "100"
						}
					}
				}
			}`,
		},

		//clear all circuit breaker config
		{
			args: strings.Split("-delete", " "),
			result: `{}`,
		},
	}
)

func TestCircuitBreakerShell(t *testing.T) {
	var writeBuf bytes.Buffer
	cmd := &commandS3CircuitBreaker{}
	LoadConfig = func(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error {
		_, err := buf.Write(writeBuf.Bytes())
		if err != nil {
			return err
		}
		writeBuf.Reset()
		return nil
	}

	for i, tc := range TestCases {
		err := cmd.Do(tc.args, nil, &writeBuf)
		if err != nil {
			t.Fatal(err)
		}
		if i != 0 {
			result := writeBuf.String()

			actual := make(map[string]interface{})
			err := json.Unmarshal([]byte(result), &actual)
			if err != nil {
				t.Error(err)
			}

			expect := make(map[string]interface{})
			err = json.Unmarshal([]byte(tc.result), &expect)
			if err != nil {
				t.Error(err)
			}
			if !reflect.DeepEqual(actual, expect) {
				t.Fatal("result of s3 circuit breaker shell command is unexpected!")
			}
		}
	}
}
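The test runs without a live cluster, since the LoadConfig stub keeps the configuration in memory:

	go test ./weed/shell -run TestCircuitBreakerShell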
@@ -8,7 +8,7 @@ import (
)

const (
	batch = 100000
	batch = 10000
)

type SectionalNeedleId uint32