Commit b693a8d6b9 (mirror of https://github.com/seaweedfs/seaweedfs.git)
@@ -6,25 +6,37 @@ services:
     ports:
       - 9333:9333
       - 19333:19333
-    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m1"
+    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+    environment:
+      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
+      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
+      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
   master1:
     image: chrislusf/seaweedfs:local
     ports:
       - 9334:9334
       - 19334:19334
-    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m2"
+    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+    environment:
+      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
+      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
+      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
   master2:
     image: chrislusf/seaweedfs:local
     ports:
       - 9335:9335
       - 19335:19335
-    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m3"
+    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+    environment:
+      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
+      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
+      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
   volume1:
     image: chrislusf/seaweedfs:local
     ports:
       - 8080:8080
       - 18080:18080
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1 -disk=ssd1'
+    command: 'volume -dataCenter dc1 -rack v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
     depends_on:
       - master0
       - master1
@@ -34,7 +46,7 @@ services:
     ports:
       - 8082:8082
       - 18082:18082
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1 -disk=ssd1'
+    command: 'volume -dataCenter dc2 -rack v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
     depends_on:
       - master0
       - master1
@@ -44,7 +56,7 @@ services:
     ports:
      - 8083:8083
      - 18083:18083
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
+    command: 'volume -dataCenter dc3 -rack v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
     depends_on:
       - master0
       - master1
@@ -54,7 +66,8 @@ services:
     ports:
       - 8888:8888
       - 18888:18888
-    command: 'filer -master="master0:9333,master1:9334,master2:9335"'
+      - 8111:8111
+    command: 'filer -defaultReplicaPlacement 100 -iam -master="master0:9333,master1:9334,master2:9335"'
     depends_on:
       - master0
       - master1
@@ -65,7 +78,7 @@ services:
     image: chrislusf/seaweedfs:local
     ports:
       - 8333:8333
-    command: 's3 -filer="filer:8888"'
+    command: '-v=9 s3 -filer="filer:8888"'
     depends_on:
       - master0
       - master1

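A minimal smoke check for the compose cluster above; this is a sketch assuming the stack is up locally and the master's standard /dir/assign endpoint is reachable on the published port 9333. Asking for replication "100" requires an extra copy in a different data center, which is exactly what the dc1/dc2/dc3 volume servers provide.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// Ask the master to assign a file id with replication "100" (one extra copy
// in a different data center). This only succeeds once volume servers from
// at least two data centers have registered with the masters.
func main() {
	resp, err := http.Get("http://localhost:9333/dir/assign?replication=100")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // e.g. {"fid":"3,01637037d6","url":"volume1:8080",...}
}
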
@@ -3,6 +3,10 @@ package filer
 import (
 	"bytes"
 	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/wdclient"
+	"google.golang.org/grpc"
 	"io"

 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -26,6 +30,29 @@ type FilerConf struct {
 	rules ptrie.Trie
 }

+func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) {
+	var buf bytes.Buffer
+	if err := pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		if masterClient != nil {
+			return ReadEntry(masterClient, client, DirectoryEtcSeaweedFS, FilerConfName, &buf)
+		} else {
+			content, err := ReadInsideFiler(client, DirectoryEtcSeaweedFS, FilerConfName)
+			buf = *bytes.NewBuffer(content)
+			return err
+		}
+	}); err != nil && err != filer_pb.ErrNotFound {
+		return nil, fmt.Errorf("read %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
+	}
+
+	fc := NewFilerConf()
+	if buf.Len() > 0 {
+		if err := fc.LoadFromBytes(buf.Bytes()); err != nil {
+			return nil, fmt.Errorf("parse %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
+		}
+	}
+	return fc, nil
+}
+
 func NewFilerConf() (fc *FilerConf) {
 	fc = &FilerConf{
 		rules: ptrie.New(),
@@ -115,6 +142,18 @@ func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf
 	return pathConf
 }

+func (fc *FilerConf) GetCollectionTtls(collection string) (ttls map[string]string) {
+	ttls = make(map[string]string)
+	fc.rules.Walk(func(key []byte, value interface{}) bool {
+		t := value.(*filer_pb.FilerConf_PathConf)
+		if t.Collection == collection {
+			ttls[t.LocationPrefix] = t.GetTtl()
+		}
+		return true
+	})
+	return ttls
+}
+
 // merge if values in b is not empty, merge them into a
 func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
 	a.Collection = util.Nvl(b.Collection, a.Collection)

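A sketch of how the new filer.ReadFilerConf and FilerConf.GetCollectionTtls might be called from a small client program. The module path (as of this commit), the filer address localhost:8888, and the bucket name are assumptions; passing a nil master client takes the ReadInsideFiler branch above, so no master connection is needed.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"google.golang.org/grpc"
)

func main() {
	// Read /etc/seaweedfs/filer.conf from the filer over gRPC (nil master client).
	fc, err := filer.ReadFilerConf(pb.ServerAddress("localhost:8888"), grpc.WithInsecure(), nil)
	if err != nil {
		panic(err)
	}
	// List every location prefix whose rule belongs to the "my-bucket" collection,
	// together with the ttl string stored in that rule.
	for prefix, ttl := range fc.GetCollectionTtls("my-bucket") {
		fmt.Printf("prefix %s has ttl %q\n", prefix, ttl)
	}
}
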
@@ -37,6 +37,31 @@ type Credential struct {
 	SecretKey string
 }

+func (action Action) isAdmin() bool {
+	return strings.HasPrefix(string(action), s3_constants.ACTION_ADMIN)
+}
+
+func (action Action) isOwner(bucket string) bool {
+	return string(action) == s3_constants.ACTION_ADMIN+":"+bucket
+}
+
+func (action Action) overBucket(bucket string) bool {
+	return strings.HasSuffix(string(action), ":"+bucket) || strings.HasSuffix(string(action), ":*")
+}
+
+func (action Action) getPermission() Permission {
+	switch act := strings.Split(string(action), ":")[0]; act {
+	case s3_constants.ACTION_ADMIN:
+		return Permission("FULL_CONTROL")
+	case s3_constants.ACTION_WRITE:
+		return Permission("WRITE")
+	case s3_constants.ACTION_READ:
+		return Permission("READ")
+	default:
+		return Permission("")
+	}
+}
+
 func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManagement {
 	iam := &IdentityAccessManagement{
 		domain: option.DomainName,

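The Action helpers above are unexported, so the following is a standalone illustration of the same bucket/permission mapping rather than a call into the s3api package; it assumes the s3_constants action values are the usual strings "Admin", "Write", "Read", and "List".

package main

import (
	"fmt"
	"strings"
)

// permission mirrors Action.getPermission above: the part before ":" decides
// the ACL permission; any other action grants no permission at all.
func permission(action string) string {
	switch strings.Split(action, ":")[0] {
	case "Admin":
		return "FULL_CONTROL"
	case "Write":
		return "WRITE"
	case "Read":
		return "READ"
	default:
		return ""
	}
}

// overBucket mirrors Action.overBucket: an action applies to a bucket when it
// is scoped to that bucket or to the "*" wildcard.
func overBucket(action, bucket string) bool {
	return strings.HasSuffix(action, ":"+bucket) || strings.HasSuffix(action, ":*")
}

func main() {
	for _, a := range []string{"Admin:bucket1", "Write:*", "Read:other", "List"} {
		fmt.Printf("%-14s over bucket1=%-5v permission=%q\n", a, overBucket(a, "bucket1"), permission(a))
	}
}
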
@@ -4,7 +4,9 @@ import (
 	"context"
 	"encoding/xml"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"math"
 	"net/http"
 	"time"
@@ -205,3 +207,98 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
 	}
 	return true
 }
+
+// GetBucketAclHandler Get Bucket ACL
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html
+func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) {
+	// collect parameters
+	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("GetBucketAclHandler %s", bucket)
+
+	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+		s3err.WriteErrorResponse(w, err, r)
+		return
+	}
+
+	response := AccessControlPolicy{}
+	for _, ident := range s3a.iam.identities {
+		if len(ident.Credentials) == 0 {
+			continue
+		}
+		for _, action := range ident.Actions {
+			if !action.overBucket(bucket) || action.getPermission() == "" {
+				continue
+			}
+			id := ident.Credentials[0].AccessKey
+			if response.Owner.DisplayName == "" && action.isOwner(bucket) && len(ident.Credentials) > 0 {
+				response.Owner.DisplayName = ident.Name
+				response.Owner.ID = id
+			}
+			response.AccessControlList.Grant = append(response.AccessControlList.Grant, Grant{
+				Grantee: Grantee{
+					ID:          id,
+					DisplayName: ident.Name,
+					Type:        "CanonicalUser",
+					XMLXSI:      "CanonicalUser",
+					XMLNS:       "http://www.w3.org/2001/XMLSchema-instance"},
+				Permission: action.getPermission(),
+			})
+		}
+	}
+	writeSuccessResponseXML(w, response)
+}
+
+// GetBucketLifecycleConfigurationHandler Get Bucket Lifecycle configuration
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
+	// collect parameters
+	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("GetBucketLifecycleConfigurationHandler %s", bucket)
+
+	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+		s3err.WriteErrorResponse(w, err, r)
+		return
+	}
+	fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil)
+	if err != nil {
+		glog.Errorf("GetBucketLifecycleConfigurationHandler: %s", err)
+		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		return
+	}
+	ttls := fc.GetCollectionTtls(bucket)
+	if len(ttls) == 0 {
+		s3err.WriteErrorResponse(w, s3err.ErrNoSuchLifecycleConfiguration, r)
+	}
+	response := Lifecycle{}
+	for prefix, internalTtl := range ttls {
+		ttl, _ := needle.ReadTTL(internalTtl)
+		days := int(ttl.Minutes() / 60 / 24)
+		if days == 0 {
+			continue
+		}
+		response.Rules = append(response.Rules, Rule{
+			Status: Enabled, Filter: Filter{
+				Prefix: Prefix{string: prefix, set: true},
+				set:    true,
+			},
+			Expiration: Expiration{Days: days, set: true},
+		})
+	}
+	writeSuccessResponseXML(w, response)
+}
+
+// PutBucketLifecycleConfigurationHandler Put Bucket Lifecycle configuration
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
+
+	s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+
+}
+
+// DeleteBucketLifecycleHandler Delete Bucket Lifecycle
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {

+	s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+
+}

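A sketch of the TTL-to-days translation that GetBucketLifecycleConfigurationHandler performs, using the same needle.ReadTTL parser (module path assumed from this commit). TTLs shorter than one day parse fine but round down to zero days, which is why the handler above skips them.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func main() {
	for _, s := range []string{"7d", "24h", "6h"} {
		ttl, err := needle.ReadTTL(s) // parses SeaweedFS ttl strings such as "7d"
		if err != nil {
			panic(err)
		}
		days := int(ttl.Minutes() / 60 / 24) // same conversion as the handler above
		fmt.Printf("ttl %-4s -> Expiration Days: %d\n", s, days)
	}
}
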
@@ -21,6 +21,7 @@ func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) err
 	}, s3a.option.Filer.ToGrpcAddress(), s3a.option.GrpcDialOption)

 }

 func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string {
 	return location.Url
 }

@@ -4,6 +4,14 @@ import (
 	"net/http"
 )

+// GetObjectAclHandler Get object ACL
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+func (s3a *S3ApiServer) GetObjectAclHandler(w http.ResponseWriter, r *http.Request) {
+
+	w.WriteHeader(http.StatusNoContent)
+
+}
+
 // PutObjectAclHandler Put object ACL
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html
 func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Request) {
weed/s3api/s3api_policy.go (new file, 147 lines)
@@ -0,0 +1,147 @@
+package s3api
+
+import (
+	"encoding/xml"
+	"time"
+)
+
+// Status represents lifecycle configuration status
+type ruleStatus string
+
+// Supported status types
+const (
+	Enabled  ruleStatus = "Enabled"
+	Disabled ruleStatus = "Disabled"
+)
+
+// Lifecycle - Configuration for bucket lifecycle.
+type Lifecycle struct {
+	XMLName xml.Name `xml:"LifecycleConfiguration"`
+	Rules   []Rule   `xml:"Rule"`
+}
+
+// Rule - a rule for lifecycle configuration.
+type Rule struct {
+	XMLName    xml.Name   `xml:"Rule"`
+	ID         string     `xml:"ID,omitempty"`
+	Status     ruleStatus `xml:"Status"`
+	Filter     Filter     `xml:"Filter,omitempty"`
+	Prefix     Prefix     `xml:"Prefix,omitempty"`
+	Expiration Expiration `xml:"Expiration,omitempty"`
+	Transition Transition `xml:"Transition,omitempty"`
+}
+
+// Filter - a filter for a lifecycle configuration Rule.
+type Filter struct {
+	XMLName xml.Name `xml:"Filter"`
+	set     bool
+
+	Prefix Prefix
+
+	And    And
+	andSet bool
+
+	Tag    Tag
+	tagSet bool
+}
+
+// Prefix holds the prefix xml tag in <Rule> and <Filter>
+type Prefix struct {
+	string
+	set bool
+}
+
+// MarshalXML encodes the prefix, but only when it was explicitly set.
+func (p Prefix) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if !p.set {
+		return nil
+	}
+	return e.EncodeElement(p.string, startElement)
+}
+
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if err := e.EncodeToken(start); err != nil {
+		return err
+	}
+	if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
+		return err
+	}
+	return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// And - a tag to combine a prefix and multiple tags for lifecycle configuration rule.
+type And struct {
+	XMLName xml.Name `xml:"And"`
+	Prefix  Prefix   `xml:"Prefix,omitempty"`
+	Tags    []Tag    `xml:"Tag,omitempty"`
+}
+
+// Expiration - expiration actions for a rule in lifecycle configuration.
+type Expiration struct {
+	XMLName      xml.Name           `xml:"Expiration"`
+	Days         int                `xml:"Days,omitempty"`
+	Date         ExpirationDate     `xml:"Date,omitempty"`
+	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker"`
+
+	set bool
+}
+
+// MarshalXML encodes expiration field into an XML form.
+func (e Expiration) MarshalXML(enc *xml.Encoder, startElement xml.StartElement) error {
+	if !e.set {
+		return nil
+	}
+	type expirationWrapper Expiration
+	return enc.EncodeElement(expirationWrapper(e), startElement)
+}
+
+// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
+type ExpireDeleteMarker struct {
+	val bool
+	set bool
+}
+
+// MarshalXML encodes delete marker boolean into an XML form.
+func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if !b.set {
+		return nil
+	}
+	return e.EncodeElement(b.val, startElement)
+}
+
+// ExpirationDate is an embedded type containing time.Time to unmarshal
+// Date in Expiration
+type ExpirationDate struct {
+	time.Time
+}
+
+// MarshalXML encodes expiration date if it is non-zero and encodes
+// empty string otherwise
+func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if eDate.Time.IsZero() {
+		return nil
+	}
+	return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
+}
+
+// Transition - transition actions for a rule in lifecycle configuration.
+type Transition struct {
+	XMLName      xml.Name  `xml:"Transition"`
+	Days         int       `xml:"Days,omitempty"`
+	Date         time.Time `xml:"Date,omitempty"`
+	StorageClass string    `xml:"StorageClass,omitempty"`
+
+	set bool
+}
+
+// MarshalXML encodes transition field into an XML form.
+func (t Transition) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
+	if !t.set {
+		return nil
+	}
+	type transitionWrapper Transition
+	return enc.EncodeElement(transitionWrapper(t), start)
+}
+
+// TransitionDays is a type alias to unmarshal Days in Transition
+type TransitionDays int

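A standalone illustration of the set-flag marshaling pattern used throughout this file, with simplified stand-in types rather than the real Lifecycle structs: a custom MarshalXML returns early when the value was never set, so optional elements disappear from the output instead of being rendered as empty tags.

package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// prefix is a simplified stand-in for the Prefix type above.
type prefix struct {
	value string
	set   bool
}

// MarshalXML writes the element only when the prefix was explicitly set.
func (p prefix) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if !p.set {
		return nil // omit <Prefix> entirely
	}
	return e.EncodeElement(p.value, start)
}

type rule struct {
	XMLName xml.Name `xml:"Rule"`
	Status  string   `xml:"Status"`
	Prefix  prefix   `xml:"Prefix"`
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("", "  ")
	_ = enc.Encode(rule{Status: "Enabled", Prefix: prefix{value: "logs/", set: true}})
	fmt.Println()
	_ = enc.Encode(rule{Status: "Enabled"}) // Prefix never set: no <Prefix> element
	fmt.Println()
}
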
@@ -115,14 +115,30 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 		bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2")
 		// GetObject, but directory listing is not supported
 		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
-		// ListObjectsV1 (Legacy)
-		bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))

 		// PostPolicy
 		bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST"))

 		// DeleteMultipleObjects
 		bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "")

+		// GetBucketACL
+		bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.GetBucketAclHandler, ACTION_READ)).Queries("acl", "")
+
+		// GetObjectACL
+		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectAclHandler, ACTION_READ)).Queries("acl", "")
+
+		// GetBucketLifecycleConfiguration
+		bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)).Queries("lifecycle", "")
+
+		// PutBucketLifecycleConfiguration
+		bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)).Queries("lifecycle", "")
+
+		// DeleteBucketLifecycleConfiguration
+		bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)).Queries("lifecycle", "")
+
+		// ListObjectsV1 (Legacy)
+		bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))
+
 		/*

 			// not implemented
@@ -132,8 +148,6 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 			bucket.Methods("GET").HandlerFunc(s3a.GetBucketPolicyHandler).Queries("policy", "")
 			// GetObjectACL
 			bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectACLHandler).Queries("acl", "")
-			// GetBucketACL
-			bucket.Methods("GET").HandlerFunc(s3a.GetBucketACLHandler).Queries("acl", "")
 			// PutBucketPolicy
 			bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "")
 			// DeleteBucketPolicy

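The registrations above depend on gorilla/mux matching routes in the order they were added: the ?acl and ?lifecycle query matchers must come before the catch-all bucket GET, which is why the ListObjectsV1 registration moved to the end of the block. A minimal sketch of that ordering with hypothetical handlers (not the real ones):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	bucket := router.PathPrefix("/{bucket}").Subrouter()

	// Query-specific routes first, mirroring the order used above.
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "bucket ACL")
	}).Queries("acl", "")
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "bucket lifecycle")
	}).Queries("lifecycle", "")
	// Catch-all bucket GET last, otherwise it would swallow ?acl and ?lifecycle.
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "list objects")
	})

	log.Fatal(http.ListenAndServe(":8000", router)) // GET /b?lifecycle -> "bucket lifecycle"
}
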
@@ -8,12 +8,12 @@ import (
 )

 type AccessControlList struct {
-	Grant []Grant `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grant,omitempty"`
+	Grant []Grant `xml:"Grant,omitempty"`
 }

 type AccessControlPolicy struct {
-	Owner             CanonicalUser     `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner"`
-	AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"`
+	Owner             CanonicalUser     `xml:"Owner"`
+	AccessControlList AccessControlList `xml:"AccessControlList"`
 }

 type AmazonCustomerByEmail struct {
@@ -467,11 +467,17 @@ func (t *GetObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
 }

 type Grant struct {
-	Grantee    Grantee    `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grantee"`
-	Permission Permission `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Permission"`
+	Grantee    Grantee    `xml:"Grantee"`
+	Permission Permission `xml:"Permission"`
 }

 type Grantee struct {
+	XMLNS       string `xml:"xmlns:xsi,attr"`
+	XMLXSI      string `xml:"xsi:type,attr"`
+	Type        string `xml:"Type"`
+	ID          string `xml:"ID,omitempty"`
+	DisplayName string `xml:"DisplayName,omitempty"`
+	URI         string `xml:"URI,omitempty"`
 }

 type Group struct {

@@ -51,6 +51,7 @@ const (
 	ErrBucketAlreadyExists
 	ErrBucketAlreadyOwnedByYou
 	ErrNoSuchBucket
+	ErrNoSuchLifecycleConfiguration
 	ErrNoSuchKey
 	ErrNoSuchUpload
 	ErrInvalidBucketName
@@ -163,6 +164,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
 		Description:    "The specified bucket does not exist",
 		HTTPStatusCode: http.StatusNotFound,
 	},
+	ErrNoSuchLifecycleConfiguration: {
+		Code:           "NoSuchLifecycleConfiguration",
+		Description:    "The lifecycle configuration does not exist",
+		HTTPStatusCode: http.StatusNotFound,
+	},
 	ErrNoSuchKey: {
 		Code:           "NoSuchKey",
 		Description:    "The specified key does not exist.",

@@ -62,7 +62,7 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
 		return nil
 	}

-	fc, err := readFilerConf(commandEnv)
+	fc, err := filer.ReadFilerConf(commandEnv.option.FilerAddress, commandEnv.option.GrpcDialOption, commandEnv.MasterClient)
 	if err != nil {
 		return err
 	}
@@ -122,20 +122,3 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
 	return nil

 }
-
-func readFilerConf(commandEnv *CommandEnv) (*filer.FilerConf, error) {
-	var buf bytes.Buffer
-	if err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-		return filer.ReadEntry(commandEnv.MasterClient, client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, &buf)
-	}); err != nil && err != filer_pb.ErrNotFound {
-		return nil, fmt.Errorf("read %s/%s: %v", filer.DirectoryEtcSeaweedFS, filer.FilerConfName, err)
-	}
-
-	fc := filer.NewFilerConf()
-	if buf.Len() > 0 {
-		if err := fc.LoadFromBytes(buf.Bytes()); err != nil {
-			return nil, fmt.Errorf("parse %s/%s: %v", filer.DirectoryEtcSeaweedFS, filer.FilerConfName, err)
-		}
-	}
-	return fc, nil
-}