move s3 related constants from package http to s3_constants
This commit is contained in:
parent f4a6da6cb2
commit 27732ecfa4
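The change below is mechanical: every caller that imported the old weed/s3api/http package (aliased as xhttp or headers, since the bare package name would clash with the standard library's net/http) now references the same identifiers from weed/s3api/s3_constants, and the header definitions file itself is re-declared from package http to package s3_constants. As a rough sketch of what the moved package exposes after this commit — only identifiers that actually appear in the diff are shown, the constant string values are illustrative placeholders, and the helper body is a plausible reconstruction rather than the verbatim source:

package s3_constants

import (
	"net/http"
	"strings"

	"github.com/gorilla/mux"
)

// A few of the header-name constants referenced by the handlers in this
// diff. The string values here are placeholders for illustration.
const (
	AmzIdentityId = "s3-identity-id"
	AmzIsAdmin    = "s3-is-admin"
	AmzAuthType   = "s3-auth-type"
)

// GetBucketAndObject pulls the bucket name and object key out of the
// gorilla/mux route variables. The signature matches its call sites in
// the diff; the body is a sketch, not the exact implementation.
func GetBucketAndObject(r *http.Request) (bucket, object string) {
	vars := mux.Vars(r)
	bucket = vars["bucket"]
	object = vars["object"]
	if !strings.HasPrefix(object, "/") {
		object = "/" + object
	}
	return
}

Because the new package name s3_constants does not collide with net/http, the diff also drops the xhttp and headers import aliases everywhere.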
@@ -12,7 +12,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 )
@@ -186,11 +185,11 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
 	identity, errCode := iam.authRequest(r, action)
 	if errCode == s3err.ErrNone {
 		if identity != nil && identity.Name != "" {
-			r.Header.Set(xhttp.AmzIdentityId, identity.Name)
+			r.Header.Set(s3_constants.AmzIdentityId, identity.Name)
 			if identity.isAdmin() {
-				r.Header.Set(xhttp.AmzIsAdmin, "true")
-			} else if _, ok := r.Header[xhttp.AmzIsAdmin]; ok {
-				r.Header.Del(xhttp.AmzIsAdmin)
+				r.Header.Set(s3_constants.AmzIsAdmin, "true")
+			} else if _, ok := r.Header[s3_constants.AmzIsAdmin]; ok {
+				r.Header.Del(s3_constants.AmzIsAdmin)
 			}
 		}
 		f(w, r)
@@ -211,7 +210,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 		return identity, s3err.ErrNone
 	case authTypeUnknown:
 		glog.V(3).Infof("unknown auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Unknown")
+		r.Header.Set(s3_constants.AmzAuthType, "Unknown")
 		return identity, s3err.ErrAccessDenied
 	case authTypePresignedV2, authTypeSignedV2:
 		glog.V(3).Infof("v2 auth type")
@@ -223,17 +222,17 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 		authType = "SigV4"
 	case authTypePostPolicy:
 		glog.V(3).Infof("post policy auth type")
-		r.Header.Set(xhttp.AmzAuthType, "PostPolicy")
+		r.Header.Set(s3_constants.AmzAuthType, "PostPolicy")
 		return identity, s3err.ErrNone
 	case authTypeJWT:
 		glog.V(3).Infof("jwt auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Jwt")
+		r.Header.Set(s3_constants.AmzAuthType, "Jwt")
 		return identity, s3err.ErrNotImplemented
 	case authTypeAnonymous:
 		authType = "Anonymous"
 		identity, found = iam.lookupAnonymous()
 		if !found {
-			r.Header.Set(xhttp.AmzAuthType, authType)
+			r.Header.Set(s3_constants.AmzAuthType, authType)
 			return identity, s3err.ErrAccessDenied
 		}
 	default:
@@ -241,7 +240,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
 	}

 	if len(authType) > 0 {
-		r.Header.Set(xhttp.AmzAuthType, authType)
+		r.Header.Set(s3_constants.AmzAuthType, authType)
 	}
 	if s3Err != s3err.ErrNone {
 		return identity, s3Err
@@ -249,7 +248,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)

 	glog.V(3).Infof("user name: %v actions: %v, action: %v", identity.Name, identity.Actions, action)

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)

 	if !identity.canDo(action, bucket, object) {
 		return identity, s3err.ErrAccessDenied
@@ -269,7 +268,7 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err
 		return identity, s3err.ErrNone
 	case authTypeUnknown:
 		glog.V(3).Infof("unknown auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Unknown")
+		r.Header.Set(s3_constants.AmzAuthType, "Unknown")
 		return identity, s3err.ErrAccessDenied
 	case authTypePresignedV2, authTypeSignedV2:
 		glog.V(3).Infof("v2 auth type")
@@ -281,17 +280,17 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err
 		authType = "SigV4"
 	case authTypePostPolicy:
 		glog.V(3).Infof("post policy auth type")
-		r.Header.Set(xhttp.AmzAuthType, "PostPolicy")
+		r.Header.Set(s3_constants.AmzAuthType, "PostPolicy")
 		return identity, s3err.ErrNone
 	case authTypeJWT:
 		glog.V(3).Infof("jwt auth type")
-		r.Header.Set(xhttp.AmzAuthType, "Jwt")
+		r.Header.Set(s3_constants.AmzAuthType, "Jwt")
 		return identity, s3err.ErrNotImplemented
 	case authTypeAnonymous:
 		authType = "Anonymous"
 		identity, found = iam.lookupAnonymous()
 		if !found {
-			r.Header.Set(xhttp.AmzAuthType, authType)
+			r.Header.Set(s3_constants.AmzAuthType, authType)
 			return identity, s3err.ErrAccessDenied
 		}
 	default:
@@ -299,7 +298,7 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err
 	}

 	if len(authType) > 0 {
-		r.Header.Set(xhttp.AmzAuthType, authType)
+		r.Header.Set(s3_constants.AmzAuthType, authType)
 	}

 	glog.V(3).Infof("auth error: %v", s3Err)

@@ -24,7 +24,6 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"errors"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"hash"
@@ -92,7 +91,7 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
 		return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID
 	}

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	if !identity.canDo(s3_constants.ACTION_WRITE, bucket, object) {
 		errCode = s3err.ErrAccessDenied
 		return

@@ -1,14 +1,14 @@
 package s3api

 import (
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"strings"

 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 )

 const (
-	S3TAG_PREFIX = xhttp.AmzObjectTagging + "-"
+	S3TAG_PREFIX = s3_constants.AmzObjectTagging + "-"
 )

 func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (tags map[string]string, err error) {

@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package http
+package s3_constants

 import (
 	"github.com/gorilla/mux"

@@ -13,7 +13,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"

-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"

 	"github.com/aws/aws-sdk-go/aws"
@@ -52,7 +51,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
 		return
 	}

-	identityId := r.Header.Get(xhttp.AmzIdentityId)
+	identityId := r.Header.Get(s3_constants.AmzIdentityId)

 	var buckets []*s3.Bucket
 	for _, entry := range entries {
@@ -80,7 +79,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques

 func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("PutBucketHandler %s", bucket)

 	// avoid duplicated buckets
@@ -121,11 +120,11 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
 	}

 	fn := func(entry *filer_pb.Entry) {
-		if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" {
+		if identityId := r.Header.Get(s3_constants.AmzIdentityId); identityId != "" {
 			if entry.Extended == nil {
 				entry.Extended = make(map[string][]byte)
 			}
-			entry.Extended[xhttp.AmzIdentityId] = []byte(identityId)
+			entry.Extended[s3_constants.AmzIdentityId] = []byte(identityId)
 		}
 	}

@@ -141,7 +140,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)

 func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteBucketHandler %s", bucket)

 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
@@ -194,7 +193,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque

 func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("HeadBucketHandler %s", bucket)

 	if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); entry == nil || err == filer_pb.ErrNotFound {
@@ -218,7 +217,7 @@ func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorC
 }

 func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
-	isAdmin := r.Header.Get(xhttp.AmzIsAdmin) != ""
+	isAdmin := r.Header.Get(s3_constants.AmzIsAdmin) != ""
 	if isAdmin {
 		return true
 	}
@@ -226,8 +225,8 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
 		return true
 	}

-	identityId := r.Header.Get(xhttp.AmzIdentityId)
-	if id, ok := entry.Extended[xhttp.AmzIdentityId]; ok {
+	identityId := r.Header.Get(s3_constants.AmzIdentityId)
+	if id, ok := entry.Extended[s3_constants.AmzIdentityId]; ok {
 		if identityId != string(id) {
 			return false
 		}
@@ -239,7 +238,7 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html
 func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) {
 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetBucketAclHandler %s", bucket)

 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
@@ -279,7 +278,7 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
 func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetBucketLifecycleConfigurationHandler %s", bucket)

 	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {

@@ -3,8 +3,7 @@ package s3api
 import (
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	headers "github.com/chrislusf/seaweedfs/weed/s3api/http"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"modernc.org/strutil"
 	"net/http"
@@ -23,7 +22,7 @@ const (

 func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {

-	dstBucket, dstObject := xhttp.GetBucketAndObject(r)
+	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

 	// Copy source path.
 	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
@@ -130,7 +129,7 @@ type CopyPartResult struct {
 func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
 	// https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
-	dstBucket, dstObject := xhttp.GetBucketAndObject(r)
+	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

 	// Copy source path.
 	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
@@ -198,24 +197,24 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
 }

 func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) {
-	return reqHeader.Get(headers.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(headers.AmzObjectTaggingDirective) == DirectiveReplace
+	return reqHeader.Get(s3_constants.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(s3_constants.AmzObjectTaggingDirective) == DirectiveReplace
 }

 func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) {
-	if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) == 0 {
-		if sc := existing[xhttp.AmzStorageClass]; len(sc) > 0 {
-			reqHeader[xhttp.AmzStorageClass] = sc
+	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) == 0 {
+		if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
+			reqHeader[s3_constants.AmzStorageClass] = sc
 		}
 	}

 	if !replaceMeta {
 		for header, _ := range reqHeader {
-			if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
 				delete(reqHeader, header)
 			}
 		}
 		for k, v := range existing {
-			if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
 				reqHeader[k] = v
 			}
 		}
@@ -223,14 +222,14 @@ func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTaggin

 	if !replaceTagging {
 		for header, _ := range reqHeader {
-			if strings.HasPrefix(header, xhttp.AmzObjectTagging) {
+			if strings.HasPrefix(header, s3_constants.AmzObjectTagging) {
 				delete(reqHeader, header)
 			}
 		}

 		found := false
 		for k, _ := range existing {
-			if strings.HasPrefix(k, xhttp.AmzObjectTaggingPrefix) {
+			if strings.HasPrefix(k, s3_constants.AmzObjectTaggingPrefix) {
 				found = true
 				break
 			}
@@ -247,7 +246,7 @@ func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTaggin
 				tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v))
 			}
 			tagStr := strutil.JoinFields(tagArr, "&")
-			reqHeader.Set(xhttp.AmzObjectTagging, tagStr)
+			reqHeader.Set(s3_constants.AmzObjectTagging, tagStr)
 		}
 	}
 	return
@@ -256,16 +255,16 @@ func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTaggin
 func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte) {
 	metadata = make(map[string][]byte)

-	if sc := existing[xhttp.AmzStorageClass]; len(sc) > 0 {
-		metadata[xhttp.AmzStorageClass] = sc
+	if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
+		metadata[s3_constants.AmzStorageClass] = sc
 	}
-	if sc := reqHeader.Get(xhttp.AmzStorageClass); len(sc) > 0 {
-		metadata[xhttp.AmzStorageClass] = []byte(sc)
+	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) > 0 {
+		metadata[s3_constants.AmzStorageClass] = []byte(sc)
 	}

 	if replaceMeta {
 		for header, values := range reqHeader {
-			if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
 				for _, value := range values {
 					metadata[header] = []byte(value)
 				}
@@ -273,30 +272,30 @@ func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, rep
 		}
 	} else {
 		for k, v := range existing {
-			if strings.HasPrefix(k, xhttp.AmzUserMetaPrefix) {
+			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
 				metadata[k] = v
 			}
 		}
 	}

 	if replaceTagging {
-		if tags := reqHeader.Get(xhttp.AmzObjectTagging); tags != "" {
+		if tags := reqHeader.Get(s3_constants.AmzObjectTagging); tags != "" {
 			for _, v := range strings.Split(tags, "&") {
 				tag := strings.Split(v, "=")
 				if len(tag) == 2 {
-					metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
+					metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
 				} else if len(tag) == 1 {
-					metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = nil
+					metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = nil
 				}
 			}
 		}
 	} else {
 		for k, v := range existing {
-			if strings.HasPrefix(k, xhttp.AmzObjectTagging) {
+			if strings.HasPrefix(k, s3_constants.AmzObjectTagging) {
 				metadata[k] = v
 			}
 		}
-		delete(metadata, xhttp.AmzTagCount)
+		delete(metadata, s3_constants.AmzTagCount)
 	}

 	return

@@ -2,7 +2,7 @@ package s3api

 import (
 	"fmt"
-	headers "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"net/http"
 	"reflect"
 	"sort"
@@ -55,10 +55,10 @@ var processMetadataTestCases = []struct {
 	{
 		202,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -71,20 +71,20 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=existing",
-			headers.AmzUserMetaDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=existing",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
 		},
 	},

 	{
 		203,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -97,21 +97,21 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "existing",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "existing",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 	},

 	{
 		204,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -125,40 +125,40 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 	},

 	{
 		205,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{},
 		H{},
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 	},

 	{
 		206,
 		H{
-			"User-Agent": "firefox",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -172,19 +172,19 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent": "firefox",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 	},

 	{
 		207,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -198,10 +198,10 @@ var processMetadataTestCases = []struct {
 			"type": "existing",
 		},
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 	},
 }
@@ -235,10 +235,10 @@ var processMetadataBytesTestCases = []struct {
 	{
 		102,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -257,10 +257,10 @@ var processMetadataBytesTestCases = []struct {
 	{
 		103,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -279,11 +279,11 @@ var processMetadataBytesTestCases = []struct {
 	{
 		104,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -302,9 +302,9 @@ var processMetadataBytesTestCases = []struct {
 	{
 		105,
 		H{
-			"User-Agent": "firefox",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{
 			"X-Amz-Meta-My-Meta": "existing",
@@ -318,11 +318,11 @@ var processMetadataBytesTestCases = []struct {
 	{
 		107,
 		H{
-			"User-Agent": "firefox",
-			"X-Amz-Meta-My-Meta": "request",
-			"X-Amz-Tagging": "A=B&a=b&type=request",
-			headers.AmzUserMetaDirective: DirectiveReplace,
-			headers.AmzObjectTaggingDirective: DirectiveReplace,
+			"User-Agent": "firefox",
+			"X-Amz-Meta-My-Meta": "request",
+			"X-Amz-Tagging": "A=B&a=b&type=request",
+			s3_constants.AmzUserMetaDirective: DirectiveReplace,
+			s3_constants.AmzObjectTaggingDirective: DirectiveReplace,
 		},
 		H{},
 		H{
@@ -385,10 +385,10 @@ func TestProcessMetadataBytes(t *testing.T) {

 func fmtTagging(maps ...map[string]string) {
 	for _, m := range maps {
-		if tagging := m[headers.AmzObjectTagging]; len(tagging) > 0 {
+		if tagging := m[s3_constants.AmzObjectTagging]; len(tagging) > 0 {
 			split := strings.Split(tagging, "&")
 			sort.Strings(split)
-			m[headers.AmzObjectTagging] = strings.Join(split, "&")
+			m[s3_constants.AmzObjectTagging] = strings.Join(split, "&")
 		}
 	}
 }

@@ -19,7 +19,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/pquerna/cachecontrol/cacheobject"

-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"

 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -46,7 +45,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)

 	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)

 	_, err := validateContentMd5(r.Header)
@@ -135,7 +134,7 @@ func (s3a *S3ApiServer) toFilerUrl(bucket, object string) string {

 func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetObjectHandler %s %s", bucket, object)

 	if strings.HasSuffix(r.URL.Path, "/") {
@@ -150,7 +149,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)

 func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object)

 	destUrl := s3a.toFilerUrl(bucket, object)
@@ -160,7 +159,7 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request

 func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object)

 	destUrl := s3a.toFilerUrl(bucket, object)
@@ -209,7 +208,7 @@ type DeleteObjectsResponse struct {
 // DeleteMultipleObjectsHandler - Delete multiple objects
 func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)

 	deleteXMLBytes, err := io.ReadAll(r.Body)
@@ -325,7 +324,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des

 	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
 	for k, v := range r.URL.Query() {
-		if _, ok := xhttp.PassThroughHeaders[strings.ToLower(k)]; ok {
+		if _, ok := s3_constants.PassThroughHeaders[strings.ToLower(k)]; ok {
 			proxyReq.Header[k] = v
 		}
 	}

@@ -5,7 +5,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	weed_server "github.com/chrislusf/seaweedfs/weed/server"
 	"io"
@@ -27,7 +27,7 @@ const (

 // NewMultipartUploadHandler - New multipart upload.
 func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)

 	createMultipartUploadInput := &s3.CreateMultipartUploadInput{
 		Bucket: aws.String(bucket),
@@ -61,7 +61,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
 func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)

 	parts := &CompleteMultipartUpload{}
 	if err := xmlDecoder(r.Body, parts, r.ContentLength); err != nil {
@@ -96,7 +96,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r

 // AbortMultipartUploadHandler - Aborts multipart upload.
 func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)

 	// Get upload id.
 	uploadID, _, _, _ := getObjectResources(r.URL.Query())
@@ -125,7 +125,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht

 // ListMultipartUploadsHandler - Lists multipart uploads.
 func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)

 	prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())
 	if maxUploads < 0 {
@@ -164,7 +164,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht

 // ListObjectPartsHandler - Lists object parts in a multipart upload.
 func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)

 	uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
 	if partNumberMarker < 0 {
@@ -203,7 +203,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re

 // PutObjectPartHandler - Put an object part in a multipart upload.
 func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)

 	uploadID := r.URL.Query().Get("uploadId")
 	err := s3a.checkUploadId(object, uploadID)

@@ -3,7 +3,7 @@ package s3api
 import (
 	"encoding/xml"
 	"fmt"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"io"
 	"net/http"

@@ -17,7 +17,7 @@ import (
 // API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
 func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("GetObjectTaggingHandler %s %s", bucket, object)

 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
@@ -43,7 +43,7 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R
 // API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
 func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("PutObjectTaggingHandler %s %s", bucket, object)

 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))
@@ -99,7 +99,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
 // API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
 func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {

-	bucket, object := xhttp.GetBucketAndObject(r)
+	bucket, object := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("DeleteObjectTaggingHandler %s %s", bucket, object)

 	target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object))

@@ -5,6 +5,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"io"
 	"net/http"
 	"net/url"
@@ -15,7 +16,6 @@ import (

 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 )

@@ -39,7 +39,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html

 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("ListObjectsV2Handler %s", bucket)

 	originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
@@ -95,7 +95,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 	// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html

 	// collect parameters
-	bucket, _ := xhttp.GetBucketAndObject(r)
+	bucket, _ := s3_constants.GetBucketAndObject(r)
 	glog.V(3).Infof("ListObjectsV1Handler %s", bucket)

 	originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
@@ -157,7 +157,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 			return
 		}
 		storageClass := "STANDARD"
-		if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
+		if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
 			storageClass = string(v)
 		}
 		contents = append(contents, ListEntry{
@@ -188,7 +188,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 			_, _, _, doErr = s3a.doListFilerEntries(client, reqDir, prefix, 1, prefix, delimiter, true, func(dir string, entry *filer_pb.Entry) {
 				if entry.IsDirectory && entry.Attributes.Mime != "" && entry.Name == prefix {
 					storageClass := "STANDARD"
-					if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
+					if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
 						storageClass = string(v)
 					}
 					contents = append(contents, ListEntry{

@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/fluent/fluent-logger-golang/fluent"
 	"net/http"
 	"os"
@@ -134,7 +134,7 @@ func GetAccessHttpLog(r *http.Request, statusCode int, s3errCode ErrorCode) Acce
 }

 func GetAccessLog(r *http.Request, HTTPStatusCode int, s3errCode ErrorCode) *AccessLog {
-	bucket, key := xhttp.GetBucketAndObject(r)
+	bucket, key := s3_constants.GetBucketAndObject(r)
 	var errorCode string
 	if s3errCode != ErrNone {
 		errorCode = GetAPIError(s3errCode).Code
@@ -151,8 +151,8 @@ func GetAccessLog(r *http.Request, HTTPStatusCode int, s3errCode ErrorCode) *Acc
 		HostHeader: hostHeader,
 		RequestID: r.Header.Get("X-Request-ID"),
 		RemoteIP: remoteIP,
-		Requester: r.Header.Get(xhttp.AmzIdentityId),
-		SignatureVersion: r.Header.Get(xhttp.AmzAuthType),
+		Requester: r.Header.Get(s3_constants.AmzIdentityId),
+		SignatureVersion: r.Header.Get(s3_constants.AmzAuthType),
 		UserAgent: r.Header.Get("user-agent"),
 		HostId: hostname,
 		Bucket: bucket,

@@ -6,7 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"io"
 	"io/fs"
 	"mime/multipart"
@@ -254,7 +254,7 @@ func handleStaticResources2(r *mux.Router) {

 func adjustPassthroughHeaders(w http.ResponseWriter, r *http.Request, filename string) {
 	for header, values := range r.Header {
-		if normalizedHeader, ok := xhttp.PassThroughHeaders[strings.ToLower(header)]; ok {
+		if normalizedHeader, ok := s3_constants.PassThroughHeaders[strings.ToLower(header)]; ok {
 			w.Header()[normalizedHeader] = values
 		}
 	}

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/util/mem"
 	"io"
 	"math"
@@ -18,7 +19,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/images"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -173,12 +173,12 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	//set tag count
 	tagCount := 0
 	for k := range entry.Extended {
-		if strings.HasPrefix(k, xhttp.AmzObjectTagging+"-") {
+		if strings.HasPrefix(k, s3_constants.AmzObjectTagging+"-") {
 			tagCount++
 		}
 	}
 	if tagCount > 0 {
-		w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
+		w.Header().Set(s3_constants.AmzTagCount, strconv.Itoa(tagCount))
 	}

 	setEtag(w, etag)

@@ -3,6 +3,7 @@ package weed_server
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"io"
 	"net/http"
 	"os"
@@ -15,7 +16,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
-	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -349,23 +349,23 @@ func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool
 		}
 	}

-	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
-		metadata[xhttp.AmzStorageClass] = []byte(sc)
+	if sc := r.Header.Get(s3_constants.AmzStorageClass); sc != "" {
+		metadata[s3_constants.AmzStorageClass] = []byte(sc)
 	}

-	if tags := r.Header.Get(xhttp.AmzObjectTagging); tags != "" {
+	if tags := r.Header.Get(s3_constants.AmzObjectTagging); tags != "" {
 		for _, v := range strings.Split(tags, "&") {
 			tag := strings.Split(v, "=")
 			if len(tag) == 2 {
-				metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
+				metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
 			} else if len(tag) == 1 {
-				metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = nil
+				metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = nil
 			}
 		}
 	}

 	for header, values := range r.Header {
-		if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+		if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
 			for _, value := range values {
 				metadata[header] = []byte(value)
 			}