package s3api

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"modernc.org/strutil"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
	"github.com/chrislusf/seaweedfs/weed/util"
)

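// DirectiveCopy and DirectiveReplace are the values S3 clients may send in the
// X-Amz-Metadata-Directive and X-Amz-Tagging-Directive headers: COPY keeps the
// source object's metadata/tags, REPLACE takes them from the copy request.
// Roughly, a copy request that rewrites user metadata looks like the following
// sketch (header names per the AWS S3 API; values are illustrative only):
//
//	PUT /dst-bucket/dst-object HTTP/1.1
//	X-Amz-Copy-Source: /src-bucket/src-object
//	X-Amz-Metadata-Directive: REPLACE
//	X-Amz-Meta-color: blue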
const (
	DirectiveCopy    = "COPY"
	DirectiveReplace = "REPLACE"
)

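// CopyObjectHandler handles the S3 CopyObject operation: it resolves the
// X-Amz-Copy-Source header, streams the source object from the filer, and
// writes it to the destination key. When source and destination are the same
// object and a REPLACE directive is present, only the metadata/tags are
// rewritten in place without copying any data.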
func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {

	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

	// Copy source path.
	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
	if err != nil {
		// Save unescaped string as is.
		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
	}

	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)

	glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject)

	replaceMeta, replaceTagging := replaceDirective(r.Header)

	if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && (replaceMeta || replaceTagging) {
		fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject))
		dir, name := fullPath.DirAndName()
		entry, err := s3a.getEntry(dir, name)
		if err != nil || entry.IsDirectory {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
			return
		}
		entry.Extended = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging)
		err = s3a.touch(dir, name, entry)
		if err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
			return
		}
		writeSuccessResponseXML(w, r, CopyObjectResult{
			ETag:         fmt.Sprintf("%x", entry.Attributes.Md5),
			LastModified: time.Now().UTC(),
		})
		return
	}

	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject))
	dir, name := srcPath.DirAndName()
	if entry, err := s3a.getEntry(dir, name); err != nil || entry.IsDirectory {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	if srcBucket == dstBucket && srcObject == dstObject {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopyDest)
		return
	}

	dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, dstBucket, urlPathEscape(dstObject), dstBucket)
	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlPathEscape(srcObject))

	// download the source object from the filer, passing the read JWT when one is configured
	_, _, resp, err := util.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	defer util.CloseResponse(resp)

	tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name)
	if tagErr != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
	etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination)

	if errCode != s3err.ErrNone {
		s3err.WriteErrorResponse(w, r, errCode)
		return
	}

	setEtag(w, etag)

	response := CopyObjectResult{
		ETag:         etag,
		LastModified: time.Now().UTC(),
	}

	writeSuccessResponseXML(w, r, response)

}

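// pathToBucketAndObject splits an X-Amz-Copy-Source path into bucket and
// object key, for example:
//
//	pathToBucketAndObject("/srcBucket/dir/file.txt") // => "srcBucket", "/dir/file.txt"
//	pathToBucketAndObject("srcBucket")               // => "srcBucket", "/"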
func pathToBucketAndObject(path string) (bucket, object string) {
	path = strings.TrimPrefix(path, "/")
	parts := strings.SplitN(path, "/", 2)
	if len(parts) == 2 {
		return parts[0], "/" + parts[1]
	}
	return parts[0], "/"
}

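// CopyPartResult is the XML body returned for a successful UploadPartCopy
// request, mirroring the S3 CopyPartResult element.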
type CopyPartResult struct {
	LastModified time.Time `xml:"LastModified"`
	ETag         string    `xml:"ETag"`
}

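// CopyObjectPartHandler handles the S3 UploadPartCopy operation: it copies a
// byte range of an existing object into a part of an ongoing multipart upload.
// The optional x-amz-copy-source-range header selects the range, e.g.
// "bytes=0-1048575" for the first MiB (format per the AWS S3 API).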
func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	// https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

	// Copy source path.
	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
	if err != nil {
		// Save unescaped string as is.
		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
	}

	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	uploadID := r.URL.Query().Get("uploadId")
	partIDString := r.URL.Query().Get("partNumber")

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
		return
	}

	glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d", srcBucket, srcObject, dstBucket, partID)

	// check partID with maximum part ID for multipart objects
	if partID > globalMaxPartID {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
		return
	}

	rangeHeader := r.Header.Get("x-amz-copy-source-range")

	dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
		s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(dstBucket), uploadID, partID, dstBucket)
	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlPathEscape(srcObject))

	// read the selected range of the source object from the filer, passing the read JWT when one is configured
	dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	defer dataReader.Close()

	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
	etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination)

	if errCode != s3err.ErrNone {
		s3err.WriteErrorResponse(w, r, errCode)
		return
	}

	setEtag(w, etag)

	response := CopyPartResult{
		ETag:         etag,
		LastModified: time.Now().UTC(),
	}

	writeSuccessResponseXML(w, r, response)

}

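// replaceDirective reports whether the request asks to REPLACE (rather than
// COPY) the user metadata and the object tagging, based on the
// s3_constants.AmzUserMetaDirective and s3_constants.AmzObjectTaggingDirective
// request headers.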
func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) {
	return reqHeader.Get(s3_constants.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(s3_constants.AmzObjectTaggingDirective) == DirectiveReplace
}

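// processMetadata prepares the outgoing copy request headers: unless the
// corresponding REPLACE directive is set, it drops user-metadata/tagging
// headers from the request and carries over the source object's values
// instead. Existing tags are fetched via getTags and re-encoded as a
// query-string style value, e.g. "k1=v1&k2=v2", set on the
// s3_constants.AmzObjectTagging header.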
func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) {
	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) == 0 {
		if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
			reqHeader[s3_constants.AmzStorageClass] = sc
		}
	}

	if !replaceMeta {
		for header := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
				delete(reqHeader, header)
			}
		}
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
				reqHeader[k] = v
			}
		}
	}

	if !replaceTagging {
		for header := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzObjectTagging) {
				delete(reqHeader, header)
			}
		}

		found := false
		for k := range existing {
			if strings.HasPrefix(k, s3_constants.AmzObjectTaggingPrefix) {
				found = true
				break
			}
		}

		if found {
			tags, err := getTags(dir, name)
			if err != nil {
				return err
			}

			var tagArr []string
			for k, v := range tags {
				tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v))
			}
			tagStr := strutil.JoinFields(tagArr, "&")
			reqHeader.Set(s3_constants.AmzObjectTagging, tagStr)
		}
	}
	return
}

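// processMetadataBytes builds the extended-attribute map used by the
// metadata-only branch of CopyObjectHandler. Depending on the directives it
// either takes user metadata/tags from the request headers or carries them
// over from the existing entry; request tags are stored under per-key entries
// of the form s3_constants.AmzObjectTagging+"-<tagKey>".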
func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte) {
	metadata = make(map[string][]byte)

	if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
		metadata[s3_constants.AmzStorageClass] = sc
	}
	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) > 0 {
		metadata[s3_constants.AmzStorageClass] = []byte(sc)
	}

	if replaceMeta {
		for header, values := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
				for _, value := range values {
					metadata[header] = []byte(value)
				}
			}
		}
	} else {
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
				metadata[k] = v
			}
		}
	}

	if replaceTagging {
		if tags := reqHeader.Get(s3_constants.AmzObjectTagging); tags != "" {
			for _, v := range strings.Split(tags, "&") {
				tag := strings.Split(v, "=")
				if len(tag) == 2 {
					metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
				} else if len(tag) == 1 {
					metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = nil
				}
			}
		}
	} else {
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzObjectTagging) {
				metadata[k] = v
			}
		}
		delete(metadata, s3_constants.AmzTagCount)
	}

	return
}