package s3api

import (
	"bytes"
	"crypto/md5"
	"encoding/json"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/pquerna/cachecontrol/cacheobject"

	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	weed_server "github.com/chrislusf/seaweedfs/weed/server"
	"github.com/chrislusf/seaweedfs/weed/util"
)

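// client is the package-level HTTP client used to proxy and upload requests to
// the filer; the large idle connection pool lets repeated requests reuse
// connections.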
var (
	client *http.Client
)

func init() {
	client = &http.Client{Transport: &http.Transport{
		MaxIdleConns:        1024,
		MaxIdleConnsPerHost: 1024,
	}}
}

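// mimeDetect sniffs up to the first 512 bytes of the request body, sets the
// Content-Type header from http.DetectContentType, and returns a reader that
// replays the sniffed bytes followed by the remaining body.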
func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser {
	mimeBuffer := make([]byte, 512)
	size, _ := dataReader.Read(mimeBuffer)
	if size > 0 {
		r.Header.Set("Content-Type", http.DetectContentType(mimeBuffer[:size]))
		return io.NopCloser(io.MultiReader(bytes.NewReader(mimeBuffer[:size]), dataReader))
	}
	return io.NopCloser(dataReader)
}

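// PutObjectHandler handles the S3 PutObject API: it validates the Content-MD5,
// Cache-Control and Expires headers, applies the configured request
// authentication, and either creates a directory entry (for keys ending in "/")
// or streams the request body to the filer.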
func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {

	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html

	bucket, object := xhttp.GetBucketAndObject(r)
	glog.V(3).Infof("PutObjectHandler %s %s", bucket, object)

	_, err := validateContentMd5(r.Header)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
		return
	}

	if r.Header.Get("Cache-Control") != "" {
		if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
			return
		}
	}

	if r.Header.Get("Expires") != "" {
		if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest)
			return
		}
	}

	dataReader := r.Body
	rAuthType := getRequestAuthType(r)
	if s3a.iam.isEnabled() {
		var s3ErrCode s3err.ErrorCode
		switch rAuthType {
		case authTypeStreamingSigned:
			dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
		case authTypeSignedV2, authTypePresignedV2:
			_, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
		case authTypePresigned, authTypeSigned:
			_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
		}
		if s3ErrCode != s3err.ErrNone {
			s3err.WriteErrorResponse(w, r, s3ErrCode)
			return
		}
	} else {
		if authTypeStreamingSigned == rAuthType {
			s3err.WriteErrorResponse(w, r, s3err.ErrAuthNotSetup)
			return
		}
	}
	defer dataReader.Close()

	if strings.HasSuffix(object, "/") {
		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
			return
		}
	} else {
		uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

		if r.Header.Get("Content-Type") == "" {
			dataReader = mimeDetect(r, dataReader)
		}

		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)

		if errCode != s3err.ErrNone {
			s3err.WriteErrorResponse(w, r, errCode)
			return
		}

		setEtag(w, etag)
	}

	writeSuccessResponseEmpty(w, r)
}

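// urlPathEscape escapes each path segment of the object key individually so
// that the "/" separators are preserved in the resulting filer URL.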
func urlPathEscape(object string) string {
	var escapedParts []string
	for _, part := range strings.Split(object, "/") {
		escapedParts = append(escapedParts, url.PathEscape(part))
	}
	return strings.Join(escapedParts, "/")
}

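// GetObjectHandler handles the S3 GetObject API by proxying the read to the
// filer and passing the filer response through unchanged.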
func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {

	bucket, object := xhttp.GetBucketAndObject(r)
	glog.V(3).Infof("GetObjectHandler %s %s", bucket, object)

	if strings.HasSuffix(r.URL.Path, "/") {
		s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented)
		return
	}

	destUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

	s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
}

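// HeadObjectHandler handles the S3 HeadObject API by proxying the request to
// the filer and passing the response headers through unchanged.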
func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {

	bucket, object := xhttp.GetBucketAndObject(r)
	glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object)

	destUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

	s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
}

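// DeleteObjectHandler handles the S3 DeleteObject API by issuing a recursive
// delete to the filer and answering 204 No Content on success.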
func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {

	bucket, object := xhttp.GetBucketAndObject(r)
	glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object)

	destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

	s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) {
		statusCode = http.StatusNoContent
		for k, v := range proxyResponse.Header {
			w.Header()[k] = v
		}
		w.WriteHeader(statusCode)
		return statusCode
	})
}

// ObjectIdentifier carries the key name for an object to delete.
type ObjectIdentifier struct {
	ObjectName string `xml:"Key"`
}

// DeleteObjectsRequest - xml carrying the object key names which need to be deleted.
type DeleteObjectsRequest struct {
	// Element to enable quiet mode for the request
	Quiet bool
	// List of objects to be deleted
	Objects []ObjectIdentifier `xml:"Object"`
}

// DeleteError structure.
type DeleteError struct {
	Code    string
	Message string
	Key     string
}

// DeleteObjectsResponse container for multiple object deletes.
type DeleteObjectsResponse struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

	// Collection of all deleted objects
	DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`

	// Collection of errors deleting certain objects.
	Errors []DeleteError `xml:"Error,omitempty"`
}

// DeleteMultipleObjectsHandler - Delete multiple objects
func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {

	bucket, _ := xhttp.GetBucketAndObject(r)
	glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)

	deleteXMLBytes, err := io.ReadAll(r.Body)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}

	deleteObjects := &DeleteObjectsRequest{}
	if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML)
		return
	}

	var deletedObjects []ObjectIdentifier
	var deleteErrors []DeleteError
	var auditLog *s3err.AccessLog

	directoriesWithDeletion := make(map[string]int)

	if s3err.Logger != nil {
		auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone)
	}
	s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {

		// delete file entries
		for _, object := range deleteObjects.Objects {
			lastSeparator := strings.LastIndex(object.ObjectName, "/")
			parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false
			if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
				entryName = object.ObjectName[lastSeparator+1:]
				parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
			}
			parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)

			err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
			if err == nil {
				directoriesWithDeletion[parentDirectoryPath]++
				deletedObjects = append(deletedObjects, object)
			} else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) {
				deletedObjects = append(deletedObjects, object)
			} else {
				delete(directoriesWithDeletion, parentDirectoryPath)
				deleteErrors = append(deleteErrors, DeleteError{
					Code:    "",
					Message: err.Error(),
					Key:     object.ObjectName,
				})
			}
			if auditLog != nil {
				auditLog.Key = entryName
				s3err.PostAccessLog(*auditLog)
			}
		}

		// purge empty folders, only checking folders with deletions
		for len(directoriesWithDeletion) > 0 {
			directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
		}

		return nil
	})

	deleteResp := DeleteObjectsResponse{}
	if !deleteObjects.Quiet {
		deleteResp.DeletedObjects = deletedObjects
	}
	deleteResp.Errors = deleteErrors

	writeSuccessResponseXML(w, r, deleteResp)

}

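// doDeleteEmptyDirectories attempts to remove, longest path first, every
// directory that had entries deleted from it. For each directory that is
// successfully removed, its parent is recorded in the returned map so the
// caller can retry one level up until no more directories can be purged.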
func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) {
	var allDirs []string
	for dir := range directoriesWithDeletion {
		allDirs = append(allDirs, dir)
	}
	sort.Slice(allDirs, func(i, j int) bool {
		return len(allDirs[i]) > len(allDirs[j])
	})
	newDirectoriesWithDeletion = make(map[string]int)
	for _, dir := range allDirs {
		parentDir, dirName := util.FullPath(dir).DirAndName()
		if parentDir == s3a.option.BucketsPath {
			continue
		}
		if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil {
			glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err)
		} else {
			newDirectoriesWithDeletion[parentDir]++
		}
	}
	return
}

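// proxyToFiler forwards the S3 request to the filer at destUrl, copying the
// pass-through query parameters and request headers, then lets responseFn
// translate the filer response onto the S3 response writer.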
func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int)) {

	glog.V(3).Infof("s3 proxying %s to %s", r.Method, destUrl)

	proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body)

	if err != nil {
		glog.Errorf("NewRequest %s: %v", destUrl, err)
		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}

	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
	for k, v := range r.URL.Query() {
		if _, ok := xhttp.PassThroughHeaders[strings.ToLower(k)]; ok {
			proxyReq.Header[k] = v
		}
	}
	for header, values := range r.Header {
		proxyReq.Header[header] = values
	}

	resp, postErr := client.Do(proxyReq)

	if postErr != nil {
		glog.Errorf("post to filer: %v", postErr)
		s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
		return
	}
	defer util.CloseResponse(resp)

	if resp.StatusCode == http.StatusPreconditionFailed {
		s3err.WriteErrorResponse(w, r, s3err.ErrPreconditionFailed)
		return
	}

	if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
		if r.Method != "DELETE" {
			s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey)
			return
		}
	}

	responseStatusCode := responseFn(resp, w)
	s3err.PostLog(r, responseStatusCode, s3err.ErrNone)
}

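// passThroughResponse copies the filer response headers, status code, and body
// to the S3 client, mapping a 200 response that carries a Content-Range header
// to 206 Partial Content.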
func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) {
	for k, v := range proxyResponse.Header {
		w.Header()[k] = v
	}
	if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 {
		statusCode = http.StatusPartialContent
	} else {
		statusCode = proxyResponse.StatusCode
	}
	w.WriteHeader(statusCode)
	io.Copy(w, proxyResponse.Body)
	return statusCode
}

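// putToFiler streams the object data to the filer upload URL while computing
// the MD5 ETag on the fly, then parses the filer's JSON response and maps any
// reported error to an S3 error code.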
func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code s3err.ErrorCode) {

	hash := md5.New()
	var body = io.TeeReader(dataReader, hash)

	proxyReq, err := http.NewRequest("PUT", uploadUrl, body)

	if err != nil {
		glog.Errorf("NewRequest %s: %v", uploadUrl, err)
		return "", s3err.ErrInternalError
	}

	proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)

	for header, values := range r.Header {
		for _, value := range values {
			proxyReq.Header.Add(header, value)
		}
	}

	resp, postErr := client.Do(proxyReq)

	if postErr != nil {
		glog.Errorf("post to filer: %v", postErr)
		return "", s3err.ErrInternalError
	}
	defer resp.Body.Close()

	etag = fmt.Sprintf("%x", hash.Sum(nil))

	resp_body, ra_err := io.ReadAll(resp.Body)
	if ra_err != nil {
		glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
		return etag, s3err.ErrInternalError
	}
	var ret weed_server.FilerPostResult
	unmarshal_err := json.Unmarshal(resp_body, &ret)
	if unmarshal_err != nil {
		glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
		return "", s3err.ErrInternalError
	}
	if ret.Error != "" {
		glog.Errorf("upload to filer error: %v", ret.Error)
		return "", filerErrorToS3Error(ret.Error)
	}

	return etag, s3err.ErrNone
}

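// setEtag writes the ETag response header, adding surrounding double quotes
// when the value is not already quoted.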
func setEtag(w http.ResponseWriter, etag string) {
	if etag != "" {
		if strings.HasPrefix(etag, "\"") {
			w.Header().Set("ETag", etag)
		} else {
			w.Header().Set("ETag", "\""+etag+"\"")
		}
	}
}

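// filerErrorToS3Error maps known filer error strings to S3 error codes,
// falling back to ErrInternalError for anything unrecognized.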
func filerErrorToS3Error(errString string) s3err.ErrorCode {
	if strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory") {
		return s3err.ErrExistingObjectIsDirectory
	}
	return s3err.ErrInternalError
}