package s3

import (
	"fmt"
	"io"
	"reflect"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
	"github.com/chrislusf/seaweedfs/weed/remote_storage"
	"github.com/chrislusf/seaweedfs/weed/util"
)

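// init registers this maker under the "s3" remote storage type so it can be looked up by name.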
func init() {
	remote_storage.RemoteStorageClientMakers["s3"] = new(s3RemoteStorageMaker)
}

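// s3RemoteStorageMaker builds S3-backed remote storage clients from a remote configuration.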
type s3RemoteStorageMaker struct{}

func (s s3RemoteStorageMaker) HasBucket() bool {
	return true
}

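// Make constructs an S3 client from conf: region, endpoint, path-style addressing, optional
// static credentials, optional AWS V4 signing, and a SeaweedFS User-Agent header. The
// skipSha256PayloadSigning build handler is assumed to be defined elsewhere in this package.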
func (s s3RemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.RemoteStorageClient, error) {
	client := &s3RemoteStorageClient{
		conf: conf,
	}
	config := &aws.Config{
		Region:                        aws.String(conf.S3Region),
		Endpoint:                      aws.String(conf.S3Endpoint),
		S3ForcePathStyle:              aws.Bool(conf.S3ForcePathStyle),
		S3DisableContentMD5Validation: aws.Bool(true),
	}
	if conf.S3AccessKey != "" && conf.S3SecretKey != "" {
		config.Credentials = credentials.NewStaticCredentials(conf.S3AccessKey, conf.S3SecretKey, "")
	}

	sess, err := session.NewSession(config)
	if err != nil {
		return nil, fmt.Errorf("create aws session: %v", err)
	}
	if conf.S3V4Signature {
		sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	}
	sess.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("User-Agent", "SeaweedFS/"+util.VERSION_NUMBER)
	})
	sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
	client.conn = s3.New(sess)
	return client, nil
}

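// s3RemoteStorageClient talks to an S3-compatible remote store through the AWS SDK.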
type s3RemoteStorageClient struct {
	conf *remote_pb.RemoteConf
	conn s3iface.S3API
}

// compile-time check that s3RemoteStorageClient satisfies remote_storage.RemoteStorageClient
var _ = remote_storage.RemoteStorageClient(&s3RemoteStorageClient{})

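// Traverse lists every object under the remote location's bucket and path prefix, paging
// through results, and invokes visitFn once per object; listing stops early if visitFn
// returns an error.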
func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) {
	pathKey := remote.Path[1:]

	listInput := &s3.ListObjectsV2Input{
		Bucket:              aws.String(remote.Bucket),
		ContinuationToken:   nil,
		Delimiter:           nil, // not aws.String("/"), iterate through all entries
		EncodingType:        nil,
		ExpectedBucketOwner: nil,
		FetchOwner:          nil,
		MaxKeys:             nil, // aws.Int64(1000),
		Prefix:              aws.String(pathKey),
		RequestPayer:        nil,
		StartAfter:          nil,
	}
	isLastPage := false
	for !isLastPage && err == nil {
		listErr := s.conn.ListObjectsV2Pages(listInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
			for _, content := range page.Contents {
				key := *content.Key
				key = "/" + key
				dir, name := util.FullPath(key).DirAndName()
				if err := visitFn(dir, name, false, &filer_pb.RemoteEntry{
					RemoteMtime: (*content.LastModified).Unix(),
					RemoteSize:  *content.Size,
					RemoteETag:  *content.ETag,
					StorageName: s.conf.Name,
				}); err != nil {
					return false
				}
			}
			listInput.ContinuationToken = page.NextContinuationToken
			isLastPage = lastPage
			return true
		})
		if listErr != nil {
			err = fmt.Errorf("list %v: %v", remote, listErr)
		}
	}
	return
}

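// ReadFile downloads the byte range [offset, offset+size) of the object at loc.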
func (s *s3RemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) {
	downloader := s3manager.NewDownloaderWithClient(s.conn, func(u *s3manager.Downloader) {
		u.PartSize = int64(4 * 1024 * 1024)
		u.Concurrency = 1
	})

	dataSlice := make([]byte, int(size))
	writerAt := aws.NewWriteAtBuffer(dataSlice)

	_, err = downloader.Download(writerAt, &s3.GetObjectInput{
		Bucket: aws.String(loc.Bucket),
		Key:    aws.String(loc.Path[1:]),
		Range:  aws.String(fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err)
	}

	return writerAt.Bytes(), nil
}

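// WriteDirectory is a no-op: S3 has no real directories, only key prefixes.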
func (s *s3RemoteStorageClient) WriteDirectory(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry) (err error) {
	return nil
}

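// RemoveDirectory is likewise a no-op, since only objects are stored remotely.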
func (s *s3RemoteStorageClient) RemoveDirectory(loc *remote_pb.RemoteStorageLocation) (err error) {
	return nil
}

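// WriteFile uploads the entry's content to loc, growing the multipart part size so the upload
// needs at most roughly 1,000 parts, forwards extended attributes as the object's tagging
// string, and reads back the resulting remote entry.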
func (s *s3RemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) {

	fileSize := int64(filer.FileSize(entry))

	partSize := int64(8 * 1024 * 1024) // the minimum/default allowed part size is 5MB
	for partSize*1000 < fileSize {
		partSize *= 4
	}

	// create an uploader with the session and custom options
	uploader := s3manager.NewUploaderWithClient(s.conn, func(u *s3manager.Uploader) {
		u.PartSize = partSize
		u.Concurrency = 1
	})

	// process tagging
	tags := ""
	for k, v := range entry.Extended {
		if len(tags) > 0 {
			tags = tags + "&"
		}
		tags = tags + k + "=" + string(v)
	}

	// upload the file to S3
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket:       aws.String(loc.Bucket),
		Key:          aws.String(loc.Path[1:]),
		Body:         reader,
		Tagging:      aws.String(tags),
		StorageClass: aws.String(s.conf.S3StorageClass),
	})

	// in case the upload fails
	if err != nil {
		return nil, fmt.Errorf("upload to %s/%s%s: %v", loc.Name, loc.Bucket, loc.Path, err)
	}

	// read back the remote entry
	return s.readFileRemoteEntry(loc)
}

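// toTagging converts extended attributes into an s3.Tagging tag set.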
func toTagging(attributes map[string][]byte) *s3.Tagging {
	tagging := &s3.Tagging{}
	for k, v := range attributes {
		tagging.TagSet = append(tagging.TagSet, &s3.Tag{
			Key:   aws.String(k),
			Value: aws.String(string(v)),
		})
	}
	return tagging
}

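// readFileRemoteEntry issues a HeadObject request for loc and maps the response into a filer_pb.RemoteEntry.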
func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
	resp, err := s.conn.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(loc.Bucket),
		Key:    aws.String(loc.Path[1:]),
	})
	if err != nil {
		return nil, err
	}

	return &filer_pb.RemoteEntry{
		RemoteMtime: resp.LastModified.Unix(),
		RemoteSize:  *resp.ContentLength,
		RemoteETag:  *resp.ETag,
		StorageName: s.conf.Name,
	}, nil
}

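// UpdateFileMetadata syncs the entry's extended attributes to S3 object tags: it replaces the
// tag set when attributes are present, deletes it when they are empty, and skips the call
// entirely if the attributes are unchanged.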
func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStorageLocation, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) (err error) {
	if reflect.DeepEqual(oldEntry.Extended, newEntry.Extended) {
		return nil
	}
	tagging := toTagging(newEntry.Extended)
	if len(tagging.TagSet) > 0 {
		_, err = s.conn.PutObjectTagging(&s3.PutObjectTaggingInput{
			Bucket:  aws.String(loc.Bucket),
			Key:     aws.String(loc.Path[1:]),
			Tagging: tagging,
		})
	} else {
		_, err = s.conn.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
			Bucket: aws.String(loc.Bucket),
			Key:    aws.String(loc.Path[1:]),
		})
	}
	return
}

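// DeleteFile removes the object at loc.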
func (s *s3RemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) {
	_, err = s.conn.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(loc.Bucket),
		Key:    aws.String(loc.Path[1:]),
	})
	return
}

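// ListBuckets returns every bucket visible to the configured credentials.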
func (s *s3RemoteStorageClient) ListBuckets() (buckets []*remote_storage.Bucket, err error) {
	resp, err := s.conn.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		return nil, fmt.Errorf("list buckets: %v", err)
	}
	for _, b := range resp.Buckets {
		buckets = append(buckets, &remote_storage.Bucket{
			Name:      *b.Name,
			CreatedAt: *b.CreationDate,
		})
	}
	return
}

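// CreateBucket creates a bucket with default ACL and configuration.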
func (s *s3RemoteStorageClient) CreateBucket(name string) (err error) {
	_, err = s.conn.CreateBucket(&s3.CreateBucketInput{
		ACL:                        nil,
		Bucket:                     aws.String(name),
		CreateBucketConfiguration:  nil,
		GrantFullControl:           nil,
		GrantRead:                  nil,
		GrantReadACP:               nil,
		GrantWrite:                 nil,
		GrantWriteACP:              nil,
		ObjectLockEnabledForBucket: nil,
	})
	if err != nil {
		return fmt.Errorf("%s create bucket %s: %v", s.conf.Name, name, err)
	}
	return
}

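// DeleteBucket removes the named bucket; S3 requires the bucket to be empty first.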
func (s *s3RemoteStorageClient) DeleteBucket(name string) (err error) {
	_, err = s.conn.DeleteBucket(&s3.DeleteBucketInput{
		Bucket: aws.String(name),
	})
	if err != nil {
		return fmt.Errorf("delete bucket %s: %v", name, err)
	}
	return
}