package s3_backend

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
	"github.com/google/uuid"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/backend"
)

func init() {
	backend.BackendStorageFactories["s3"] = &S3BackendFactory{}
}
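
// S3BackendFactory creates S3BackendStorage instances; init above registers
// it under the "s3" storage type.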
type S3BackendFactory struct {
}

func (factory *S3BackendFactory) StorageType() backend.StorageType {
	return backend.StorageType("s3")
}

func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, id string) (backend.BackendStorage, error) {
	return newS3BackendStorage(configuration, id)
}
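
// S3BackendStorage holds the credentials, region, bucket, and S3 client for
// one configured "s3.<id>" backend.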
type S3BackendStorage struct {
	id                    string
	aws_access_key_id     string
	aws_secret_access_key string
	region                string
	bucket                string
	conn                  s3iface.S3API
}
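
// newS3BackendStorage reads the aws_access_key_id, aws_secret_access_key,
// region, and bucket properties and opens an S3 session. A minimal sketch of
// a matching configuration (key names taken from the GetString calls below;
// the section naming is an assumption):
//
//	[storage.backend.s3.default]
//	aws_access_key_id = ""
//	aws_secret_access_key = ""
//	region = "us-east-2"
//	bucket = "your_bucket_name"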
func newS3BackendStorage(configuration backend.StringProperties, id string) (s *S3BackendStorage, err error) {
	s = &S3BackendStorage{}
	s.id = id
	s.aws_access_key_id = configuration.GetString("aws_access_key_id")
	s.aws_secret_access_key = configuration.GetString("aws_secret_access_key")
	s.region = configuration.GetString("region")
	s.bucket = configuration.GetString("bucket")
	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region)
	if err != nil {
		// do not log success when the session could not be created
		return
	}

	glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket)
	return
}
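
// ToProperties exports the backend configuration as a string map; note that
// it includes the plain-text secret key.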
func (s *S3BackendStorage) ToProperties() map[string]string {
	m := make(map[string]string)
	m["aws_access_key_id"] = s.aws_access_key_id
	m["aws_secret_access_key"] = s.aws_secret_access_key
	m["region"] = s.region
	m["bucket"] = s.bucket
	return m
}
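
// NewStorageFile wraps an S3 object key as a BackendStorageFile. A leading
// "/" is stripped because S3 object keys do not start with one.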
func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeTierInfo) backend.BackendStorageFile {
	if strings.HasPrefix(key, "/") {
		key = key[1:]
	}

	f := &S3BackendStorageFile{
		backendStorage: s,
		key:            key,
		tierInfo:       tierInfo,
	}

	return f
}
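
// CopyFile uploads the local dat file to the bucket under a freshly generated
// UUID key, reporting progress through fn, and returns the key and the
// uploaded size.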
func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
	randomUuid, err := uuid.NewRandom()
	if err != nil {
		// surface the error instead of silently using a zero-value uuid
		return
	}
	key = randomUuid.String()

	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)

	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)

	return
}
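
// DownloadFile fetches the object stored under key from the bucket into the
// local fileName, reporting progress through fn.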
func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
	glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key)

	size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn)

	return
}
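
// DeleteFile removes the object stored under key from the bucket.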
func (s *S3BackendStorage) DeleteFile(key string) (err error) {
	glog.V(1).Infof("delete dat file %s from remote", key)

	err = deleteFromS3(s.conn, s.bucket, key)

	return
}
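
// S3BackendStorageFile is a view of one S3 object; the volume tier info
// carries the file size and modification time consulted by GetStat.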
type S3BackendStorageFile struct {
	backendStorage *S3BackendStorage
	key            string
	tierInfo       *volume_server_pb.VolumeTierInfo
}
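
// ReadAt issues a ranged GetObject request for bytes [off, off+len(p)) and
// copies the response body into p.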
func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n int, err error) {

	bytesRange := fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))-1)

	getObjectOutput, getObjectErr := s3backendStorageFile.backendStorage.conn.GetObject(&s3.GetObjectInput{
		Bucket: &s3backendStorageFile.backendStorage.bucket,
		Key:    &s3backendStorageFile.key,
		Range:  &bytesRange,
	})

	if getObjectErr != nil {
		return 0, fmt.Errorf("bucket %s GetObject %s: %v", s3backendStorageFile.backendStorage.bucket, s3backendStorageFile.key, getObjectErr)
	}
	defer getObjectOutput.Body.Close()

	glog.V(4).Infof("read %s %s", s3backendStorageFile.key, bytesRange)
	glog.V(4).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength)

	// Accumulate the total byte count across partial reads: the body may
	// arrive in several chunks, and ReadAt must report all bytes copied
	// into p, not just the count from the last Read call.
	for n < len(p) {
		var readCount int
		readCount, err = getObjectOutput.Body.Read(p[n:])
		n += readCount
		if err != nil {
			break
		}
	}

	if err == io.EOF {
		err = nil
	}

	return
}
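
// WriteAt is not implemented; this backend only serves reads of tiered data.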
func (s3backendStorageFile S3BackendStorageFile) WriteAt(p []byte, off int64) (n int, err error) {
	panic("not implemented")
}

func (s3backendStorageFile S3BackendStorageFile) Truncate(off int64) error {
	panic("not implemented")
}

func (s3backendStorageFile S3BackendStorageFile) Close() error {
	return nil
}
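
// GetStat reports the dat file size and modification time recorded in the
// volume tier info, avoiding an S3 round trip.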
func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTime time.Time, err error) {

	files := s3backendStorageFile.tierInfo.GetFiles()

	if len(files) == 0 {
		err = fmt.Errorf("remote file info not found")
		return
	}

	datSize = int64(files[0].FileSize)
	modTime = time.Unix(int64(files[0].ModifiedTime), 0)

	return
}

func (s3backendStorageFile S3BackendStorageFile) Name() string {
	return s3backendStorageFile.key
}