cloud tier: remove tagging since not all s3 vendors support this

Chris Lu 2021-10-29 12:39:19 -07:00
parent d04cdcf40d
commit c857cc7286
4 changed files with 5 additions and 24 deletions
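For context: the attributes removed below were flattened into the "k1=v1&k2=v2" string that the S3 Tagging field expects, and S3-compatible vendors without object-tagging support can reject uploads that carry it, which is the motivation stated in the commit message. A rough, illustrative sketch of that encoding (the removed code concatenated the pairs by hand; net/url is used here only for escaping, and the sample values are made up):

package main

import (
	"fmt"
	"net/url"
)

// encodeTags builds the query-style tag string that goes into the S3 Tagging field.
func encodeTags(attributes map[string]string) string {
	values := url.Values{}
	for k, v := range attributes {
		values.Set(k, v)
	}
	return values.Encode() // keys come out sorted and percent-escaped
}

func main() {
	// Hypothetical sample values; the real keys come from the removed code below.
	fmt.Println(encodeTags(map[string]string{
		"volumeId":   "7",
		"collection": "example",
		"ext":        ".dat",
	}))
	// Prints: collection=example&ext=.dat&volumeId=7
}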

View file

@@ -62,13 +62,8 @@ func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTi
 		})
 	}
 
-	// remember the file original source
-	attributes := make(map[string]string)
-	attributes["volumeId"] = v.Id.String()
-	attributes["collection"] = v.Collection
-	attributes["ext"] = ".dat"
 	// copy the data file
-	key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn)
+	key, size, err := backendStorage.CopyFile(diskFile.File, fn)
 	if err != nil {
 		return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err)
 	}
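A minimal caller-side sketch of the new two-argument CopyFile; only the signature and the glog import path come from this diff, while the helper name, the backend import path, and the logging are assumptions:

package example

import (
	"fmt"
	"os"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/backend"
)

// copyDatToRemote is a hypothetical helper: it wires a simple progress callback
// into the new CopyFile signature and returns the remote key and size.
func copyDatToRemote(bs backend.BackendStorage, datFile *os.File) (string, int64, error) {
	fn := func(progressed int64, percentage float32) error {
		glog.V(1).Infof("copied %d bytes (%.1f%%)", progressed, percentage)
		return nil // an error returned here is intended to abort the copy
	}
	key, size, err := bs.CopyFile(datFile, fn)
	if err != nil {
		return "", 0, fmt.Errorf("copy %s to remote tier: %v", datFile.Name(), err)
	}
	return key, size, nil
}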

View file

@@ -25,7 +25,7 @@ type BackendStorageFile interface {
 type BackendStorage interface {
 	ToProperties() map[string]string
 	NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile
-	CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
+	CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error)
 	DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error)
 	DeleteFile(key string) (err error)
 }
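For implementors of BackendStorage, a compile-only sketch of the narrowed interface; the type name and the empty bodies are placeholders, and the volume_server_pb import path is assumed from the rest of the tree:

package backend

import (
	"os"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// noopBackend is a hypothetical stand-in showing the method set a backend
// must now provide; CopyFile no longer receives per-volume attributes, so any
// such metadata stays on the SeaweedFS side instead of becoming object tags.
type noopBackend struct{}

func (noopBackend) ToProperties() map[string]string { return map[string]string{} }

func (noopBackend) NewStorageFile(key string, tierInfo *volume_server_pb.VolumeInfo) BackendStorageFile {
	return nil
}

func (noopBackend) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
	return "", 0, nil
}

func (noopBackend) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) {
	return 0, nil
}

func (noopBackend) DeleteFile(key string) (err error) { return nil }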

View file

@@ -79,13 +79,13 @@ func (s *S3BackendStorage) NewStorageFile(key string, tierInfo *volume_server_pb
 	return f
 }
 
-func (s *S3BackendStorage) CopyFile(f *os.File, attributes map[string]string, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
+func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percentage float32) error) (key string, size int64, err error) {
 	randomUuid, _ := uuid.NewRandom()
 	key = randomUuid.String()
 
 	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
 
-	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, attributes, fn)
+	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)
 
 	return
 }
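Not something this commit adds, but for reference: on providers that do implement object tagging, the same metadata could be attached best-effort after the upload with a separate PutObjectTagging call, so an unsupported vendor would only cause a logged warning rather than a failed upload. A hedged sketch against the aws-sdk-go API used here:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"

	"github.com/chrislusf/seaweedfs/weed/glog"
)

// tagObjectBestEffort is a hypothetical helper, not part of this change: it
// applies tags after the upload and only logs when the vendor rejects the call.
func tagObjectBestEffort(svc s3iface.S3API, bucket, key string, attributes map[string]string) {
	tagSet := make([]*s3.Tag, 0, len(attributes))
	for k, v := range attributes {
		tagSet = append(tagSet, &s3.Tag{Key: aws.String(k), Value: aws.String(v)})
	}
	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket:  aws.String(bucket),
		Key:     aws.String(key),
		Tagging: &s3.Tagging{TagSet: tagSet},
	})
	if err != nil {
		glog.V(1).Infof("skipping object tags for %s/%s: %v", bucket, key, err)
	}
}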

View file

@@ -12,9 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )
 
-func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string,
-	attributes map[string]string,
-	fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
 
 	//open the file
 	f, err := os.Open(filename)
@@ -48,25 +46,13 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
 		fn:   fn,
 	}
 
-	// process tagging
-	tags := ""
-	for k, v := range attributes {
-		if len(tags) > 0 {
-			tags = tags + "&"
-		}
-		tags = tags + k + "=" + v
-	}
-
 	// Upload the file to S3.
 	var result *s3manager.UploadOutput
 	result, err = uploader.Upload(&s3manager.UploadInput{
 		Bucket:               aws.String(destBucket),
 		Key:                  aws.String(destKey),
 		Body:                 fileReader,
 		ACL:                  aws.String("private"),
 		ServerSideEncryption: aws.String("AES256"),
 		StorageClass:         aws.String("STANDARD_IA"),
-		Tagging:              aws.String(tags),
 	})
 
 	//in case it fails to upload