s3 backend: support customizing storage class

chrislu 2022-04-30 17:36:40 -07:00
parent d71a1fd077
commit 192983b464
4 changed files with 11 additions and 3 deletions

View file

@@ -28,3 +28,4 @@ sleep_minutes = 17 # sleep minutes between each script execution
 region = "us-east-2"
 bucket = "volume_bucket" # an existing bucket
 endpoint = "http://server2:8333"
+storage_class = "STANDARD_IA"

View file

@@ -36,6 +36,7 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
 region = "us-east-2"
 bucket = "your_bucket_name" # an existing bucket
 endpoint = ""
+storage_class = "STANDARD_IA"

 # create this number of logical volumes if no more writable volumes
 # count_x means how many copies of data.
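
Both config examples pin the new key to STANDARD_IA, but any storage class string that S3 itself understands can be used; the Go change further below falls back to STANDARD_IA when the key is left empty. A minimal sketch of the usable values and that fallback, using the aws-sdk-go constants (the helper name and the use of these constants are illustrative, not part of this commit):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

// defaultStorageClass mirrors the fallback the backend applies:
// an empty storage_class resolves to STANDARD_IA, anything else is used as-is.
func defaultStorageClass(configured string) string {
	if configured == "" {
		return s3.StorageClassStandardIa
	}
	return configured
}

func main() {
	// Storage classes defined by the S3 API, as exposed by the SDK.
	known := []string{
		s3.StorageClassStandard,
		s3.StorageClassReducedRedundancy,
		s3.StorageClassStandardIa,
		s3.StorageClassOnezoneIa,
		s3.StorageClassIntelligentTiering,
		s3.StorageClassGlacier,
		s3.StorageClassDeepArchive,
	}
	fmt.Println("usable storage_class values:", known)
	fmt.Println("empty value resolves to:", defaultStorageClass(""))
}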

View file

@@ -37,6 +37,7 @@ type S3BackendStorage struct {
 	region       string
 	bucket       string
 	endpoint     string
+	storageClass string
 	conn         s3iface.S3API
 }
@@ -48,6 +49,10 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st
 	s.region = configuration.GetString(configPrefix + "region")
 	s.bucket = configuration.GetString(configPrefix + "bucket")
 	s.endpoint = configuration.GetString(configPrefix + "endpoint")
+	s.storageClass = configuration.GetString(configPrefix + "storage_class")
+	if s.storageClass == "" {
+		s.storageClass = "STANDARD_IA"
+	}
 	s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint)
@@ -62,6 +67,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string {
 	m["region"] = s.region
 	m["bucket"] = s.bucket
 	m["endpoint"] = s.endpoint
+	m["storage_class"] = s.storageClass
 	return m
 }
@@ -85,7 +91,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen
 	glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key)
-	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, fn)
+	size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn)
 	return
 }
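
Taken together, the hunks in this file thread the configured class from the config prefix into every upload: a new struct field, a read with a STANDARD_IA fallback, exposure via ToProperties, and an extra argument to uploadToS3. A stripped-down, self-contained sketch of that flow (the map-backed config, the type, and the key prefix here are stand-ins of my own, not backend.StringProperties or the real SeaweedFS keys):

package main

import "fmt"

// mapProperties is a stand-in for the backend's configuration source,
// offering the same GetString-style lookup used in the hunk above.
type mapProperties map[string]string

func (m mapProperties) GetString(key string) string { return m[key] }

// s3Backend keeps only the fields relevant to this commit.
type s3Backend struct {
	bucket       string
	storageClass string
}

func newS3Backend(conf mapProperties, prefix string) *s3Backend {
	s := &s3Backend{
		bucket:       conf.GetString(prefix + "bucket"),
		storageClass: conf.GetString(prefix + "storage_class"),
	}
	if s.storageClass == "" {
		s.storageClass = "STANDARD_IA" // same default as the real backend
	}
	return s
}

// copyFile stands in for CopyFile: the stored class is passed to every
// upload instead of a hard-coded "STANDARD_IA".
func (s *s3Backend) copyFile(key string) {
	fmt.Printf("uploading %s to bucket %s with storage class %s\n", key, s.bucket, s.storageClass)
}

func main() {
	conf := mapProperties{
		"s3.test.bucket":        "volume_bucket",
		"s3.test.storage_class": "GLACIER",
	}
	newS3Backend(conf, "s3.test.").copyFile("1.dat")
}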

View file

@@ -12,7 +12,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )

-func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {
+func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, storageClass string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) {

 	//open the file
 	f, err := os.Open(filename)
@@ -52,7 +52,7 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
 		Bucket:       aws.String(destBucket),
 		Key:          aws.String(destKey),
 		Body:         fileReader,
-		StorageClass: aws.String("STANDARD_IA"),
+		StorageClass: aws.String(storageClass),
 	})

 	//in case it fails to upload
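
For context, the Bucket/Key/Body/StorageClass fields in this hunk match those on aws-sdk-go's s3manager.UploadInput, so the parameter added here is what finally reaches the S3 request. A self-contained sketch of the same idea (function and variable names are mine, not the SeaweedFS helpers), assuming the v1 upload manager API:

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// uploadWithStorageClass uploads a local file with a caller-chosen storage
// class instead of a hard-coded "STANDARD_IA".
func uploadWithStorageClass(sess *session.Session, filename, bucket, key, storageClass string) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	uploader := s3manager.NewUploader(sess)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		Body:         f,
		StorageClass: aws.String(storageClass), // e.g. "STANDARD", "STANDARD_IA", "GLACIER"
	})
	return err
}

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")}))
	if err := uploadWithStorageClass(sess, "1.dat", "volume_bucket", "1.dat", "STANDARD_IA"); err != nil {
		log.Fatal(err)
	}
}

Note that the storage class only applies to objects written after the change; volume files already uploaded keep the class they were stored with unless a bucket lifecycle rule moves them.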