mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
Use filerGroup for s3 buckets collection prefix (#4465)
Changes: use filerGroup as the S3 buckets collection prefix; fix the Helm templates accordingly; remove the now-unneeded flags; remove s3CollectionPrefix.
This commit is contained in:
parent
b7f011f777
commit
17e91d2917
|
@ -148,6 +148,9 @@ spec:
|
|||
-encryptVolumeData \
|
||||
{{- end }}
|
||||
-ip=${POD_IP} \
|
||||
{{- if .Values.filer.filerGroup}}
|
||||
-filerGroup={{ .Values.filer.filerGroup}} \
|
||||
{{- end }}
|
||||
{{- if .Values.filer.s3.enabled }}
|
||||
-s3 \
|
||||
-s3.port={{ .Values.filer.s3.port }} \
|
||||
|
|
|
@ -273,6 +273,7 @@ filer:
|
|||
grpcPort: 18888
|
||||
metricsPort: 9327
|
||||
loggingOverrideLevel: null
|
||||
filerGroup: ""
|
||||
# replication type is XYZ:
|
||||
# X number of replica in other data centers
|
||||
# Y number of replica in other racks in the same data center
|
||||
|
|
|
@ -155,6 +155,7 @@ func (s3opt *S3Options) startS3Server() bool {
|
|||
filerAddress := pb.ServerAddress(*s3opt.filer)
|
||||
|
||||
filerBucketsPath := "/buckets"
|
||||
filerGroup := ""
|
||||
|
||||
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
|
||||
|
||||
|
@ -169,6 +170,7 @@ func (s3opt *S3Options) startS3Server() bool {
|
|||
return fmt.Errorf("get filer %s configuration: %v", filerAddress, err)
|
||||
}
|
||||
filerBucketsPath = resp.DirBuckets
|
||||
filerGroup = resp.FilerGroup
|
||||
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
|
||||
glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
|
||||
return nil
|
||||
|
@ -200,6 +202,7 @@ func (s3opt *S3Options) startS3Server() bool {
|
|||
AllowDeleteBucketNotEmpty: *s3opt.allowDeleteBucketNotEmpty,
|
||||
LocalFilerSocket: localFilerSocket,
|
||||
DataCenter: *s3opt.dataCenter,
|
||||
FilerGroup: filerGroup,
|
||||
})
|
||||
if s3ApiServer_err != nil {
|
||||
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
|
||||
|
|
|
@ -107,6 +107,13 @@ func (s3a *S3ApiServer) updateEntry(parentDirectoryPath string, newEntry *filer_
|
|||
return err
|
||||
}
|
||||
|
||||
func (s3a *S3ApiServer) getCollectionName(bucket string) string {
|
||||
if s3a.option.FilerGroup != "" {
|
||||
return fmt.Sprintf("%s_%s", s3a.option.FilerGroup, bucket)
|
||||
}
|
||||
return bucket
|
||||
}
|
||||
|
||||
func objectKey(key *string) *string {
|
||||
if strings.HasPrefix(*key, "/") {
|
||||
t := (*key)[1:]
|
||||
|
|
|
@ -104,7 +104,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
|
|||
return fmt.Errorf("list collections: %v", err)
|
||||
} else {
|
||||
for _, c := range resp.Collections {
|
||||
if bucket == c.Name {
|
||||
if s3a.getCollectionName(bucket) == c.Name {
|
||||
errCode = s3err.ErrBucketAlreadyExists
|
||||
break
|
||||
}
|
||||
|
@ -174,7 +174,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
|
|||
|
||||
// delete collection
|
||||
deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{
|
||||
Collection: bucket,
|
||||
Collection: s3a.getCollectionName(bucket),
|
||||
}
|
||||
|
||||
glog.V(1).Infof("delete collection: %v", deleteCollectionRequest)
|
||||
|
@ -304,7 +304,7 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr
|
|||
s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
|
||||
return
|
||||
}
|
||||
ttls := fc.GetCollectionTtls(bucket)
|
||||
ttls := fc.GetCollectionTtls(s3a.getCollectionName(bucket))
|
||||
if len(ttls) == 0 {
|
||||
s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchLifecycleConfiguration)
|
||||
return
|
||||
|
|
|
@ -100,7 +100,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
|
|||
}
|
||||
glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
|
||||
destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
|
||||
etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination)
|
||||
etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination, dstBucket)
|
||||
|
||||
if errCode != s3err.ErrNone {
|
||||
s3err.WriteErrorResponse(w, r, errCode)
|
||||
|
@ -185,7 +185,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
|
|||
|
||||
glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
|
||||
destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
|
||||
etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination)
|
||||
etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination, dstBucket)
|
||||
|
||||
if errCode != s3err.ErrNone {
|
||||
s3err.WriteErrorResponse(w, r, errCode)
|
||||
|
|
|
@ -115,7 +115,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
|
|||
dataReader = mimeDetect(r, dataReader)
|
||||
}
|
||||
|
||||
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "")
|
||||
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "", bucket)
|
||||
|
||||
if errCode != s3err.ErrNone {
|
||||
s3err.WriteErrorResponse(w, r, errCode)
|
||||
|
@ -457,7 +457,7 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (s
|
|||
return statusCode
|
||||
}
|
||||
|
||||
func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string) (etag string, code s3err.ErrorCode) {
|
||||
func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string, bucket string) (etag string, code s3err.ErrorCode) {
|
||||
|
||||
hash := md5.New()
|
||||
var body = io.TeeReader(dataReader, hash)
|
||||
|
@ -474,6 +474,12 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
|
|||
proxyReq.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination)
|
||||
}
|
||||
|
||||
if s3a.option.FilerGroup != "" {
|
||||
query := proxyReq.URL.Query()
|
||||
query.Add("collection", s3a.getCollectionName(bucket))
|
||||
proxyReq.URL.RawQuery = query.Encode()
|
||||
}
|
||||
|
||||
for header, values := range r.Header {
|
||||
for _, value := range values {
|
||||
proxyReq.Header.Add(header, value)
|
||||
|
|
|
@ -115,7 +115,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
|
|||
|
||||
uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlEscapeObject(object))
|
||||
|
||||
etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody, "")
|
||||
etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody, "", bucket)
|
||||
|
||||
if errCode != s3err.ErrNone {
|
||||
s3err.WriteErrorResponse(w, r, errCode)
|
||||
|
|
|
@ -255,7 +255,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
|
|||
}
|
||||
destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)
|
||||
|
||||
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, destination)
|
||||
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, destination, bucket)
|
||||
if errCode != s3err.ErrNone {
|
||||
s3err.WriteErrorResponse(w, r, errCode)
|
||||
return
|
||||
|
|
|
@ -31,6 +31,7 @@ type S3ApiServerOption struct {
|
|||
AllowDeleteBucketNotEmpty bool
|
||||
LocalFilerSocket string
|
||||
DataCenter string
|
||||
FilerGroup string
|
||||
}
|
||||
|
||||
type S3ApiServer struct {
|
||||
|
|
|
@ -54,7 +54,7 @@ func (c *commandS3BucketDelete) Do(args []string, commandEnv *CommandEnv, writer
|
|||
// delete the collection directly first
|
||||
err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
|
||||
_, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
|
||||
Name: *bucketName,
|
||||
Name: getCollectionName(commandEnv, *bucketName),
|
||||
})
|
||||
return err
|
||||
})
|
||||
|
|
|
@ -57,7 +57,7 @@ func (c *commandS3BucketList) Do(args []string, commandEnv *CommandEnv, writer i
|
|||
if !entry.IsDirectory {
|
||||
return nil
|
||||
}
|
||||
collection := entry.Name
|
||||
collection := getCollectionName(commandEnv, entry.Name)
|
||||
var collectionSize, fileCount float64
|
||||
if collectionInfo, found := collectionInfos[collection]; found {
|
||||
collectionSize = collectionInfo.Size
|
||||
|
|
|
@ -65,7 +65,7 @@ func (c *commandS3BucketQuotaEnforce) Do(args []string, commandEnv *CommandEnv,
|
|||
if !entry.IsDirectory {
|
||||
return nil
|
||||
}
|
||||
collection := entry.Name
|
||||
collection := getCollectionName(commandEnv, entry.Name)
|
||||
var collectionSize float64
|
||||
if collectionInfo, found := collectionInfos[collection]; found {
|
||||
collectionSize = collectionInfo.Size
|
||||
|
|
|
@ -184,3 +184,10 @@ func readNeedleStatus(grpcDialOption grpc.DialOption, sourceVolumeServer pb.Serv
|
|||
)
|
||||
return
|
||||
}
|
||||
|
||||
func getCollectionName(commandEnv *CommandEnv, bucket string) string {
|
||||
if *commandEnv.option.FilerGroup != "" {
|
||||
return fmt.Sprintf("%s_%s", *commandEnv.option.FilerGroup, bucket)
|
||||
}
|
||||
return bucket
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue