s3: avoid duplicated bucket

Chris Lu 2020-10-15 10:52:17 -07:00
parent 53207ae0e7
commit ace0ea3d28
5 changed files with 670 additions and 347 deletions


@@ -37,6 +37,9 @@ service SeaweedFiler {
     rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
     }
+    rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
+    }
     rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
     }
@@ -244,6 +247,16 @@ message LookupVolumeResponse {
     map<string, Locations> locations_map = 1;
 }
+message Collection {
+    string name = 1;
+}
+message CollectionListRequest {
+    bool include_normal_volumes = 1;
+    bool include_ec_volumes = 2;
+}
+message CollectionListResponse {
+    repeated Collection collections = 1;
+}
 message DeleteCollectionRequest {
     string collection = 1;
 }


@@ -37,6 +37,9 @@ service SeaweedFiler {
     rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
     }
+    rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) {
+    }
     rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
     }
@@ -244,6 +247,16 @@ message LookupVolumeResponse {
     map<string, Locations> locations_map = 1;
 }
+message Collection {
+    string name = 1;
+}
+message CollectionListRequest {
+    bool include_normal_volumes = 1;
+    bool include_ec_volumes = 2;
+}
+message CollectionListResponse {
+    repeated Collection collections = 1;
+}
 message DeleteCollectionRequest {
     string collection = 1;
 }
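
For reference, the new RPC can be exercised directly with the generated filer_pb stubs. The sketch below is illustrative only: the filer gRPC address (localhost:18888) and the import path github.com/chrislusf/seaweedfs/weed/pb/filer_pb are assumptions about a typical deployment around the time of this commit, not part of the change itself.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// Dial the filer gRPC endpoint (address is an assumption; adjust to your deployment).
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial filer: %v", err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)

	// Ask the filer for all collections, mirroring the request the S3 gateway now sends.
	resp, err := client.CollectionList(context.Background(), &filer_pb.CollectionListRequest{
		IncludeNormalVolumes: true,
		IncludeEcVolumes:     true,
	})
	if err != nil {
		log.Fatalf("CollectionList: %v", err)
	}
	for _, c := range resp.Collections {
		fmt.Println(c.Name)
	}
}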

File diff suppressed because it is too large


@@ -58,8 +58,36 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
 	bucket, _ := getBucketAndObject(r)
+	// avoid duplicated buckets
+	errCode := s3err.ErrNone
+	if err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+		if resp, err := client.CollectionList(context.Background(), &filer_pb.CollectionListRequest{
+			IncludeEcVolumes:     true,
+			IncludeNormalVolumes: true,
+		}); err != nil {
+			glog.Errorf("list collection: %v", err)
+			return fmt.Errorf("list collections: %v", err)
+		} else {
+			for _, c := range resp.Collections {
+				if bucket == c.Name {
+					errCode = s3err.ErrBucketAlreadyExists
+					break
+				}
+			}
+		}
+		return nil
+	}); err != nil {
+		writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+		return
+	}
+	if errCode != s3err.ErrNone {
+		writeErrorResponse(w, errCode, r.URL)
+		return
+	}
 	// create the folder for bucket, but lazily create actual collection
 	if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil {
+		glog.Errorf("PutBucketHandler mkdir: %v", err)
 		writeErrorResponse(w, s3err.ErrInternalError, r.URL)
 		return
 	}
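
From the S3 side, the observable effect is that creating a bucket whose name already exists as a collection should now fail with BucketAlreadyExists instead of silently succeeding. A rough sketch using aws-sdk-go; the endpoint, region, bucket name, and static credentials are placeholders for whatever your SeaweedFS S3 gateway is configured with.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Endpoint and credentials are assumptions; point these at your SeaweedFS S3 gateway.
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8333"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("any", "any", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	create := func() error {
		_, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: aws.String("demo-bucket")})
		return err
	}

	if err := create(); err != nil {
		log.Fatalf("first create: %v", err)
	}
	// The second attempt should now be rejected by the duplicate check added above.
	if err := create(); err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyExists {
			fmt.Println("duplicate bucket rejected as expected")
			return
		}
		log.Fatalf("second create: unexpected error %v", err)
	}
	log.Fatal("second create unexpectedly succeeded")
}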


@@ -383,6 +383,28 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
 	}, nil
 }
+func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.CollectionListRequest) (resp *filer_pb.CollectionListResponse, err error) {
+	glog.V(4).Infof("CollectionList %v", req)
+	resp = &filer_pb.CollectionListResponse{}
+	err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+		masterResp, err := client.CollectionList(context.Background(), &master_pb.CollectionListRequest{
+			IncludeNormalVolumes: req.IncludeNormalVolumes,
+			IncludeEcVolumes:     req.IncludeEcVolumes,
+		})
+		if err != nil {
+			return err
+		}
+		for _, c := range masterResp.Collections {
+			resp.Collections = append(resp.Collections, &filer_pb.Collection{Name: c.Name})
+		}
+		return nil
+	})
+	return
+}
 func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) {
 	glog.V(4).Infof("DeleteCollection %v", req)