Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
Merge pull request #3130 from kmlebedev/fix_rm_parent_dir_via_nextcloud
commit f43ec9f363
@@ -28,4 +28,4 @@ ENV \
     S3TEST_CONF="/s3test.conf"
 
 ENTRYPOINT ["/bin/bash", "-c"]
-CMD ["sleep 10 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]
+CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"]
@@ -37,3 +37,8 @@ docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,l
 docker buildx stop $BUILDER
 ```
 
+## Minio debugging
+```
+mc config host add local http://127.0.0.1:9000 some_access_key1 some_secret_key1
+mc admin trace --all --verbose local
+```
@@ -38,7 +38,7 @@ services:
       S3TEST_CONF: "s3tests.conf"
       NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3"
       NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy"
-      NOSETESTS_EXCLUDE: "(get_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket|list_multipart_upload_owner|multipart_upload_small)"
+      NOSETESTS_EXCLUDE: "(get_bucket_encryption|delete_bucket_encryption|put_bucket_encryption|bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_copy_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket|list_multipart_upload_owner|multipart_upload_small)"
     depends_on:
       - master
       - volume
@@ -17,6 +17,10 @@ func (entry *Entry) IsInRemoteOnly() bool {
     return len(entry.Chunks) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0
 }
 
+func (entry *Entry) IsDirectoryKeyObject() bool {
+    return entry.IsDirectory && entry.Attributes != nil && entry.Attributes.Mime != ""
+}
+
 func (entry *Entry) FileMode() (fileMode os.FileMode) {
     if entry != nil && entry.Attributes != nil {
         fileMode = os.FileMode(entry.Attributes.FileMode)
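The new `IsDirectoryKeyObject` helper distinguishes a directory that was created as an explicit S3 key (a PUT of a name ending in "/", which stores a MIME type on the entry) from a directory that exists only implicitly as the parent of other keys. A minimal, self-contained sketch of the same predicate, using stripped-down stand-ins for the generated `filer_pb` types:

```go
package main

import "fmt"

// Attributes and Entry are simplified stand-ins for the generated
// filer_pb types, just enough to illustrate the new predicate.
type Attributes struct {
	Mime string
}

type Entry struct {
	Name        string
	IsDirectory bool
	Attributes  *Attributes
}

// IsDirectoryKeyObject mirrors the helper added above: a directory entry that
// also carries a MIME type was created as an explicit S3 key ending in "/".
func (entry *Entry) IsDirectoryKeyObject() bool {
	return entry.IsDirectory && entry.Attributes != nil && entry.Attributes.Mime != ""
}

func main() {
	implicit := &Entry{Name: "photos", IsDirectory: true, Attributes: &Attributes{}}
	explicit := &Entry{Name: "photos", IsDirectory: true, Attributes: &Attributes{Mime: "httpd/unix-directory"}}
	fmt.Println(implicit.IsDirectoryKeyObject(), explicit.IsDirectoryKeyObject()) // false true
}
```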
@@ -92,16 +92,20 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
     }
     defer dataReader.Close()
 
+    objectContentType := r.Header.Get("Content-Type")
     if strings.HasSuffix(object, "/") {
         if err := s3a.mkdir(s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"), func(entry *filer_pb.Entry) {
-            entry.Attributes.Mime = r.Header.Get("Content-Type")
+            if objectContentType == "" {
+                objectContentType = "httpd/unix-directory"
+            }
+            entry.Attributes.Mime = objectContentType
         }); err != nil {
             s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
             return
         }
     } else {
         uploadUrl := s3a.toFilerUrl(bucket, object)
-        if r.Header.Get("Content-Type") == "" {
+        if objectContentType == "" {
             dataReader = mimeDetect(r, dataReader)
         }
 
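The handler now reads the Content-Type header once into `objectContentType`; for keys ending in "/" with no Content-Type it stores `httpd/unix-directory` (a MIME type commonly used as a folder placeholder by WebDAV-style clients such as Nextcloud), which is exactly what `IsDirectoryKeyObject` later checks for. A rough sketch of just that defaulting decision; the helper name is illustrative and not part of the actual handler:

```go
package main

import (
	"fmt"
	"strings"
)

// directoryMime decides which MIME type to store for a PUT of key `object`
// when the client sent `contentType` (possibly empty). It mirrors the
// defaulting added in PutObjectHandler; this is a sketch, not the real handler.
func directoryMime(object, contentType string) string {
	if strings.HasSuffix(object, "/") && contentType == "" {
		return "httpd/unix-directory"
	}
	return contentType
}

func main() {
	fmt.Println(directoryMime("photos/", ""))                     // httpd/unix-directory
	fmt.Println(directoryMime("photos/", "application/x-folder")) // application/x-folder
	fmt.Println(directoryMime("photos/cat.jpg", ""))              // "" (left for mimeDetect)
}
```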
@@ -133,6 +133,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
         reqDir = reqDir[1:]
     }
     bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
+    bucketPrefixLen := len(bucketPrefix)
     reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir)
     if strings.HasSuffix(reqDir, "/") {
         reqDir = strings.TrimSuffix(reqDir, "/")
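`bucketPrefixLen` caches the length of the `<buckets-path>/<bucket>/` prefix so the listing code (and, below, the recursive `doListFilerEntries`) can slice absolute filer paths back into bucket-relative S3 keys. A small illustration of that slicing, with made-up paths:

```go
package main

import "fmt"

func main() {
	// s3a.option.BucketsPath + "/" + bucket + "/", here with example values
	bucketPrefix := "/buckets/mybucket/"
	bucketPrefixLen := len(bucketPrefix)

	fullPath := "/buckets/mybucket/photos/2022/cat.jpg"
	// Slicing off the prefix length yields the bucket-relative S3 object key.
	fmt.Println(fullPath[bucketPrefixLen:]) // photos/2022/cat.jpg
}
```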
@@ -147,21 +148,23 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
     // check filer
     err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
 
-        _, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) {
+        _, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, false, false, bucketPrefixLen, func(dir string, entry *filer_pb.Entry) {
             if entry.IsDirectory {
                 if delimiter == "/" {
                     commonPrefixes = append(commonPrefixes, PrefixEntry{
-                        Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
+                        Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[bucketPrefixLen:],
                     })
                 }
-                return
+                if !(entry.IsDirectoryKeyObject() && strings.HasSuffix(entry.Name, "/")) {
+                    return
+                }
             }
             storageClass := "STANDARD"
             if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
                 storageClass = string(v)
             }
             contents = append(contents, ListEntry{
-                Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
+                Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[bucketPrefixLen:],
                 LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
                 ETag:         "\"" + filer.ETag(entry) + "\"",
                 Size:         int64(filer.FileSize(entry)),
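Previously every directory entry ended the callback with `return`, so a directory could only ever surface as a common prefix. With the extra check, a directory key object whose name already ends in "/" falls through and is also emitted as an ordinary key. A runnable sketch of that decision, where the booleans stand in for `entry.IsDirectory` and `entry.IsDirectoryKeyObject()`:

```go
package main

import (
	"fmt"
	"strings"
)

// shouldEmitAsKey sketches the reworked branch above: plain files are always
// emitted, directories normally stop after the common-prefix handling, and
// only a directory key object whose name already carries the trailing "/"
// falls through to be listed as a regular key.
func shouldEmitAsKey(isDirectory, isDirectoryKeyObject bool, name string) bool {
	if !isDirectory {
		return true
	}
	return isDirectoryKeyObject && strings.HasSuffix(name, "/")
}

func main() {
	fmt.Println(shouldEmitAsKey(false, false, "cat.jpg")) // true
	fmt.Println(shouldEmitAsKey(true, false, "photos"))   // false
	fmt.Println(shouldEmitAsKey(true, true, "photos/"))   // true
}
```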
@@ -172,6 +175,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
                 StorageClass: StorageClass(storageClass),
             })
         })
+        glog.V(4).Infof("end doListFilerEntries isTruncated:%v nextMarker:%v reqDir: %v prefix: %v", isTruncated, nextMarker, reqDir, prefix)
         if doErr != nil {
             return doErr
         }
@@ -180,21 +184,21 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
             nextMarker = ""
         }
 
-        if len(contents) == 0 && maxKeys > 0 {
+        if len(contents) == 0 && len(commonPrefixes) == 0 && maxKeys > 0 {
             if strings.HasSuffix(originalPrefix, "/") && prefix == "" {
                 reqDir, prefix = filepath.Split(strings.TrimSuffix(reqDir, "/"))
                 reqDir = strings.TrimSuffix(reqDir, "/")
             }
-            _, _, _, doErr = s3a.doListFilerEntries(client, reqDir, prefix, 1, prefix, delimiter, true, func(dir string, entry *filer_pb.Entry) {
-                if entry.IsDirectory && entry.Attributes.Mime != "" && entry.Name == prefix {
+            _, _, _, doErr = s3a.doListFilerEntries(client, reqDir, prefix, 1, prefix, delimiter, true, false, bucketPrefixLen, func(dir string, entry *filer_pb.Entry) {
+                if entry.IsDirectoryKeyObject() && entry.Name == prefix {
                     storageClass := "STANDARD"
                     if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok {
                         storageClass = string(v)
                     }
                     contents = append(contents, ListEntry{
-                        Key:          fmt.Sprintf("%s/%s", dir, entry.Name+"/")[len(bucketPrefix):],
+                        Key:          fmt.Sprintf("%s/%s/", dir, entry.Name)[bucketPrefixLen:],
                         LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
-                        ETag:         "\"" + filer.ETag(entry) + "\"",
+                        ETag:         "\"" + fmt.Sprintf("%x", entry.Attributes.Md5) + "\"",
                         Size:         int64(filer.FileSize(entry)),
                         Owner: CanonicalUser{
                             ID: fmt.Sprintf("%x", entry.Attributes.Uid),
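Two details change in this fallback path, which synthesizes a single entry when listing an explicit folder key would otherwise come back empty: the fallback is now also skipped when common prefixes were found, and the ETag is rendered from the entry's stored MD5 bytes rather than `filer.ETag(entry)`. A small sketch of that ETag formatting, using an empty-body MD5 as a stand-in for `entry.Attributes.Md5`:

```go
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// The fallback entry's ETag is built from the stored MD5 digest: %x
	// hex-encodes the bytes and the surrounding quotes give the usual S3 form.
	sum := md5.Sum([]byte{}) // MD5 of an empty body, as a stand-in for the stored digest
	etag := "\"" + fmt.Sprintf("%x", sum) + "\""
	fmt.Println(etag) // "d41d8cd98f00b204e9800998ecf8427e"
}
```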
@@ -204,6 +208,13 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
                     })
                 }
             })
+            if doErr != nil {
+                return doErr
+            }
+        }
+
+        if len(nextMarker) > 0 {
+            nextMarker = nextMarker[bucketPrefixLen:]
         }
 
         response = ListBucketResult{
@@ -224,7 +235,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
     return
 }
 
-func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
+func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, inclusiveStartFrom bool, subEntries bool, bucketPrefixLen int, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
     // invariants
     // prefix and marker should be under dir, marker may contain "/"
     // maxKeys should be updated for each recursion
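The helper gains two parameters: `subEntries`, which marks a recursive call into a sub-directory and is consulted further down in the entry loop so that plain files are not emitted from a sub-listing when a "/" delimiter is in effect, and `bucketPrefixLen`, used to rebuild sub-directory paths and bucket-relative keys and markers. A runnable sketch of that file-emission guard as a standalone predicate (the function name is illustrative, not part of the SeaweedFS API):

```go
package main

import "fmt"

// emitFile mirrors the guard in the final branch of the entry loop: a plain
// file is emitted unless the delimiter is "/" and the entry was found while
// recursing into a sub-directory (subEntries == true).
func emitFile(delimiter string, subEntries bool) bool {
	return !(delimiter == "/" && subEntries)
}

func main() {
	fmt.Println(emitFile("", true))   // true: flat listing recurses and emits everything
	fmt.Println(emitFile("/", false)) // true: top-level files are listed normally
	fmt.Println(emitFile("/", true))  // false: files under a common prefix are skipped
}
```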
@@ -237,20 +248,27 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
     }
 
     if strings.Contains(marker, "/") {
-        sepIndex := strings.Index(marker, "/")
-        subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
-        glog.V(4).Infoln("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
-        subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, false, eachEntryFn)
-        if subErr != nil {
-            err = subErr
-            return
+        if strings.HasSuffix(marker, "/") {
+            marker = strings.TrimSuffix(marker, "/")
         }
-        counter += subCounter
-        isTruncated = isTruncated || subIsTruncated
-        maxKeys -= subCounter
-        nextMarker = subDir + "/" + subNextMarker
-        // finished processing this sub directory
-        marker = subDir
+        sepIndex := strings.Index(marker, "/")
+        if sepIndex != -1 {
+            subPrefix, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
+            subDir := fmt.Sprintf("%s/%s", dir[0:bucketPrefixLen-1], subPrefix)
+            if strings.HasPrefix(subDir, dir) {
+                subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, subDir, "", maxKeys, subMarker, delimiter, false, false, bucketPrefixLen, eachEntryFn)
+                if subErr != nil {
+                    err = subErr
+                    return
+                }
+                counter += subCounter
+                isTruncated = isTruncated || subIsTruncated
+                maxKeys -= subCounter
+                nextMarker = subNextMarker
+                // finished processing this sub directory
+                marker = subPrefix
+            }
+        }
     }
     if maxKeys <= 0 {
         return
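The marker handling is reworked: instead of unconditionally recursing on the text before the first "/", the marker is first stripped of a trailing "/", split only when a separator is actually present, and the rebuilt sub-directory is recursed into only if it still lies under the directory being listed. A self-contained approximation of that splitting, with hypothetical paths:

```go
package main

import (
	"fmt"
	"strings"
)

// splitMarker approximates the reworked marker handling: trim a trailing "/",
// split on the first "/", rebuild the absolute sub-directory from the bucket
// prefix, and only report ok when that sub-directory still lies under dir.
func splitMarker(dir, marker string, bucketPrefixLen int) (subDir, subMarker string, ok bool) {
	marker = strings.TrimSuffix(marker, "/")
	sepIndex := strings.Index(marker, "/")
	if sepIndex == -1 {
		return "", "", false
	}
	subPrefix := marker[:sepIndex]
	subMarker = marker[sepIndex+1:]
	subDir = fmt.Sprintf("%s/%s", dir[:bucketPrefixLen-1], subPrefix)
	return subDir, subMarker, strings.HasPrefix(subDir, dir)
}

func main() {
	dir := "/buckets/mybucket" // directory being listed (the bucket root here)
	bucketPrefixLen := len("/buckets/mybucket/")
	subDir, subMarker, ok := splitMarker(dir, "photos/2022/cat.jpg", bucketPrefixLen)
	fmt.Println(subDir, subMarker, ok) // /buckets/mybucket/photos 2022/cat.jpg true
}
```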
@@ -288,40 +306,46 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
             return
         }
         entry := resp.Entry
-        nextMarker = entry.Name
+        nextMarker = dir + "/" + entry.Name
         if entry.IsDirectory {
             // println("ListEntries", dir, "dir:", entry.Name)
             if entry.Name == ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
                 continue
             }
-            if delimiter != "/" {
+            if delimiter == "" {
                 eachEntryFn(dir, entry)
                 // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
-                subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, false, eachEntryFn)
+                subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, false, true, bucketPrefixLen, eachEntryFn)
                 if subErr != nil {
                     err = fmt.Errorf("doListFilerEntries2: %v", subErr)
                     return
                 }
                 // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
+                if subCounter == 0 && entry.IsDirectoryKeyObject() {
+                    entry.Name += "/"
+                    eachEntryFn(dir, entry)
+                    counter++
+                }
                 counter += subCounter
-                nextMarker = entry.Name + "/" + subNextMarker
+                nextMarker = subNextMarker
                 if subIsTruncated {
                     isTruncated = true
                     return
                 }
-            } else {
+            } else if delimiter == "/" {
                 var isEmpty bool
-                if !s3a.option.AllowEmptyFolder {
+                if !s3a.option.AllowEmptyFolder && !entry.IsDirectoryKeyObject() {
                     if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
                         glog.Errorf("check empty folder %s: %v", dir, err)
                     }
                 }
                 if !isEmpty {
+                    nextMarker += "/"
                     eachEntryFn(dir, entry)
                     counter++
                 }
             }
-        } else {
+        } else if !(delimiter == "/" && subEntries) {
             // println("ListEntries", dir, "file:", entry.Name)
             eachEntryFn(dir, entry)
             counter++
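The directory branch now distinguishes delimiters explicitly: an empty delimiter recurses and flattens everything, "/" reports the directory as a common prefix (or key object), and files found while recursing under "/" are suppressed. The added `subCounter == 0 && entry.IsDirectoryKeyObject()` block is the core of the Nextcloud fix: an explicitly created but otherwise empty folder key is still emitted with a trailing "/", so the client can see and delete the folder placeholder. A runnable sketch of that emission rule (names are illustrative, not the SeaweedFS API):

```go
package main

import "fmt"

// listDirectory sketches the reworked flat-listing branch: after recursing
// into a directory, if nothing was emitted but the directory itself is an
// explicit key object, it is reported as a key with a trailing "/" so S3
// clients can still see (and delete) the folder placeholder.
func listDirectory(name string, subCounter int, isDirectoryKeyObject bool, emit func(key string)) int {
	counter := 0
	if subCounter == 0 && isDirectoryKeyObject {
		emit(name + "/")
		counter++
	}
	return counter + subCounter
}

func main() {
	var emitted []string
	n := listDirectory("photos", 0, true, func(key string) { emitted = append(emitted, key) })
	fmt.Println(n, emitted) // 1 [photos/]
}
```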