avoid recursively deleting newly created empty directories (#4016)

commit 2b910d1cf8
parent 4b0430e71d
Author: Konstantin Lebedev (committed by GitHub)
Date:   2022-11-25 21:45:47 +05:00

@@ -18,6 +18,8 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
 )
 
+const cutoffTimeNewEmptyDir = 3
+
 type ListBucketResultV2 struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
 	Name    string   `xml:"Name"`
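
The new constant is a grace period in Unix seconds: the second hunk below adds it to time.Now().Unix(), so a value of 3 treats directories created within roughly three seconds of the emptiness scan as still "new".
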
@@ -407,11 +409,16 @@ func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFile
 	var startFrom string
 	var isExhausted bool
 	var foundEntry bool
+	cutOffTimeAtSec := time.Now().Unix() + cutoffTimeNewEmptyDir
 	for fileCounter == 0 && !isExhausted && err == nil {
 		err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error {
 			foundEntry = true
 			if entry.IsDirectory {
-				subDirs = append(subDirs, entry.Name)
+				if entry.Attributes != nil && cutOffTimeAtSec >= entry.Attributes.GetCrtime() {
+					fileCounter++
+				} else {
+					subDirs = append(subDirs, entry.Name)
+				}
 			} else {
 				fileCounter++
 			}