diff --git a/docker/Makefile b/docker/Makefile
index dbc82fde4..3afea17c1 100644
--- a/docker/Makefile
+++ b/docker/Makefile
@@ -52,6 +52,9 @@ dev_replicate: build
 dev_auditlog: build
 	docker-compose -f compose/local-auditlog-compose.yml -p seaweedfs up
 
+dev_nextcloud: build
+	docker-compose -f compose/local-nextcloud-compose.yml -p seaweedfs up
+
 cluster: build
 	docker-compose -f compose/local-cluster-compose.yml -p seaweedfs up
 
diff --git a/docker/compose/local-nextcloud-compose.yml b/docker/compose/local-nextcloud-compose.yml
new file mode 100644
index 000000000..80c3fca53
--- /dev/null
+++ b/docker/compose/local-nextcloud-compose.yml
@@ -0,0 +1,44 @@
+version: '2'
+
+services:
+  master:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 9333:9333
+      - 19333:19333
+    command: "master -ip=master"
+  volume:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8080:8080
+      - 18080:18080
+    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
+    depends_on:
+      - master
+  s3:
+    image: chrislusf/seaweedfs:local
+    ports:
+      - 8888:8888
+      - 18888:18888
+      - 8333:8333
+    command: '-v 9 filer -master="master:9333" -s3'
+    depends_on:
+      - master
+      - volume
+  nextcloud:
+    image: nextcloud:23.0.5-apache
+    environment:
+      - OBJECTSTORE_S3_HOST=s3
+      - OBJECTSTORE_S3_BUCKET=nextcloud
+      - OBJECTSTORE_S3_KEY=some_access_key1
+      - OBJECTSTORE_S3_SECRET=some_secret_key1
+      - OBJECTSTORE_S3_PORT=8333
+      - OBJECTSTORE_S3_SSL=false
+      - OBJECTSTORE_S3_USEPATH_STYLE=true
+      - SQLITE_DATABASE=nextcloud
+      - NEXTCLOUD_ADMIN_USER=admin
+      - NEXTCLOUD_ADMIN_PASSWORD=admin
+    ports:
+      - 80:80
+    depends_on:
+      - s3
\ No newline at end of file
diff --git a/docker/seaweedfs.sql b/docker/seaweedfs.sql
index 38ebc575c..a27eb7081 100644
--- a/docker/seaweedfs.sql
+++ b/docker/seaweedfs.sql
@@ -1,6 +1,6 @@
 CREATE DATABASE IF NOT EXISTS seaweedfs;
 CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret';
-GRANT ALL PRIVILEGES ON seaweedfs_fast.* TO 'seaweedfs'@'%';
+GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%';
 FLUSH PRIVILEGES;
 USE seaweedfs;
 CREATE TABLE IF NOT EXISTS filemeta (
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index 3d26d395e..91086fec8 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -93,7 +93,9 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 	defer dataReader.Close()
 
 	if strings.HasSuffix(object, "/") {
-		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
+		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"), func(entry *filer_pb.Entry) {
+			entry.Attributes.Mime = r.Header.Get("Content-Type")
+		}); err != nil {
 			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 			return
 		}
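With the PutObjectHandler change above, a zero-byte PUT whose key ends in "/" now strips the trailing slash before mkdir and records the request's Content-Type as the MIME type of the created directory entry; the listing fallback further down relies on that MIME to tell explicitly created "folders" apart from implicit ones. For context, a minimal client-side sketch (not part of this patch) of such a folder-marker PUT against the gateway from local-nextcloud-compose.yml. The endpoint, credentials and bucket are taken from that compose file; the key and MIME type are illustrative assumptions only.

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Assumed to match the compose file above: S3 gateway on :8333,
	// path-style addressing, static credentials, bucket "nextcloud".
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8333"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// A zero-byte PUT with a trailing "/" is the usual "folder marker".
	// With this patch the handler stores the Content-Type on the directory
	// entry it creates. Key and MIME type here are made-up examples.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:      aws.String("nextcloud"),
		Key:         aws.String("files/admin/"),
		Body:        bytes.NewReader([]byte{}),
		ContentType: aws.String("httpd/unix-directory"),
	})
	if err != nil {
		log.Fatalf("create folder marker: %v", err)
	}
	log.Println("folder marker created")
}
```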
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index a3b858dcb..d898061fa 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -135,8 +135,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 	bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
 	reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir)
 	if strings.HasSuffix(reqDir, "/") {
-		// remove trailing "/"
-		reqDir = reqDir[:len(reqDir)-1]
+		reqDir = strings.TrimSuffix(reqDir, "/")
 	}
 
 	var contents []ListEntry
@@ -148,30 +147,30 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 
 	// check filer
 	err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
-		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {
+		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) {
 			if entry.IsDirectory {
 				if delimiter == "/" {
 					commonPrefixes = append(commonPrefixes, PrefixEntry{
 						Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
 					})
 				}
-			} else {
-				storageClass := "STANDARD"
-				if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
-					storageClass = string(v)
-				}
-				contents = append(contents, ListEntry{
-					Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
-					LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
-					ETag:         "\"" + filer.ETag(entry) + "\"",
-					Size:         int64(filer.FileSize(entry)),
-					Owner: CanonicalUser{
-						ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
-						DisplayName: entry.Attributes.UserName,
-					},
-					StorageClass: StorageClass(storageClass),
-				})
+				return
 			}
+			storageClass := "STANDARD"
+			if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
+				storageClass = string(v)
+			}
+			contents = append(contents, ListEntry{
+				Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
+				LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+				ETag:         "\"" + filer.ETag(entry) + "\"",
+				Size:         int64(filer.FileSize(entry)),
+				Owner: CanonicalUser{
+					ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
+					DisplayName: entry.Attributes.UserName,
+				},
+				StorageClass: StorageClass(storageClass),
+			})
 		})
 		if doErr != nil {
 			return doErr
@@ -181,6 +180,32 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 			nextMarker = ""
 		}
 
+		if len(contents) == 0 && maxKeys > 0 {
+			if strings.HasSuffix(originalPrefix, "/") && prefix == "" {
+				reqDir, prefix = filepath.Split(strings.TrimSuffix(reqDir, "/"))
+				reqDir = strings.TrimSuffix(reqDir, "/")
+			}
+			_, _, _, doErr = s3a.doListFilerEntries(client, reqDir, prefix, 1, prefix, delimiter, true, func(dir string, entry *filer_pb.Entry) {
+				if entry.IsDirectory && entry.Attributes.Mime != "" && entry.Name == prefix {
+					storageClass := "STANDARD"
+					if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
+						storageClass = string(v)
+					}
+					contents = append(contents, ListEntry{
+						Key:          fmt.Sprintf("%s/%s", dir, entry.Name+"/")[len(bucketPrefix):],
+						LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+						ETag:         "\"" + filer.ETag(entry) + "\"",
+						Size:         int64(filer.FileSize(entry)),
+						Owner: CanonicalUser{
+							ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
+							DisplayName: entry.Attributes.UserName,
+						},
+						StorageClass: StorageClass(storageClass),
+					})
+				}
+			})
+		}
+
 		response = ListBucketResult{
 			Name:           bucket,
 			Prefix:         originalPrefix,
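The fallback added above only fires when a listing with a trailing-slash prefix comes back empty: it re-targets the parent directory and uses the last path element as the prefix, so the directory entry created by PutObjectHandler (recognizable by its non-empty MIME) can be reported back as a key. A minimal standalone sketch of that path split follows; the paths are made-up examples, not taken from the patch.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Example: a ListObjects call with prefix "files/admin/" under bucket
	// "nextcloud" resolves to this filer directory and an empty prefix.
	reqDir := "/buckets/nextcloud/files/admin"
	prefix := ""

	// The fallback splits off the last path element so the lookup runs in the
	// parent directory, with that element as the (inclusive) start prefix.
	if prefix == "" {
		reqDir, prefix = filepath.Split(strings.TrimSuffix(reqDir, "/"))
		reqDir = strings.TrimSuffix(reqDir, "/")
	}

	fmt.Println(reqDir, prefix) // /buckets/nextcloud/files admin
}
```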
@@ -199,7 +224,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 	return
 }
 
-func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
+func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
 	// invariants
 	// prefix and marker should be under dir, marker may contain "/"
 	// maxKeys should be updated for each recursion
@@ -214,8 +239,8 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 	if strings.Contains(marker, "/") {
 		sepIndex := strings.Index(marker, "/")
 		subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
-		// println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
-		subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn)
+		glog.V(4).Infoln("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
+		subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, false, eachEntryFn)
 		if subErr != nil {
 			err = subErr
 			return
@@ -237,7 +262,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 		Prefix:             prefix,
 		Limit:              uint32(maxKeys + 1),
 		StartFromFileName:  marker,
-		InclusiveStartFrom: false,
+		InclusiveStartFrom: inclusiveStartFrom,
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
@@ -266,33 +291,34 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 			nextMarker = entry.Name
 			if entry.IsDirectory {
 				// println("ListEntries", dir, "dir:", entry.Name)
-				if entry.Name != ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
-					if delimiter != "/" {
+				if entry.Name == ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
+					continue
+				}
+				if delimiter != "/" {
+					eachEntryFn(dir, entry)
+					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
+					subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, false, eachEntryFn)
+					if subErr != nil {
+						err = fmt.Errorf("doListFilerEntries2: %v", subErr)
+						return
+					}
+					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
+					counter += subCounter
+					nextMarker = entry.Name + "/" + subNextMarker
+					if subIsTruncated {
+						isTruncated = true
+						return
+					}
+				} else {
+					var isEmpty bool
+					if !s3a.option.AllowEmptyFolder {
+						if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
+							glog.Errorf("check empty folder %s: %v", dir, err)
+						}
+					}
+					if !isEmpty {
 						eachEntryFn(dir, entry)
-						// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
-						subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn)
-						if subErr != nil {
-							err = fmt.Errorf("doListFilerEntries2: %v", subErr)
-							return
-						}
-						// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
-						counter += subCounter
-						nextMarker = entry.Name + "/" + subNextMarker
-						if subIsTruncated {
-							isTruncated = true
-							return
-						}
-					} else {
-						var isEmpty bool
-						if !s3a.option.AllowEmptyFolder {
-							if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
-								glog.Errorf("check empty folder %s: %v", dir, err)
-							}
-						}
-						if !isEmpty {
-							eachEntryFn(dir, entry)
-							counter++
-						}
+						counter++
 					}
 				}
 			} else {
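The new inclusiveStartFrom parameter only matters for the fallback lookup in listFilerEntries: there the prefix itself is passed as StartFromFileName, so the filer must return an entry named exactly like the marker rather than the next one after it; ordinary pagination and the recursive calls keep the old exclusive behaviour by passing false. A small self-contained sketch of the two semantics over a sorted list of names; the helper and names are illustrative only, not SeaweedFS code.

```go
package main

import (
	"fmt"
	"sort"
)

// startFrom mimics the StartFromFileName/InclusiveStartFrom semantics over an
// already-sorted slice of entry names: exclusive start skips the marker itself
// (normal pagination), inclusive start keeps it (the fallback that looks up a
// directory entry named exactly like the prefix).
func startFrom(names []string, marker string, inclusive bool) []string {
	i := sort.SearchStrings(names, marker) // first index with names[i] >= marker
	if i < len(names) && names[i] == marker && !inclusive {
		i++
	}
	return names[i:]
}

func main() {
	names := []string{"admin", "appdata", "files"}
	fmt.Println(startFrom(names, "admin", false)) // [appdata files]
	fmt.Println(startFrom(names, "admin", true))  // [admin appdata files]
}
```

Taken together with the PutObjectHandler change, a listing with prefix files/admin/ that would previously have returned nothing should now include the files/admin/ marker key itself, which is the behaviour the Nextcloud compose setup above is meant to exercise.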