Mirror of https://github.com/seaweedfs/seaweedfs.git

Merge pull request #3114 from kmlebedev/nextcloud

Create folders in s3 via nextcloud

Commit 730c9cf601

Makefile:

@@ -52,6 +52,9 @@ dev_replicate: build
 dev_auditlog: build
 	docker-compose -f compose/local-auditlog-compose.yml -p seaweedfs up
 
+dev_nextcloud: build
+	docker-compose -f compose/local-nextcloud-compose.yml -p seaweedfs up
+
 cluster: build
 	docker-compose -f compose/local-cluster-compose.yml -p seaweedfs up

docker/compose/local-nextcloud-compose.yml (new file, 44 lines):

version: '2'

services:
  master:
    image: chrislusf/seaweedfs:local
    ports:
      - 9333:9333
      - 19333:19333
    command: "master -ip=master"
  volume:
    image: chrislusf/seaweedfs:local
    ports:
      - 8080:8080
      - 18080:18080
    command: "volume -mserver=master:9333 -port=8080 -ip=volume"
    depends_on:
      - master
  s3:
    image: chrislusf/seaweedfs:local
    ports:
      - 8888:8888
      - 18888:18888
      - 8333:8333
    command: '-v 9 filer -master="master:9333" -s3'
    depends_on:
      - master
      - volume
  nextcloud:
    image: nextcloud:23.0.5-apache
    environment:
      - OBJECTSTORE_S3_HOST=s3
      - OBJECTSTORE_S3_BUCKET=nextcloud
      - OBJECTSTORE_S3_KEY=some_access_key1
      - OBJECTSTORE_S3_SECRET=some_secret_key1
      - OBJECTSTORE_S3_PORT=8333
      - OBJECTSTORE_S3_SSL=false
      - OBJECTSTORE_S3_USEPATH_STYLE=true
      - SQLITE_DATABASE=nextcloud
      - NEXTCLOUD_ADMIN_USER=admin
      - NEXTCLOUD_ADMIN_PASSWORD=admin
    ports:
      - 80:80
    depends_on:
      - s3

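The compose file wires Nextcloud to the s3 service over the compose network and also publishes the S3 gateway on localhost:8333 for the host, with static keys and path-style addressing. As a quick way to poke at that endpoint from Go, here is a minimal sketch (mine, not part of this PR) using aws-sdk-go v1; the endpoint, credentials, bucket name and path-style flag are taken from the compose file, and creating the bucket up front is only a convenience, since Nextcloud's own setup may handle it differently.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Endpoint, keys and path-style addressing match compose/local-nextcloud-compose.yml.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:8333"),
		Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// Nextcloud is configured to use the "nextcloud" bucket; creating it here is
	// optional and only exercises the gateway.
	if _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: aws.String("nextcloud")}); err != nil {
		log.Printf("create bucket: %v", err)
	}
}
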
MySQL init script:

@@ -1,6 +1,6 @@
 CREATE DATABASE IF NOT EXISTS seaweedfs;
 CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret';
-GRANT ALL PRIVILEGES ON seaweedfs_fast.* TO 'seaweedfs'@'%';
+GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%';
 FLUSH PRIVILEGES;
 USE seaweedfs;
 CREATE TABLE IF NOT EXISTS filemeta (

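The init script creates and uses the seaweedfs database, but the old GRANT targeted seaweedfs_fast, leaving the seaweedfs user without privileges on the schema that holds the filemeta table; the change aligns the grant with the database name. A small sketch of checking the corrected grant from Go, assuming the MySQL instance from the corresponding compose setup is reachable on 127.0.0.1:3306 (host and port are my assumption, not stated in the diff):

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // register the MySQL driver
)

func main() {
	// User, password and database come from the init script; the address is assumed.
	db, err := sql.Open("mysql", "seaweedfs:secret@tcp(127.0.0.1:3306)/seaweedfs")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// With the corrected GRANT the seaweedfs user can read its own filemeta table.
	var n int
	if err := db.QueryRow("SELECT COUNT(*) FROM filemeta").Scan(&n); err != nil {
		log.Fatal(err)
	}
	log.Printf("filemeta rows: %d", n)
}
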
@@ -93,7 +93,9 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 	defer dataReader.Close()
 
 	if strings.HasSuffix(object, "/") {
-		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
+		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"), func(entry *filer_pb.Entry) {
+			entry.Attributes.Mime = r.Header.Get("Content-Type")
+		}); err != nil {
 			s3err.WriteErrorResponse(w, r, s3err.ErrInternalError)
 			return
 		}

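With this change a PUT whose object key ends in "/" still creates a directory under the bucket, but the trailing slash is trimmed from the path and the request's Content-Type is recorded as the directory entry's mime; the listing code below uses a non-empty mime to recognize explicitly created folders. A client-side sketch (mine, not from the PR) of creating such a folder against the local stack with aws-sdk-go v1; the httpd/unix-directory content type is only an illustrative value for what a WebDAV-style client such as Nextcloud might send.

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:8333"),
		Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// A zero-byte object whose key ends in "/" now becomes a directory entry
	// whose mime is taken from the Content-Type header.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:      aws.String("nextcloud"),
		Key:         aws.String("photos/"),
		Body:        bytes.NewReader([]byte{}),
		ContentType: aws.String("httpd/unix-directory"), // illustrative value, not prescribed by the PR
	})
	if err != nil {
		log.Fatal(err)
	}
}
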
@@ -135,8 +135,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 	bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)
 	reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir)
 	if strings.HasSuffix(reqDir, "/") {
-		// remove trailing "/"
-		reqDir = reqDir[:len(reqDir)-1]
+		reqDir = strings.TrimSuffix(reqDir, "/")
 	}
 
 	var contents []ListEntry

@@ -148,30 +147,30 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 	// check filer
 	err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
 
-		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) {
+		_, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) {
 			if entry.IsDirectory {
 				if delimiter == "/" {
 					commonPrefixes = append(commonPrefixes, PrefixEntry{
 						Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):],
 					})
 				}
-			} else {
-				storageClass := "STANDARD"
-				if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
-					storageClass = string(v)
-				}
-				contents = append(contents, ListEntry{
-					Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
-					LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
-					ETag:         "\"" + filer.ETag(entry) + "\"",
-					Size:         int64(filer.FileSize(entry)),
-					Owner: CanonicalUser{
-						ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
-						DisplayName: entry.Attributes.UserName,
-					},
-					StorageClass: StorageClass(storageClass),
-				})
-			}
+				return
+			}
+			storageClass := "STANDARD"
+			if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
+				storageClass = string(v)
+			}
+			contents = append(contents, ListEntry{
+				Key:          fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):],
+				LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+				ETag:         "\"" + filer.ETag(entry) + "\"",
+				Size:         int64(filer.FileSize(entry)),
+				Owner: CanonicalUser{
+					ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
+					DisplayName: entry.Attributes.UserName,
+				},
+				StorageClass: StorageClass(storageClass),
+			})
 		})
 		if doErr != nil {
 			return doErr

@@ -181,6 +180,32 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 			nextMarker = ""
 		}
 
+		if len(contents) == 0 && maxKeys > 0 {
+			if strings.HasSuffix(originalPrefix, "/") && prefix == "" {
+				reqDir, prefix = filepath.Split(strings.TrimSuffix(reqDir, "/"))
+				reqDir = strings.TrimSuffix(reqDir, "/")
+			}
+			_, _, _, doErr = s3a.doListFilerEntries(client, reqDir, prefix, 1, prefix, delimiter, true, func(dir string, entry *filer_pb.Entry) {
+				if entry.IsDirectory && entry.Attributes.Mime != "" && entry.Name == prefix {
+					storageClass := "STANDARD"
+					if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok {
+						storageClass = string(v)
+					}
+					contents = append(contents, ListEntry{
+						Key:          fmt.Sprintf("%s/%s", dir, entry.Name+"/")[len(bucketPrefix):],
+						LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(),
+						ETag:         "\"" + filer.ETag(entry) + "\"",
+						Size:         int64(filer.FileSize(entry)),
+						Owner: CanonicalUser{
+							ID:          fmt.Sprintf("%x", entry.Attributes.Uid),
+							DisplayName: entry.Attributes.UserName,
+						},
+						StorageClass: StorageClass(storageClass),
+					})
+				}
+			})
+		}
+
 		response = ListBucketResult{
 			Name:   bucket,
 			Prefix: originalPrefix,

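The added block covers a listing that comes back empty: when maxKeys allows results and the original prefix ends in "/", the prefix is split into its parent directory and final path element, and a one-key inclusive lookup is issued for that element; if the entry found is a directory with a non-empty mime (i.e. a folder created via the trailing-slash PUT above), it is returned as a key ending in "/". In other words, an explicitly created but still empty folder now shows up in list responses. A sketch of observing that from a client, same stack and SDK assumptions as before:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:8333"),
		Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// Listing the empty folder created earlier should now return the "photos/" key itself.
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:    aws.String("nextcloud"),
		Prefix:    aws.String("photos/"),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range out.Contents {
		fmt.Println(*obj.Key, *obj.Size)
	}
}
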
@@ -199,7 +224,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, m
 		return
 	}
 
-func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
+func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) {
 	// invariants
 	// prefix and marker should be under dir, marker may contain "/"
 	// maxKeys should be updated for each recursion

@@ -214,8 +239,8 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 	if strings.Contains(marker, "/") {
 		sepIndex := strings.Index(marker, "/")
 		subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:]
-		// println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
-		subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn)
+		glog.V(4).Infoln("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys)
+		subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, false, eachEntryFn)
 		if subErr != nil {
 			err = subErr
 			return

@@ -237,7 +262,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 		Prefix:             prefix,
 		Limit:              uint32(maxKeys + 1),
 		StartFromFileName:  marker,
-		InclusiveStartFrom: false,
+		InclusiveStartFrom: inclusiveStartFrom,
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())

@@ -266,33 +291,34 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d
 			nextMarker = entry.Name
 			if entry.IsDirectory {
 				// println("ListEntries", dir, "dir:", entry.Name)
-				if entry.Name != ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
-					if delimiter != "/" {
-						eachEntryFn(dir, entry)
-						// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
-						subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn)
-						if subErr != nil {
-							err = fmt.Errorf("doListFilerEntries2: %v", subErr)
-							return
-						}
-						// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
-						counter += subCounter
-						nextMarker = entry.Name + "/" + subNextMarker
-						if subIsTruncated {
-							isTruncated = true
-							return
-						}
-					} else {
-						var isEmpty bool
-						if !s3a.option.AllowEmptyFolder {
-							if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
-								glog.Errorf("check empty folder %s: %v", dir, err)
-							}
-						}
-						if !isEmpty {
-							eachEntryFn(dir, entry)
-							counter++
-						}
-					}
+				if entry.Name == ".uploads" { // FIXME no need to apply to all directories. this extra also affects maxKeys
+					continue
+				}
+				if delimiter != "/" {
+					eachEntryFn(dir, entry)
+					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter)
+					subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, false, eachEntryFn)
+					if subErr != nil {
+						err = fmt.Errorf("doListFilerEntries2: %v", subErr)
+						return
+					}
+					// println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated)
+					counter += subCounter
+					nextMarker = entry.Name + "/" + subNextMarker
+					if subIsTruncated {
+						isTruncated = true
+						return
+					}
+				} else {
+					var isEmpty bool
+					if !s3a.option.AllowEmptyFolder {
+						if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil {
+							glog.Errorf("check empty folder %s: %v", dir, err)
+						}
+					}
+					if !isEmpty {
+						eachEntryFn(dir, entry)
+						counter++
+					}
 				}
 			} else {

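Finally, the old entry.Name != ".uploads" wrapper becomes an early continue, so the .uploads staging directory is still skipped while the two remaining branches sit one level shallower, and the recursive call taken when delimiter is not "/" now passes false for the new inclusiveStartFrom parameter. That recursion is what a delimiter-less listing exercises; a short sketch, same assumptions as the earlier examples:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://localhost:8333"),
		Credentials:      credentials.NewStaticCredentials("some_access_key1", "some_secret_key1", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// No Delimiter: the server-side listing recurses into sub-directories and returns full keys.
	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{Bucket: aws.String("nextcloud")},
		func(page *s3.ListObjectsV2Output, lastPage bool) bool {
			for _, obj := range page.Contents {
				fmt.Println(*obj.Key)
			}
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}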