package weed_server

import (
	"context"
	"crypto/md5"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/stats"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)
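
// autoChunk handles POST and PUT uploads by cutting the request body into
// chunks of at most maxMB megabytes. The limit comes from the filer's -maxMB
// command-line option and can be overridden per request with the ?maxMB=
// query parameter, for example (an illustrative invocation; host and path
// are placeholders, 8888 being the filer's default port):
//
//	curl -F file=@big.bin "http://localhost:8888/some/dir/?maxMB=4"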
func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) {

	// autoChunking can be set at the command-line level or as a query param. Query param overrides command-line
	query := r.URL.Query()

	parsedMaxMB, _ := strconv.ParseInt(query.Get("maxMB"), 10, 32)
	maxMB := int32(parsedMaxMB)
	if maxMB <= 0 && fs.option.MaxMB > 0 {
		maxMB = int32(fs.option.MaxMB)
	}

	chunkSize := 1024 * 1024 * maxMB

	stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc()
	start := time.Now()
	defer func() {
		stats.FilerRequestHistogram.WithLabelValues("postAutoChunk").Observe(time.Since(start).Seconds())
	}()

	var reply *FilerPostResult
	var err error
	var md5bytes []byte
	if r.Method == "POST" {
		if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") {
			reply, err = fs.mkdir(ctx, w, r)
		} else {
			reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, so)
		}
	} else {
		reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, so)
	}
	if err != nil {
		// "read input:" errors mean the client's request body could not be
		// read; report 499 (client closed request, the nginx convention)
		// rather than a server-side error.
		if strings.HasPrefix(err.Error(), "read input:") {
			writeJsonError(w, r, 499, err)
		} else {
			writeJsonError(w, r, http.StatusInternalServerError, err)
		}
	} else if reply != nil {
		if len(md5bytes) > 0 {
			w.Header().Set("Content-MD5", util.Base64Encode(md5bytes))
		}
		writeJsonQuiet(w, r, http.StatusCreated, reply)
	}
}
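
// doPostAutoChunk uploads the first part of a multipart POST body as a chunk
// sequence and saves the resulting entry. The file name is taken from the
// part's Content-Disposition, and a generic application/octet-stream content
// type is dropped rather than stored. It returns the MD5 digest of the
// uploaded bytes alongside the post result.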
func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {

	multipartReader, multipartReaderErr := r.MultipartReader()
	if multipartReaderErr != nil {
		return nil, nil, multipartReaderErr
	}

	part1, part1Err := multipartReader.NextPart()
	if part1Err != nil {
		return nil, nil, part1Err
	}

	fileName := part1.FileName()
	if fileName != "" {
		fileName = path.Base(fileName)
	}
	contentType := part1.Header.Get("Content-Type")
	if contentType == "application/octet-stream" {
		contentType = ""
	}

	fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, so)
	if err != nil {
		return nil, nil, err
	}

	md5bytes = md5Hash.Sum(nil)
	filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)

	return
}
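
// doPutAutoChunk is the PUT counterpart: it chunk-uploads the raw request
// body, deriving the file name from the last element of the request path.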
func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) {

	fileName := path.Base(r.URL.Path)
	contentType := r.Header.Get("Content-Type")
	if contentType == "application/octet-stream" {
		contentType = ""
	}

	fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, so)
	if err != nil {
		return nil, nil, err
	}

	md5bytes = md5Hash.Sum(nil)
	filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent)

	return
}
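
// isAppend reports whether the request asks to append to an existing entry,
// e.g. PUT /path/to/file?op=append.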
func isAppend(r *http.Request) bool {
	return r.URL.Query().Get("op") == "append"
}
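
// saveMetaData builds and persists the filer entry describing the uploaded
// chunks. For ?op=append it extends the existing entry, shifting the new
// chunks past the current file size; otherwise it creates a fresh entry using
// the resolved mode, replication, collection, TTL, and disk type. Oversized
// chunk lists may be compacted into chunk manifests, and if the metadata
// write fails the freshly uploaded chunks are deleted so they do not leak.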
func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) {

	// detect file mode
	modeStr := r.URL.Query().Get("mode")
	if modeStr == "" {
		modeStr = "0660"
	}
	mode, err := strconv.ParseUint(modeStr, 8, 32)
	if err != nil {
		glog.Errorf("invalid mode format %q, falling back to 0660", modeStr)
		mode = 0660
	}

	// fix the path: a trailing slash means the file name comes from the upload
	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		if fileName != "" {
			path += fileName
		}
	}

	var entry *filer.Entry
	var mergedChunks []*filer_pb.FileChunk
	// when appending, look up the existing entry first
	if isAppend(r) {
		existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path))
		if findErr != nil && findErr != filer_pb.ErrNotFound {
			glog.V(0).Infof("failed to find %s: %v", path, findErr)
		}
		entry = existingEntry
	}
	if entry != nil {
		entry.Mtime = time.Now()
		entry.Md5 = nil
		// adjust chunk offsets
		for _, chunk := range fileChunks {
			chunk.Offset += int64(entry.FileSize)
		}
		mergedChunks = append(entry.Chunks, fileChunks...)
		entry.FileSize += uint64(chunkOffset)

		// TODO: support appending to entries whose content is stored inline
		if len(entry.Content) > 0 {
			replyerr = fmt.Errorf("append to small file is not supported yet")
			return
		}

	} else {
		glog.V(4).Infoln("saving", path)
		mergedChunks = fileChunks
		entry = &filer.Entry{
			FullPath: util.FullPath(path),
			Attr: filer.Attr{
				Mtime:       time.Now(),
				Crtime:      time.Now(),
				Mode:        os.FileMode(mode),
				Uid:         OS_UID,
				Gid:         OS_GID,
				Replication: so.Replication,
				Collection:  so.Collection,
				TtlSec:      so.TtlSeconds,
				DiskType:    so.DiskType,
				Mime:        contentType,
				Md5:         md5bytes,
				FileSize:    uint64(chunkOffset),
			},
			Content: content,
		}
	}

	// maybe compact entry chunks
	mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks)
	if replyerr != nil {
		glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr)
		return
	}
	entry.Chunks = mergedChunks

	filerResult = &FilerPostResult{
		Name: fileName,
		Size: int64(entry.FileSize),
	}

	if entry.Extended == nil {
		entry.Extended = make(map[string][]byte)
	}

	fs.saveAmzMetaData(r, entry)

	for k, v := range r.Header {
		if len(v) > 0 && strings.HasPrefix(k, needle.PairNamePrefix) {
			entry.Extended[k] = []byte(v[0])
		}
	}

	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil {
		fs.filer.DeleteChunks(fileChunks)
		replyerr = dbErr
		filerResult.Error = dbErr.Error()
		glog.V(0).Infof("failed to write %s to filer server: %v", path, dbErr)
	}
	return filerResult, replyerr
}
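
// uploadReaderToChunks reads the input in chunkSize slices and uploads each
// slice to a volume server, retrying up to three times with a freshly
// assigned file id when an upload fails. When the first read of a non-append
// request fits under the filer's inline-content limit, the bytes are
// returned as smallContent to be embedded in the entry instead of becoming
// chunks. Everything read is tee'd into md5Hash, which the caller finalizes
// with Sum(nil).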
func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
	var fileChunks []*filer_pb.FileChunk

	md5Hash := md5.New()
	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))

	chunkOffset := int64(0)
	var smallContent []byte

	for {
		limitedReader := io.LimitReader(partReader, int64(chunkSize))

		data, err := ioutil.ReadAll(limitedReader)
		if err != nil {
			return nil, nil, 0, err, nil
		}
		if chunkOffset == 0 && !isAppend(r) {
			if len(data) < fs.option.SaveToFilerLimit || (strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024) {
				smallContent = data
				chunkOffset += int64(len(data))
				break
			}
		}
		dataReader := util.NewBytesReader(data)

		// retry, assigning a different file id on each attempt
		var fileId, urlLocation string
		var auth security.EncodedJwt
		var assignErr, uploadErr error
		var uploadResult *operation.UploadResult
		for i := 0; i < 3; i++ {
			// assign one file id for one chunk
			fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
			if assignErr != nil {
				return nil, nil, 0, assignErr, nil
			}

			// upload the chunk to the volume server
			uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
			if uploadErr != nil {
				time.Sleep(251 * time.Millisecond)
				continue
			}
			break
		}
		if uploadErr != nil {
			return nil, nil, 0, uploadErr, nil
		}

		// the previous chunk exhausted the reader exactly at the chunk border
		if uploadResult.Size == 0 {
			break
		}

		// Save to chunk manifest structure
		fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))

		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))

		// advance the offset for the next chunk
		chunkOffset = chunkOffset + int64(uploadResult.Size)

		// a chunk shorter than the full chunk size means the reader is exhausted
		if int64(uploadResult.Size) < int64(chunkSize) {
			break
		}
	}

	return fileChunks, md5Hash, chunkOffset, nil, smallContent
}
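
// doUpload forwards one chunk to the volume server at urlLocation via
// operation.Upload, wrapping the call in per-chunk upload metrics.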
func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {

	stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc()
	start := time.Now()
	defer func() {
		stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds())
	}()

	uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth)
	return uploadResult, err, data
}
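
// saveAsChunk returns the callback that filer.MaybeManifestize uses to
// persist chunk manifests: assign a new file id, upload the manifest bytes,
// and return the resulting chunk with the storage option's collection and
// replication.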
func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType {

	return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) {
		// assign one file id for one chunk
		fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so)
		if assignErr != nil {
			return nil, "", "", assignErr
		}

		// upload the chunk to the volume server
		uploadResult, uploadErr, _ := operation.Upload(urlLocation, name, fs.option.Cipher, reader, false, "", nil, auth)
		if uploadErr != nil {
			return nil, "", "", uploadErr
		}

		return uploadResult.ToPbFileChunk(fileId, offset), so.Collection, so.Replication, nil
	}
}
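
// mkdir creates a directory entry at the request path (minus any trailing
// slash) and fails if an entry already exists there. As with file uploads,
// the mode defaults to 0660 and may be overridden with ?mode=.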
func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http.Request) (filerResult *FilerPostResult, replyerr error) {

	// detect file mode
	modeStr := r.URL.Query().Get("mode")
	if modeStr == "" {
		modeStr = "0660"
	}
	mode, err := strconv.ParseUint(modeStr, 8, 32)
	if err != nil {
		glog.Errorf("invalid mode format %q, falling back to 0660", modeStr)
		mode = 0660
	}

	// fix the path: drop any trailing slash
	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		path = path[:len(path)-1]
	}

	existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
	if err == nil && existingEntry != nil {
		replyerr = fmt.Errorf("dir %s already exists", path)
		return
	}

	glog.V(4).Infoln("mkdir", path)
	entry := &filer.Entry{
		FullPath: util.FullPath(path),
		Attr: filer.Attr{
			Mtime:  time.Now(),
			Crtime: time.Now(),
			Mode:   os.FileMode(mode) | os.ModeDir,
			Uid:    OS_UID,
			Gid:    OS_GID,
		},
	}

	filerResult = &FilerPostResult{
		Name: util.FullPath(path).Name(),
	}

	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil {
		replyerr = dbErr
		filerResult.Error = dbErr.Error()
		glog.V(0).Infof("failed to create dir %s on filer server: %v", path, dbErr)
	}
	return filerResult, replyerr
}
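
// saveAmzMetaData copies S3-style metadata headers onto the entry's extended
// attributes: the storage class, each tag from the object-tagging header
// (stored under a per-tag key), and any user metadata headers matching
// xhttp.AmzUserMetaPrefix. The caller must initialize entry.Extended first.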
func (fs *FilerServer) saveAmzMetaData(r *http.Request, entry *filer.Entry) {

	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
		entry.Extended[xhttp.AmzStorageClass] = []byte(sc)
	}

	// object tags arrive as a query-encoded string: key1=value1&key2=value2
	if tags := r.Header.Get(xhttp.AmzObjectTagging); tags != "" {
		for _, v := range strings.Split(tags, "&") {
			tag := strings.Split(v, "=")
			if len(tag) == 2 {
				entry.Extended[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
			}
		}
	}

	for header, values := range r.Header {
		if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
			for _, value := range values {
				// for repeated headers, the last value wins
				entry.Extended[header] = []byte(value)
			}
		}
	}
}