Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

commit ea1169dc80 (parent e3b8bf5588)

    filer cipher: single chunk http POST and PUT and read

@@ -69,18 +69,12 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader,
 	// encrypt data
 	var cipherKey util.CipherKey
 	var clearDataLen int
+	var err error
 	if cipher {
-		clearData, err := ioutil.ReadAll(reader)
+		cipherKey, reader, clearDataLen, _, err = util.EncryptReader(reader)
 		if err != nil {
-			return nil, fmt.Errorf("read raw input: %v", err)
+			return nil, err
 		}
-		clearDataLen = len(clearData)
-		cipherKey = util.GenCipherKey()
-		encryptedData, err := util.Encrypt(clearData, cipherKey)
-		if err != nil {
-			return nil, fmt.Errorf("encrypt input: %v", err)
-		}
-		reader = bytes.NewReader(encryptedData)
 	}

 	// upload data
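(The util.EncryptReader helper that replaces the inline read-encrypt-rewrap sequence above is added by this same commit in the weed/util hunk at the end of this diff; a usage sketch follows that hunk.)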
@@ -99,13 +99,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
 	}

 	debug("parsing upload file...")
-	fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r, 256*1024*1024)
+	pu, pe := needle.ParseUpload(r, 256*1024*1024)
 	if pe != nil {
 		writeJsonError(w, r, http.StatusBadRequest, pe)
 		return
 	}

-	debug("assigning file id for", fname)
+	debug("assigning file id for", pu.FileName)
 	r.ParseForm()
 	count := uint64(1)
 	if r.FormValue("count") != "" {
@@ -129,21 +129,21 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
 	}

 	url := "http://" + assignResult.Url + "/" + assignResult.Fid
-	if lastModified != 0 {
-		url = url + "?ts=" + strconv.FormatUint(lastModified, 10)
+	if pu.ModifiedTime != 0 {
+		url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10)
 	}

 	debug("upload file to store", url)
-	uploadResult, err := operation.Upload(url, fname, false, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth)
+	uploadResult, err := operation.Upload(url, pu.FileName, false, bytes.NewReader(pu.Data), pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth)
 	if err != nil {
 		writeJsonError(w, r, http.StatusInternalServerError, err)
 		return
 	}

-	m["fileName"] = fname
+	m["fileName"] = pu.FileName
 	m["fid"] = assignResult.Fid
 	m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid
-	m["size"] = originalDataSize
+	m["size"] = pu.OriginalDataSize
 	m["eTag"] = uploadResult.ETag
 	writeJsonQuiet(w, r, http.StatusCreated, m)
 	return
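ParseUpload now returns a single *needle.ParsedUpload value instead of a ten-value tuple, so call sites like submitForClientHandler read named fields rather than positional returns. A minimal sketch of the call-site change (use() is a hypothetical placeholder):

```go
// Before: ten positional results, easy to misorder and hard to extend.
// fname, data, mimeType, pairMap, isGzipped, originalDataSize,
//	lastModified, _, _, pe := needle.ParseUpload(r, sizeLimit)

// After: one struct with self-describing fields.
pu, pe := needle.ParseUpload(r, sizeLimit)
if pe != nil {
	return pe
}
use(pu.FileName, pu.Data, pu.MimeType, pu.OriginalDataSize)
```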
@@ -2,6 +2,7 @@ package weed_server

 import (
 	"context"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"mime"
@@ -14,7 +15,6 @@ import (

 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -136,15 +136,16 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request,
 	if entry.Attr.Mime != "" {
 		w.Header().Set("Content-Type", entry.Attr.Mime)
 	}
-	w.WriteHeader(resp.StatusCode)
 	if entry.Chunks[0].CipherKey == nil {
+		w.WriteHeader(resp.StatusCode)
 		io.Copy(w, resp.Body)
 	} else {
-		fs.writeEncryptedChunk(w, resp, entry.Chunks[0])
+		fs.writeEncryptedChunk(w, resp, entry)
 	}
 }

-func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Response, chunk *filer_pb.FileChunk) {
+func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Response, entry *filer2.Entry) {
+	chunk := entry.Chunks[0]
 	encryptedData, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
 		glog.V(1).Infof("read encrypted %s failed, err: %v", chunk.FileId, err)
@@ -157,6 +158,8 @@ func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Res
 		w.WriteHeader(http.StatusNotFound)
 		return
 	}
+	w.Header().Set("Content-Length", fmt.Sprintf("%d", chunk.Size))
+	w.WriteHeader(resp.StatusCode)
 	w.Write(decryptedData)
 }
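Note the ordering change in handleSingleChunk: w.WriteHeader moved inside the branch because, for an encrypted chunk, Content-Length must be set to the decrypted size (chunk.Size) before the status line is written, and decryption can only happen after the whole ciphertext has been fetched. A condensed, hypothetical sketch of that serve path, assuming the util.Decrypt helper from the weed/util hunk at the end of this diff:

```go
// Hypothetical condensation of the new writeEncryptedChunk flow.
// Imports assumed: fmt, io/ioutil, net/http, filer_pb, util.
func serveEncrypted(w http.ResponseWriter, resp *http.Response, chunk *filer_pb.FileChunk) error {
	// The ciphertext (nonce + sealed data) must be read in full first.
	encrypted, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	decrypted, err := util.Decrypt(encrypted, util.CipherKey(chunk.CipherKey))
	if err != nil {
		return err
	}
	// chunk.Size is the clear size recorded at write time, so the header
	// can only be finalized after decryption succeeds.
	w.Header().Set("Content-Length", fmt.Sprintf("%d", chunk.Size))
	w.WriteHeader(resp.StatusCode)
	_, err = w.Write(decrypted)
	return err
}
```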
@@ -90,10 +90,22 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	if fs.option.Cipher {
+		reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter)
+		if err != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, err)
+		} else if reply != nil {
+			writeJsonQuiet(w, r, http.StatusCreated, reply)
+		}
+
+		return
+	}
+
 	fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)

 	if err != nil || fileId == "" || urlLocation == "" {
 		glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
+		writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter))
 		return
 	}
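With the cipher option enabled on the filer, a plain single-chunk POST or PUT is transparently encrypted before it reaches the volume server, and decrypted again on read; clients need no changes. A hedged client-side sketch (the filer address and path are assumptions; 8888 is the default filer port):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// PUT a small file through the filer; with cipher enabled the filer
	// encrypts the bytes before storing them on the volume server.
	req, _ := http.NewRequest("PUT", "http://localhost:8888/test/hello.txt", strings.NewReader("hello world"))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Reading it back returns the decrypted content.
	getResp, err := http.Get("http://localhost:8888/test/hello.txt")
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(getResp.Body)
	getResp.Body.Close()
	fmt.Println(string(body)) // "hello world"
}
```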
@@ -134,7 +146,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {

 // update metadata in filer store
 func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter,
-	replication string, collection string, ret operation.UploadResult, fileId string) (err error) {
+	replication string, collection string, ret *operation.UploadResult, fileId string) (err error) {

 	stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc()
 	start := time.Now()
@@ -198,12 +210,14 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
 }

 // send request to volume server
-func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret operation.UploadResult, err error) {
+func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, err error) {

 	stats.FilerRequestCounter.WithLabelValues("postUpload").Inc()
 	start := time.Now()
 	defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }()

+	ret = &operation.UploadResult{}
+
 	request := &http.Request{
 		Method: r.Method,
 		URL:    u,
@@ -215,6 +229,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se
 		Host:          r.Host,
+		ContentLength: r.ContentLength,
 	}

 	if auth != "" {
 		request.Header.Set("Authorization", "BEARER "+string(auth))
 	}
@@ -103,13 +103,13 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r

 		// upload the chunk to the volume server
 		chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10)
-		uploadedSize, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth)
+		uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth)
 		if uploadErr != nil {
 			return nil, uploadErr
 		}

 		// if last chunk exhausted the reader exactly at the border
-		if uploadedSize == 0 {
+		if uploadResult.Size == 0 {
 			break
 		}

@@ -118,18 +118,20 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
 			&filer_pb.FileChunk{
 				FileId: fileId,
 				Offset: chunkOffset,
-				Size:   uint64(uploadedSize),
+				Size:   uint64(uploadResult.Size),
 				Mtime:  time.Now().UnixNano(),
+				ETag:   uploadResult.ETag,
+				CipherKey: uploadResult.CipherKey,
 			},
 		)

-		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadedSize), contentLength)
+		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size), contentLength)

 		// reset variables for the next chunk
-		chunkOffset = chunkOffset + int64(uploadedSize)
+		chunkOffset = chunkOffset + int64(uploadResult.Size)

 		// if last chunk was not at full chunk size, but already exhausted the reader
-		if uploadedSize < int64(chunkSize) {
+		if int64(uploadResult.Size) < int64(chunkSize) {
 			break
 		}
 	}
@@ -174,7 +176,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
 }

 func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request,
-	limitedReader io.Reader, fileName string, contentType string, fileId string, auth security.EncodedJwt) (size int64, err error) {
+	limitedReader io.Reader, fileName string, contentType string, fileId string, auth security.EncodedJwt) (*operation.UploadResult, error) {

 	stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc()
 	start := time.Now()
@@ -182,9 +184,5 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht
 		stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds())
 	}()

-	uploadResult, uploadError := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, nil, auth)
-	if uploadError != nil {
-		return 0, uploadError
-	}
-	return int64(uploadResult.Size), nil
+	return operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, nil, auth)
 }
new file: weed/server/filer_server_handlers_write_cipher.go (103 lines)
@@ -0,0 +1,103 @@
+package weed_server
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// handling single chunk POST or PUT upload
+func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request,
+	replication string, collection string, dataCenter string) (filerResult *FilerPostResult, err error) {
+
+	fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter)
+
+	if err != nil || fileId == "" || urlLocation == "" {
+		return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
+	}
+
+	glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
+
+	// Note: gzip(cipher(data)), cipher data first, then gzip
+
+	sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
+
+	pu, err := needle.ParseUpload(r, sizeLimit)
+	data := pu.Data
+	uncompressedData := pu.Data
+	cipherKey := util.GenCipherKey()
+	if pu.IsGzipped {
+		uncompressedData = pu.UncompressedData
+		data, err = util.Encrypt(pu.UncompressedData, cipherKey)
+		if err != nil {
+			return nil, fmt.Errorf("encrypt input: %v", err)
+		}
+	}
+	if pu.MimeType == "" {
+		pu.MimeType = http.DetectContentType(uncompressedData)
+	}
+
+	uploadResult, uploadError := operation.Upload(urlLocation, pu.FileName, true, bytes.NewReader(data), pu.IsGzipped, "", pu.PairMap, auth)
+	if uploadError != nil {
+		return nil, fmt.Errorf("upload to volume server: %v", uploadError)
+	}
+
+	// Save to chunk manifest structure
+	fileChunks := []*filer_pb.FileChunk{
+		{
+			FileId:    fileId,
+			Offset:    0,
+			Size:      uint64(uploadResult.Size),
+			Mtime:     time.Now().UnixNano(),
+			ETag:      uploadResult.ETag,
+			CipherKey: uploadResult.CipherKey,
+		},
+	}
+
+	path := r.URL.Path
+	if strings.HasSuffix(path, "/") {
+		if pu.FileName != "" {
+			path += pu.FileName
+		}
+	}
+
+	entry := &filer2.Entry{
+		FullPath: filer2.FullPath(path),
+		Attr: filer2.Attr{
+			Mtime:       time.Now(),
+			Crtime:      time.Now(),
+			Mode:        0660,
+			Uid:         OS_UID,
+			Gid:         OS_GID,
+			Replication: replication,
+			Collection:  collection,
+			TtlSec:      int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)),
+			Mime:        pu.MimeType,
+		},
+		Chunks: fileChunks,
+	}
+
+	filerResult = &FilerPostResult{
+		Name: pu.FileName,
+		Size: int64(pu.OriginalDataSize),
+	}
+
+	if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil {
+		fs.filer.DeleteChunks(entry.Chunks)
+		err = dbErr
+		filerResult.Error = dbErr.Error()
+		return
+	}

+	return
+}
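The `gzip(cipher(data))` note above captures the key ordering decision: ciphertext is effectively random, so compressing after encryption gains nothing; the filer therefore encrypts the uncompressed bytes and records the original size separately. A minimal round-trip sketch of the weed/util primitives this file relies on (Encrypt, Decrypt, GenCipherKey, all shown in the final hunk of this diff):

```go
// Sketch: AES-GCM round trip with the weed/util helpers used above.
key := util.GenCipherKey()
ciphertext, err := util.Encrypt([]byte("clear bytes"), key)
if err != nil {
	panic(err)
}
clear, err := util.Decrypt(ciphertext, key)
if err != nil {
	panic(err)
}
// string(clear) == "clear bytes"; len(ciphertext) > len(clear) because
// the nonce and GCM auth tag travel alongside the sealed bytes.
```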
@@ -3,8 +3,6 @@ package needle
 import (
 	"encoding/json"
 	"fmt"
-	"io"
-	"io/ioutil"
 	"net/http"
 	"strconv"
 	"strings"
@@ -12,7 +10,6 @@ import (

 	"github.com/chrislusf/seaweedfs/weed/images"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
-	"github.com/chrislusf/seaweedfs/weed/util"
 )

 const (
@@ -51,67 +48,30 @@ func (n *Needle) String() (str string) {
 	return
 }

-func ParseUpload(r *http.Request, sizeLimit int64) (
-	fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int,
-	modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) {
-	pairMap = make(map[string]string)
-	for k, v := range r.Header {
-		if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) {
-			pairMap[k] = v[0]
-		}
-	}
-
-	if r.Method == "POST" {
-		fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r, sizeLimit)
-	} else {
-		isGzipped = r.Header.Get("Content-Encoding") == "gzip"
-		mimeType = r.Header.Get("Content-Type")
-		fileName = ""
-		data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1))
-		originalDataSize = len(data)
-		if e == io.EOF || int64(originalDataSize) == sizeLimit+1 {
-			io.Copy(ioutil.Discard, r.Body)
-		}
-		r.Body.Close()
-		if isGzipped {
-			if unzipped, e := util.UnGzipData(data); e == nil {
-				originalDataSize = len(unzipped)
-			}
-		} else if shouldGzip, _ := util.IsGzippableFileType("", mimeType); shouldGzip {
-			if compressedData, err := util.GzipData(data); err == nil {
-				data = compressedData
-				isGzipped = true
-			}
-		}
-	}
-	if e != nil {
-		return
-	}
-
-	modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
-	ttl, _ = ReadTTL(r.FormValue("ttl"))
-
-	return
-}
 func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, e error) {
-	var pairMap map[string]string
-	fname, mimeType, isGzipped, isChunkedFile := "", "", false, false
 	n = new(Needle)
-	fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r, sizeLimit)
+	pu, e := ParseUpload(r, sizeLimit)
 	if e != nil {
 		return
 	}
-	if len(fname) < 256 {
-		n.Name = []byte(fname)
+	n.Data = pu.Data
+	originalSize = pu.OriginalDataSize
+	n.LastModified = pu.ModifiedTime
+	n.Ttl = pu.Ttl
+
+	if len(pu.FileName) < 256 {
+		n.Name = []byte(pu.FileName)
 		n.SetHasName()
 	}
-	if len(mimeType) < 256 {
-		n.Mime = []byte(mimeType)
+	if len(pu.MimeType) < 256 {
+		n.Mime = []byte(pu.MimeType)
 		n.SetHasMime()
 	}
-	if len(pairMap) != 0 {
+	if len(pu.PairMap) != 0 {
 		trimmedPairMap := make(map[string]string)
-		for k, v := range pairMap {
+		for k, v := range pu.PairMap {
 			trimmedPairMap[k[len(PairNamePrefix):]] = v
 		}

@@ -122,7 +82,7 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit
 			n.SetHasPairs()
 		}
 	}
-	if isGzipped {
+	if pu.IsGzipped {
 		n.SetGzipped()
 	}
 	if n.LastModified == 0 {
@@ -133,13 +93,13 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit
 		n.SetHasTtl()
 	}

-	if isChunkedFile {
+	if pu.IsChunkedFile {
 		n.SetIsChunkManifest()
 	}

 	if fixJpgOrientation {
-		loweredName := strings.ToLower(fname)
-		if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") {
+		loweredName := strings.ToLower(pu.FileName)
+		if pu.MimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") {
 			n.Data = images.FixJpgOrientation(n.Data)
 		}
 	}
deleted file (118 lines)
@@ -1,118 +0,0 @@
-package needle
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"mime"
-	"net/http"
-	"path"
-	"strconv"
-	"strings"
-
-	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/util"
-)
-
-func parseMultipart(r *http.Request, sizeLimit int64) (
-	fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) {
-	defer func() {
-		if e != nil && r.Body != nil {
-			io.Copy(ioutil.Discard, r.Body)
-			r.Body.Close()
-		}
-	}()
-	form, fe := r.MultipartReader()
-	if fe != nil {
-		glog.V(0).Infoln("MultipartReader [ERROR]", fe)
-		e = fe
-		return
-	}
-
-	//first multi-part item
-	part, fe := form.NextPart()
-	if fe != nil {
-		glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
-		e = fe
-		return
-	}
-
-	fileName = part.FileName()
-	if fileName != "" {
-		fileName = path.Base(fileName)
-	}
-
-	data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1))
-	if e != nil {
-		glog.V(0).Infoln("Reading Content [ERROR]", e)
-		return
-	}
-	if len(data) == int(sizeLimit)+1 {
-		e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
-		return
-	}
-
-	//if the filename is empty string, do a search on the other multi-part items
-	for fileName == "" {
-		part2, fe := form.NextPart()
-		if fe != nil {
-			break // no more or on error, just safely break
-		}
-
-		fName := part2.FileName()
-
-		//found the first <file type> multi-part has filename
-		if fName != "" {
-			data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1))
-			if fe2 != nil {
-				glog.V(0).Infoln("Reading Content [ERROR]", fe2)
-				e = fe2
-				return
-			}
-			if len(data) == int(sizeLimit)+1 {
-				e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
-				return
-			}
-
-			//update
-			data = data2
-			fileName = path.Base(fName)
-			break
-		}
-	}
-
-	originalDataSize = len(data)
-
-	isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
-
-	if !isChunkedFile {
-
-		dotIndex := strings.LastIndex(fileName, ".")
-		ext, mtype := "", ""
-		if dotIndex > 0 {
-			ext = strings.ToLower(fileName[dotIndex:])
-			mtype = mime.TypeByExtension(ext)
-		}
-		contentType := part.Header.Get("Content-Type")
-		if contentType != "" && mtype != contentType {
-			mimeType = contentType //only return mime type if not deductable
-			mtype = contentType
-		}
-
-		if part.Header.Get("Content-Encoding") == "gzip" {
-			if unzipped, e := util.UnGzipData(data); e == nil {
-				originalDataSize = len(unzipped)
-			}
-			isGzipped = true
-		} else if util.IsGzippable(ext, mtype, data) {
-			if compressedData, err := util.GzipData(data); err == nil {
-				if len(data) > len(compressedData) {
-					data = compressedData
-					isGzipped = true
-				}
-			}
-		}
-	}
-
-	return
-}
new file: weed/storage/needle/needle_parse_upload.go (166 lines)
@@ -0,0 +1,166 @@
+package needle
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type ParsedUpload struct {
+	FileName         string
+	Data             []byte
+	MimeType         string
+	PairMap          map[string]string
+	IsGzipped        bool
+	OriginalDataSize int
+	ModifiedTime     uint64
+	Ttl              *TTL
+	IsChunkedFile    bool
+	UncompressedData []byte
+}
+
+func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) {
+	pu = &ParsedUpload{}
+	pu.PairMap = make(map[string]string)
+	for k, v := range r.Header {
+		if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) {
+			pu.PairMap[k] = v[0]
+		}
+	}
+
+	if r.Method == "POST" {
+		e = parseMultipart(r, sizeLimit, pu)
+	} else {
+		e = parsePut(r, sizeLimit, pu)
+	}
+	if e != nil {
+		return
+	}
+
+	pu.ModifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
+	pu.Ttl, _ = ReadTTL(r.FormValue("ttl"))
+
+	pu.OriginalDataSize = len(pu.Data)
+	pu.UncompressedData = pu.Data
+	if pu.IsGzipped {
+		if unzipped, e := util.UnGzipData(pu.Data); e == nil {
+			pu.OriginalDataSize = len(unzipped)
+			pu.UncompressedData = unzipped
+		}
+	} else if shouldGzip, _ := util.IsGzippableFileType("", pu.MimeType); shouldGzip {
+		if compressedData, err := util.GzipData(pu.Data); err == nil {
+			pu.Data = compressedData
+			pu.IsGzipped = true
+		}
+	}
+
+	return
+}
+
+func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
+	pu.IsGzipped = r.Header.Get("Content-Encoding") == "gzip"
+	pu.MimeType = r.Header.Get("Content-Type")
+	pu.FileName = ""
+	pu.Data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1))
+	if e == io.EOF || int64(pu.OriginalDataSize) == sizeLimit+1 {
+		io.Copy(ioutil.Discard, r.Body)
+	}
+	r.Body.Close()
+	return nil
+}
+
+func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
+	defer func() {
+		if e != nil && r.Body != nil {
+			io.Copy(ioutil.Discard, r.Body)
+			r.Body.Close()
+		}
+	}()
+	form, fe := r.MultipartReader()
+	if fe != nil {
+		glog.V(0).Infoln("MultipartReader [ERROR]", fe)
+		e = fe
+		return
+	}
+
+	//first multi-part item
+	part, fe := form.NextPart()
+	if fe != nil {
+		glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
+		e = fe
+		return
+	}
+
+	pu.FileName = part.FileName()
+	if pu.FileName != "" {
+		pu.FileName = path.Base(pu.FileName)
+	}
+
+	pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1))
+	if e != nil {
+		glog.V(0).Infoln("Reading Content [ERROR]", e)
+		return
+	}
+	if len(pu.Data) == int(sizeLimit)+1 {
+		e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
+		return
+	}
+
+	//if the filename is empty string, do a search on the other multi-part items
+	for pu.FileName == "" {
+		part2, fe := form.NextPart()
+		if fe != nil {
+			break // no more or on error, just safely break
+		}
+
+		fName := part2.FileName()
+
+		//found the first <file type> multi-part has filename
+		if fName != "" {
+			data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1))
+			if fe2 != nil {
+				glog.V(0).Infoln("Reading Content [ERROR]", fe2)
+				e = fe2
+				return
+			}
+			if len(data2) == int(sizeLimit)+1 {
+				e = fmt.Errorf("file over the limited %d bytes", sizeLimit)
+				return
+			}
+
+			//update
+			pu.Data = data2
+			pu.FileName = path.Base(fName)
+			break
+		}
+	}
+
+	pu.IsChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
+
+	if !pu.IsChunkedFile {
+
+		dotIndex := strings.LastIndex(pu.FileName, ".")
+		ext, mtype := "", ""
+		if dotIndex > 0 {
+			ext = strings.ToLower(pu.FileName[dotIndex:])
+			mtype = mime.TypeByExtension(ext)
+		}
+		contentType := part.Header.Get("Content-Type")
+		if contentType != "" && contentType != "application/octet-stream" && mtype != contentType {
+			pu.MimeType = contentType //only return mime type if not deductable
+			mtype = contentType
+		}
+
+		pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip"
+	}
+
+	return
+}
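A hedged sketch of exercising the new parser from a test: the PUT path goes through parsePut, so IsGzipped and MimeType come straight from the request headers, and the ts query parameter is parsed afterwards in ParseUpload. The test name and asserted values are illustrative assumptions:

```go
package needle

import (
	"net/http/httptest"
	"strings"
	"testing"
)

// Sketch of a unit test for the PUT path of ParseUpload.
func TestParseUploadPut(t *testing.T) {
	r := httptest.NewRequest("PUT", "/test.txt?ts=1579000000", strings.NewReader("hello"))
	r.Header.Set("Content-Type", "text/plain")

	pu, err := ParseUpload(r, 1024)
	if err != nil {
		t.Fatal(err)
	}
	if pu.FileName != "" { // PUT carries no multipart filename
		t.Errorf("unexpected file name %q", pu.FileName)
	}
	if pu.ModifiedTime != 1579000000 {
		t.Errorf("ts not parsed: %d", pu.ModifiedTime)
	}
	if pu.OriginalDataSize != 5 { // size of the clear body, before any gzip
		t.Errorf("unexpected original size %d", pu.OriginalDataSize)
	}
}
```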
@@ -1,11 +1,14 @@
 package util

 import (
+	"bytes"
 	"crypto/aes"
 	"crypto/cipher"
 	"crypto/rand"
 	"errors"
+	"fmt"
 	"io"
+	"io/ioutil"

 	"github.com/chrislusf/seaweedfs/weed/glog"
 )
@@ -58,3 +61,21 @@ func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) {
 	nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:]
 	return gcm.Open(nil, nonce, ciphertext, nil)
 }
+
+func EncryptReader(clearReader io.Reader) (cipherKey CipherKey, encryptedReader io.ReadCloser, clearDataLen, encryptedDataLen int, err error) {
+	clearData, err := ioutil.ReadAll(clearReader)
+	if err != nil {
+		err = fmt.Errorf("read raw input: %v", err)
+		return
+	}
+	clearDataLen = len(clearData)
+	cipherKey = GenCipherKey()
+	encryptedData, err := Encrypt(clearData, cipherKey)
+	if err != nil {
+		err = fmt.Errorf("encrypt input: %v", err)
+		return
+	}
+	encryptedDataLen = len(encryptedData)
+	encryptedReader = ioutil.NopCloser(bytes.NewReader(encryptedData))
+	return
+}
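EncryptReader buffers the whole clear stream in memory, which fits the single-chunk sizes this commit targets; it returns both lengths because the ciphertext is larger than the cleartext (Decrypt above strips a prepended nonce, and GCM appends an auth tag, so with the standard 12-byte nonce and 16-byte tag the overhead is 28 bytes). A minimal round-trip sketch in a hypothetical main package:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	key, encReader, clearLen, encLen, err := util.EncryptReader(strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}
	ciphertext, _ := ioutil.ReadAll(encReader)
	clear, err := util.Decrypt(ciphertext, key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(clear))    // "hello world"
	fmt.Println(clearLen, encLen) // 11 and 11+28, assuming standard GCM sizes
}
```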