mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
Merge pull request #224 from tnextday/feature/chunked-file-support
Feature/chunked file support
This commit is contained in:
commit df5e54e02a
.gitignore (vendored): 74 lines changed
@@ -1,3 +1,77 @@
 go/weed/.goxc*
 tags
 *.swp
+### OSX template
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio
+
+*.iml
+
+## Directory-based project format:
+.idea/
+# if you remove the above rule, at least ignore the following:
+
+# User-specific stuff:
+# .idea/workspace.xml
+# .idea/tasks.xml
+# .idea/dictionaries
+
+# Sensitive or high-churn files:
+# .idea/dataSources.ids
+# .idea/dataSources.xml
+# .idea/sqlDataSources.xml
+# .idea/dynamic.xml
+# .idea/uiDesigner.xml
+
+# Gradle:
+# .idea/gradle.xml
+# .idea/libraries
+
+# Mongo Explorer plugin:
+# .idea/mongoSettings.xml
+
+## File-based project format:
+*.ipr
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+
+test_data
Makefile: 25 lines changed
@@ -1,11 +1,22 @@
+BINARY = weed
 
-.clean:
-	go clean -i -v ./go/weed/
+GO_FLAGS = #-v
+SOURCE_DIR = ./go/weed/
 
-.deps:
-	go get -d ./go/weed/
+all: build
 
-.build: .deps
-	go build -v ./go/weed/
+.PHONY : clean deps build linux
 
-all: .build
+clean:
+	go clean -i $(GO_FLAGS) $(SOURCE_DIR)
+	rm -f $(BINARY)
+
+deps:
+	go get $(GO_FLAGS) -d $(SOURCE_DIR)
+
+build: deps
+	go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
+
+linux: deps
+	mkdir -p linux
+	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)
go/operation/chunked_file.go (new file): 213 lines
@@ -0,0 +1,213 @@
package operation

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sort"
	"sync"

	"github.com/chrislusf/seaweedfs/go/glog"
	"github.com/chrislusf/seaweedfs/go/util"
)

var (
	// ErrRangeRequestsNotSupported is returned when the remote server does not
	// allow range requests (Accept-Ranges was not set).
	ErrRangeRequestsNotSupported = errors.New("Range requests are not supported by the remote server")
	// ErrInvalidRange is returned by Read when trying to read past the end of the file.
	ErrInvalidRange = errors.New("Invalid range")
)

type ChunkInfo struct {
	Fid    string `json:"fid"`
	Offset int64  `json:"offset"`
	Size   int64  `json:"size"`
}

type ChunkList []*ChunkInfo

type ChunkManifest struct {
	Name   string    `json:"name,omitempty"`
	Mime   string    `json:"mime,omitempty"`
	Size   int64     `json:"size,omitempty"`
	Chunks ChunkList `json:"chunks,omitempty"`
}

// ChunkedFileReader is a seekable reader over the chunks listed in a manifest.
type ChunkedFileReader struct {
	Manifest *ChunkManifest
	Master   string
	pos      int64
	pr       *io.PipeReader
	pw       *io.PipeWriter
	mutex    sync.Mutex
}

func (s ChunkList) Len() int           { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
	if isGzipped {
		var err error
		if buffer, err = UnGzipData(buffer); err != nil {
			return nil, err
		}
	}
	cm := ChunkManifest{}
	if e := json.Unmarshal(buffer, &cm); e != nil {
		return nil, e
	}
	sort.Sort(cm.Chunks)
	return &cm, nil
}

func (cm *ChunkManifest) Marshal() ([]byte, error) {
	return json.Marshal(cm)
}

func (cm *ChunkManifest) DeleteChunks(master string) error {
	deleteError := 0
	for _, ci := range cm.Chunks {
		if e := DeleteFile(master, ci.Fid, ""); e != nil {
			deleteError++
			glog.V(0).Infof("Delete %s error: %s, master: %s", ci.Fid, e.Error(), master)
		}
	}
	if deleteError > 0 {
		return errors.New("Not all chunks deleted.")
	}
	return nil
}

func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, e error) {
	req, err := http.NewRequest("GET", fileUrl, nil)
	if err != nil {
		return written, err
	}
	if offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}

	resp, err := util.Do(req)
	if err != nil {
		return written, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusRequestedRangeNotSatisfiable:
		return written, ErrInvalidRange
	case http.StatusOK:
		if offset > 0 {
			return written, ErrRangeRequestsNotSupported
		}
	case http.StatusPartialContent:
		break
	default:
		return written, fmt.Errorf("Read chunk needle error: [%d] %s", resp.StatusCode, fileUrl)
	}
	return io.Copy(w, resp.Body)
}

func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
	var err error
	switch whence {
	case 0:
	case 1:
		offset += cf.pos
	case 2:
		offset = cf.Manifest.Size - offset
	}
	if offset > cf.Manifest.Size {
		err = ErrInvalidRange
	}
	if cf.pos != offset {
		cf.Close()
	}
	cf.pos = offset
	return cf.pos, err
}

func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
	cm := cf.Manifest
	chunkIndex := -1
	chunkStartOffset := int64(0)
	for i, ci := range cm.Chunks {
		if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size {
			chunkIndex = i
			chunkStartOffset = cf.pos - ci.Offset
			break
		}
	}
	if chunkIndex < 0 {
		return n, ErrInvalidRange
	}
	for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ {
		ci := cm.Chunks[chunkIndex]
		// TODO: should we read data from the local volume server first?
		fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid)
		if lookupError != nil {
			return n, lookupError
		}
		if wn, e := readChunkNeedle(fileUrl, w, chunkStartOffset); e != nil {
			return n, e
		} else {
			n += wn
			cf.pos += wn
		}

		chunkStartOffset = 0
	}
	return n, nil
}

func (cf *ChunkedFileReader) ReadAt(p []byte, off int64) (n int, err error) {
	cf.Seek(off, 0)
	return cf.Read(p)
}

func (cf *ChunkedFileReader) Read(p []byte) (int, error) {
	return cf.getPipeReader().Read(p)
}

func (cf *ChunkedFileReader) Close() (e error) {
	cf.mutex.Lock()
	defer cf.mutex.Unlock()
	return cf.closePipe()
}

func (cf *ChunkedFileReader) closePipe() (e error) {
	if cf.pr != nil {
		if err := cf.pr.Close(); err != nil {
			e = err
		}
	}
	cf.pr = nil
	if cf.pw != nil {
		if err := cf.pw.Close(); err != nil {
			e = err
		}
	}
	cf.pw = nil
	return e
}

func (cf *ChunkedFileReader) getPipeReader() io.Reader {
	cf.mutex.Lock()
	defer cf.mutex.Unlock()
	if cf.pr != nil && cf.pw != nil {
		return cf.pr
	}
	cf.closePipe()
	cf.pr, cf.pw = io.Pipe()
	go func(pw *io.PipeWriter) {
		_, e := cf.WriteTo(pw)
		pw.CloseWithError(e)
	}(cf.pw)
	return cf.pr
}
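For orientation, a minimal sketch (not part of the diff) of what a manifest built from these types marshals to; the file name, fids, and sizes are invented:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	// A made-up manifest for a 6MB file stored as two chunks.
	cm := operation.ChunkManifest{
		Name: "movie.mp4",
		Mime: "video/mp4",
		Size: 6291456,
		Chunks: operation.ChunkList{
			{Fid: "3,01637037d6", Offset: 0, Size: 4194304},
			{Fid: "4,02a32ed123", Offset: 4194304, Size: 2097152},
		},
	}
	buf, _ := cm.Marshal()
	fmt.Println(string(buf))
	// {"name":"movie.mp4","mime":"video/mp4","size":6291456,"chunks":[...]}
}

This is the same JSON that LoadChunkManifest parses back on the volume server, sorting the chunk list by Offset before use.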
@@ -1,4 +1,4 @@
-package storage
+package operation
 
 import (
 	"bytes"
@@ -7,14 +7,17 @@ import (
 	"strings"
 	"sync"
 
+	"net/http"
+
 	"github.com/chrislusf/seaweedfs/go/security"
 	"github.com/chrislusf/seaweedfs/go/util"
 )
 
 type DeleteResult struct {
-	Fid   string `json:"fid"`
-	Size  int    `json:"size"`
-	Error string `json:"error,omitempty"`
+	Fid    string `json:"fid"`
+	Size   int    `json:"size"`
+	Status int    `json:"status"`
+	Error  string `json:"error,omitempty"`
 }
 
 func DeleteFile(master string, fileId string, jwt security.EncodedJwt) error {
@@ -45,7 +48,11 @@ func DeleteFiles(master string, fileIds []string) (*DeleteFilesResult, error) {
 	for _, fileId := range fileIds {
 		vid, _, err := ParseFileId(fileId)
 		if err != nil {
-			ret.Results = append(ret.Results, DeleteResult{Fid: vid, Error: err.Error()})
+			ret.Results = append(ret.Results, DeleteResult{
+				Fid:    vid,
+				Status: http.StatusBadRequest,
+				Error:  err.Error()},
+			)
 			continue
 		}
 		if _, ok := vid_to_fileIds[vid]; !ok {
@@ -76,6 +83,7 @@ func DeleteFiles(master string, fileIds []string) (*DeleteFilesResult, error) {
 	}
 
 	var wg sync.WaitGroup
+
 	for server, fidList := range server_to_fileIds {
 		wg.Add(1)
 		go func(server string, fidList []string) {
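With the new Status field, each per-fid entry in a delete response carries an HTTP-style status code alongside any error text. A small sketch (the fid and error are invented) of one marshalled result:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	// Status now distinguishes "not found" from "bad request" and friends.
	r := operation.DeleteResult{Fid: "3,01637037d6", Status: http.StatusNotFound, Error: "not found"}
	b, _ := json.Marshal(r)
	fmt.Println(string(b)) // {"fid":"3,01637037d6","size":0,"status":404,"error":"not found"}
}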
@@ -9,6 +9,8 @@ import (
 	"strconv"
 	"strings"
 
+	"net/url"
+
 	"github.com/chrislusf/seaweedfs/go/glog"
 	"github.com/chrislusf/seaweedfs/go/security"
 )
@@ -114,25 +116,44 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
 	if closer, ok := fi.Reader.(io.Closer); ok {
 		defer closer.Close()
 	}
+	baseName := path.Base(fi.FileName)
 	if maxMB > 0 && fi.FileSize > int64(maxMB*1024*1024) {
 		chunkSize := int64(maxMB * 1024 * 1024)
 		chunks := fi.FileSize/chunkSize + 1
-		var fids []string
+		cm := ChunkManifest{
+			Name:   baseName,
+			Size:   fi.FileSize,
+			Mime:   fi.MimeType,
+			Chunks: make([]*ChunkInfo, 0, chunks),
+		}
+
 		for i := int64(0); i < chunks; i++ {
 			id, count, e := upload_one_chunk(
-				fi.FileName+"-"+strconv.FormatInt(i+1, 10),
+				baseName+"-"+strconv.FormatInt(i+1, 10),
 				io.LimitReader(fi.Reader, chunkSize),
 				master, fi.Replication, fi.Collection, fi.Ttl,
 				jwt)
 			if e != nil {
+				// delete all uploaded chunks
+				cm.DeleteChunks(master)
 				return 0, e
 			}
-			fids = append(fids, id)
+			cm.Chunks = append(cm.Chunks,
+				&ChunkInfo{
+					Offset: i * chunkSize,
+					Size:   int64(count),
+					Fid:    id,
+				},
+			)
 			retSize += count
 		}
-		err = upload_file_id_list(fileUrl, fi.FileName+"-list", fids, jwt)
+		err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
+		if err != nil {
+			// delete all uploaded chunks
+			cm.DeleteChunks(master)
+		}
 	} else {
-		ret, e := Upload(fileUrl, fi.FileName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt)
+		ret, e := Upload(fileUrl, baseName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt)
 		if e != nil {
 			return 0, e
 		}
@@ -158,10 +179,17 @@ func upload_one_chunk(filename string, reader io.Reader, master,
 	return fid, uploadResult.Size, nil
 }
 
-func upload_file_id_list(fileUrl, filename string, fids []string, jwt security.EncodedJwt) error {
-	var buf bytes.Buffer
-	buf.WriteString(strings.Join(fids, "\n"))
-	glog.V(4).Info("Uploading final list ", filename, " to ", fileUrl, "...")
-	_, e := Upload(fileUrl, filename, &buf, false, "text/plain", jwt)
+func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error {
+	buf, e := manifest.Marshal()
+	if e != nil {
+		return e
+	}
+	bufReader := bytes.NewReader(buf)
+	glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
+	u, _ := url.Parse(fileUrl)
+	q := u.Query()
+	q.Set("cm", "1")
+	u.RawQuery = q.Encode()
+	_, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", jwt)
	return e
}
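A hedged usage sketch of the chunking branch above. FilePart.Upload's signature is visible in the hunk header, but the FilePart field names (Reader, FileName, FileSize, MimeType, Fid, Server) are assumed from how fi.* is used in the diff, and the paths and addresses are made up. With maxMB set, any file larger than maxMB megabytes is cut into maxMB-sized chunks, each uploaded under its own fid, and the manifest is uploaded last with cm=1:

package main

import (
	"log"
	"os"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	f, err := os.Open("/tmp/big.bin") // made-up path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	st, _ := f.Stat()
	fi := operation.FilePart{ // field set assumed from fi.* usage in the diff
		Reader:   f,
		FileName: "/tmp/big.bin",
		FileSize: st.Size(),
		MimeType: "application/octet-stream",
	}
	// Normally weed's submit path assigns a fid/server first and stores them
	// on the FilePart; shown here with invented values.
	fi.Fid = "3,01637037d6"
	fi.Server = "localhost:8080"
	// 64MB chunks; the empty secret assumes security.Secret is a string alias.
	size, err := fi.Upload(64, "localhost:9333", "")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded bytes:", size)
}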
@@ -15,6 +15,7 @@ import (
 	"github.com/chrislusf/seaweedfs/go/glog"
 	"github.com/chrislusf/seaweedfs/go/images"
 	"github.com/chrislusf/seaweedfs/go/util"
+	"github.com/chrislusf/seaweedfs/go/operation"
 )
 
 const (
@@ -52,7 +53,7 @@ func (n *Needle) String() (str string) {
 	return
 }
 
-func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string, isGzipped bool, modifiedTime uint64, ttl *TTL, e error) {
+func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string, isGzipped bool, modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) {
 	form, fe := r.MultipartReader()
 	if fe != nil {
 		glog.V(0).Infoln("MultipartReader [ERROR]", fe)
@@ -117,8 +118,8 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
 	}
 	if part.Header.Get("Content-Encoding") == "gzip" {
 		isGzipped = true
-	} else if IsGzippable(ext, mtype) {
-		if data, e = GzipData(data); e != nil {
+	} else if operation.IsGzippable(ext, mtype) {
+		if data, e = operation.GzipData(data); e != nil {
 			return
 		}
 		isGzipped = true
@@ -132,12 +133,13 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
 	}
 	modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
 	ttl, _ = ReadTTL(r.FormValue("ttl"))
+	isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
 	return
 }
 func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) {
-	fname, mimeType, isGzipped := "", "", false
+	fname, mimeType, isGzipped, isChunkedFile := "", "", false, false
 	n = new(Needle)
-	fname, n.Data, mimeType, isGzipped, n.LastModified, n.Ttl, e = ParseUpload(r)
+	fname, n.Data, mimeType, isGzipped, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r)
 	if e != nil {
 		return
 	}
@@ -160,6 +162,10 @@ func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) {
 		n.SetHasTtl()
 	}
 
+	if isChunkedFile {
+		n.SetChunkManifest()
+	}
+
 	if fixJpgOrientation {
 		loweredName := strings.ToLower(fname)
 		if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") {
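A hedged sketch of what the cm flag looks like on the wire: the manifest is POSTed like any other upload, but with cm=1 in the query string, which ParseUpload reads via r.FormValue("cm") and NewNeedle turns into SetChunkManifest(). The host, fid, and payload below are made up:

package main

import (
	"bytes"
	"log"
	"mime/multipart"
	"net/http"
)

func main() {
	manifestJson := []byte(`{"name":"movie.mp4","size":6291456,"chunks":[]}`) // invented
	body := &bytes.Buffer{}
	mw := multipart.NewWriter(body)
	part, _ := mw.CreateFormFile("file", "movie.mp4")
	part.Write(manifestJson)
	mw.Close()
	// cm=1 marks the stored needle as a chunk manifest.
	resp, err := http.Post("http://localhost:8080/3,01637037d6?cm=1",
		mw.FormDataContentType(), body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}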
@@ -16,6 +16,7 @@ const (
 	FlagHasMime             = 0x04
 	FlagHasLastModifiedDate = 0x08
 	FlagHasTtl              = 0x10
+	FlagChunkManifest       = 0x80
 	LastModifiedBytesLength = 5
 	TtlBytesLength          = 2
 )
@@ -280,3 +281,11 @@ func (n *Needle) HasTtl() bool {
 func (n *Needle) SetHasTtl() {
 	n.Flags = n.Flags | FlagHasTtl
 }
+
+func (n *Needle) IsChunkedManifest() bool {
+	return n.Flags&FlagChunkManifest > 0
+}
+
+func (n *Needle) SetChunkManifest() {
+	n.Flags = n.Flags | FlagChunkManifest
+}
@@ -17,6 +17,7 @@ func (dnll *VolumeLocationList) String() string {
 }
 
 func (dnll *VolumeLocationList) Head() *DataNode {
+	//mark first node as master volume
 	return dnll.list[0]
 }
@@ -9,7 +9,10 @@ import (
 	"net/url"
 	"strings"
 
+	"encoding/json"
+
 	"github.com/chrislusf/seaweedfs/go/security"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 )
 
 var (
@@ -79,10 +82,21 @@ func Delete(url string, jwt security.EncodedJwt) error {
 		return e
 	}
 	defer resp.Body.Close()
-	if _, err := ioutil.ReadAll(resp.Body); err != nil {
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
 		return err
 	}
-	return nil
+	switch resp.StatusCode {
+	case http.StatusNotFound, http.StatusAccepted, http.StatusOK:
+		return nil
+	}
+	m := make(map[string]interface{})
+	if e := json.Unmarshal(body, m); e == nil {
+		if s, ok := m["error"].(string); ok {
+			return errors.New(s)
+		}
+	}
+	return errors.New(string(body))
 }
 
 func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error {
@@ -122,12 +136,11 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e
 	return readFn(r.Body)
 }
 
-func DownloadUrl(fileUrl string) (filename string, content []byte, e error) {
+func DownloadUrl(fileUrl string) (filename string, rc io.ReadCloser, e error) {
 	response, err := client.Get(fileUrl)
 	if err != nil {
 		return "", nil, err
 	}
-	defer response.Body.Close()
 	contentDisposition := response.Header["Content-Disposition"]
 	if len(contentDisposition) > 0 {
 		if strings.HasPrefix(contentDisposition[0], "filename=") {
@@ -135,7 +148,7 @@ func DownloadUrl(fileUrl string) (filename string, content []byte, e error) {
 			filename = strings.Trim(filename, "\"")
 		}
 	}
-	content, e = ioutil.ReadAll(response.Body)
+	rc = response.Body
 	return
 }
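The DownloadUrl change flips the helper from buffer-everything to streaming: it now returns the response body as an io.ReadCloser, so the caller owns reading and closing it. A short sketch of the new contract (the URL and fid are invented):

package main

import (
	"io"
	"log"
	"os"

	"github.com/chrislusf/seaweedfs/go/util"
)

func main() {
	filename, rc, err := util.DownloadUrl("http://localhost:8080/3,01637037d6")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // the body is no longer read or closed for you
	n, err := io.Copy(os.Stdout, rc)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("streamed", n, "bytes of", filename)
}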
@@ -3,9 +3,11 @@ package main
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 
+	"io/ioutil"
+
 	"strings"
 
 	"github.com/chrislusf/seaweedfs/go/operation"
@@ -43,50 +45,76 @@ var cmdDownload = &Command{
 
 func runDownload(cmd *Command, args []string) bool {
 	for _, fid := range args {
-		filename, content, e := fetchFileId(*d.server, fid)
-		if e != nil {
-			fmt.Println("Fetch Error:", e)
-			continue
-		}
-		if filename == "" {
-			filename = fid
-		}
-		if strings.HasSuffix(filename, "-list") {
-			filename = filename[0 : len(filename)-len("-list")]
-			fids := strings.Split(string(content), "\n")
-			f, err := os.OpenFile(path.Join(*d.dir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
-			if err != nil {
-				fmt.Println("File Creation Error:", e)
-				continue
-			}
-			defer f.Close()
-			for _, partId := range fids {
-				var n int
-				_, part, err := fetchFileId(*d.server, partId)
-				if err == nil {
-					n, err = f.Write(part)
-				}
-				if err == nil && n < len(part) {
-					err = io.ErrShortWrite
-				}
-				if err != nil {
-					fmt.Println("File Write Error:", err)
-					break
-				}
-			}
-		} else {
-			ioutil.WriteFile(path.Join(*d.dir, filename), content, os.ModePerm)
+		if e := downloadToFile(*d.server, fid, *d.dir); e != nil {
+			fmt.Println("Download Error: ", fid, e)
 		}
 	}
 	return true
 }
 
-func fetchFileId(server string, fileId string) (filename string, content []byte, e error) {
-	fileUrl, lookupError := operation.LookupFileId(server, fileId)
-	if lookupError != nil {
-		return "", nil, lookupError
-	}
-	filename, content, e = util.DownloadUrl(fileUrl)
-	return
-}
+func downloadToFile(server, fileId, saveDir string) error {
+	fileUrl, lookupError := operation.LookupFileId(server, fileId)
+	if lookupError != nil {
+		return lookupError
+	}
+	filename, rc, err := util.DownloadUrl(fileUrl)
+	if err != nil {
+		return err
+	}
+	defer rc.Close()
+	if filename == "" {
+		filename = fileId
+	}
+	isFileList := false
+	if strings.HasSuffix(filename, "-list") {
+		// old command compatible
+		isFileList = true
+		filename = filename[0 : len(filename)-len("-list")]
+	}
+	f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if isFileList {
+		content, err := ioutil.ReadAll(rc)
+		if err != nil {
+			return err
+		}
+		fids := strings.Split(string(content), "\n")
+		for _, partId := range fids {
+			var n int
+			_, part, err := fetchContent(*d.server, partId)
+			if err == nil {
+				n, err = f.Write(part)
+			}
+			if err == nil && n < len(part) {
+				err = io.ErrShortWrite
+			}
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		if _, err = io.Copy(f, rc); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
+	fileUrl, lookupError := operation.LookupFileId(server, fileId)
+	if lookupError != nil {
+		return "", nil, lookupError
+	}
+	var rc io.ReadCloser
+	if filename, rc, e = util.DownloadUrl(fileUrl); e != nil {
+		return "", nil, e
+	}
+	content, e = ioutil.ReadAll(rc)
+	rc.Close()
+	return
+}
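For back compatibility, runDownload still understands the pre-manifest convention, in which a needle named <name>-list holds one chunk fid per line, while new uploads use the JSON manifest instead. An invented side-by-side example of the two formats:

    big.bin-list (text/plain):  3,01637037d6
                                4,02a32ed123
    big.bin (cm=1, JSON):       {"name":"big.bin","size":6291456,"chunks":[{"fid":"3,01637037d6","offset":0,"size":4194304}, ...]}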
@@ -86,7 +86,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
 	}
 
 	debug("parsing upload file...")
-	fname, data, mimeType, isGzipped, lastModified, _, pe := storage.ParseUpload(r)
+	fname, data, mimeType, isGzipped, lastModified, _, _, pe := storage.ParseUpload(r)
 	if pe != nil {
 		writeJsonError(w, r, http.StatusBadRequest, pe)
 		return
@@ -9,6 +9,10 @@ import (
 	"strings"
 	"time"
 
+	"path"
+
+	"bytes"
+
 	"github.com/chrislusf/seaweedfs/go/glog"
 	"github.com/chrislusf/seaweedfs/go/images"
 	"github.com/chrislusf/seaweedfs/go/operation"
@@ -81,36 +85,32 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 	w.Header().Set("Etag", etag)
+
+	if vs.tryHandleChunkedFile(n, filename, w, r) {
+		return
+	}
+
 	if n.NameSize > 0 && filename == "" {
 		filename = string(n.Name)
-		dotIndex := strings.LastIndex(filename, ".")
-		if dotIndex > 0 {
-			ext = filename[dotIndex:]
+		if ext == "" {
+			ext = path.Ext(filename)
 		}
 	}
 	mtype := ""
 	if ext != "" {
 		mtype = mime.TypeByExtension(ext)
 	}
 	if n.MimeSize > 0 {
 		mt := string(n.Mime)
 		if !strings.HasPrefix(mt, "application/octet-stream") {
 			mtype = mt
 		}
 	}
-	if mtype != "" {
-		w.Header().Set("Content-Type", mtype)
-	}
-	if filename != "" {
-		w.Header().Set("Content-Disposition", "filename=\""+fileNameEscaper.Replace(filename)+"\"")
-	}
 
 	if ext != ".gz" {
 		if n.IsGzipped() {
 			if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
 				w.Header().Set("Content-Encoding", "gzip")
 			} else {
-				if n.Data, err = storage.UnGzipData(n.Data); err != nil {
-					glog.V(0).Infoln("lookup error:", err, r.URL.Path)
+				if n.Data, err = operation.UnGzipData(n.Data); err != nil {
+					glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
 				}
 			}
 		}
@@ -126,38 +126,94 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		n.Data, _, _ = images.Resized(ext, n.Data, width, height)
 	}
 
+	if e := writeResponseContent(filename, mtype, bytes.NewReader(n.Data), w, r); e != nil {
+		glog.V(2).Infoln("response write error:", e)
+	}
+}
+
+func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
+	if !n.IsChunkedManifest() {
+		return false
+	}
+	raw, _ := strconv.ParseBool(r.FormValue("raw"))
+	if raw {
+		return false
+	}
+	processed = true
+
+	chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
+	if e != nil {
+		glog.V(0).Infof("load chunked manifest (%s) error: %s", r.URL.Path, e.Error())
+		return false
+	}
+	if fileName == "" && chunkManifest.Name != "" {
+		fileName = chunkManifest.Name
+	}
+	mType := ""
+	if chunkManifest.Mime != "" {
+		mt := chunkManifest.Mime
+		if !strings.HasPrefix(mt, "application/octet-stream") {
+			mType = mt
+		}
+	}
+
+	w.Header().Set("X-File-Store", "chunked")
+
+	chunkedFileReader := &operation.ChunkedFileReader{
+		Manifest: chunkManifest,
+		Master:   vs.GetMasterNode(),
+	}
+	defer chunkedFileReader.Close()
+	if e := writeResponseContent(fileName, mType, chunkedFileReader, w, r); e != nil {
+		glog.V(2).Infoln("response write error:", e)
+	}
+	return
+}
+
+func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {
+	totalSize, e := rs.Seek(0, 2)
+	if mimeType == "" {
+		if ext := path.Ext(filename); ext != "" {
+			mimeType = mime.TypeByExtension(ext)
+		}
+	}
+	if mimeType != "" {
+		w.Header().Set("Content-Type", mimeType)
+	}
+	if filename != "" {
+		w.Header().Set("Content-Disposition", `filename="`+fileNameEscaper.Replace(filename)+`"`)
+	}
 	w.Header().Set("Accept-Ranges", "bytes")
 	if r.Method == "HEAD" {
-		w.Header().Set("Content-Length", strconv.Itoa(len(n.Data)))
-		return
+		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
+		return nil
 	}
 	rangeReq := r.Header.Get("Range")
 	if rangeReq == "" {
-		w.Header().Set("Content-Length", strconv.Itoa(len(n.Data)))
-		if _, e = w.Write(n.Data); e != nil {
-			glog.V(0).Infoln("response write error:", e)
+		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
+		if _, e = rs.Seek(0, 0); e != nil {
+			return e
 		}
-		return
+		_, e = io.Copy(w, rs)
+		return e
 	}
 
 	//the rest is dealing with partial content request
 	//mostly copy from src/pkg/net/http/fs.go
-	size := int64(len(n.Data))
-	ranges, err := parseRange(rangeReq, size)
+	ranges, err := parseRange(rangeReq, totalSize)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
-		return
+		return nil
 	}
-	if sumRangesSize(ranges) > size {
+	if sumRangesSize(ranges) > totalSize {
 		// The total number of bytes in all the ranges
 		// is larger than the size of the file by
 		// itself, so this is probably an attack, or a
 		// dumb client. Ignore the range request.
 		ranges = nil
-		return
+		return nil
 	}
 	if len(ranges) == 0 {
-		return
+		return nil
 	}
 	if len(ranges) == 1 {
 		// RFC 2616, Section 14.16:
@@ -173,21 +229,23 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		// be sent using the multipart/byteranges media type."
 		ra := ranges[0]
 		w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
-		w.Header().Set("Content-Range", ra.contentRange(size))
+		w.Header().Set("Content-Range", ra.contentRange(totalSize))
 		w.WriteHeader(http.StatusPartialContent)
-		if _, e = w.Write(n.Data[ra.start : ra.start+ra.length]); e != nil {
-			glog.V(0).Infoln("response write error:", e)
+		if _, e = rs.Seek(ra.start, 0); e != nil {
+			return e
 		}
-		return
+
+		_, e = io.CopyN(w, rs, ra.length)
+		return e
 	}
-	// process mulitple ranges
+	// process multiple ranges
 	for _, ra := range ranges {
-		if ra.start > size {
+		if ra.start > totalSize {
 			http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
-			return
+			return nil
 		}
 	}
-	sendSize := rangesMIMESize(ranges, mtype, size)
+	sendSize := rangesMIMESize(ranges, mimeType, totalSize)
 	pr, pw := io.Pipe()
 	mw := multipart.NewWriter(pw)
 	w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
@@ -195,13 +253,17 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
 	go func() {
 		for _, ra := range ranges {
-			part, err := mw.CreatePart(ra.mimeHeader(mtype, size))
-			if err != nil {
-				pw.CloseWithError(err)
+			part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
+			if e != nil {
+				pw.CloseWithError(e)
 				return
 			}
-			if _, err = part.Write(n.Data[ra.start : ra.start+ra.length]); err != nil {
-				pw.CloseWithError(err)
+			if _, e = rs.Seek(ra.start, 0); e != nil {
+				pw.CloseWithError(e)
+				return
+			}
+			if _, e = io.CopyN(part, rs, ra.length); e != nil {
+				pw.CloseWithError(e)
 				return
 			}
 		}
@@ -212,6 +274,6 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
 	}
 	w.WriteHeader(http.StatusPartialContent)
-	io.CopyN(w, sendContent, sendSize)
-
+	_, e = io.CopyN(w, sendContent, sendSize)
+	return e
 }
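A hedged sketch of the new read path from a client's perspective: range requests on a chunked file are satisfied by seeking the ChunkedFileReader and copying only ra.length bytes, and the new raw parameter bypasses manifest resolution so the manifest JSON itself can be fetched. Host and fid are invented:

package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	req, _ := http.NewRequest("GET", "http://localhost:8080/3,01637037d6", nil)
	req.Header.Set("Range", "bytes=0-1023") // first KB, possibly spanning chunks
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // expect 206 Partial Content

	// The manifest needle itself is still reachable without resolution:
	// GET http://localhost:8080/3,01637037d6?raw=1
}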
@@ -53,9 +53,8 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 	glog.V(2).Infoln("deleting", n)
 
 	cookie := n.Cookie
-	count, ok := vs.store.ReadVolumeNeedle(volumeId, n)
 
-	if ok != nil {
+	if _, ok := vs.store.ReadVolumeNeedle(volumeId, n); ok != nil {
 		m := make(map[string]uint32)
 		m["size"] = 0
 		writeJsonQuiet(w, r, http.StatusNotFound, m)
@@ -64,14 +63,31 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 
 	if n.Cookie != cookie {
 		glog.V(0).Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
 		writeJsonError(w, r, http.StatusBadRequest, errors.New("File Random Cookie does not match."))
 		return
 	}
 
+	count := int64(n.Size)
+
+	if n.IsChunkedManifest() {
+		chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
+		if e != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, errors.New("Load chunks manifest error: "+e.Error()))
+			return
+		}
+		// make sure all chunks are deleted before deleting the manifest
+		if e := chunkManifest.DeleteChunks(vs.GetMasterNode()); e != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, errors.New("Delete chunks error: "+e.Error()))
+			return
+		}
+		count = chunkManifest.Size
+	}
+
 	ret := topology.ReplicatedDelete(vs.GetMasterNode(), vs.store, volumeId, n, r)
 
 	if ret != 0 {
-		m := make(map[string]uint32)
-		m["size"] = uint32(count)
+		m := make(map[string]int64)
+		m["size"] = count
 		writeJsonQuiet(w, r, http.StatusAccepted, m)
 	} else {
 		writeJsonError(w, r, http.StatusInternalServerError, errors.New("Deletion Failed."))
@@ -86,7 +102,10 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
 	for _, fid := range r.Form["fid"] {
 		vid, id_cookie, err := operation.ParseFileId(fid)
 		if err != nil {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusBadRequest,
+				Error:  err.Error()})
 			continue
 		}
 		n := new(storage.Needle)
@@ -95,18 +114,45 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
 		glog.V(4).Infoln("batch deleting", n)
 		cookie := n.Cookie
 		if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusNotFound,
+				Error:  err.Error(),
+			})
 			continue
 		}
 
+		if n.IsChunkedManifest() {
+			// don't allow deleting a chunk manifest in batch-delete mode
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusNotAcceptable,
+				Error:  "ChunkManifest: not allow.",
+			})
+			continue
+		}
+
 		if n.Cookie != cookie {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: "File Random Cookie does not match."})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusBadRequest,
+				Error:  "File Random Cookie does not match.",
+			})
 			glog.V(0).Infoln("deleting", fid, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
 			return
 		}
 		if size, err := vs.store.Delete(volumeId, n); err != nil {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusInternalServerError,
+				Error:  err.Error()},
+			)
 		} else {
-			ret = append(ret, operation.DeleteResult{Fid: fid, Size: int(size)})
+			ret = append(ret, operation.DeleteResult{
+				Fid:    fid,
+				Status: http.StatusAccepted,
+				Size:   int(size)},
+			)
 		}
 	}
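To round out the delete semantics added above: deleting a manifest fid through the single-delete path first deletes every chunk listed in the manifest and then the manifest needle itself, while the batch path refuses manifests with 406 Not Acceptable. A short sketch using DeleteFile, whose signature appears in this diff; the master address and fid are invented, and the empty jwt matches how DeleteChunks itself calls it:

package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	if err := operation.DeleteFile("localhost:9333", "3,01637037d6", ""); err != nil {
		log.Fatal(err)
	}
}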