FilePart.Upload: use base name instead of full path
fix chunked file download error
parent 2c0a7fe75e
commit d0e2475ece
@@ -22,9 +22,9 @@ var (
 )
 
 type ChunkInfo struct {
-	Fid    string `json:"fid,omitempty"`
-	Offset int64  `json:"offset,omitempty"`
-	Size   int64  `json:"size,omitempty"`
+	Fid    string `json:"fid"`
+	Offset int64  `json:"offset"`
+	Size   int64  `json:"size"`
 }
 
 type ChunkList []*ChunkInfo
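Dropping omitempty matters because the first chunk of every file sits at offset 0, and omitempty suppresses zero values when the manifest is marshalled. A standalone sketch of the difference (the fid value is only illustrative, not real data):

package main

import (
	"encoding/json"
	"fmt"
)

// Old tags: zero-valued fields are dropped from the JSON output.
type chunkInfoOmitEmpty struct {
	Fid    string `json:"fid,omitempty"`
	Offset int64  `json:"offset,omitempty"`
	Size   int64  `json:"size,omitempty"`
}

// New tags: every field is always serialized.
type chunkInfo struct {
	Fid    string `json:"fid"`
	Offset int64  `json:"offset"`
	Size   int64  `json:"size"`
}

func main() {
	// The first chunk of a file starts at offset 0.
	a, _ := json.Marshal(chunkInfoOmitEmpty{Fid: "3,01637037d6", Offset: 0, Size: 1024})
	b, _ := json.Marshal(chunkInfo{Fid: "3,01637037d6", Offset: 0, Size: 1024})
	fmt.Println(string(a)) // {"fid":"3,01637037d6","size":1024}  <- offset missing
	fmt.Println(string(b)) // {"fid":"3,01637037d6","offset":0,"size":1024}
}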
@@ -52,7 +52,7 @@ func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
 
 func LoadChunkedManifest(buffer []byte) (*ChunkManifest, error) {
 	cm := ChunkManifest{}
-	if e := json.Unmarshal(buffer, cm); e != nil {
+	if e := json.Unmarshal(buffer, &cm); e != nil {
 		return nil, e
 	}
 	sort.Sort(cm.Chunks)
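encoding/json can only decode into a pointer, so the old call handed json.Unmarshal a copy of cm and always got an InvalidUnmarshalError back; the manifest was never populated. A minimal standalone reproduction (the struct and values here are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

type manifest struct {
	Name string `json:"name"`
}

func main() {
	buf := []byte(`{"name":"movie.mp4"}`)

	var m manifest
	// Passing the value: Unmarshal cannot write into it and reports
	// "json: Unmarshal(non-pointer main.manifest)".
	fmt.Println(json.Unmarshal(buf, m))

	// Passing the address: the struct is populated as expected.
	fmt.Println(json.Unmarshal(buf, &m), m.Name)
}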
@@ -116,11 +116,12 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
 	if closer, ok := fi.Reader.(io.Closer); ok {
 		defer closer.Close()
 	}
+	baseName := path.Base(fi.FileName)
 	if maxMB > 0 && fi.FileSize > int64(maxMB*1024*1024) {
 		chunkSize := int64(maxMB * 1024 * 1024)
 		chunks := fi.FileSize/chunkSize + 1
 		cm := ChunkManifest{
-			Name:   fi.FileName,
+			Name:   baseName,
 			Size:   fi.FileSize,
 			Mime:   fi.MimeType,
 			Chunks: make([]*ChunkInfo, 0, chunks),
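fi.FileName can carry the full path the client uploaded from; path.Base keeps only the final element, and that is what gets recorded as the manifest name (and reused below for the chunk names and the non-chunked upload). A quick standalone illustration:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical client-side path; only the last element should be stored.
	fileName := "/home/alice/videos/movie.mp4"
	baseName := path.Base(fileName)

	fmt.Println(baseName)       // movie.mp4
	fmt.Println(path.Base("x")) // a bare name is returned unchanged: x
}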
@@ -128,7 +129,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
 
 		for i := int64(0); i < chunks; i++ {
 			id, count, e := upload_one_chunk(
-				fi.FileName+"-"+strconv.FormatInt(i+1, 10),
+				baseName+"-"+strconv.FormatInt(i+1, 10),
 				io.LimitReader(fi.Reader, chunkSize),
 				master, fi.Replication, fi.Collection, fi.Ttl,
 				jwt)
@@ -152,7 +153,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
 			cm.DeleteChunks(master)
 		}
 	} else {
-		ret, e := Upload(fileUrl, fi.FileName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt)
+		ret, e := Upload(fileUrl, baseName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt)
 		if e != nil {
 			return 0, e
 		}
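Taken together, a file submitted as /home/alice/videos/movie.mp4 (a hypothetical path) is now stored under movie.mp4, and its chunks under movie.mp4-1, movie.mp4-2, and so on; the count and numbering follow the arithmetic in Upload. A standalone sketch of that naming, assuming a 300 MB file and maxMB = 64:

package main

import (
	"fmt"
	"path"
	"strconv"
)

func main() {
	baseName := path.Base("/home/alice/videos/movie.mp4")
	fileSize := int64(300 * 1024 * 1024)  // 300 MB, illustrative
	chunkSize := int64(64 * 1024 * 1024)  // maxMB = 64
	chunks := fileSize/chunkSize + 1

	for i := int64(0); i < chunks; i++ {
		// Mirrors the naming used in Upload: baseName + "-" + strconv.FormatInt(i+1, 10)
		fmt.Println(baseName + "-" + strconv.FormatInt(i+1, 10))
	}
	// Prints movie.mp4-1 through movie.mp4-5.
}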
@@ -225,18 +225,22 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string,
 	if !n.IsChunkedManifest() {
 		return false
 	}
-	processed = true
 	raw, _ := strconv.ParseBool(r.FormValue("raw"))
 	if raw {
-		w.Header().Set("Content-Type", "application/json")
-		w.Header().Set("Content-Length", strconv.Itoa(len(n.Data)))
-		if _, e := w.Write(n.Data); e != nil {
-			glog.V(0).Infoln("response write error:", e)
-		}
-		return true
+		return false
 	}
+	processed = true
+	if n.IsGzipped(){
+		var err error
+		if n.Data, err = storage.UnGzipData(n.Data); err != nil {
+			glog.V(0).Infoln("ungzip data error:", err, r.URL.Path)
+			return false
+		}
+	}
+
 	chunkManifest, e := operation.LoadChunkedManifest(n.Data)
 	if e != nil {
 		glog.V(0).Infoln("load chunked manifest error:", e)
 		return false
 	}
 	ext := ""
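On the download side, the manifest needle may itself be stored gzipped, so tryHandleChunkedFile now inflates n.Data before handing it to operation.LoadChunkedManifest, and it only marks the request as processed once the raw check has passed; with ?raw=true it returns false so the request falls through to the regular, non-chunked serving path. storage.UnGzipData is the project's own helper; the standalone sketch below only illustrates the kind of gzip inflation it presumably performs:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

// unGzipData is a stand-in for a helper like storage.UnGzipData (assumed
// behavior): it inflates a gzip-compressed byte slice back into plain bytes.
func unGzipData(input []byte) ([]byte, error) {
	r, err := gzip.NewReader(bytes.NewReader(input))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}

func main() {
	// Compress a tiny fake manifest, then round-trip it.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write([]byte(`{"name":"movie.mp4","size":42}`))
	w.Close()

	data, err := unGzipData(buf.Bytes())
	fmt.Println(string(data), err) // {"name":"movie.mp4","size":42} <nil>
}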