package operation

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sort"
	"sync"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var (
	// ErrRangeRequestsNotSupported is returned when the remote server does not
	// allow range requests (the Accept-Ranges header was not set).
	ErrRangeRequestsNotSupported = errors.New("Range requests are not supported by the remote server")

	// ErrInvalidRange is returned by Read when trying to read past the end of the file.
	ErrInvalidRange = errors.New("Invalid range")
)
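
// ChunkInfo describes a single chunk of a chunked file: the chunk's file id
// and its offset and size within the whole file.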
type ChunkInfo struct {
	Fid    string `json:"fid"`
	Offset int64  `json:"offset"`
	Size   int64  `json:"size"`
}
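
// ChunkList is a list of chunks that sorts by ascending offset.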
type ChunkList []*ChunkInfo
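
// ChunkManifest describes a chunked file: its name, mime type, total size,
// and the list of chunks that make it up.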
type ChunkManifest struct {
	Name   string    `json:"name,omitempty"`
	Mime   string    `json:"mime,omitempty"`
	Size   int64     `json:"size,omitempty"`
	Chunks ChunkList `json:"chunks,omitempty"`
}

// ChunkedFileReader is a seekable reader over a chunked file: it looks up each
// chunk's location via the master and streams the chunks in order.
type ChunkedFileReader struct {
	Manifest *ChunkManifest
	Master   string
	pos      int64
	pr       *io.PipeReader
	pw       *io.PipeWriter
	mutex    sync.Mutex
}
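
// A minimal usage sketch (illustrative only; manifestBytes, dst and the
// master address "localhost:9333" are stand-ins, not part of this package):
//
//	cm, _ := LoadChunkManifest(manifestBytes, false)
//	cf := &ChunkedFileReader{Manifest: cm, Master: "localhost:9333"}
//	defer cf.Close()
//	_, err := io.Copy(dst, cf) // streams every chunk in order via WriteTo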

func (s ChunkList) Len() int           { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
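
// LoadChunkManifest parses a chunk manifest from JSON, un-gzipping the buffer
// first when isGzipped is set, and returns the chunks sorted by offset.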
func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
	if isGzipped {
		var err error
		if buffer, err = UnGzipData(buffer); err != nil {
			return nil, err
		}
	}
	cm := ChunkManifest{}
	if e := json.Unmarshal(buffer, &cm); e != nil {
		return nil, e
	}
	sort.Sort(cm.Chunks)
	return &cm, nil
}
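
// Marshal encodes the chunk manifest as JSON.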
func (cm *ChunkManifest) Marshal() ([]byte, error) {
	return json.Marshal(cm)
}
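
// DeleteChunks deletes every chunk referenced by the manifest from the cluster,
// returning an error if any single deletion fails.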
func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error {
	var fileIds []string
	for _, ci := range cm.Chunks {
		fileIds = append(fileIds, ci.Fid)
	}
	results, err := DeleteFiles(master, grpcDialOption, fileIds)
	if err != nil {
		glog.V(0).Infof("delete %+v: %v", fileIds, err)
		return fmt.Errorf("chunk delete: %v", err)
	}
	for _, result := range results {
		if result.Error != "" {
			glog.V(0).Infof("delete file %+v: %v", result.FileId, result.Error)
			return fmt.Errorf("chunk delete %v: %v", result.FileId, result.Error)
		}
	}

	return nil
}
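
// readChunkNeedle streams one chunk from fileUrl into w, starting at offset
// within the chunk. It uses an HTTP Range request when offset > 0 and reports
// ErrRangeRequestsNotSupported if the server ignores the range.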
func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, e error) {
	req, err := http.NewRequest("GET", fileUrl, nil)
	if err != nil {
		return written, err
	}
	if offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}

	resp, err := util.Do(req)
	if err != nil {
		return written, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusRequestedRangeNotSatisfiable:
		return written, ErrInvalidRange
	case http.StatusOK:
		if offset > 0 {
			return written, ErrRangeRequestsNotSupported
		}
	case http.StatusPartialContent:
		break
	default:
		return written, fmt.Errorf("Read chunk needle error: [%d] %s", resp.StatusCode, fileUrl)
	}
	return io.Copy(w, resp.Body)
}
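
// Seek implements io.Seeker over the logical chunked file. It only records the
// new position; the pipe is rebuilt lazily on the next Read.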
func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
	var err error
	switch whence {
	case io.SeekStart:
	case io.SeekCurrent:
		offset += cf.pos
	case io.SeekEnd:
		// offset is relative to the end of the file
		offset = cf.Manifest.Size + offset
	}
	if offset > cf.Manifest.Size {
		err = ErrInvalidRange
	}
	if cf.pos != offset {
		cf.Close()
	}
	cf.pos = offset
	return cf.pos, err
}
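
// WriteTo streams the file from the current position to the end into w by
// locating each chunk via the master and copying it with readChunkNeedle.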
func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
	cm := cf.Manifest
	chunkIndex := -1
	chunkStartOffset := int64(0)
	// find the chunk that contains the current position
	for i, ci := range cm.Chunks {
		if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size {
			chunkIndex = i
			chunkStartOffset = cf.pos - ci.Offset
			break
		}
	}
	if chunkIndex < 0 {
		return n, ErrInvalidRange
	}
	for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ {
		ci := cm.Chunks[chunkIndex]
		// TODO: check whether the data could be read from a local volume server first
		fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid)
		if lookupError != nil {
			return n, lookupError
		}
		wn, e := readChunkNeedle(fileUrl, w, chunkStartOffset)
		if e != nil {
			return n, e
		}
		n += wn
		cf.pos += wn

		chunkStartOffset = 0
	}
	return n, nil
}
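
// ReadAt reads from the given absolute offset by seeking there first.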
func (cf *ChunkedFileReader) ReadAt(p []byte, off int64) (n int, err error) {
	cf.Seek(off, io.SeekStart)
	return cf.Read(p)
}
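
// Read reads from the pipe that is fed by a background WriteTo goroutine.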
func (cf *ChunkedFileReader) Read(p []byte) (int, error) {
	return cf.getPipeReader().Read(p)
}
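
// Close tears down the pipe so that the next Read starts a fresh stream.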
func (cf *ChunkedFileReader) Close() (e error) {
	cf.mutex.Lock()
	defer cf.mutex.Unlock()
	return cf.closePipe()
}
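
// closePipe closes both ends of the pipe; callers must hold cf.mutex.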
func (cf *ChunkedFileReader) closePipe() (e error) {
	if cf.pr != nil {
		if err := cf.pr.Close(); err != nil {
			e = err
		}
	}
	cf.pr = nil
	if cf.pw != nil {
		if err := cf.pw.Close(); err != nil {
			e = err
		}
	}
	cf.pw = nil
	return e
}
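
// getPipeReader returns the read end of the pipe, lazily creating the pipe and
// starting a goroutine that streams the file from cf.pos into the write end.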
func (cf *ChunkedFileReader) getPipeReader() io.Reader {
	cf.mutex.Lock()
	defer cf.mutex.Unlock()
	if cf.pr != nil && cf.pw != nil {
		return cf.pr
	}
	cf.closePipe()
	cf.pr, cf.pw = io.Pipe()
	go func(pw *io.PipeWriter) {
		_, e := cf.WriteTo(pw)
		pw.CloseWithError(e)
	}(cf.pw)
	return cf.pr
}