2020-09-01 07:21:19 +00:00
|
|
|
package filer
|
2020-07-20 00:59:43 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"math"
|
2021-09-08 02:29:42 +00:00
|
|
|
"net/url"
|
|
|
|
"strings"
|
2022-03-02 21:50:46 +00:00
|
|
|
"sync"
|
2020-10-09 06:31:26 +00:00
|
|
|
"time"
|
2020-07-20 00:59:43 +00:00
|
|
|
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/wdclient"
|
2022-06-19 08:54:04 +00:00
|
|
|
|
2022-08-17 19:05:07 +00:00
|
|
|
"google.golang.org/protobuf/proto"
|
2020-07-20 00:59:43 +00:00
|
|
|
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/util"
|
2020-07-20 00:59:43 +00:00
|
|
|
)
|
|
|
|
|
2020-07-20 10:34:06 +00:00
|
|
|
const (
	// ManifestBatch is the number of data chunks merged into a single
	// chunk manifest by MaybeManifestize.
	ManifestBatch = 10000
)
|
|
|
|
|
2022-03-02 21:50:46 +00:00
|
|
|
// bytesBufferPool recycles scratch buffers used to hold serialized chunk
// manifests while they are fetched and decoded (see ResolveOneChunkManifest).
var bytesBufferPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}
|
|
|
|
|
2020-07-20 00:59:43 +00:00
|
|
|
func HasChunkManifest(chunks []*filer_pb.FileChunk) bool {
|
|
|
|
for _, chunk := range chunks {
|
|
|
|
if chunk.IsChunkManifest {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2020-08-24 01:30:11 +00:00
|
|
|
func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonManifestChunks []*filer_pb.FileChunk) {
|
|
|
|
for _, c := range chunks {
|
2020-08-30 09:07:14 +00:00
|
|
|
if c.IsChunkManifest {
|
2020-08-24 01:30:11 +00:00
|
|
|
manifestChunks = append(manifestChunks, c)
|
|
|
|
} else {
|
|
|
|
nonManifestChunks = append(nonManifestChunks, c)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-07-20 06:07:22 +00:00
|
|
|
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset, stopOffset int64) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
2020-07-20 00:59:43 +00:00
|
|
|
// TODO maybe parallel this
|
|
|
|
for _, chunk := range chunks {
|
2021-07-20 06:07:22 +00:00
|
|
|
|
|
|
|
if max(chunk.Offset, startOffset) >= min(chunk.Offset+int64(chunk.Size), stopOffset) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-07-20 00:59:43 +00:00
|
|
|
if !chunk.IsChunkManifest {
|
|
|
|
dataChunks = append(dataChunks, chunk)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-08-30 09:07:14 +00:00
|
|
|
resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk)
|
2020-07-20 00:59:43 +00:00
|
|
|
if err != nil {
|
2022-06-19 08:54:04 +00:00
|
|
|
return dataChunks, nil, err
|
2020-07-20 00:59:43 +00:00
|
|
|
}
|
2020-08-30 09:07:14 +00:00
|
|
|
|
2020-07-20 00:59:43 +00:00
|
|
|
manifestChunks = append(manifestChunks, chunk)
|
|
|
|
// recursive
|
2022-05-23 08:16:10 +00:00
|
|
|
subDataChunks, subManifestChunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks, startOffset, stopOffset)
|
2020-07-20 00:59:43 +00:00
|
|
|
if subErr != nil {
|
2022-06-19 08:54:04 +00:00
|
|
|
return dataChunks, nil, subErr
|
2020-07-20 00:59:43 +00:00
|
|
|
}
|
2022-05-23 08:14:56 +00:00
|
|
|
dataChunks = append(dataChunks, subDataChunks...)
|
|
|
|
manifestChunks = append(manifestChunks, subManifestChunks...)
|
2020-07-20 00:59:43 +00:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-01-06 12:21:34 +00:00
|
|
|
func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
|
2020-08-30 09:07:14 +00:00
|
|
|
if !chunk.IsChunkManifest {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsChunkManifest
|
2022-03-02 21:50:46 +00:00
|
|
|
bytesBuffer := bytesBufferPool.Get().(*bytes.Buffer)
|
2022-03-06 13:06:04 +00:00
|
|
|
bytesBuffer.Reset()
|
2022-03-02 21:50:46 +00:00
|
|
|
defer bytesBufferPool.Put(bytesBuffer)
|
|
|
|
err := fetchWholeChunk(bytesBuffer, lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed)
|
2020-08-30 09:07:14 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("fail to read manifest %s: %v", chunk.GetFileIdString(), err)
|
|
|
|
}
|
|
|
|
m := &filer_pb.FileChunkManifest{}
|
2022-03-02 21:50:46 +00:00
|
|
|
if err := proto.Unmarshal(bytesBuffer.Bytes(), m); err != nil {
|
2020-08-30 09:07:14 +00:00
|
|
|
return nil, fmt.Errorf("fail to unmarshal manifest %s: %v", chunk.GetFileIdString(), err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// recursive
|
|
|
|
filer_pb.AfterEntryDeserialization(m.Chunks)
|
|
|
|
return m.Chunks, nil
|
|
|
|
}
|
|
|
|
|
2020-07-20 10:34:06 +00:00
|
|
|
// TODO fetch from cache for weed mount?
|
2022-03-02 21:50:46 +00:00
|
|
|
func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error {
|
2020-10-08 05:49:04 +00:00
|
|
|
urlStrings, err := lookupFileIdFn(fileId)
|
2020-07-20 00:59:43 +00:00
|
|
|
if err != nil {
|
2020-07-20 10:34:06 +00:00
|
|
|
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
|
2022-03-02 21:50:46 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, cipherKey, isGzipped, true, 0, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2020-07-20 00:59:43 +00:00
|
|
|
}
|
2022-03-02 21:50:46 +00:00
|
|
|
return nil
|
2020-10-09 06:19:42 +00:00
|
|
|
}
|
2020-10-08 05:49:04 +00:00
|
|
|
|
2022-03-13 09:38:52 +00:00
|
|
|
func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool, offset int64) (int, error) {
|
|
|
|
urlStrings, err := lookupFileIdFn(fileId)
|
|
|
|
if err != nil {
|
|
|
|
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
return retriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
|
|
|
|
}
|
|
|
|
|
2022-02-26 10:16:47 +00:00
|
|
|
// retriedFetchChunkData copies chunk data into buffer, trying each replica
// URL in turn and, while the failure is retriable, sleeping with a growing
// back-off (1.5x per round, capped by util.RetryWaitTime) before the next
// round. It returns the number of bytes copied into buffer and the last error.
func retriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64) (n int, err error) {

	var shouldRetry bool

	for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
		for _, urlString := range urlStrings {
			// restart the copy for each attempt; a partial read from a
			// failed attempt is simply overwritten
			n = 0
			if strings.Contains(urlString, "%") {
				urlString = url.PathEscape(urlString)
			}
			// readDeleted=true: still serve chunks already marked deleted
			// on the volume server
			shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, len(buffer), func(data []byte) {
				if n < len(buffer) {
					x := copy(buffer[n:], data)
					n += x
				}
			})
			// a non-retriable outcome (success or permanent failure) ends
			// the attempts for all replicas
			if !shouldRetry {
				break
			}
			if err != nil {
				glog.V(0).Infof("read %s failed, err: %v", urlString, err)
			} else {
				break
			}
		}
		if err != nil && shouldRetry {
			glog.V(0).Infof("retry reading in %v", waitTime)
			time.Sleep(waitTime)
		} else {
			break
		}
	}

	return n, err

}
|
|
|
|
|
2021-08-13 18:00:11 +00:00
|
|
|
func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
|
|
|
|
|
|
|
|
var shouldRetry bool
|
2021-08-13 18:30:38 +00:00
|
|
|
var totalWritten int
|
2021-08-13 18:00:11 +00:00
|
|
|
|
|
|
|
for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
|
|
|
|
for _, urlString := range urlStrings {
|
2021-08-13 18:30:38 +00:00
|
|
|
var localProcesed int
|
2021-08-13 18:00:11 +00:00
|
|
|
shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
|
2021-08-13 18:30:38 +00:00
|
|
|
if totalWritten > localProcesed {
|
|
|
|
toBeSkipped := totalWritten - localProcesed
|
|
|
|
if len(data) <= toBeSkipped {
|
|
|
|
localProcesed += len(data)
|
|
|
|
return // skip if already processed
|
|
|
|
}
|
2021-08-13 18:31:43 +00:00
|
|
|
data = data[toBeSkipped:]
|
2021-08-13 18:30:38 +00:00
|
|
|
localProcesed += toBeSkipped
|
|
|
|
}
|
2021-08-13 18:00:11 +00:00
|
|
|
writer.Write(data)
|
2021-08-13 18:30:38 +00:00
|
|
|
localProcesed += len(data)
|
|
|
|
totalWritten += len(data)
|
2021-08-13 18:00:11 +00:00
|
|
|
})
|
|
|
|
if !shouldRetry {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
glog.V(0).Infof("read %s failed, err: %v", urlString, err)
|
|
|
|
} else {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2021-08-13 18:13:30 +00:00
|
|
|
if err != nil && shouldRetry {
|
2021-08-13 18:00:11 +00:00
|
|
|
glog.V(0).Infof("retry reading in %v", waitTime)
|
|
|
|
time.Sleep(waitTime)
|
|
|
|
} else {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-07-20 10:34:06 +00:00
|
|
|
// MaybeManifestize folds every full run of ManifestBatch data chunks in
// inputChunks into a single manifest chunk (stored via saveFunc); chunks
// that do not fill a complete batch, and pre-existing manifest chunks,
// pass through unchanged.
func MaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk) (chunks []*filer_pb.FileChunk, err error) {
	return doMaybeManifestize(saveFunc, inputChunks, ManifestBatch, mergeIntoManifest)
}
|
|
|
|
|
|
|
|
func doMaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk, mergeFactor int, mergefn func(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error)) (chunks []*filer_pb.FileChunk, err error) {
|
|
|
|
|
|
|
|
var dataChunks []*filer_pb.FileChunk
|
|
|
|
for _, chunk := range inputChunks {
|
|
|
|
if !chunk.IsChunkManifest {
|
|
|
|
dataChunks = append(dataChunks, chunk)
|
|
|
|
} else {
|
|
|
|
chunks = append(chunks, chunk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
remaining := len(dataChunks)
|
2020-07-20 10:34:06 +00:00
|
|
|
for i := 0; i+mergeFactor <= len(dataChunks); i += mergeFactor {
|
|
|
|
chunk, err := mergefn(saveFunc, dataChunks[i:i+mergeFactor])
|
2020-07-20 00:59:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return dataChunks, err
|
|
|
|
}
|
|
|
|
chunks = append(chunks, chunk)
|
2020-07-20 10:34:06 +00:00
|
|
|
remaining -= mergeFactor
|
2020-07-20 00:59:43 +00:00
|
|
|
}
|
|
|
|
// remaining
|
|
|
|
for i := len(dataChunks) - remaining; i < len(dataChunks); i++ {
|
|
|
|
chunks = append(chunks, dataChunks[i])
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error) {
|
|
|
|
|
2020-07-21 05:01:39 +00:00
|
|
|
filer_pb.BeforeEntrySerialization(dataChunks)
|
|
|
|
|
2020-07-20 00:59:43 +00:00
|
|
|
// create and serialize the manifest
|
|
|
|
data, serErr := proto.Marshal(&filer_pb.FileChunkManifest{
|
|
|
|
Chunks: dataChunks,
|
|
|
|
})
|
|
|
|
if serErr != nil {
|
|
|
|
return nil, fmt.Errorf("serializing manifest: %v", serErr)
|
|
|
|
}
|
|
|
|
|
|
|
|
minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64)
|
2020-07-20 10:34:06 +00:00
|
|
|
for _, chunk := range dataChunks {
|
2020-07-20 00:59:43 +00:00
|
|
|
if minOffset > int64(chunk.Offset) {
|
|
|
|
minOffset = chunk.Offset
|
|
|
|
}
|
|
|
|
if maxOffset < int64(chunk.Size)+chunk.Offset {
|
|
|
|
maxOffset = int64(chunk.Size) + chunk.Offset
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-21 01:14:57 +00:00
|
|
|
manifestChunk, err = saveFunc(bytes.NewReader(data), "", 0)
|
2020-07-20 00:59:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
manifestChunk.IsChunkManifest = true
|
|
|
|
manifestChunk.Offset = minOffset
|
|
|
|
manifestChunk.Size = uint64(maxOffset - minOffset)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-08-21 01:14:57 +00:00
|
|
|
// SaveDataAsChunkFunctionType persists the bytes read from reader as a new
// chunk (with the given name and file offset) and returns its FileChunk record.
type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, err error)
|