2020-09-01 07:21:19 +00:00
|
|
|
package filer
|
2018-05-13 07:11:26 +00:00
|
|
|
|
2018-05-21 00:06:09 +00:00
|
|
|
import (
|
2020-10-05 09:43:32 +00:00
|
|
|
"bytes"
|
2018-09-09 23:26:11 +00:00
|
|
|
"fmt"
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/wdclient"
|
2022-07-08 05:26:03 +00:00
|
|
|
"math"
|
2022-06-19 08:54:04 +00:00
|
|
|
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/util"
|
2018-05-21 00:06:09 +00:00
|
|
|
)
|
2018-05-16 07:08:44 +00:00
|
|
|
|
2018-05-21 00:06:09 +00:00
|
|
|
func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
|
2018-05-13 07:11:26 +00:00
|
|
|
for _, c := range chunks {
|
|
|
|
t := uint64(c.Offset + int64(c.Size))
|
|
|
|
if size < t {
|
|
|
|
size = t
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-08-15 16:32:47 +00:00
|
|
|
func FileSize(entry *filer_pb.Entry) (size uint64) {
|
2022-04-05 17:49:17 +00:00
|
|
|
if entry == nil || entry.Attributes == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2022-03-21 09:09:54 +00:00
|
|
|
fileSize := entry.Attributes.FileSize
|
|
|
|
if entry.RemoteEntry != nil {
|
|
|
|
if entry.RemoteEntry.RemoteMtime > entry.Attributes.Mtime {
|
|
|
|
fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize))
|
|
|
|
}
|
|
|
|
}
|
2022-11-15 14:33:36 +00:00
|
|
|
return maxUint64(TotalSize(entry.GetChunks()), fileSize)
|
2020-08-15 16:32:47 +00:00
|
|
|
}
|
|
|
|
|
2020-04-08 15:12:00 +00:00
|
|
|
func ETag(entry *filer_pb.Entry) (etag string) {
|
|
|
|
if entry.Attributes == nil || entry.Attributes.Md5 == nil {
|
2022-11-15 14:33:36 +00:00
|
|
|
return ETagChunks(entry.GetChunks())
|
2020-04-08 15:12:00 +00:00
|
|
|
}
|
|
|
|
return fmt.Sprintf("%x", entry.Attributes.Md5)
|
|
|
|
}
|
|
|
|
|
|
|
|
func ETagEntry(entry *Entry) (etag string) {
|
2023-01-03 07:20:45 +00:00
|
|
|
if entry.IsInRemoteOnly() {
|
2022-11-30 15:43:30 +00:00
|
|
|
return entry.Remote.RemoteETag
|
|
|
|
}
|
2020-04-08 15:12:00 +00:00
|
|
|
if entry.Attr.Md5 == nil {
|
2022-11-15 14:33:36 +00:00
|
|
|
return ETagChunks(entry.GetChunks())
|
2020-04-08 15:12:00 +00:00
|
|
|
}
|
|
|
|
return fmt.Sprintf("%x", entry.Attr.Md5)
|
|
|
|
}
|
|
|
|
|
|
|
|
func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
|
2018-09-09 23:25:43 +00:00
|
|
|
if len(chunks) == 1 {
|
2021-04-28 17:28:05 +00:00
|
|
|
return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
|
2018-09-09 23:25:43 +00:00
|
|
|
}
|
2022-07-08 05:27:24 +00:00
|
|
|
var md5Digests [][]byte
|
2018-09-09 23:25:43 +00:00
|
|
|
for _, c := range chunks {
|
2022-07-08 05:27:24 +00:00
|
|
|
md5Digests = append(md5Digests, util.Base64Md5ToBytes(c.ETag))
|
2018-09-09 23:25:43 +00:00
|
|
|
}
|
2022-07-08 05:27:24 +00:00
|
|
|
return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5Digests, nil)), len(chunks))
|
2018-09-09 23:25:43 +00:00
|
|
|
}
|
|
|
|
|
2021-01-06 12:21:34 +00:00
|
|
|
func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
|
2018-05-21 07:00:28 +00:00
|
|
|
|
2021-07-20 06:07:22 +00:00
|
|
|
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)
|
2018-05-21 07:00:28 +00:00
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
compacted, garbage = SeparateGarbageChunks(visibles, chunks)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func SeparateGarbageChunks(visibles *IntervalList[*VisibleInterval], chunks []*filer_pb.FileChunk) (compacted []*filer_pb.FileChunk, garbage []*filer_pb.FileChunk) {
|
2018-05-21 07:00:28 +00:00
|
|
|
fileIds := make(map[string]bool)
|
2023-01-03 07:20:45 +00:00
|
|
|
for x := visibles.Front(); x != nil; x = x.Next {
|
|
|
|
interval := x.Value
|
2018-05-21 07:00:28 +00:00
|
|
|
fileIds[interval.fileId] = true
|
|
|
|
}
|
|
|
|
for _, chunk := range chunks {
|
2019-06-23 03:04:56 +00:00
|
|
|
if _, found := fileIds[chunk.GetFileIdString()]; found {
|
2018-05-21 07:00:28 +00:00
|
|
|
compacted = append(compacted, chunk)
|
|
|
|
} else {
|
|
|
|
garbage = append(garbage, chunk)
|
|
|
|
}
|
|
|
|
}
|
2023-01-03 07:20:45 +00:00
|
|
|
return compacted, garbage
|
2018-05-13 07:11:26 +00:00
|
|
|
}
|
2018-05-21 00:06:09 +00:00
|
|
|
|
2023-01-10 08:46:46 +00:00
|
|
|
func FindGarbageChunks(visibles *IntervalList[*VisibleInterval], start int64, stop int64) (garbageFileIds map[string]struct{}) {
|
|
|
|
garbageFileIds = make(map[string]struct{})
|
2023-01-06 09:03:29 +00:00
|
|
|
for x := visibles.Front(); x != nil; x = x.Next {
|
|
|
|
interval := x.Value
|
|
|
|
offset := interval.start - interval.offsetInChunk
|
|
|
|
if start <= offset && offset+int64(interval.chunkSize) <= stop {
|
2023-01-10 08:46:46 +00:00
|
|
|
garbageFileIds[interval.fileId] = struct{}{}
|
2023-01-06 09:03:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-01-06 12:21:34 +00:00
|
|
|
func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
|
2020-07-20 00:59:43 +00:00
|
|
|
|
2021-07-20 06:07:22 +00:00
|
|
|
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64)
|
2020-07-20 00:59:43 +00:00
|
|
|
if aErr != nil {
|
|
|
|
return nil, aErr
|
|
|
|
}
|
2021-07-20 06:07:22 +00:00
|
|
|
bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64)
|
2020-07-20 00:59:43 +00:00
|
|
|
if bErr != nil {
|
|
|
|
return nil, bErr
|
|
|
|
}
|
|
|
|
|
|
|
|
delta = append(delta, DoMinusChunks(aData, bData)...)
|
|
|
|
delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
|
2018-05-22 10:26:38 +00:00
|
|
|
|
|
|
|
fileIds := make(map[string]bool)
|
2019-06-23 03:04:56 +00:00
|
|
|
for _, interval := range bs {
|
|
|
|
fileIds[interval.GetFileIdString()] = true
|
2018-05-22 10:26:38 +00:00
|
|
|
}
|
2019-06-23 03:04:56 +00:00
|
|
|
for _, chunk := range as {
|
|
|
|
if _, found := fileIds[chunk.GetFileIdString()]; !found {
|
|
|
|
delta = append(delta, chunk)
|
2018-05-22 10:26:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-02-07 11:46:28 +00:00
|
|
|
func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
|
|
|
|
|
|
|
|
fileIds := make(map[string]bool)
|
|
|
|
for _, interval := range bs {
|
|
|
|
fileIds[interval.GetFileIdString()] = true
|
2022-08-01 07:06:18 +00:00
|
|
|
fileIds[interval.GetSourceFileId()] = true
|
2022-02-07 11:46:28 +00:00
|
|
|
}
|
|
|
|
for _, chunk := range as {
|
2022-08-01 07:06:18 +00:00
|
|
|
_, sourceFileIdFound := fileIds[chunk.GetSourceFileId()]
|
|
|
|
_, fileIdFound := fileIds[chunk.GetFileId()]
|
|
|
|
if !sourceFileIdFound && !fileIdFound {
|
2022-02-07 11:46:28 +00:00
|
|
|
delta = append(delta, chunk)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-05-24 08:22:37 +00:00
|
|
|
// ChunkView is a read-only view over part of a single file chunk: the slice
// of the chunk that is visible at a given range of the logical file.
type ChunkView struct {
	FileId        string // file id of the chunk backing this view
	OffsetInChunk int64  // offset within the chunk
	ViewSize      uint64 // number of bytes this view covers
	ViewOffset    int64  // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
	ChunkSize     uint64 // total size of the underlying chunk
	CipherKey     []byte // cipher key of the chunk content (from FileChunk.CipherKey); empty when not set
	IsGzipped     bool   // whether the chunk content is compressed (from FileChunk.IsCompressed)
	ModifiedTsNs  int64  // chunk modification timestamp, in nanoseconds
}
|
|
|
|
|
|
|
|
func (cv *ChunkView) SetStartStop(start, stop int64) {
|
|
|
|
cv.OffsetInChunk += start - cv.ViewOffset
|
|
|
|
cv.ViewOffset = start
|
|
|
|
cv.ViewSize = uint64(stop - start)
|
|
|
|
}
|
|
|
|
func (cv *ChunkView) Clone() IntervalValue {
|
|
|
|
return &ChunkView{
|
|
|
|
FileId: cv.FileId,
|
|
|
|
OffsetInChunk: cv.OffsetInChunk,
|
|
|
|
ViewSize: cv.ViewSize,
|
|
|
|
ViewOffset: cv.ViewOffset,
|
|
|
|
ChunkSize: cv.ChunkSize,
|
|
|
|
CipherKey: cv.CipherKey,
|
|
|
|
IsGzipped: cv.IsGzipped,
|
|
|
|
ModifiedTsNs: cv.ModifiedTsNs,
|
|
|
|
}
|
2018-05-24 08:22:37 +00:00
|
|
|
}
|
|
|
|
|
2020-04-14 04:58:10 +00:00
|
|
|
func (cv *ChunkView) IsFullChunk() bool {
|
2023-01-03 07:20:45 +00:00
|
|
|
return cv.ViewSize == cv.ChunkSize
|
2020-04-14 04:58:10 +00:00
|
|
|
}
|
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) {
|
2018-05-24 05:28:54 +00:00
|
|
|
|
2021-07-20 06:07:22 +00:00
|
|
|
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)
|
2018-12-30 08:51:44 +00:00
|
|
|
|
|
|
|
return ViewFromVisibleIntervals(visibles, offset, size)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
func ViewFromVisibleIntervals(visibles *IntervalList[*VisibleInterval], offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) {
|
2018-05-24 05:28:54 +00:00
|
|
|
|
2020-03-22 08:37:46 +00:00
|
|
|
stop := offset + size
|
2020-03-27 11:35:31 +00:00
|
|
|
if size == math.MaxInt64 {
|
|
|
|
stop = math.MaxInt64
|
|
|
|
}
|
|
|
|
if stop < offset {
|
|
|
|
stop = math.MaxInt64
|
|
|
|
}
|
2018-05-24 05:28:54 +00:00
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
chunkViews = NewIntervalList[*ChunkView]()
|
|
|
|
for x := visibles.Front(); x != nil; x = x.Next {
|
|
|
|
chunk := x.Value
|
2020-03-09 04:39:33 +00:00
|
|
|
|
2020-08-16 07:49:26 +00:00
|
|
|
chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
|
|
|
|
|
|
|
|
if chunkStart < chunkStop {
|
2023-01-03 07:20:45 +00:00
|
|
|
chunkView := &ChunkView{
|
|
|
|
FileId: chunk.fileId,
|
|
|
|
OffsetInChunk: chunkStart - chunk.start + chunk.offsetInChunk,
|
|
|
|
ViewSize: uint64(chunkStop - chunkStart),
|
|
|
|
ViewOffset: chunkStart,
|
|
|
|
ChunkSize: chunk.chunkSize,
|
|
|
|
CipherKey: chunk.cipherKey,
|
|
|
|
IsGzipped: chunk.isGzipped,
|
|
|
|
ModifiedTsNs: chunk.modifiedTsNs,
|
|
|
|
}
|
|
|
|
chunkViews.AppendInterval(&Interval[*ChunkView]{
|
|
|
|
StartOffset: chunkStart,
|
|
|
|
StopOffset: chunkStop,
|
|
|
|
TsNs: chunk.modifiedTsNs,
|
|
|
|
Value: chunkView,
|
|
|
|
Prev: nil,
|
|
|
|
Next: nil,
|
2018-05-24 05:28:54 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
return chunkViews
|
2018-05-24 05:28:54 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
func MergeIntoVisibles(visibles *IntervalList[*VisibleInterval], start int64, stop int64, chunk *filer_pb.FileChunk) {
|
|
|
|
|
|
|
|
newV := &VisibleInterval{
|
|
|
|
start: start,
|
|
|
|
stop: stop,
|
|
|
|
fileId: chunk.GetFileIdString(),
|
|
|
|
modifiedTsNs: chunk.ModifiedTsNs,
|
|
|
|
offsetInChunk: start - chunk.Offset, // the starting position in the chunk
|
|
|
|
chunkSize: chunk.Size, // size of the chunk
|
|
|
|
cipherKey: chunk.CipherKey,
|
|
|
|
isGzipped: chunk.IsCompressed,
|
2021-10-16 23:09:33 +00:00
|
|
|
}
|
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
visibles.InsertInterval(start, stop, chunk.ModifiedTsNs, newV)
|
|
|
|
}
|
2021-10-16 23:09:33 +00:00
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
func MergeIntoChunkViews(chunkViews *IntervalList[*ChunkView], start int64, stop int64, chunk *filer_pb.FileChunk) {
|
|
|
|
|
|
|
|
chunkView := &ChunkView{
|
|
|
|
FileId: chunk.GetFileIdString(),
|
|
|
|
OffsetInChunk: start - chunk.Offset,
|
|
|
|
ViewSize: uint64(stop - start),
|
|
|
|
ViewOffset: start,
|
|
|
|
ChunkSize: chunk.Size,
|
|
|
|
CipherKey: chunk.CipherKey,
|
|
|
|
IsGzipped: chunk.IsCompressed,
|
|
|
|
ModifiedTsNs: chunk.ModifiedTsNs,
|
2021-10-16 23:09:33 +00:00
|
|
|
}
|
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
chunkViews.InsertInterval(start, stop, chunk.ModifiedTsNs, chunkView)
|
2021-10-16 23:09:33 +00:00
|
|
|
}
|
|
|
|
|
2020-07-20 00:59:43 +00:00
|
|
|
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
|
|
|
|
// If the file chunk content is a chunk manifest
|
2023-01-03 07:20:45 +00:00
|
|
|
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval], err error) {
|
2020-07-20 00:59:43 +00:00
|
|
|
|
2021-07-20 06:07:22 +00:00
|
|
|
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
|
2022-06-19 08:54:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2018-05-21 00:06:09 +00:00
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
visibles2 := readResolvedChunks(chunks, 0, math.MaxInt64)
|
2021-10-16 23:09:33 +00:00
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
return visibles2, err
|
2021-10-16 23:03:16 +00:00
|
|
|
}
|
|
|
|
|
2018-05-21 00:06:09 +00:00
|
|
|
// find non-overlapping visible intervals
// each visible interval maps to exactly one file chunk

// VisibleInterval describes the part of a single chunk that is visible
// (not shadowed by newer writes) at [start, stop) of the logical file.
type VisibleInterval struct {
	start         int64  // first visible byte position in the file (inclusive)
	stop          int64  // end of the visible range in the file (exclusive)
	modifiedTsNs  int64  // chunk modification timestamp, in nanoseconds
	fileId        string // file id of the backing chunk
	offsetInChunk int64  // where the visible range begins inside the chunk
	chunkSize     uint64 // full size of the backing chunk
	cipherKey     []byte // cipher key of the chunk content; empty when not set
	isGzipped     bool   // whether the chunk content is compressed
}
|
|
|
|
|
2023-01-03 07:20:45 +00:00
|
|
|
func (v *VisibleInterval) SetStartStop(start, stop int64) {
|
|
|
|
v.offsetInChunk += start - v.start
|
|
|
|
v.start, v.stop = start, stop
|
|
|
|
}
|
|
|
|
func (v *VisibleInterval) Clone() IntervalValue {
|
|
|
|
return &VisibleInterval{
|
|
|
|
start: v.start,
|
|
|
|
stop: v.stop,
|
|
|
|
modifiedTsNs: v.modifiedTsNs,
|
|
|
|
fileId: v.fileId,
|
|
|
|
offsetInChunk: v.offsetInChunk,
|
|
|
|
chunkSize: v.chunkSize,
|
|
|
|
cipherKey: v.cipherKey,
|
|
|
|
isGzipped: v.isGzipped,
|
2021-10-16 23:09:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-21 00:06:09 +00:00
|
|
|
// min returns the smaller of two int64 values.
func min(x, y int64) int64 {
	if y < x {
		return y
	}
	return x
}
|
2020-08-16 07:49:26 +00:00
|
|
|
// max returns the larger of two int64 values.
func max(x, y int64) int64 {
	if y > x {
		return y
	}
	return x
}
|