mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)
remove unused code
This commit is contained in:
  parent cc66e25cd2
  commit cd47528a75
@@ -1,10 +1,11 @@
 package filer2
 
 import (
-    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "sort"
     "log"
     "math"
+
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
 func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
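The hunk above only shows TotalSize's signature as context. As a hedged illustration of what a helper with that shape typically computes (not the repository's actual body), the total size of a chunked file is usually the furthest byte any chunk covers, i.e. the maximum of Offset+Size; fileChunk below is a local stand-in for filer_pb.FileChunk:

package main

import "fmt"

// fileChunk is a stand-in for filer_pb.FileChunk with only the fields used here.
type fileChunk struct {
    Offset int64
    Size   uint64
}

// totalSize reports the furthest byte covered by any chunk.
func totalSize(chunks []fileChunk) (size uint64) {
    for _, c := range chunks {
        if end := uint64(c.Offset) + c.Size; end > size {
            size = end
        }
    }
    return
}

func main() {
    fmt.Println(totalSize([]fileChunk{{0, 100}, {50, 100}})) // 150
}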
@@ -21,125 +22,10 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
     return
 }
 
-func mergeToVisibleIntervals(visibles []*visibleInterval, chunk *filer_pb.FileChunk) (merged []*visibleInterval) {
-    if len(visibles) == 0 {
-        return []*visibleInterval{newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId, chunk.Mtime)}
-    }
-
-    log.Printf("merge chunk %+v => %d", chunk, len(visibles))
-    for _, v := range visibles {
-        log.Printf("=> %+v", v)
-    }
-
-    var nonOverlappingStop int
-
-    // find merge candidates
-    var mergeCandidates []int
-    for t := len(visibles) - 1; t >= 0; t-- {
-        if visibles[t].stop > chunk.Offset {
-            mergeCandidates = append(mergeCandidates, t)
-        } else {
-            nonOverlappingStop = t
-            break
-        }
-    }
-    log.Printf("merged candidates: %+v, starting from %d", mergeCandidates, nonOverlappingStop)
-
-    if len(mergeCandidates) == 0 {
-        merged = append(visibles, newVisibleInterval(
-            chunk.Offset,
-            chunk.Offset+int64(chunk.Size),
-            chunk.FileId,
-            chunk.Mtime,
-        ))
-        return
-    }
-
-    // reverse merge candidates
-    i, j := 0, len(mergeCandidates)-1
-    for i < j {
-        mergeCandidates[i], mergeCandidates[j] = mergeCandidates[j], mergeCandidates[i]
-        i++
-        j--
-    }
-    log.Printf("reversed merged candidates: %+v", mergeCandidates)
-
-    // add chunk into a possibly connected intervals
-    var overlappingIntevals []*visibleInterval
-    for i = 0; i < len(mergeCandidates); i++ {
-        interval := visibles[mergeCandidates[i]]
-        if interval.modifiedTime >= chunk.Mtime {
-            log.Printf("overlappingIntevals add existing interval: [%d,%d)", interval.start, interval.stop)
-            overlappingIntevals = append(overlappingIntevals, interval)
-        } else {
-            start := max(interval.start, chunk.Offset)
-            stop := min(interval.stop, chunk.Offset+int64(chunk.Size))
-            if interval.start <= chunk.Offset {
-                if interval.start < start {
-                    log.Printf("overlappingIntevals add 1: [%d,%d)", interval.start, start)
-                    overlappingIntevals = append(overlappingIntevals, newVisibleInterval(
-                        interval.start,
-                        start,
-                        interval.fileId,
-                        interval.modifiedTime,
-                    ))
-                }
-                log.Printf("overlappingIntevals add 2: [%d,%d)", start, stop)
-                overlappingIntevals = append(overlappingIntevals, newVisibleInterval(
-                    start,
-                    stop,
-                    chunk.FileId,
-                    chunk.Mtime,
-                ))
-                if interval.stop < stop {
-                    log.Printf("overlappingIntevals add 3: [%d,%d)", interval.stop, stop)
-                    overlappingIntevals = append(overlappingIntevals, newVisibleInterval(
-                        interval.stop,
-                        stop,
-                        interval.fileId,
-                        interval.modifiedTime,
-                    ))
-                }
-            }
-        }
-    }
-    logPrintf("overlappingIntevals", overlappingIntevals)
-
-    // merge connected intervals
-    merged = visibles[:nonOverlappingStop]
-    var lastInterval *visibleInterval
-    var prevIntervalIndex int
-    for i, interval := range overlappingIntevals {
-        if i == 0 {
-            prevIntervalIndex = i
-            continue
-        }
-        if overlappingIntevals[prevIntervalIndex].fileId != interval.fileId {
-            merged = append(merged, newVisibleInterval(
-                overlappingIntevals[prevIntervalIndex].start,
-                interval.start,
-                overlappingIntevals[prevIntervalIndex].fileId,
-                overlappingIntevals[prevIntervalIndex].modifiedTime,
-            ))
-            prevIntervalIndex = i
-        }
-    }
-
-    if lastInterval != nil {
-        merged = append(merged, newVisibleInterval(
-            overlappingIntevals[prevIntervalIndex].start,
-            lastInterval.start,
-            overlappingIntevals[prevIntervalIndex].fileId,
-            overlappingIntevals[prevIntervalIndex].modifiedTime,
-        ))
-    }
-
-    logPrintf("merged", merged)
-
-    return
-}
-
 func logPrintf(name string, visibles []*visibleInterval) {
 
+    return
+
     log.Printf("%s len %d", name, len(visibles))
     for _, v := range visibles {
         log.Printf("%s: => %+v", name, v)
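The deleted mergeToVisibleIntervals folded chunks one at a time into a list of visible intervals, with the newer Mtime winning wherever chunks overlap; the surviving nonOverlappingVisibleIntervals keeps that contract. Below is a minimal standalone sketch of the contract itself, not of either implementation, using brute-force slicing at chunk boundaries (chunk and interval are local stand-ins for the repository's types):

package main

import (
    "fmt"
    "sort"
)

type chunk struct {
    fileId              string
    offset, size, mtime int64
}

type interval struct {
    start, stop int64 // half-open [start, stop)
    fileId      string
}

// visibleIntervals cuts the byte range at every chunk boundary and lets the
// newest chunk covering each slice win.
func visibleIntervals(chunks []chunk) (out []interval) {
    var cuts []int64
    for _, c := range chunks {
        cuts = append(cuts, c.offset, c.offset+c.size)
    }
    sort.Slice(cuts, func(i, j int) bool { return cuts[i] < cuts[j] })

    for i := 0; i+1 < len(cuts); i++ {
        start, stop := cuts[i], cuts[i+1]
        if start == stop {
            continue // duplicate boundary
        }
        winner, winnerMtime := "", int64(-1)
        for _, c := range chunks {
            if c.offset <= start && stop <= c.offset+c.size && c.mtime > winnerMtime {
                winner, winnerMtime = c.fileId, c.mtime
            }
        }
        if winner != "" {
            out = append(out, interval{start, stop, winner})
        }
    }
    return
}

func main() {
    // Chunk "b" was written later and overwrites the second half of "a".
    fmt.Println(visibleIntervals([]chunk{
        {fileId: "a", offset: 0, size: 100, mtime: 1},
        {fileId: "b", offset: 50, size: 100, mtime: 2},
    }))
    // Output: [{0 50 a} {50 100 b} {100 150 b}]
}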
@@ -166,7 +52,7 @@ func nonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []*visibleInterval) {
     var minStopInterval, upToDateInterval *visibleInterval
     watermarkStart := chunks[0].Offset
     for _, chunk := range chunks {
-        log.Printf("checking chunk: [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
+        // log.Printf("checking chunk: [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
         logPrintf("parallelIntervals", parallelIntervals)
         for len(parallelIntervals) > 0 && watermarkStart < chunk.Offset {
             logPrintf("parallelIntervals loop 1", parallelIntervals)
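This hunk comments out a log.Printf, and the previous one silences logPrintf with an early return. A hedged alternative, not what the commit does, is to gate the tracing behind a package-level flag so it can be switched back on without editing each call site; debugIntervals below is hypothetical and does not exist in the repository:

package main

import "log"

// debugIntervals is a hypothetical switch; the real code simply returns early.
var debugIntervals = false

func logIntervals(name string, intervals []string) {
    if !debugIntervals {
        return
    }
    log.Printf("%s len %d", name, len(intervals))
    for _, v := range intervals {
        log.Printf("%s: => %+v", name, v)
    }
}

func main() {
    logIntervals("parallelIntervals", []string{"[0,100)"}) // silent by default
    debugIntervals = true
    logIntervals("parallelIntervals", []string{"[0,100)"}) // prints now
}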
@@ -291,25 +177,6 @@ func findMinStopInterval(intervals []*visibleInterval) (minStopInterval, upToDateInterval *visibleInterval) {
     return
 }
 
-func nonOverlappingVisibleIntervals0(chunks []*filer_pb.FileChunk) (visibles []*visibleInterval) {
-
-    sort.Slice(chunks, func(i, j int) bool {
-        if chunks[i].Offset < chunks[j].Offset {
-            return true
-        }
-        if chunks[i].Offset == chunks[j].Offset {
-            return chunks[i].Mtime < chunks[j].Mtime
-        }
-        return false
-    })
-
-    for _, c := range chunks {
-        visibles = mergeToVisibleIntervals(visibles, c)
-    }
-
-    return
-}
-
 // find non-overlapping visible intervals
 // visible interval map to one file chunk
 
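The deleted nonOverlappingVisibleIntervals0 sorted chunks by Offset, breaking ties by Mtime, before folding them into intervals. Here is that ordering written as a single sort.Slice comparator, in a self-contained form (fileChunk is a local stand-in for filer_pb.FileChunk):

package main

import (
    "fmt"
    "sort"
)

// fileChunk carries only the two fields the ordering uses.
type fileChunk struct {
    Offset int64
    Mtime  int64
}

func main() {
    chunks := []fileChunk{
        {Offset: 100, Mtime: 3},
        {Offset: 0, Mtime: 2},
        {Offset: 0, Mtime: 1},
    }

    // Primary key: Offset ascending; ties broken by Mtime ascending,
    // so an older overlapping write sorts before a newer one.
    sort.Slice(chunks, func(i, j int) bool {
        if chunks[i].Offset != chunks[j].Offset {
            return chunks[i].Offset < chunks[j].Offset
        }
        return chunks[i].Mtime < chunks[j].Mtime
    })

    fmt.Println(chunks) // [{0 1} {0 2} {100 3}]
}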
@ -324,28 +191,6 @@ func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64) *v
|
||||||
return &visibleInterval{start: start, stop: stop, fileId: fileId, modifiedTime: modifiedTime}
|
return &visibleInterval{start: start, stop: stop, fileId: fileId, modifiedTime: modifiedTime}
|
||||||
}
|
}
|
||||||
|
|
||||||
type stackOfChunkIds struct {
|
|
||||||
ids []int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stackOfChunkIds) isEmpty() bool {
|
|
||||||
return len(s.ids) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stackOfChunkIds) pop() int {
|
|
||||||
t := s.ids[len(s.ids)-1]
|
|
||||||
s.ids = s.ids[:len(s.ids)-1]
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stackOfChunkIds) push(x int) {
|
|
||||||
s.ids = append(s.ids, x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stackOfChunkIds) peek() int {
|
|
||||||
return s.ids[len(s.ids)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
func min(x, y int64) int64 {
|
func min(x, y int64) int64 {
|
||||||
if x <= y {
|
if x <= y {
|
||||||
return x
|
return x
|
||||||
|
|
|
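The commit also drops stackOfChunkIds, a small slice-backed stack with no remaining callers. For reference, the same helper as a standalone sketch (names mirror the deleted type, which no longer exists in the repository):

package main

import "fmt"

// stackOfChunkIds is a slice-backed stack of ints, as in the deleted code.
type stackOfChunkIds struct {
    ids []int
}

func (s *stackOfChunkIds) isEmpty() bool { return len(s.ids) == 0 }

func (s *stackOfChunkIds) push(x int) { s.ids = append(s.ids, x) }

func (s *stackOfChunkIds) peek() int { return s.ids[len(s.ids)-1] }

func (s *stackOfChunkIds) pop() int {
    t := s.ids[len(s.ids)-1]
    s.ids = s.ids[:len(s.ids)-1]
    return t
}

func main() {
    var s stackOfChunkIds
    s.push(1)
    s.push(2)
    fmt.Println(s.peek(), s.pop(), s.isEmpty()) // 2 2 false
}

The remaining hunk appears to belong to the package's test file: it regroups the imports used by TestIntervalMerging the same way as the first hunk.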
@@ -2,8 +2,9 @@ package filer2
 
 import (
     "testing"
-    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "log"
+
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
 func TestIntervalMerging(t *testing.T) {