package page_writer

import (
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/util/mem"
	"sync"
	"sync/atomic"
	"time"
)

// LogicChunkIndex is the index of a fixed-size logical chunk within a file.
type LogicChunkIndex int

// UploadPipeline buffers writes in memory chunks and uploads them asynchronously.
// Data is first written into writable MemChunks; once a chunk is fully written
// (or flushed), it is sealed and handed to a limited pool of uploaders, while
// readers can still access it until the upload completes.
type UploadPipeline struct {
	filepath             util.FullPath
	ChunkSize            int64
	writableChunks       map[LogicChunkIndex]*MemChunk
	writableChunksLock   sync.Mutex
	sealedChunks         map[LogicChunkIndex]*SealedChunk
	sealedChunksLock     sync.Mutex
	uploaders            *util.LimitedConcurrentExecutor
	uploaderCount        int32
	uploaderCountCond    *sync.Cond
	saveToStorageFn      SaveToStorageFunc
	activeReadChunks     map[LogicChunkIndex]int
	activeReadChunksLock sync.Mutex
}

// SealedChunk is a chunk that no longer accepts writes; its buffer is freed
// once all uploading and reading references are released.
type SealedChunk struct {
	chunk            *MemChunk
	referenceCounter int // track uploading or reading processes
}

// FreeReference decrements the reference count and releases the chunk buffer
// when no uploader or reader holds it any more.
func (sc *SealedChunk) FreeReference(messageOnFree string) {
	sc.referenceCounter--
	if sc.referenceCounter == 0 {
		glog.V(4).Infof("Free sealed chunk: %s", messageOnFree)
		mem.Free(sc.chunk.buf)
	}
}

// NewUploadPipeline creates an UploadPipeline that splits a file into chunks of
// chunkSize bytes and uses the shared writers executor to upload sealed chunks.
func NewUploadPipeline(filepath util.FullPath, writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc) *UploadPipeline {
	return &UploadPipeline{
		ChunkSize:         chunkSize,
		writableChunks:    make(map[LogicChunkIndex]*MemChunk),
		sealedChunks:      make(map[LogicChunkIndex]*SealedChunk),
		uploaders:         writers,
		uploaderCountCond: sync.NewCond(&sync.Mutex{}),
		saveToStorageFn:   saveToStorageFn,
		filepath:          filepath,
		activeReadChunks:  make(map[LogicChunkIndex]int),
	}
}
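
// Illustrative usage sketch (the executor, chunk size, and SaveToStorageFunc
// wiring shown here are assumptions of the caller, not fixed by this package):
//
//	pipeline := NewUploadPipeline(fullPath, uploadExecutor, 2*1024*1024, saveFn)
//	n := pipeline.SaveDataAt(data, writeOffset)       // buffer the write in memory
//	stop := pipeline.MaybeReadDataAt(buf, readOffset) // read back buffered data
//	pipeline.FlushAll()                               // seal and upload everything pending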

// SaveDataAt copies p into the in-memory chunk covering offset off and returns
// the number of bytes buffered. A chunk that becomes fully written is sealed
// and scheduled for upload.
func (cw *UploadPipeline) SaveDataAt(p []byte, off int64) (n int) {
	cw.writableChunksLock.Lock()
	defer cw.writableChunksLock.Unlock()

	logicChunkIndex := LogicChunkIndex(off / cw.ChunkSize)
	offsetRemainder := off % cw.ChunkSize

	memChunk, found := cw.writableChunks[logicChunkIndex]
	if !found {
		memChunk = &MemChunk{
			buf:   mem.Allocate(int(cw.ChunkSize)),
			usage: newChunkWrittenIntervalList(),
		}
		cw.writableChunks[logicChunkIndex] = memChunk
	}
	n = copy(memChunk.buf[offsetRemainder:], p)
	memChunk.usage.MarkWritten(offsetRemainder, offsetRemainder+int64(n))
	cw.maybeMoveToSealed(memChunk, logicChunkIndex)

	return
}

// MaybeReadDataAt fills p with any buffered data covering offset off and returns
// the largest file offset (exclusive) that was read, or 0 if nothing in memory
// overlaps the request.
func (cw *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
	logicChunkIndex := LogicChunkIndex(off / cw.ChunkSize)

	// read from sealed chunks first
	cw.sealedChunksLock.Lock()
	sealedChunk, found := cw.sealedChunks[logicChunkIndex]
	if found {
		sealedChunk.referenceCounter++
	}
	cw.sealedChunksLock.Unlock()
	if found {
		maxStop = readMemChunk(sealedChunk.chunk, p, off, logicChunkIndex, cw.ChunkSize)
		glog.V(4).Infof("%s read sealed memchunk [%d,%d)", cw.filepath, off, maxStop)
		sealedChunk.FreeReference(fmt.Sprintf("%s finish reading chunk %d", cw.filepath, logicChunkIndex))
	}

	// read from the writable chunk last, since it may contain newer data
	cw.writableChunksLock.Lock()
	defer cw.writableChunksLock.Unlock()
	writableChunk, found := cw.writableChunks[logicChunkIndex]
	if !found {
		return
	}
	writableMaxStop := readMemChunk(writableChunk, p, off, logicChunkIndex, cw.ChunkSize)
	glog.V(4).Infof("%s read writable memchunk [%d,%d)", cw.filepath, off, writableMaxStop)
	maxStop = max(maxStop, writableMaxStop)

	return
}

// FlushAll seals every writable chunk, schedules it for upload, and blocks
// until all in-flight uploads have completed.
func (cw *UploadPipeline) FlushAll() {
	cw.writableChunksLock.Lock()
	defer cw.writableChunksLock.Unlock()

	for logicChunkIndex, memChunk := range cw.writableChunks {
		cw.moveToSealed(memChunk, logicChunkIndex)
	}

	cw.waitForCurrentWritersToComplete()
}

// LockForRead marks the chunks overlapping [startOffset, stopOffset) as being
// read, so their sealed buffers are not released while the read is in progress.
func (cw *UploadPipeline) LockForRead(startOffset, stopOffset int64) {
	startLogicChunkIndex := LogicChunkIndex(startOffset / cw.ChunkSize)
	stopLogicChunkIndex := LogicChunkIndex(stopOffset / cw.ChunkSize)
	if stopOffset%cw.ChunkSize > 0 {
		stopLogicChunkIndex += 1
	}
	cw.activeReadChunksLock.Lock()
	defer cw.activeReadChunksLock.Unlock()
	for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ {
		if count, found := cw.activeReadChunks[i]; found {
			cw.activeReadChunks[i] = count + 1
		} else {
			cw.activeReadChunks[i] = 1
		}
	}
}

// UnlockForRead releases the read marks taken by a matching LockForRead call.
func (cw *UploadPipeline) UnlockForRead(startOffset, stopOffset int64) {
	startLogicChunkIndex := LogicChunkIndex(startOffset / cw.ChunkSize)
	stopLogicChunkIndex := LogicChunkIndex(stopOffset / cw.ChunkSize)
	if stopOffset%cw.ChunkSize > 0 {
		stopLogicChunkIndex += 1
	}
	cw.activeReadChunksLock.Lock()
	defer cw.activeReadChunksLock.Unlock()
	for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ {
		if count, found := cw.activeReadChunks[i]; found {
			if count == 1 {
				delete(cw.activeReadChunks, i)
			} else {
				cw.activeReadChunks[i] = count - 1
			}
		}
	}
}

// IsLocked reports whether any reader currently holds the given chunk.
func (cw *UploadPipeline) IsLocked(logicChunkIndex LogicChunkIndex) bool {
	cw.activeReadChunksLock.Lock()
	defer cw.activeReadChunksLock.Unlock()
	if count, found := cw.activeReadChunks[logicChunkIndex]; found {
		return count > 0
	}
	return false
}

// waitForCurrentWritersToComplete blocks until all scheduled uploads have finished.
func (cw *UploadPipeline) waitForCurrentWritersToComplete() {
	cw.uploaderCountCond.L.Lock()
	defer cw.uploaderCountCond.L.Unlock()
	for atomic.LoadInt32(&cw.uploaderCount) > 0 {
		cw.uploaderCountCond.Wait()
	}
}

// maybeMoveToSealed seals the chunk as soon as it has been fully written.
func (cw *UploadPipeline) maybeMoveToSealed(memChunk *MemChunk, logicChunkIndex LogicChunkIndex) {
	if memChunk.usage.IsComplete(cw.ChunkSize) {
		cw.moveToSealed(memChunk, logicChunkIndex)
	}
}

// moveToSealed converts a writable chunk into a sealed chunk and schedules it
// for upload on the shared executor.
func (cw *UploadPipeline) moveToSealed(memChunk *MemChunk, logicChunkIndex LogicChunkIndex) {
	uploaderCount := atomic.AddInt32(&cw.uploaderCount, 1)
	glog.V(4).Infof("%s uploaderCount %d ++> %d", cw.filepath, uploaderCount-1, uploaderCount)

	cw.sealedChunksLock.Lock()

	if oldMemChunk, found := cw.sealedChunks[logicChunkIndex]; found {
		oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", cw.filepath, logicChunkIndex))
	}
	sealedChunk := &SealedChunk{
		chunk:            memChunk,
		referenceCounter: 1, // default 1 is for uploading process
	}
	cw.sealedChunks[logicChunkIndex] = sealedChunk
	delete(cw.writableChunks, logicChunkIndex)

	cw.sealedChunksLock.Unlock()

	cw.uploaders.Execute(func() {
		// first add to the file chunks
		cw.saveOneChunk(sealedChunk.chunk, logicChunkIndex)

		// notify waiting process
		remaining := atomic.AddInt32(&cw.uploaderCount, -1)
		glog.V(4).Infof("%s uploaderCount %d --> %d", cw.filepath, remaining+1, remaining)
		// Lock and Unlock are not strictly required by Broadcast, but without them
		// the broadcast could fire between the waiter's count check and its Wait,
		// and the wakeup would be missed.
		cw.uploaderCountCond.L.Lock()
		cw.uploaderCountCond.Broadcast()
		cw.uploaderCountCond.L.Unlock()

		// wait for readers to finish with this chunk
		for cw.IsLocked(logicChunkIndex) {
			time.Sleep(59 * time.Millisecond)
		}

		// then remove from sealed chunks
		cw.sealedChunksLock.Lock()
		defer cw.sealedChunksLock..Unlock()
		delete(cw.sealedChunks, logicChunkIndex)
		sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", cw.filepath, logicChunkIndex))
	})
}

// saveOneChunk uploads each written interval of the chunk via saveToStorageFn.
func (cw *UploadPipeline) saveOneChunk(memChunk *MemChunk, logicChunkIndex LogicChunkIndex) {
	if cw.saveToStorageFn == nil {
		return
	}
	for t := memChunk.usage.head.next; t != memChunk.usage.tail; t = t.next {
		reader := util.NewBytesReader(memChunk.buf[t.StartOffset:t.stopOffset])
		cw.saveToStorageFn(reader, int64(logicChunkIndex)*cw.ChunkSize+t.StartOffset, t.Size(), func() {
		})
	}
}

// readMemChunk copies the written intervals of memChunk that overlap [off, off+len(p))
// into p and returns the largest file offset (exclusive) that was copied.
func readMemChunk(memChunk *MemChunk, p []byte, off int64, logicChunkIndex LogicChunkIndex, chunkSize int64) (maxStop int64) {
	memChunkBaseOffset := int64(logicChunkIndex) * chunkSize
	for t := memChunk.usage.head.next; t != memChunk.usage.tail; t = t.next {
		logicStart := max(off, memChunkBaseOffset+t.StartOffset)
		logicStop := min(off+int64(len(p)), memChunkBaseOffset+t.stopOffset)
		if logicStart < logicStop {
			copy(p[logicStart-off:logicStop-off], memChunk.buf[logicStart-memChunkBaseOffset:logicStop-memChunkBaseOffset])
			maxStop = max(maxStop, logicStop)
		}
	}
	return
}

// Shutdown releases pipeline resources; nothing needs to be cleaned up yet.
func (cw *UploadPipeline) Shutdown() {
}