Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

more logs

commit 7bf7af971b
parent fc22071a2f
@@ -27,7 +27,7 @@ func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *MemoryChunkPages {
 		fh: fh,
 	}
 
-	dirtyPages.uploadPipeline = page_writer.NewUploadPipeline(
+	dirtyPages.uploadPipeline = page_writer.NewUploadPipeline(fh.f.fullpath(),
 		fh.f.wfs.concurrentWriters, chunkSize, dirtyPages.saveChunkedFileIntevalToStorage)
 
 	return dirtyPages
@@ -1,6 +1,7 @@
 package page_writer
 
 import (
+	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/util/mem"
@@ -18,6 +19,7 @@ type UploadPipeline struct {
 	activeWriterCond  *sync.Cond
 	activeWriterCount int32
 	saveToStorageFn   SaveToStorageFunc
+	filepath          util.FullPath
 }
 
 type SealedChunk struct {
@@ -25,14 +27,15 @@ type SealedChunk struct {
 	referenceCounter int // track uploading or reading processes
 }
 
-func (sc *SealedChunk) FreeReference() {
+func (sc *SealedChunk) FreeReference(messageOnFree string) {
 	sc.referenceCounter--
 	if sc.referenceCounter == 0 {
+		glog.V(4).Infof("Free sealed chunk: %s", messageOnFree)
 		mem.Free(sc.chunk.buf)
 	}
 }
 
-func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc) *UploadPipeline {
+func NewUploadPipeline(filepath util.FullPath, writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc) *UploadPipeline {
 	return &UploadPipeline{
 		ChunkSize:      chunkSize,
 		writableChunks: make(map[LogicChunkIndex]*MemChunk),
@@ -40,6 +43,7 @@ func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64,
 		writers:          writers,
 		activeWriterCond: sync.NewCond(&sync.Mutex{}),
 		saveToStorageFn:  saveToStorageFn,
+		filepath:         filepath,
 	}
 }
 
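
The hunks above change the page_writer API: SealedChunk.FreeReference now takes a messageOnFree string, and NewUploadPipeline takes the owning file's path so later log lines can name the file. Below is a minimal, self-contained sketch of that reference-counting-with-message pattern, assuming simplified types and the standard library's log package in place of glog and mem.Free; it is an illustration, not the actual seaweedfs code.

package main

import (
	"fmt"
	"log"
)

type sealedChunk struct {
	buf              []byte
	referenceCounter int // track uploading or reading processes
}

// freeReference mirrors SealedChunk.FreeReference(messageOnFree string): each
// holder describes why it is dropping its reference, and the message is logged
// only when the final reference actually frees the buffer.
func (sc *sealedChunk) freeReference(messageOnFree string) {
	sc.referenceCounter--
	if sc.referenceCounter == 0 {
		log.Printf("Free sealed chunk: %s", messageOnFree)
		sc.buf = nil // stands in for mem.Free(sc.chunk.buf)
	}
}

func main() {
	sc := &sealedChunk{buf: make([]byte, 8), referenceCounter: 2}
	filepath, chunkIndex := "/some/file", 3

	// A reader and the uploader each release their reference with a message
	// shaped like the fmt.Sprintf calls in the call-site hunks below.
	sc.freeReference(fmt.Sprintf("%s finish reading chunk %d", filepath, chunkIndex))
	sc.freeReference(fmt.Sprintf("%s finished uploading chunk %d", filepath, chunkIndex))
}

In the actual code the message goes through glog.V(4).Infof, so it only appears when verbose logging is enabled at level 4 or higher.
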
@@ -77,7 +81,7 @@ func (cw *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) {
 	cw.sealedChunksLock.Unlock()
 	if found {
 		maxStop = readMemChunk(sealedChunk.chunk, p, off, logicChunkIndex, cw.ChunkSize)
-		sealedChunk.FreeReference()
+		sealedChunk.FreeReference(fmt.Sprintf("%s finish reading chunk %d", cw.filepath, logicChunkIndex))
 	}
 
 	// read from writable chunks last
@@ -125,12 +129,12 @@ func (cw *UploadPipeline) maybeMoveToSealed(memChunk *MemChunk, logicChunkIndex
 
 func (cw *UploadPipeline) moveToSealed(memChunk *MemChunk, logicChunkIndex LogicChunkIndex) {
 	atomic.AddInt32(&cw.activeWriterCount, 1)
-	glog.V(4).Infof("activeWriterCount %d ++> %d", cw.activeWriterCount-1, cw.activeWriterCount)
+	glog.V(4).Infof("%s activeWriterCount %d ++> %d", cw.filepath, cw.activeWriterCount-1, cw.activeWriterCount)
 
 	cw.sealedChunksLock.Lock()
 
 	if oldMemChunk, found := cw.sealedChunks[logicChunkIndex]; found {
-		oldMemChunk.FreeReference()
+		oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", cw.filepath, logicChunkIndex))
 	}
 	sealedChunk := &SealedChunk{
 		chunk: memChunk,
@@ -149,10 +153,10 @@ func (cw *UploadPipeline) moveToSealed(memChunk *MemChunk, logicChunkIndex Logic
 		cw.sealedChunksLock.Lock()
 		defer cw.sealedChunksLock.Unlock()
 		delete(cw.sealedChunks, logicChunkIndex)
-		sealedChunk.FreeReference()
+		sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", cw.filepath, logicChunkIndex))
 
 		atomic.AddInt32(&cw.activeWriterCount, -1)
-		glog.V(4).Infof("activeWriterCount %d --> %d", cw.activeWriterCount+1, cw.activeWriterCount)
+		glog.V(4).Infof("%s activeWriterCount %d --> %d", cw.filepath, cw.activeWriterCount+1, cw.activeWriterCount)
 		// Lock and Unlock are not required,
 		// but it may signal multiple times during one wakeup,
 		// and the waiting goroutine may miss some of them!
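
Taken together, the call-site hunks thread the file's full path (fh.f.fullpath()) into the pipeline once at construction time, and every reference release and writer-count transition now logs which file it belongs to, so the verbose logs stay attributable when many files are written concurrently.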