Mirror of https://github.com/seaweedfs/seaweedfs.git,
last synced 2024-01-19 02:48:24 +00:00.
mount: less channel waiting
This commit is contained in:
parent
c31b254248
commit
3bf0116de1
|
@ -2,11 +2,17 @@ package filesys
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"io"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
)
|
||||
|
||||
var (
|
||||
concurrentWriterLimit = runtime.NumCPU()
|
||||
)
|
||||
|
||||
type ContinuousDirtyPages struct {
|
||||
|
@ -15,17 +21,26 @@ type ContinuousDirtyPages struct {
|
|||
writeWaitGroup sync.WaitGroup
|
||||
chunkSaveErrChan chan error
|
||||
chunkSaveErrChanClosed bool
|
||||
lastErr error
|
||||
lock sync.Mutex
|
||||
collection string
|
||||
replication string
|
||||
}
|
||||
|
||||
func newDirtyPages(file *File) *ContinuousDirtyPages {
|
||||
return &ContinuousDirtyPages{
|
||||
dirtyPages := &ContinuousDirtyPages{
|
||||
intervals: &ContinuousIntervals{},
|
||||
f: file,
|
||||
chunkSaveErrChan: make(chan error, 8),
|
||||
chunkSaveErrChan: make(chan error, concurrentWriterLimit),
|
||||
}
|
||||
go func() {
|
||||
for t := range dirtyPages.chunkSaveErrChan {
|
||||
if t != nil {
|
||||
dirtyPages.lastErr = t
|
||||
}
|
||||
}
|
||||
}()
|
||||
return dirtyPages
|
||||
}
|
||||
|
||||
func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) {
|
||||
|
@ -105,7 +120,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
|
|||
chunk.Mtime = mtime
|
||||
pages.collection, pages.replication = collection, replication
|
||||
pages.f.addChunks([]*filer_pb.FileChunk{chunk})
|
||||
pages.chunkSaveErrChan <- nil
|
||||
glog.V(0).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
|
||||
}()
|
||||
}
|
||||
|
||||
|
|
|
@ -208,25 +208,17 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
|
|||
|
||||
fh.dirtyPages.saveExistingPagesToStorage()
|
||||
|
||||
var err error
|
||||
go func() {
|
||||
for t := range fh.dirtyPages.chunkSaveErrChan {
|
||||
if t != nil {
|
||||
err = t
|
||||
}
|
||||
}
|
||||
}()
|
||||
fh.dirtyPages.writeWaitGroup.Wait()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
if fh.dirtyPages.lastErr != nil {
|
||||
return fh.dirtyPages.lastErr
|
||||
}
|
||||
|
||||
if !fh.f.dirtyMetadata {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
err := fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
|
||||
|
||||
if fh.f.entry.Attributes != nil {
|
||||
fh.f.entry.Attributes.Mime = fh.contentType
|
||||
|
|
40
weed/util/limiter.go
Normal file
40
weed/util/limiter.go
Normal file
|
@ -0,0 +1,40 @@
|
|||
package util
|
||||
|
||||
// initial version comes from https://github.com/korovkin/limiter/blob/master/limiter.go
|
||||
|
||||
// LimitedConcurrentExecutor object
|
||||
type LimitedConcurrentExecutor struct {
|
||||
limit int
|
||||
tokenChan chan int
|
||||
}
|
||||
|
||||
func NewLimitedConcurrentExecutor(limit int) *LimitedConcurrentExecutor {
|
||||
|
||||
// allocate a limiter instance
|
||||
c := &LimitedConcurrentExecutor{
|
||||
limit: limit,
|
||||
tokenChan: make(chan int, limit),
|
||||
}
|
||||
|
||||
// allocate the tokenChan:
|
||||
for i := 0; i < c.limit; i++ {
|
||||
c.tokenChan <- i
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Execute adds a function to the execution queue.
|
||||
// if num of go routines allocated by this instance is < limit
|
||||
// launch a new go routine to execute job
|
||||
// else wait until a go routine becomes available
|
||||
func (c *LimitedConcurrentExecutor) Execute(job func()) {
|
||||
token := <-c.tokenChan
|
||||
go func() {
|
||||
defer func() {
|
||||
c.tokenChan <- token
|
||||
}()
|
||||
// run the job
|
||||
job()
|
||||
}()
|
||||
}
|
Loading…
Reference in a new issue