seaweedfs/weed/pb/filer_pb/filer_client_bfs.go
Konstantin Lebedev e20f0dbd2d
avoid data race of TraverseBfs (#3856)
* avoid data race of TraverseBfs

* close is enough
avoid panic:
I1014 12:29:59.207120 volume_loading.go:131 loading sorted db /tmp/sw/test2_19.sdx error: unexpected file /tmp/sw/test2_19.idx size: 255
I1014 12:29:59.207125 volume_loading.go:119 open to write file /tmp/sw/test4_26.idx
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x5260a4c]

goroutine 287 [running]:
github.com/seaweedfs/seaweedfs/weed/storage.(*SortedFileNeedleMap).Close(0x0)
        /Users/tochka/GolandProjects/seaweedfs/weed/storage/needle_map_sorted_file.go:97 +0x2c
github.com/seaweedfs/seaweedfs/weed/storage.(*Volume).load.func1()
        /Users/tochka/GolandProjects/seaweedfs/weed/storage/volume_loading.go:32 +0x8e
github.com/seaweedfs/seaweedfs/weed/storage.(*Volume).load(0xc001b36280, 0x1, 0x1, 0x0, 0x69228c0?)
        /Users/tochka/GolandProjects/seaweedfs/weed/storage/volume_loading.go:205 +0x256c
github.com/seaweedfs/seaweedfs/weed/storage.NewVolume({0x7ffeefbff6e0, 0x7}, {0x7ffeefbff6e0, 0x7}, {0xc0009a9284, 0x5}, 0x13, 0x0, 0x0, 0x0, ...)
        /Users/tochka/GolandProjects/seaweedfs/weed/storage/volume.go:62 +0x30f
github.com/seaweedfs/seaweedfs/weed/storage.(*DiskLocation).loadExistingVolume(0xc0006f40c0, {0x846c8d0, 0xc0009ce600}, 0x0?, 0x1)
        /Users/tochka/GolandProjects/seaweedfs/weed/storage/disk_location.go:161 +0x4da
github.com/seaweedfs/seaweedfs/weed/storage.(*DiskLocation).concurrentLoadingVolumes.func2()
        /Users/tochka/GolandProjects/seaweedfs/weed/storage/disk_location.go:201 +0xf9
created by github.com/seaweedfs/seaweedfs/weed/storage.(*DiskLocation).concurrentLoadingVolumes
        /Users/tochka/GolandProjects/seaweedfs/weed/storage/disk_location.go:198 +0x150
2022-10-16 11:30:41 -07:00
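The SIGSEGV in the trace above comes from calling Close on a *SortedFileNeedleMap that is still nil after a failed load. As a minimal sketch of the usual guard (the sortedFileMap type below is a hypothetical stand-in, not the actual SeaweedFS implementation), a nil-receiver check makes the cleanup path safe even when loading never produced a value:

package main

import "os"

// hypothetical stand-in for an index map backed by a file
type sortedFileMap struct {
	dbFile *os.File
}

// Close is safe to call even when loading failed and the map is nil.
func (m *sortedFileMap) Close() {
	if m == nil { // without this guard, the field access below dereferences nil
		return
	}
	if m.dbFile != nil {
		m.dbFile.Close()
	}
}

func main() {
	var m *sortedFileMap // loading the sorted db failed, so m was never assigned
	m.Close()            // no panic: the nil-receiver guard returns early
}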

package filer_pb

import (
	"fmt"
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/util"
)

// TraverseBfs walks the directory tree under parentPath breadth-first, calling fn
// for every entry it finds. K worker goroutines drain a shared queue of directories;
// each worker exits when its terminate channel is closed after all queued work is done.
func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) {
	K := 5

	var jobQueueWg sync.WaitGroup
	queue := util.NewQueue()
	jobQueueWg.Add(1)
	queue.Enqueue(parentPath)
	terminates := make([]chan bool, K)

	for i := 0; i < K; i++ {
		terminates[i] = make(chan bool)
		go func(j int) {
			for {
				select {
				case <-terminates[j]:
					return
				default:
					t := queue.Dequeue()
					if t == nil {
						// queue is momentarily empty; back off before polling again
						time.Sleep(329 * time.Millisecond)
						continue
					}
					dir := t.(util.FullPath)
					processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn)
					if processErr != nil {
						err = processErr
					}
					jobQueueWg.Done()
				}
			}
		}(i)
	}
	jobQueueWg.Wait()

	// all queued directories have been processed; closing the channels releases the workers
	for i := 0; i < K; i++ {
		close(terminates[i])
	}
	return
}

// processOneDirectory lists one directory, invokes fn for each entry, and enqueues
// any sub-directories for the workers to pick up.
func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) {

	return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error {

		fn(parentPath, entry)

		if entry.IsDirectory {
			subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name)
			if parentPath == "/" {
				subDir = "/" + entry.Name
			}
			jobQueueWg.Add(1)
			queue.Enqueue(util.FullPath(subDir))
		}

		return nil
	})
}
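
For reference, a hypothetical caller sketch (walkAll and the filerClient parameter are assumptions for illustration, not part of this file): any concrete type implementing FilerClient can be passed to TraverseBfs, which then invokes the callback for every entry discovered under the starting path.

package example

import (
	"fmt"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

// walkAll is a hypothetical helper; filerClient is assumed to be any value that
// implements filer_pb.FilerClient (for example, a weed shell command environment).
func walkAll(filerClient filer_pb.FilerClient) {
	err := filer_pb.TraverseBfs(filerClient, util.FullPath("/"), func(parentPath util.FullPath, entry *filer_pb.Entry) {
		fmt.Println(parentPath, entry.Name)
	})
	if err != nil {
		log.Fatal(err)
	}
}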