Fix resource leaks (#4737)

* Fix division by zero

* Fix file handle leak

* Fix file handle leak

* Fix file handle leak

* Fix goroutine leak
Nikita Mochalov 2023-08-10 01:30:36 +03:00 committed by GitHub
parent 3365468d0d
commit e6a49dc533
5 changed files with 40 additions and 24 deletions


@@ -35,6 +35,7 @@ type DiskLocation struct {
 	ecVolumesLock sync.RWMutex
 	isDiskSpaceLow bool
+	closeCh chan struct{}
 }
 
 func GenerateDirUuid(dir string) (dirUuidString string, err error) {
@@ -80,7 +81,17 @@ func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFree
 	}
 	location.volumes = make(map[needle.VolumeId]*Volume)
 	location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
-	go location.CheckDiskSpace()
+	location.closeCh = make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-location.closeCh:
+				return
+			case <-time.After(time.Minute):
+				location.CheckDiskSpace()
+			}
+		}
+	}()
 	return location
 }
@@ -384,6 +395,7 @@ func (l *DiskLocation) Close() {
 	}
 	l.ecVolumesLock.Unlock()
+	close(l.closeCh)
 	return
 }
@@ -420,26 +432,22 @@ func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64)
 }
 
 func (l *DiskLocation) CheckDiskSpace() {
-	for {
-		if dir, e := filepath.Abs(l.Directory); e == nil {
-			s := stats.NewDiskStatus(dir)
-			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
-			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
-			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
-			isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
-			if isLow != l.isDiskSpaceLow {
-				l.isDiskSpaceLow = !l.isDiskSpaceLow
-			}
-			logLevel := glog.Level(4)
-			if l.isDiskSpaceLow {
-				logLevel = glog.Level(0)
-			}
-			glog.V(logLevel).Infof("dir %s %s", dir, desc)
-		}
-		time.Sleep(time.Minute)
-	}
+	if dir, e := filepath.Abs(l.Directory); e == nil {
+		s := stats.NewDiskStatus(dir)
+		stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
+		stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
+		stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
+		isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
+		if isLow != l.isDiskSpaceLow {
+			l.isDiskSpaceLow = !l.isDiskSpaceLow
+		}
+		logLevel := glog.Level(4)
+		if l.isDiskSpaceLow {
+			logLevel = glog.Level(0)
+		}
+		glog.V(logLevel).Infof("dir %s %s", dir, desc)
+	}
 }
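The goroutine-leak fix above follows the usual stop-channel pattern: the once-a-minute loop moves out of CheckDiskSpace into a goroutine owned by NewDiskLocation that selects on closeCh, and Close signals shutdown by closing that channel. A minimal self-contained sketch of the same pattern (the worker type and names here are illustrative, not SeaweedFS APIs):

package main

import (
	"fmt"
	"time"
)

// worker runs a periodic task in the background and can be stopped,
// so the goroutine does not outlive its owner and leak.
type worker struct {
	closeCh chan struct{}
}

func newWorker(interval time.Duration) *worker {
	w := &worker{closeCh: make(chan struct{})}
	go func() {
		for {
			select {
			case <-w.closeCh:
				return // stop requested: exit instead of looping forever
			case <-time.After(interval):
				w.checkOnce()
			}
		}
	}()
	return w
}

func (w *worker) checkOnce() { fmt.Println("periodic check") }

// Stop must be called at most once; closing closeCh wakes the loop immediately.
func (w *worker) Stop() { close(w.closeCh) }

func main() {
	w := newWorker(200 * time.Millisecond)
	time.Sleep(time.Second)
	w.Stop()
}

Selecting on time.After instead of calling time.Sleep inside the loop also lets Close interrupt the wait right away rather than after up to a full interval.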


@@ -120,6 +120,10 @@ func generateMissingEcFiles(baseFileName string, bufferSize int, largeBlockSize
 
 func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize int64, buffers [][]byte, outputs []*os.File) error {
 	bufferSize := int64(len(buffers[0]))
+	if bufferSize == 0 {
+		glog.Fatal("unexpected zero buffer size")
+	}
+
 	batchCount := blockSize / bufferSize
 	if blockSize%bufferSize != 0 {
 		glog.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize)
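The new guard matters because integer division by zero panics at runtime in Go, so an empty first buffer would crash encodeData at blockSize / bufferSize with a bare panic instead of the descriptive fatal log added here. A tiny sketch of the failure mode, using made-up sizes rather than the real erasure-coding parameters:

package main

import "log"

func main() {
	buffers := [][]byte{{}} // imagine the first buffer arrives empty
	bufferSize := int64(len(buffers[0]))
	blockSize := int64(1 << 20)

	if bufferSize == 0 {
		log.Fatal("unexpected zero buffer size") // fail with context, as in the fix
	}
	log.Println("batches:", blockSize/bufferSize) // without the guard: panic: integer divide by zero
}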


@@ -2,7 +2,6 @@ package erasure_coding
 
 import (
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	"os"
 	"path"
 	"strconv"
@@ -10,6 +9,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/stats"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
+	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 )
 
 type ShardId uint8
@@ -39,6 +39,7 @@ func NewEcVolumeShard(diskType types.DiskType, dirname string, collection string
 	}
 	ecdFi, statErr := v.ecdFile.Stat()
 	if statErr != nil {
+		_ = v.ecdFile.Close()
 		return nil, fmt.Errorf("can not stat ec volume shard %s%s: %v", baseFileName, ToExt(int(shardId)), statErr)
 	}
 	v.ecdFileSize = ecdFi.Size()
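This fix and the two below share one shape: an *os.File is opened successfully, a follow-up Stat (or index load) fails, and the constructor returns an error without closing the file, leaking the descriptor. Closing explicitly on the error path, as the added lines do, is the direct fix; a minimal sketch of that pattern with a hypothetical helper (openSized is not a SeaweedFS function):

package example

import (
	"fmt"
	"os"
)

// openSized opens a file and returns its size, making sure the handle is
// closed on every error path so a failed Stat cannot leak the descriptor.
func openSized(name string) (*os.File, int64, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, 0, fmt.Errorf("open %s: %w", name, err)
	}
	fi, statErr := f.Stat()
	if statErr != nil {
		_ = f.Close() // release the descriptor before returning the error
		return nil, 0, fmt.Errorf("stat %s: %w", name, statErr)
	}
	return f, fi.Size(), nil
}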


@@ -3,19 +3,20 @@ package erasure_coding
 
 import (
 	"errors"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/pb"
-	"github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
-	"golang.org/x/exp/slices"
 	"math"
 	"os"
 	"sync"
 	"time"
 
+	"golang.org/x/exp/slices"
+
+	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
+	"github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
 )
 
 var (
@@ -52,6 +53,7 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection
 	}
 	ecxFi, statErr := ev.ecxFile.Stat()
 	if statErr != nil {
+		_ = ev.ecxFile.Close()
 		return nil, fmt.Errorf("can not stat ec volume index %s.ecx: %v", indexBaseFileName, statErr)
 	}
 	ev.ecxFileSize = ecxFi.Size()


@@ -35,6 +35,7 @@ func NewSortedFileNeedleMap(indexBaseFileName string, indexFile *os.File) (m *So
 	glog.V(1).Infof("Loading %s...", indexFile.Name())
 	mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile)
 	if indexLoadError != nil {
+		_ = m.dbFile.Close()
 		return nil, indexLoadError
 	}
 	m.mapMetric = *mm