volume: add option to limit compaction speed

This commit is contained in:
Chris Lu 2019-05-03 17:22:39 -07:00
parent f0f981e7c8
commit b335f81a4f
9 changed files with 69 additions and 33 deletions

View file

@ -91,7 +91,7 @@ func runBackup(cmd *Command, args []string) bool {
}
if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
if err = v.Compact(0); err != nil {
if err = v.Compact(0, 0); err != nil {
fmt.Printf("Compact Volume before synchronizing %v\n", err)
return true
}

View file

@ -43,7 +43,7 @@ func runCompact(cmd *Command, args []string) bool {
glog.Fatalf("Load Volume [ERROR] %s\n", err)
}
if *compactMethod == 0 {
if err = v.Compact(preallocate); err != nil {
if err = v.Compact(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
}
} else {

View file

@ -94,6 +94,7 @@ func init() {
serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets")

View file

@ -43,6 +43,7 @@ type VolumeServerOptions struct {
readRedirect *bool
cpuProfile *string
memProfile *string
compactionMBPerSecond *int
}
func init() {
@ -63,6 +64,7 @@ func init() {
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit compaction speed in mega bytes per second")
}
var cmdVolume = &Command{
@ -157,6 +159,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack,
v.whiteList,
*v.fixJpgOrientation, *v.readRedirect,
*v.compactionMBPerSecond,
)
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)

View file

@ -28,7 +28,7 @@ func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_ser
resp := &volume_server_pb.VacuumVolumeCompactResponse{}
err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate)
err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond)
if err != nil {
glog.Errorf("compact volume %d: %v", req.VolumeId, err)

View file

@ -23,6 +23,7 @@ type VolumeServer struct {
needleMapKind storage.NeedleMapType
FixJpgOrientation bool
ReadRedirect bool
compactionBytePerSecond int64
}
func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
@ -33,7 +34,9 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
dataCenter string, rack string,
whiteList []string,
fixJpgOrientation bool,
readRedirect bool) *VolumeServer {
readRedirect bool,
compactionMBPerSecond int,
) *VolumeServer {
v := viper.GetViper()
signingKey := v.GetString("jwt.signing.key")
@ -47,6 +50,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
FixJpgOrientation: fixJpgOrientation,
ReadRedirect: readRedirect,
grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"),
compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024,
}
vs.MasterNodes = masterNodes
vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)

View file

@ -14,9 +14,9 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
}
return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)
}
// CompactVolume compacts the volume identified by vid, preallocating
// `preallocate` bytes for the new data file and throttling the copy to
// compactionBytePerSecond bytes per second (0 appears to mean unlimited —
// TODO confirm against Volume.Compact). Returns an error when no volume
// with the given id is registered in this store.
// NOTE(review): this span is a diff rendering — the first signature line
// and the first return line are the pre-change versions; only the lines
// carrying the compactionBytePerSecond parameter are current.
func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64) error {
func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error {
if v := s.findVolume(vid); v != nil {
return v.Compact(preallocate)
// Delegate to the volume itself, passing the rate limit through.
return v.Compact(preallocate, compactionBytePerSecond)
}
return fmt.Errorf("volume id %d is not found during compact", vid)
}

View file

@ -18,7 +18,7 @@ func (v *Volume) garbageLevel() float64 {
return float64(v.nm.DeletedSize()) / float64(v.ContentSize())
}
func (v *Volume) Compact(preallocate int64) error {
func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error {
glog.V(3).Infof("Compacting volume %d ...", v.Id)
//no need to lock for copy on write
//v.accessLock.Lock()
@ -29,7 +29,7 @@ func (v *Volume) Compact(preallocate int64) error {
v.lastCompactIndexOffset = v.nm.IndexFileSize()
v.lastCompactRevision = v.SuperBlock.CompactionRevision
glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate)
return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
}
func (v *Volume) Compact2() error {
@ -242,6 +242,9 @@ type VolumeFileScanner4Vacuum struct {
nm *NeedleMap
newOffset int64
now uint64
compactionBytePerSecond int64
lastSizeCounter int64
lastSizeCheckTime time.Time
}
func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock SuperBlock) error {
@ -269,13 +272,32 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
if _, _, _, err := n.Append(scanner.dst, scanner.v.Version()); err != nil {
return fmt.Errorf("cannot append needle: %s", err)
}
scanner.newOffset += n.DiskSize(scanner.version)
delta := n.DiskSize(scanner.version)
scanner.newOffset += delta
scanner.maybeSlowdown(delta)
glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
}
return nil
}
// maybeSlowdown rate-limits compaction writes to compactionBytePerSecond.
// It accumulates delta (the bytes just written) and, roughly every 100ms,
// sleeps long enough to bring the observed write rate back under the
// configured limit. A limit of zero or below disables throttling.
//
// The throttle is best-effort: it only measures bytes copied by this
// scanner, and sleeps are granted in whole check intervals.
func (scanner *VolumeFileScanner4Vacuum) maybeSlowdown(delta int64) {
	if scanner.compactionBytePerSecond <= 0 {
		return
	}
	scanner.lastSizeCounter += delta
	elapsed := time.Since(scanner.lastSizeCheckTime)
	if elapsed <= 100*time.Millisecond {
		return
	}
	// Budget the bytes the limit permits over the ACTUAL elapsed interval.
	// (The previous version hard-coded compactionBytePerSecond/10, i.e. it
	// assumed exactly 100ms had passed; when a check interval ran long —
	// e.g. after a slow disk write — that over-throttled the copy.)
	allowedBytes := int64(float64(scanner.compactionBytePerSecond) * elapsed.Seconds())
	if overLimitBytes := scanner.lastSizeCounter - allowedBytes; overLimitBytes > 0 {
		// Sleep exactly as long as it takes the excess bytes to "drain"
		// at the configured rate: overLimitBytes / bytesPerSecond seconds.
		overRatio := float64(overLimitBytes) / float64(scanner.compactionBytePerSecond)
		time.Sleep(time.Duration(overRatio * float64(time.Second)))
	}
	// Start a fresh accounting window from the post-sleep instant.
	scanner.lastSizeCounter, scanner.lastSizeCheckTime = 0, time.Now()
}
func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64) (err error) {
func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64, compactionBytePerSecond int64) (err error) {
var (
dst, idx *os.File
)
@ -294,6 +316,8 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
now: uint64(time.Now().Unix()),
nm: NewBtreeNeedleMap(idx),
dst: dst,
compactionBytePerSecond: compactionBytePerSecond,
lastSizeCheckTime: time.Now(),
}
err = ScanVolumeFile(v.dir, v.Collection, v.Id, v.needleMapKind, scanner)
return

View file

@ -5,6 +5,7 @@ import (
"math/rand"
"os"
"testing"
"time"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types"
@ -72,8 +73,8 @@ func TestCompaction(t *testing.T) {
t.Fatalf("volume creation: %v", err)
}
beforeCommitFileCount := 1000
afterCommitFileCount := 1000
beforeCommitFileCount := 10000
afterCommitFileCount := 10000
infos := make([]*needleInfo, beforeCommitFileCount+afterCommitFileCount)
@ -81,7 +82,10 @@ func TestCompaction(t *testing.T) {
doSomeWritesDeletes(i, v, t, infos)
}
v.Compact(0)
startTime := time.Now()
v.Compact(0, 1024*1024)
speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds()
t.Logf("compaction speed: %.2f bytes/s", speed)
for i := 1; i <= afterCommitFileCount; i++ {
doSomeWritesDeletes(i+beforeCommitFileCount, v, t, infos)