Optimize leveldb metric (#3830)

* optimize updating mapMetric for leveldb

* import loading leveldb

* add comments
Guo Lei 2022-10-12 12:13:25 +08:00 committed by GitHub
parent d21e2f523d
commit 84c401e693
4 changed files with 39 additions and 28 deletions

@@ -48,7 +48,6 @@ type TempNeedleMapper interface {
NeedleMapper
DoOffsetLoading(v *Volume, indexFile *os.File, startFrom uint64) error
UpdateNeedleMap(v *Volume, indexFile *os.File, opts *opt.Options) error
UpdateNeedleMapMetric(indexFile *os.File) error
}
func (nm *baseNeedleMapper) IndexFileSize() uint64 {
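
Note (not part of the diff): this hunk drops a single line from TempNeedleMapper, and the removals in the other files suggest the dropped method is UpdateNeedleMapMetric. A sketch of the interface after this change, assuming exactly that:

type TempNeedleMapper interface {
    NeedleMapper
    DoOffsetLoading(v *Volume, indexFile *os.File, startFrom uint64) error
    UpdateNeedleMap(v *Volume, indexFile *os.File, opts *opt.Options) error
}

With no separate metric method on the interface, the map metrics have to be maintained while the index is loaded, which is what the leveldb changes below do.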

@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -179,6 +180,7 @@ func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, update
}
return nil
}
func levelDbDelete(db *leveldb.DB, key NeedleId) error {
bytes := make([]byte, NeedleIdSize)
NeedleIdToBytes(bytes, key)
@@ -305,23 +307,45 @@ func (m *LevelDbNeedleMap) DoOffsetLoading(v *Volume, indexFile *os.File, startF
}
err = idx.WalkIndexFile(indexFile, startFrom, func(key NeedleId, offset Offset, size Size) (e error) {
if !offset.IsZero() && size.IsValid() {
m.mapMetric.FileCounter++
bytes := make([]byte, NeedleIdSize)
NeedleIdToBytes(bytes[0:NeedleIdSize], key)
// fresh loading
if startFrom == 0 {
m.mapMetric.FileByteCounter += uint64(size)
e = levelDbWrite(db, key, offset, size, false, 0)
return e
}
// incremental loading
data, err := db.Get(bytes, nil)
if err != nil {
if !strings.Contains(strings.ToLower(err.Error()), "not found") {
// unexpected error
return err
}
// new needle, unlikely to happen
m.mapMetric.FileByteCounter += uint64(size)
e = levelDbWrite(db, key, offset, size, false, 0)
} else {
// needle is found
oldSize := BytesToSize(data[OffsetSize : OffsetSize+SizeSize])
oldOffset := BytesToOffset(data[0:OffsetSize])
if !offset.IsZero() && size.IsValid() {
// updated needle
m.mapMetric.FileByteCounter += uint64(size)
if !oldOffset.IsZero() && oldSize.IsValid() {
m.mapMetric.DeletionCounter++
m.mapMetric.DeletionByteCounter += uint64(oldSize)
}
e = levelDbWrite(db, key, offset, size, false, 0)
} else {
// deleted needle
m.mapMetric.DeletionCounter++
m.mapMetric.DeletionByteCounter += uint64(oldSize)
e = levelDbDelete(db, key)
}
}
return e
})
if err != nil {
return err
}
if startFrom != 0 {
return needleMapMetricFromIndexFile(indexFile, &m.mapMetric)
}
return nil
}
func (m *LevelDbNeedleMap) UpdateNeedleMapMetric(indexFile *os.File) error {
return needleMapMetricFromIndexFile(indexFile, &m.mapMetric)
}
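
Note (not part of the diff): the "needle is found" branch decodes the stored leveldb value as the offset bytes followed by the size bytes (data[0:OffsetSize], then data[OffsetSize:OffsetSize+SizeSize]). A self-contained sketch of that layout, with made-up 4-byte widths and big-endian encoding standing in for the real OffsetSize/SizeSize constants and the BytesToOffset/BytesToSize helpers:

package main

import (
    "encoding/binary"
    "fmt"
)

// Illustrative widths only; the real constants live in the storage types package.
const (
    offsetSize = 4
    sizeSize   = 4
)

// encodeValue mirrors the assumed value layout: offset first, then size.
func encodeValue(offset, size uint32) []byte {
    buf := make([]byte, offsetSize+sizeSize)
    binary.BigEndian.PutUint32(buf[0:offsetSize], offset)
    binary.BigEndian.PutUint32(buf[offsetSize:offsetSize+sizeSize], size)
    return buf
}

// decodeValue plays the role of BytesToOffset / BytesToSize in the diff above.
func decodeValue(data []byte) (offset, size uint32) {
    offset = binary.BigEndian.Uint32(data[0:offsetSize])
    size = binary.BigEndian.Uint32(data[offsetSize : offsetSize+sizeSize])
    return
}

func main() {
    data := encodeValue(128, 4096)
    oldOffset, oldSize := decodeValue(data)
    fmt.Println(oldOffset, oldSize) // 128 4096
}

Because the previous offset and size are recoverable from the leveldb value alone, an incremental load can update DeletionCounter and DeletionByteCounter on the spot, instead of re-scanning the whole index file with needleMapMetricFromIndexFile as before.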

@@ -129,7 +129,3 @@ func (nm *NeedleMap) DoOffsetLoading(v *Volume, indexFile *os.File, startFrom ui
return e
}
func (m *NeedleMap) UpdateNeedleMapMetric(indexFile *os.File) error {
return nil
}

@@ -219,16 +219,8 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", oldIdxFileName, err)
}
if indexSize == 0 || uint64(indexSize) <= v.lastCompactIndexOffset {
if v.needleMapKind == NeedleMapInMemory {
return nil
}
newIdx, err := os.OpenFile(newIdxFileName, os.O_RDWR, 0644)
if err != nil {
return fmt.Errorf("open idx file %s failed: %v", newIdxFileName, err)
}
defer newIdx.Close()
return v.tmpNm.UpdateNeedleMapMetric(newIdx)
}
// fail if the old .dat file has changed to a new revision
oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatBackend)
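
Note (not part of the diff): this hunk shrinks the early-return branch of makeupDiff from 16 lines to 8. Assuming the removed lines are the NeedleMapInMemory special case and the UpdateNeedleMapMetric call (consistent with that method being deleted elsewhere in this commit), the surviving code is roughly:

if indexSize == 0 || uint64(indexSize) <= v.lastCompactIndexOffset {
    // Metrics are now maintained while the index is loaded (DoOffsetLoading),
    // so no extra pass over the new .idx file is needed here.
    return nil
}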