fix byte counter on loading index file
fix https://github.com/chrislusf/seaweedfs/issues/441
parent 8b15523c9d
commit 59022b6fe0
@@ -39,14 +39,13 @@ func NewCompactSection(start Key) *CompactSection {
 }
 
 //return old entry size
-func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
-	ret := uint32(0)
+func (cs *CompactSection) Set(key Key, offset, size uint32) (oldOffset, oldSize uint32) {
 	cs.Lock()
 	if key > cs.end {
 		cs.end = key
 	}
 	if i := cs.binarySearchValues(key); i >= 0 {
-		ret = cs.values[i].Size
+		oldOffset, oldSize = cs.values[i].Offset, cs.values[i].Size
 		//println("key", key, "old size", ret)
 		cs.values[i].Offset, cs.values[i].Size = offset, size
 	} else {
@@ -55,7 +54,7 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
 		if needOverflow {
 			//println("start", cs.start, "counter", cs.counter, "key", key)
 			if oldValue, found := cs.overflow[key]; found {
-				ret = oldValue.Size
+				oldOffset, oldSize = oldValue.Offset, oldValue.Size
 			}
 			cs.overflow[key] = NeedleValue{Key: key, Offset: offset, Size: size}
 		} else {
@@ -66,7 +65,7 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
 		}
 	}
 	cs.Unlock()
-	return ret
+	return
 }
 
 //return old entry size
@@ -129,7 +128,7 @@ func NewCompactMap() CompactMap {
 	return CompactMap{}
 }
 
-func (cm *CompactMap) Set(key Key, offset uint32, size uint32) uint32 {
+func (cm *CompactMap) Set(key Key, offset, size uint32) (oldOffset, oldSize uint32) {
 	x := cm.binarySearchCompactSection(key)
 	if x < 0 {
 		//println(x, "creating", len(cm.list), "section, starting", key)

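Note: the hunks above change Set from returning a single old size to returning both the old offset and old size, which is what lets callers tell a genuinely live previous entry apart from a tombstone. Below is a minimal, self-contained sketch of that caller-side pattern; the toy types and the TombstoneFileSize sentinel value are illustrative assumptions, not the real CompactMap implementation.

package main

import "fmt"

// Minimal stand-ins for illustration only; the real CompactMap in seaweedfs
// has compact sections, binary search, and an overflow map.
type Key uint64

const TombstoneFileSize = uint32(0xFFFFFFFF) // assumed sentinel for deleted needles

type needleValue struct {
	Offset, Size uint32
}

type toyMap map[Key]needleValue

// Set mirrors the new two-value signature: it returns the previous entry's
// offset and size, or zero values if the key was never stored.
func (m toyMap) Set(key Key, offset, size uint32) (oldOffset, oldSize uint32) {
	if old, ok := m[key]; ok {
		oldOffset, oldSize = old.Offset, old.Size
	}
	m[key] = needleValue{Offset: offset, Size: size}
	return
}

func main() {
	m := toyMap{}
	m.Set(1, 8, 100)                        // first write: no previous entry
	oldOffset, oldSize := m.Set(1, 16, 120) // overwrite: previous entry reported
	// A previous entry only counts as "live" if it had a real offset
	// and was not a tombstone.
	if oldOffset > 0 && oldSize != TombstoneFileSize {
		fmt.Println("superseded", oldSize, "bytes at offset", oldOffset)
	}
}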
@@ -31,12 +31,12 @@ func LoadNeedleMap(file *os.File) (*NeedleMap, error) {
 		if key > nm.MaximumFileKey {
 			nm.MaximumFileKey = key
 		}
+		if offset > 0 && size != TombstoneFileSize {
 			nm.FileCounter++
 			nm.FileByteCounter = nm.FileByteCounter + uint64(size)
-		if offset > 0 && size != TombstoneFileSize {
-			oldSize := nm.m.Set(Key(key), offset, size)
+			oldOffset, oldSize := nm.m.Set(Key(key), offset, size)
 			glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
-			if oldSize > 0 {
+			if oldOffset > 0 && oldSize != TombstoneFileSize {
 				nm.DeletionCounter++
 				nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
 			}

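Note: the counting rule in the hunk above is that only entries with a real offset and a non-tombstone size feed FileCounter/FileByteCounter, and an overwritten live entry adds its old size to DeletionCounter/DeletionByteCounter. The sketch below replays index entries under that rule; the types, the max-uint32 tombstone sentinel, and the omission of the real code's else branch (explicit deletions) are simplifying assumptions.

package main

import (
	"fmt"
	"math"
)

const TombstoneFileSize = uint32(math.MaxUint32) // assumed sentinel for deleted needles

type indexEntry struct {
	key          uint64
	offset, size uint32
}

type counters struct {
	FileCounter, DeletionCounter         int
	FileByteCounter, DeletionByteCounter uint64
}

// replayIndex walks index entries in order and maintains the counters the way
// the fixed branch above does: zero offsets and tombstones do not touch the
// file counters, and a superseded live entry moves its bytes to the deletion
// side. The real code's else branch (explicit deletions) is not modeled here.
func replayIndex(entries []indexEntry) counters {
	var c counters
	latest := map[uint64]indexEntry{} // stand-in for nm.m
	for _, e := range entries {
		if e.offset > 0 && e.size != TombstoneFileSize {
			c.FileCounter++
			c.FileByteCounter += uint64(e.size)
			if old, ok := latest[e.key]; ok && old.offset > 0 && old.size != TombstoneFileSize {
				c.DeletionCounter++
				c.DeletionByteCounter += uint64(old.size)
			}
			latest[e.key] = e
		}
	}
	return c
}

func main() {
	c := replayIndex([]indexEntry{
		{key: 1, offset: 8, size: 100},
		{key: 1, offset: 16, size: 120},               // overwrites key 1: 100 bytes become deletable
		{key: 2, offset: 24, size: TombstoneFileSize}, // tombstone: not counted as a file
	})
	fmt.Printf("%+v\n", c)
}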
@@ -84,7 +84,7 @@ func WalkIndexFile(r *os.File, fn func(key uint64, offset, size uint32) error) e
 }
 
 func (nm *NeedleMap) Put(key uint64, offset uint32, size uint32) error {
-	oldSize := nm.m.Set(Key(key), offset, size)
+	_, oldSize := nm.m.Set(Key(key), offset, size)
 	nm.logPut(key, oldSize, size)
 	return nm.appendToIndexFile(key, offset, size)
 }