package storage

import (
	"fmt"
	"os"
	"sync/atomic"

	"github.com/chrislusf/seaweedfs/weed/storage/idx"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/willf/bloom"
)
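
// mapMetric tracks needle map statistics: counts and byte totals for live and
// deleted entries, plus the largest needle key seen so far. The Log* methods
// update these counters atomically.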
type mapMetric struct {
	DeletionCounter     uint32 `json:"DeletionCounter"`
	FileCounter         uint32 `json:"FileCounter"`
	DeletionByteCounter uint64 `json:"DeletionByteCounter"`
	FileByteCounter     uint64 `json:"FileByteCounter"`
	MaximumFileKey      uint64 `json:"MaxFileKey"`
}
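
// logDelete records the deletion of a needle that held deletedByteCount bytes.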
func (mm *mapMetric) logDelete(deletedByteCount Size) {
	if mm == nil {
		return
	}
	mm.LogDeletionCounter(deletedByteCount)
}
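
// logPut records a needle write: it raises the maximum file key, counts the
// new bytes as live, and, when the write replaces an existing needle
// (oldSize is positive and valid), counts the replaced bytes as deleted.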
func (mm *mapMetric) logPut(key NeedleId, oldSize Size, newSize Size) {
	if mm == nil {
		return
	}
	mm.MaybeSetMaxFileKey(key)
	mm.LogFileCounter(newSize)
	if oldSize > 0 && oldSize.IsValid() {
		mm.LogDeletionCounter(oldSize)
	}
}
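
// LogFileCounter atomically adds one file and newSize bytes to the live-file counters.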
func (mm *mapMetric) LogFileCounter(newSize Size) {
	if mm == nil {
		return
	}
	atomic.AddUint32(&mm.FileCounter, 1)
	atomic.AddUint64(&mm.FileByteCounter, uint64(newSize))
}
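
// LogDeletionCounter atomically adds one file and oldSize bytes to the deletion
// counters; it is a no-op unless oldSize is positive.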
func (mm *mapMetric) LogDeletionCounter(oldSize Size) {
	if mm == nil {
		return
	}
	if oldSize > 0 {
		atomic.AddUint32(&mm.DeletionCounter, 1)
		atomic.AddUint64(&mm.DeletionByteCounter, uint64(oldSize))
	}
}
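
// ContentSize returns the total number of live bytes recorded so far.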
func (mm *mapMetric) ContentSize() uint64 {
	if mm == nil {
		return 0
	}
	return atomic.LoadUint64(&mm.FileByteCounter)
}
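
// DeletedSize returns the total number of deleted bytes recorded so far.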
func (mm *mapMetric) DeletedSize() uint64 {
	if mm == nil {
		return 0
	}
	return atomic.LoadUint64(&mm.DeletionByteCounter)
}
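
// FileCount returns the number of live file entries recorded so far.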
func (mm *mapMetric) FileCount() int {
	if mm == nil {
		return 0
	}
	return int(atomic.LoadUint32(&mm.FileCounter))
}
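
// DeletedCount returns the number of deletion entries recorded so far.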
func (mm *mapMetric) DeletedCount() int {
	if mm == nil {
		return 0
	}
	return int(atomic.LoadUint32(&mm.DeletionCounter))
}
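
// MaxFileKey returns the largest needle key observed so far.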
func (mm *mapMetric) MaxFileKey() NeedleId {
	if mm == nil {
		return 0
	}
	// load atomically, matching the atomic store in MaybeSetMaxFileKey
	t := atomic.LoadUint64(&mm.MaximumFileKey)
	return Uint64ToNeedleId(t)
}
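
// MaybeSetMaxFileKey raises the recorded maximum file key when key exceeds
// the current value.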
func (mm *mapMetric) MaybeSetMaxFileKey(key NeedleId) {
	if mm == nil {
		return
	}
	if key > mm.MaxFileKey() {
		atomic.StoreUint64(&mm.MaximumFileKey, uint64(key))
	}
}
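
// newNeedleMapMetricFromIndexFile rebuilds a mapMetric by walking the .idx file
// from its newest entry to its oldest. The first time a key is seen (tracked
// with a bloom filter) it is that key's latest entry and is counted as a live
// file; any earlier entry for the same key has been superseded and goes into
// the deletion counters. Bloom-filter false positives can skew the counts
// slightly, which is acceptable for a metric.
//
// A minimal usage sketch (file name and error handling are illustrative only):
//
//	idxFile, err := os.Open("1.idx")
//	if err != nil {
//		return err
//	}
//	defer idxFile.Close()
//	metric, err := newNeedleMapMetricFromIndexFile(idxFile)
//	if err != nil {
//		return err
//	}
//	fmt.Println(metric.FileCount(), metric.ContentSize())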
func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) {
	mm = &mapMetric{}
	var bf *bloom.BloomFilter
	buf := make([]byte, NeedleIdSize)
	err = reverseWalkIndexFile(r, func(entryCount int64) {
		bf = bloom.NewWithEstimates(uint(entryCount), 0.001)
	}, func(key NeedleId, offset Offset, size Size) error {
		mm.MaybeSetMaxFileKey(key)
		NeedleIdToBytes(buf, key)
		if size.IsValid() {
			mm.FileByteCounter += uint64(size)
		}
		if !bf.Test(buf) {
			// first time this key is seen (its newest entry): count it as a live file
			mm.FileCounter++
			bf.Add(buf)
		} else {
			// the key already has a newer entry, so this older one was deleted or overwritten
			mm.DeletionCounter++
			if size.IsValid() {
				mm.DeletionByteCounter += uint64(size)
			}
		}
		return nil
	})
	return
}
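
// reverseWalkIndexFile reads the index file in fixed-size batches from the end
// toward the beginning. It calls initFn once with the total entry count, then
// calls fn for every entry, newest to oldest, stopping at the first error.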
func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key NeedleId, offset Offset, size Size) error) error {
	fi, err := r.Stat()
	if err != nil {
		return fmt.Errorf("file %s stat error: %v", r.Name(), err)
	}
	fileSize := fi.Size()
	if fileSize%NeedleMapEntrySize != 0 {
		return fmt.Errorf("unexpected file %s size: %d", r.Name(), fileSize)
	}

	entryCount := fileSize / NeedleMapEntrySize
	initFn(entryCount)

	batchSize := int64(1024 * 4)

	bytes := make([]byte, NeedleMapEntrySize*batchSize)
	// read the trailing partial batch first so that every later read is a full,
	// aligned batch of batchSize entries
	nextBatchSize := entryCount % batchSize
	if nextBatchSize == 0 {
		nextBatchSize = batchSize
	}
	remainingCount := entryCount - nextBatchSize

	for remainingCount >= 0 {
		_, e := r.ReadAt(bytes[:NeedleMapEntrySize*nextBatchSize], NeedleMapEntrySize*remainingCount)
		// log.Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e)
		if e != nil {
			return e
		}
		// walk the batch backwards so entries are delivered newest-first
		for i := int(nextBatchSize) - 1; i >= 0; i-- {
			key, offset, size := idx.IdxFileEntry(bytes[i*NeedleMapEntrySize : i*NeedleMapEntrySize+NeedleMapEntrySize])
			if e = fn(key, offset, size); e != nil {
				return e
			}
		}
		nextBatchSize = batchSize
		remainingCount -= nextBatchSize
	}
	return nil
}