mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

metrics: disk size for <collection, normal/EC>

parent: 0fdb1e705d
commit: 935639b908
@@ -60,23 +60,17 @@ var (
 		prometheus.GaugeOpts{
 			Namespace: "SeaweedFS",
 			Subsystem: "volumeServer",
-			Name:      "ecShards",
+			Name:      "ec_shards",
 			Help:      "Number of EC shards.",
 		})

-	VolumeServerVolumeSizeGauge = prometheus.NewGauge(
+	VolumeServerDiskSizeGauge = prometheus.NewGaugeVec(
 		prometheus.GaugeOpts{
 			Namespace: "SeaweedFS",
 			Subsystem: "volumeServer",
-			Name:      "totalVolumeSize",
+			Name:      "total_disk_size",
 			Help:      "Actual disk size used by volumes.",
-		})
-
-	VolumeServerEcShardSizeGauge = prometheus.NewGauge(
-		prometheus.GaugeOpts{
-			Namespace: "SeaweedFS",
-			Subsystem: "volumeServer",
-			Name:      "totalEcShardSize",
-			Help:      "Actual disk size used by ec shards.",
-		})
+		}, []string{"collection", "type"})
 )

 func init() {
@@ -88,8 +82,7 @@ func init() {
 	VolumeServerGather.MustRegister(VolumeServerRequestHistogram)
 	VolumeServerGather.MustRegister(VolumeServerVolumeCounter)
 	VolumeServerGather.MustRegister(VolumeServerEcShardCounter)
-	VolumeServerGather.MustRegister(VolumeServerVolumeSizeGauge)
-	VolumeServerGather.MustRegister(VolumeServerEcShardSizeGauge)
+	VolumeServerGather.MustRegister(VolumeServerDiskSizeGauge)

 }
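The change above collapses the two single-value gauges into one GaugeVec keyed by collection and storage type. Below is a minimal stand-alone sketch of that pattern using the standard prometheus/client_golang and prometheus/common/expfmt packages; the registry, label values, and sizes are illustrative, not SeaweedFS code.

package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

// Same metric shape as the diff: one gauge family, one series per (collection, type).
var diskSizeGauge = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Namespace: "SeaweedFS",
		Subsystem: "volumeServer",
		Name:      "total_disk_size",
		Help:      "Actual disk size used by volumes.",
	}, []string{"collection", "type"})

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(diskSizeGauge)

	// Illustrative values: the same collection reported for normal volumes and EC shards.
	diskSizeGauge.WithLabelValues("photos", "normal").Set(3 << 30)
	diskSizeGauge.WithLabelValues("photos", "ec").Set(512 << 20)

	// Print what a Prometheus scrape of this registry would return.
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		expfmt.MetricFamilyToText(os.Stdout, mf)
	}
}

Each distinct (collection, type) pair becomes its own time series, for example SeaweedFS_volumeServer_total_disk_size{collection="photos",type="ec"}, so dashboards can filter or sum by either label.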
@@ -162,7 +162,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 	var volumeMessages []*master_pb.VolumeInformationMessage
 	maxVolumeCount := 0
 	var maxFileKey NeedleId
-	var totalVolumeSize uint64
+	collectionVolumeSize := make(map[string]uint64)
 	for _, location := range s.Locations {
 		maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
 		location.Lock()
@@ -181,11 +181,14 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
 			}
 			}
 			fileSize, _, _ := v.FileStat()
-			totalVolumeSize += fileSize
+			collectionVolumeSize[v.Collection] += fileSize
 		}
 		location.Unlock()
 	}
-	stats.VolumeServerVolumeSizeGauge.Set(float64(totalVolumeSize))
+	for col, size := range collectionVolumeSize {
+		stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "normal").Set(float64(size))
+	}

 	return &master_pb.Heartbeat{
 		Ip: s.Ip,
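The heartbeat loop now buckets volume file sizes per collection before touching the gauge. A hedged sketch of that accumulate-then-label pattern, with volumeInfo and reportVolumeSizes as illustrative stand-ins for the real heartbeat types (assuming, as in SeaweedFS, that the default collection is the empty string):

package statssketch // illustrative package, not part of SeaweedFS

import "github.com/prometheus/client_golang/prometheus"

// volumeInfo stands in for what CollectHeartbeat can see per volume; only the
// collection name and on-disk file size matter here.
type volumeInfo struct {
	Collection string
	FileSize   uint64
}

// reportVolumeSizes mirrors the new heartbeat logic: sum file sizes per
// collection, then set one sample per collection with type="normal".
// The default, unnamed collection would surface as collection="".
func reportVolumeSizes(volumes []volumeInfo, gauge *prometheus.GaugeVec) {
	collectionVolumeSize := make(map[string]uint64)
	for _, v := range volumes {
		collectionVolumeSize[v.Collection] += v.FileSize
	}
	for col, size := range collectionVolumeSize {
		gauge.WithLabelValues(col, "normal").Set(float64(size))
	}
}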
@@ -20,20 +20,22 @@ import (

 func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat {
 	var ecShardMessages []*master_pb.VolumeEcShardInformationMessage
-	var totalEcShardSize int64
+	collectionEcShardSize := make(map[string]int64)
 	for _, location := range s.Locations {
 		location.ecVolumesLock.RLock()
 		for _, ecShards := range location.ecVolumes {
 			ecShardMessages = append(ecShardMessages, ecShards.ToVolumeEcShardInformationMessage()...)

 			for _, ecShard := range ecShards.Shards {
-				totalEcShardSize += ecShard.Size()
+				collectionEcShardSize[ecShards.Collection] += ecShard.Size()
 			}
 		}
 		location.ecVolumesLock.RUnlock()
 	}

-	stats.VolumeServerEcShardSizeGauge.Set(float64(totalEcShardSize))
+	for col, size := range collectionEcShardSize {
+		stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "ec").Set(float64(size))
+	}

 	return &master_pb.Heartbeat{
 		EcShards: ecShardMessages,
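The EC heartbeat feeds the same GaugeVec, just with type="ec", so one metric now covers both storage formats for a collection. A symmetric sketch (illustrative names; ecShard.Size() returns an int64 in the diff, hence the int64 map and the float64 conversion at the end):

package statssketch // illustrative package, not part of SeaweedFS

import "github.com/prometheus/client_golang/prometheus"

// reportEcShardSizes mirrors the new CollectErasureCodingHeartbeat logic:
// per-collection EC shard bytes, already summed into the map, are published
// under type="ec" on the same gauge the normal-volume path writes to.
func reportEcShardSizes(collectionEcShardSize map[string]int64, gauge *prometheus.GaugeVec) {
	for col, size := range collectionEcShardSize {
		gauge.WithLabelValues(col, "ec").Set(float64(size))
	}
}

Both heartbeat loops only Set collections that are currently present, so a collection that disappears from the store would keep its last reported value until the series is removed or the process restarts.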