adjust for metrics port

Chris Lu 2020-09-24 10:21:23 -07:00
parent d2d3aec3e1
commit 4856bce0ee
7 changed files with 30 additions and 34 deletions

weed/command/filer.go

@@ -13,6 +13,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/server"
+	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -86,6 +87,8 @@ func runFiler(cmd *Command, args []string) bool {
 	util.LoadConfiguration("security", false)
+	go stats_collect.StartMetricsServer(*f.metricsHttpPort)
 	f.startFiler()
 	return true
@@ -124,7 +127,6 @@ func (fo *FilerOptions) startFiler() {
 		Port: uint32(*fo.port),
 		Cipher: *fo.cipher,
 		Filers: peers,
-		MetricsHttpPort: *fo.metricsHttpPort,
 	})
 	if nfs_err != nil {
 		glog.Fatalf("Filer startup error: %v", nfs_err)
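
Note: the same pattern repeats in the s3, server, and volume sections below. The command entry point now owns the metrics HTTP port and starts the /metrics endpoint itself in a goroutine, so the server constructors no longer take a metrics port. A minimal standalone sketch of that call pattern, assuming only the weed/stats package shown in the import above (the flag name and default port here are illustrative, not taken from this commit):

package main

import (
	"flag"

	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
)

func main() {
	// illustrative flag; the real commands keep metricsHttpPort on their option structs
	metricsHttpPort := flag.Int("metricsPort", 9327, "Prometheus metrics listen port, 0 to disable")
	flag.Parse()

	// StartMetricsServer returns immediately when the port is 0, so the call is
	// safe even with metrics disabled; run it in the background like the commands do
	go stats_collect.StartMetricsServer(*metricsHttpPort)

	// ... start the actual filer / s3 / volume server here ...
	select {}
}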

weed/command/s3.go

@@ -115,6 +115,8 @@ func runS3(cmd *Command, args []string) bool {
 	util.LoadConfiguration("security", false)
+	go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpPort)
 	return s3StandaloneOptions.startS3Server()
 }
@@ -155,8 +157,7 @@ func (s3opt *S3Options) startS3Server() bool {
 		}
 	}
-	go stats_collect.StartMetricsServer(stats_collect.S3Gather, *s3opt.metricsHttpPort)
-	go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), stats_collect.S3Gather, metricsAddress, metricsIntervalSec)
+	go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), metricsAddress, metricsIntervalSec)
 	router := mux.NewRouter().SkipClean(true)

weed/command/server.go

@@ -2,6 +2,7 @@ package command
 import (
 	"fmt"
+	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
 	"os"
 	"runtime"
 	"runtime/pprof"
@@ -151,11 +152,6 @@ func runServer(cmd *Command, args []string) bool {
 	serverOptions.v.rack = serverRack
 	msgBrokerOptions.ip = serverIp
-	// metrics port
-	filerOptions.metricsHttpPort = serverMetricsHttpPort
-	serverOptions.v.metricsHttpPort = serverMetricsHttpPort
-	s3Options.metricsHttpPort = serverMetricsHttpPort
 	// serverOptions.v.pulseSeconds = pulseSeconds
 	// masterOptions.pulseSeconds = pulseSeconds
@@ -174,6 +170,7 @@ func runServer(cmd *Command, args []string) bool {
 	}
 	runtime.GOMAXPROCS(runtime.NumCPU())
+	go stats_collect.StartMetricsServer(*serverMetricsHttpPort)
 	folders := strings.Split(*volumeDataFolders, ",")

weed/command/volume.go

@@ -25,6 +25,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/server"
+	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -111,6 +112,8 @@ func runVolume(cmd *Command, args []string) bool {
 		grace.SetupProfiling(*v.cpuProfile, *v.memProfile)
 	}
+	go stats_collect.StartMetricsServer(*v.metricsHttpPort)
 	v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent)
 	return true
@@ -209,7 +212,6 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		*v.fixJpgOrientation, *v.readRedirect,
 		*v.compactionMBPerSecond,
 		*v.fileSizeLimitMB,
-		*v.metricsHttpPort,
 	)
 	// starting grpc server
 	grpcS := v.startGrpcService(volumeServer)

weed/server/filer_server.go

@@ -54,7 +54,6 @@ type FilerOption struct {
 	recursiveDelete bool
 	Cipher bool
 	Filers []string
-	MetricsHttpPort int
 }
 type FilerServer struct {
@@ -158,8 +157,7 @@ func (fs *FilerServer) maybeStartMetrics() {
 		}
 	}
-	go stats.StartMetricsServer(stats.FilerGather, fs.option.MetricsHttpPort)
-	go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), stats.FilerGather, fs.metricsAddress, fs.metricsIntervalSec)
+	go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), fs.metricsAddress, fs.metricsIntervalSec)
 }
 func readFilerConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (metricsAddress string, metricsIntervalSec int, err error) {
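
With the HTTP endpoint handled by the command layer, the server-side code above keeps only the Pushgateway loop, and the registry argument disappears because everything reports through the shared Gather registry (see weed/stats/metrics.go below). A hedged sketch of calling the new push API directly; the address, port, and interval here are illustrative:

package main

import (
	stats "github.com/chrislusf/seaweedfs/weed/stats"
)

func main() {
	// push whatever is registered on the shared stats.Gather registry to a
	// Pushgateway at localhost:9091 every 15 seconds, grouped by instance;
	// LoopPushingMetric returns immediately if addr is empty or the interval is 0
	go stats.LoopPushingMetric("filer", stats.SourceName(8888), "localhost:9091", 15)

	// ... run the filer here ...
	select {}
}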

weed/server/volume_server.go

@@ -46,7 +46,6 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	readRedirect bool,
 	compactionMBPerSecond int,
 	fileSizeLimitMB int,
-	metricsHttpPort int,
 ) *VolumeServer {
 	v := util.GetViper()
@@ -98,8 +97,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	}
 	go vs.heartbeat()
-	go stats.StartMetricsServer(stats.VolumeServerGather, metricsHttpPort)
-	go stats.LoopPushingMetric("volumeServer", fmt.Sprintf("%s:%d", ip, port), stats.VolumeServerGather, vs.metricsAddress, vs.metricsIntervalSec)
+	go stats.LoopPushingMetric("volumeServer", fmt.Sprintf("%s:%d", ip, port), vs.metricsAddress, vs.metricsIntervalSec)
 	return vs
 }

weed/stats/metrics.go

@@ -16,9 +16,7 @@ import (
 )
 var (
-	FilerGather = prometheus.NewRegistry()
-	VolumeServerGather = prometheus.NewRegistry()
-	S3Gather = prometheus.NewRegistry()
+	Gather = prometheus.NewRegistry()
 	FilerRequestCounter = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
@@ -114,23 +112,23 @@
 func init() {
-	FilerGather.MustRegister(FilerRequestCounter)
-	FilerGather.MustRegister(FilerRequestHistogram)
-	FilerGather.MustRegister(FilerStoreCounter)
-	FilerGather.MustRegister(FilerStoreHistogram)
-	FilerGather.MustRegister(prometheus.NewGoCollector())
+	Gather.MustRegister(FilerRequestCounter)
+	Gather.MustRegister(FilerRequestHistogram)
+	Gather.MustRegister(FilerStoreCounter)
+	Gather.MustRegister(FilerStoreHistogram)
+	Gather.MustRegister(prometheus.NewGoCollector())
-	VolumeServerGather.MustRegister(VolumeServerRequestCounter)
-	VolumeServerGather.MustRegister(VolumeServerRequestHistogram)
-	VolumeServerGather.MustRegister(VolumeServerVolumeCounter)
-	VolumeServerGather.MustRegister(VolumeServerMaxVolumeCounter)
-	VolumeServerGather.MustRegister(VolumeServerDiskSizeGauge)
+	Gather.MustRegister(VolumeServerRequestCounter)
+	Gather.MustRegister(VolumeServerRequestHistogram)
+	Gather.MustRegister(VolumeServerVolumeCounter)
+	Gather.MustRegister(VolumeServerMaxVolumeCounter)
+	Gather.MustRegister(VolumeServerDiskSizeGauge)
-	S3Gather.MustRegister(S3RequestCounter)
-	S3Gather.MustRegister(S3RequestHistogram)
+	Gather.MustRegister(S3RequestCounter)
+	Gather.MustRegister(S3RequestHistogram)
 }
-func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, addr string, intervalSeconds int) {
+func LoopPushingMetric(name, instance, addr string, intervalSeconds int) {
 	if addr == "" || intervalSeconds == 0 {
 		return
@@ -138,7 +136,7 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, add
 	glog.V(0).Infof("%s server sends metrics to %s every %d seconds", name, addr, intervalSeconds)
-	pusher := push.New(addr, name).Gatherer(gatherer).Grouping("instance", instance)
+	pusher := push.New(addr, name).Gatherer(Gather).Grouping("instance", instance)
 	for {
 		err := pusher.Push()
@@ -153,11 +151,11 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, add
 	}
 }
-func StartMetricsServer(gatherer *prometheus.Registry, port int) {
+func StartMetricsServer(port int) {
 	if port == 0 {
 		return
 	}
-	http.Handle("/metrics", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
+	http.Handle("/metrics", promhttp.HandlerFor(Gather, promhttp.HandlerOpts{}))
 	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
 }
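
Collapsing FilerGather, VolumeServerGather, and S3Gather into a single Gather registry means one /metrics endpoint can expose filer, volume-server, and S3 collectors at the same time, which is what lets the combined weed server command start just one metrics listener above. A minimal sketch of serving the shared registry, with an illustrative extra gauge to show that anything registered on Gather appears on the same endpoint (the gauge name and port are not part of this commit):

package main

import (
	"github.com/prometheus/client_golang/prometheus"

	stats "github.com/chrislusf/seaweedfs/weed/stats"
)

func main() {
	// illustrative custom gauge; the built-in filer/volume/S3 collectors are
	// already registered on stats.Gather by the package's init()
	exampleGauge := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "SeaweedFS",
		Name:      "example_gauge",
		Help:      "illustrative gauge registered on the shared registry",
	})
	stats.Gather.MustRegister(exampleGauge)
	exampleGauge.Set(1)

	// serve everything registered on Gather at http://localhost:9090/metrics;
	// StartMetricsServer blocks in ListenAndServe, so no goroutine is needed here
	stats.StartMetricsServer(9090)
}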