2021-11-06 00:39:41 +00:00
|
|
|
package shell
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"flag"
|
|
|
|
"fmt"
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/cluster"
|
2023-01-10 08:51:25 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/filer"
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
2023-01-10 08:51:25 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/util"
|
2021-11-06 00:39:41 +00:00
|
|
|
"io"
|
2023-01-10 08:51:25 +00:00
|
|
|
"time"
|
2021-11-06 00:39:41 +00:00
|
|
|
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
|
2021-11-06 00:39:41 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
Commands = append(Commands, &commandClusterPs{})
|
|
|
|
}
|
|
|
|
|
|
|
|
// commandClusterPs implements the "cluster.ps" shell command, which reports
// the status of the cluster's filers, message queue brokers, and volume
// servers. It is stateless; all inputs arrive via Do's arguments.
type commandClusterPs struct {
}
|
|
|
|
|
|
|
|
// Name returns the identifier used to invoke this command from the shell.
func (c *commandClusterPs) Name() string {
	return "cluster.ps"
}
|
|
|
|
|
|
|
|
// Help returns the usage text displayed by the shell's help facility.
func (c *commandClusterPs) Help() string {
	return `check current cluster process status

	cluster.ps

`
}
|
|
|
|
|
|
|
|
// Do runs the cluster.ps command. It asks the master for the registered
// filers and message queue brokers, probes each filer for its configuration
// (filer group, signature) and for how far it has synced metadata from each
// peer, then walks the volume topology and reports every volume server.
// The human-readable report is written to writer.
func (c *commandClusterPs) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	clusterPsCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	if err = clusterPsCommand.Parse(args); err != nil {
		// ContinueOnError has already reported the bad flag; returning nil
		// keeps a flag mistake from aborting the interactive shell session.
		return nil
	}

	var filerNodes []*master_pb.ListClusterNodesResponse_ClusterNode
	var mqBrokerNodes []*master_pb.ListClusterNodesResponse_ClusterNode

	// get the list of filers
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.FilerType,
			FilerGroup: *commandEnv.option.FilerGroup,
		})
		if err != nil {
			return err
		}

		filerNodes = resp.ClusterNodes
		// err is nil here; returned for symmetry with the early-return above
		return err
	})
	if err != nil {
		return
	}

	// get the list of message queue brokers
	err = commandEnv.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error {
		resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
			ClientType: cluster.BrokerType,
			FilerGroup: *commandEnv.option.FilerGroup,
		})
		if err != nil {
			return err
		}

		mqBrokerNodes = resp.ClusterNodes
		return err
	})
	if err != nil {
		return
	}

	// only print the broker section when at least one broker is registered
	if len(mqBrokerNodes) > 0 {
		fmt.Fprintf(writer, "* message queue brokers %d\n", len(mqBrokerNodes))
		for _, node := range mqBrokerNodes {
			fmt.Fprintf(writer, " * %s (%v)\n", node.Address, node.Version)
			if node.DataCenter != "" {
				fmt.Fprintf(writer, " DataCenter: %v\n", node.DataCenter)
			}
			if node.Rack != "" {
				fmt.Fprintf(writer, " Rack: %v\n", node.Rack)
			}
			if node.IsLeader {
				fmt.Fprintf(writer, " IsLeader: %v\n", true)
			}
		}
	}

	// record each reachable filer's signature so the metadata-sync loop
	// below can match peers to their stored sync offsets
	filerSignatures := make(map[*master_pb.ListClusterNodesResponse_ClusterNode]int32)
	fmt.Fprintf(writer, "* filers %d\n", len(filerNodes))
	for _, node := range filerNodes {
		fmt.Fprintf(writer, " * %s (%v)\n", node.Address, node.Version)
		if node.DataCenter != "" {
			fmt.Fprintf(writer, " DataCenter: %v\n", node.DataCenter)
		}
		if node.Rack != "" {
			fmt.Fprintf(writer, " Rack: %v\n", node.Rack)
		}
		// best effort: an unreachable filer is reported inline and the
		// returned error is deliberately ignored so the report continues
		pb.WithFilerClient(false, pb.ServerAddress(node.Address), commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
			if err == nil {
				if resp.FilerGroup != "" {
					fmt.Fprintf(writer, " filer group: %s\n", resp.FilerGroup)
				}
				fmt.Fprintf(writer, " signature: %d\n", resp.Signature)
				filerSignatures[node] = resp.Signature
			} else {
				fmt.Fprintf(writer, " failed to connect: %v\n", err)
			}
			return err
		})
	}
	// for each filer, report the last metadata timestamp it has replayed
	// from every peer (read from the filer's own KV store)
	for _, node := range filerNodes {
		pb.WithFilerClient(false, pb.ServerAddress(node.Address), commandEnv.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
			fmt.Fprintf(writer, "* filer %s metadata sync time\n", node.Address)
			selfSignature := filerSignatures[node]
			for peer, peerSignature := range filerSignatures {
				// skip self (and any peer whose signature happens to match)
				if selfSignature == peerSignature {
					continue
				}
				// the stored value is an 8-byte encoded nanosecond timestamp;
				// missing or malformed entries are silently skipped
				if resp, err := client.KvGet(context.Background(), &filer_pb.KvGetRequest{Key: filer.GetPeerMetaOffsetKey(peerSignature)}); err == nil && len(resp.Value) == 8 {
					lastTsNs := int64(util.BytesToUint64(resp.Value))
					fmt.Fprintf(writer, " %s: %v\n", peer.Address, time.Unix(0, lastTsNs).UTC())
				}
			}
			return nil
		})
	}

	// collect volume servers
	var volumeServers []pb.ServerAddress
	t, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}
	for _, dc := range t.DataCenterInfos {
		for _, r := range dc.RackInfos {
			for _, dn := range r.DataNodeInfos {
				volumeServers = append(volumeServers, pb.NewServerAddressFromDataNode(dn))
			}
		}
	}

	fmt.Fprintf(writer, "* volume servers %d\n", len(volumeServers))
	for _, dc := range t.DataCenterInfos {
		fmt.Fprintf(writer, " * data center: %s\n", dc.Id)
		for _, r := range dc.RackInfos {
			fmt.Fprintf(writer, " * rack: %s\n", r.Id)
			for _, dn := range r.DataNodeInfos {
				// best effort: a volume server that fails the status call is
				// simply omitted from the listing (error ignored)
				pb.WithVolumeServerClient(false, pb.NewServerAddressFromDataNode(dn), commandEnv.option.GrpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
					resp, err := client.VolumeServerStatus(context.Background(), &volume_server_pb.VolumeServerStatusRequest{})
					if err == nil {
						fmt.Fprintf(writer, " * %s (%v)\n", dn.Id, resp.Version)
					}
					return err
				})
			}
		}
	}

	return nil
}
|