add dc and rack

chrislu 2022-07-03 00:29:25 -07:00
parent 8d31e73ffd
commit 68065128b8
20 changed files with 953 additions and 887 deletions


@@ -24,6 +24,15 @@ type Leaders struct {
leaders [3]pb.ServerAddress
}
type DataCenter string
type Rack string
type DataCenterBrokers struct {
brokers map[Rack]*RackBrokers
}
type RackBrokers struct {
brokers map[pb.ServerAddress]*ClusterNode
}
type ClusterNode struct {
Address pb.ServerAddress
Version string
@@ -34,14 +43,14 @@ type ClusterNode struct {
type Cluster struct {
filerGroup2filers map[FilerGroup]*Filers
filersLock sync.RWMutex
brokers map[pb.ServerAddress]*ClusterNode
brokers map[DataCenter]*DataCenterBrokers
brokersLock sync.RWMutex
}
func NewCluster() *Cluster {
return &Cluster{
filerGroup2filers: make(map[FilerGroup]*Filers),
brokers: make(map[pb.ServerAddress]*ClusterNode),
brokers: make(map[DataCenter]*DataCenterBrokers),
}
}
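
The flat brokers map[pb.ServerAddress]*ClusterNode becomes a two-level index keyed by data center, then rack. A minimal standalone sketch of the new shape, with pb.ServerAddress reduced to a plain string type for illustration (addresses and values invented):

package main

import "fmt"

// Simplified stand-ins for the types introduced in the hunks above.
type ServerAddress string
type DataCenter string
type Rack string

type ClusterNode struct {
	Address ServerAddress
	Version string
	counter int
}

type RackBrokers struct{ brokers map[ServerAddress]*ClusterNode }
type DataCenterBrokers struct{ brokers map[Rack]*RackBrokers }

func main() {
	// The broker index is now keyed dc -> rack -> address, not by address alone.
	brokers := map[DataCenter]*DataCenterBrokers{
		"dc1": {brokers: map[Rack]*RackBrokers{
			"rack1": {brokers: map[ServerAddress]*ClusterNode{
				"10.0.0.1:17777": {Address: "10.0.0.1:17777", Version: "23.45", counter: 1},
			}},
		}},
	}
	// Every lookup now walks all three levels.
	if dc, ok := brokers["dc1"]; ok {
		if r, ok := dc.brokers["rack1"]; ok {
			if n, ok := r.brokers["10.0.0.1:17777"]; ok {
				fmt.Println("found broker", n.Address, "counter:", n.counter)
			}
		}
	}
}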
@@ -57,7 +66,7 @@ func (cluster *Cluster) getFilers(filerGroup FilerGroup, createIfNotFound bool)
return filers
}
func (cluster *Cluster) AddClusterNode(ns, nodeType string, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
func (cluster *Cluster) AddClusterNode(ns, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
filerGroup := FilerGroup(ns)
switch nodeType {
case FilerType:
@@ -78,11 +87,24 @@ func (cluster *Cluster) AddClusterNode(ns, nodeType string, address pb.ServerAdd
case BrokerType:
cluster.brokersLock.Lock()
defer cluster.brokersLock.Unlock()
if existingNode, found := cluster.brokers[address]; found {
existingNode.counter++
existingDataCenterBrokers, foundDataCenter := cluster.brokers[dataCenter]
if !foundDataCenter {
existingDataCenterBrokers = &DataCenterBrokers{
brokers: make(map[Rack]*RackBrokers),
}
}
existingRackBrokers, foundRack := existingDataCenterBrokers.brokers[rack]
if !foundRack {
existingRackBrokers = &RackBrokers{
brokers: make(map[pb.ServerAddress]*ClusterNode),
}
}
if existingBroker, found := existingRackBrokers.brokers[address]; found {
existingBroker.counter++
return nil
}
cluster.brokers[address] = &ClusterNode{
existingRackBrokers.brokers[address] = &ClusterNode{
Address: address,
Version: version,
counter: 1,
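
One thing to watch in the registration path above: when the data center or rack entry is missing, a fresh DataCenterBrokers or RackBrokers is created, but as the hunk reads here it is never stored back into its parent map, so the new ClusterNode lands in an orphaned container. A sketch of the get-or-create pattern with the store-back included, reusing the simplified types from the sketch above:

// addBroker registers a broker under dc/rack, creating intermediate maps as
// needed and, unlike the hunk above, storing new containers back into their
// parents.
func addBroker(brokers map[DataCenter]*DataCenterBrokers, dc DataCenter, rack Rack, address ServerAddress, version string) {
	dcBrokers, found := brokers[dc]
	if !found {
		dcBrokers = &DataCenterBrokers{brokers: make(map[Rack]*RackBrokers)}
		brokers[dc] = dcBrokers // store back, or the whole subtree is lost
	}
	rackBrokers, found := dcBrokers.brokers[rack]
	if !found {
		rackBrokers = &RackBrokers{brokers: make(map[ServerAddress]*ClusterNode)}
		dcBrokers.brokers[rack] = rackBrokers // store back
	}
	if node, ok := rackBrokers.brokers[address]; ok {
		node.counter++ // a reconnect only bumps the reference count
		return
	}
	rackBrokers.brokers[address] = &ClusterNode{Address: address, Version: version, counter: 1}
}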
@@ -111,7 +133,7 @@ func (cluster *Cluster) AddClusterNode(ns, nodeType string, address pb.ServerAdd
return nil
}
func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
filerGroup := FilerGroup(ns)
switch nodeType {
case FilerType:
@@ -133,23 +155,35 @@ func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, address pb
case BrokerType:
cluster.brokersLock.Lock()
defer cluster.brokersLock.Unlock()
if existingNode, found := cluster.brokers[address]; !found {
existingDataCenterBrokers, foundDataCenter := cluster.brokers[dataCenter]
if !foundDataCenter {
return nil
} else {
existingNode.counter--
if existingNode.counter <= 0 {
delete(cluster.brokers, address)
return []*master_pb.KeepConnectedResponse{
{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsAdd: false,
},
}
existingRackBrokers, foundRack := existingDataCenterBrokers.brokers[Rack(rack)]
if !foundRack {
return nil
}
existingBroker, found := existingRackBrokers.brokers[address]
if !found {
return nil
}
existingBroker.counter--
if existingBroker.counter <= 0 {
delete(existingRackBrokers.brokers, address)
return []*master_pb.KeepConnectedResponse{
{
ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
NodeType: nodeType,
Address: string(address),
IsAdd: false,
},
}
},
}
}
return nil
case MasterType:
return []*master_pb.KeepConnectedResponse{
{
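
The removal path deletes a broker from its rack map once the reference count drops to zero, but a RackBrokers or DataCenterBrokers container that becomes empty stays in the index. That is functionally harmless; if pruning were wanted, a sketch along these lines would do it (same simplified types as above):

// removeBroker decrements the broker's reference count, deletes it at zero,
// and prunes rack and data center containers that become empty. It reports
// whether the node was actually removed, so the caller knows to broadcast a
// ClusterNodeUpdate with IsAdd=false.
func removeBroker(brokers map[DataCenter]*DataCenterBrokers, dc DataCenter, rack Rack, address ServerAddress) bool {
	dcBrokers, found := brokers[dc]
	if !found {
		return false
	}
	rackBrokers, found := dcBrokers.brokers[rack]
	if !found {
		return false
	}
	node, found := rackBrokers.brokers[address]
	if !found {
		return false
	}
	node.counter--
	if node.counter > 0 {
		return false // still referenced by another connection
	}
	delete(rackBrokers.brokers, address)
	if len(rackBrokers.brokers) == 0 {
		delete(dcBrokers.brokers, rack) // prune the empty rack
	}
	if len(dcBrokers.brokers) == 0 {
		delete(brokers, dc) // prune the empty data center
	}
	return true
}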
@@ -179,8 +213,12 @@ func (cluster *Cluster) ListClusterNode(filerGroup FilerGroup, nodeType string)
case BrokerType:
cluster.brokersLock.RLock()
defer cluster.brokersLock.RUnlock()
for _, node := range cluster.brokers {
nodes = append(nodes, node)
for _, dcNodes := range cluster.brokers {
for _, rackNodes := range dcNodes.brokers {
for _, node := range rackNodes.brokers {
nodes = append(nodes, node)
}
}
}
case MasterType:
}
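
Listing brokers now flattens three map levels into one slice. Go randomizes map iteration order, so the flattened order differs call to call; where a stable order matters (tests, display), sorting after the flatten is the simple fix. A sketch using the simplified types above plus the standard sort package:

// listBrokers flattens dc -> rack -> address into a slice, then sorts by
// address so that repeated calls return the same order.
func listBrokers(brokers map[DataCenter]*DataCenterBrokers) []*ClusterNode {
	var nodes []*ClusterNode
	for _, dcBrokers := range brokers {
		for _, rackBrokers := range dcBrokers.brokers {
			for _, node := range rackBrokers.brokers {
				nodes = append(nodes, node)
			}
		}
	}
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].Address < nodes[j].Address })
	return nodes
}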


@@ -11,24 +11,24 @@ import (
func TestClusterAddRemoveNodes(t *testing.T) {
c := NewCluster()
c.AddClusterNode("", "filer", pb.ServerAddress("111:1"), "23.45")
c.AddClusterNode("", "filer", pb.ServerAddress("111:2"), "23.45")
c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:1"), "23.45")
c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:2"), "23.45")
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:1"),
pb.ServerAddress("111:2"),
}, c.getFilers("", false).leaders.GetLeaders())
c.AddClusterNode("", "filer", pb.ServerAddress("111:3"), "23.45")
c.AddClusterNode("", "filer", pb.ServerAddress("111:4"), "23.45")
c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:3"), "23.45")
c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:4"), "23.45")
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:1"),
pb.ServerAddress("111:2"),
pb.ServerAddress("111:3"),
}, c.getFilers("", false).leaders.GetLeaders())
c.AddClusterNode("", "filer", pb.ServerAddress("111:5"), "23.45")
c.AddClusterNode("", "filer", pb.ServerAddress("111:6"), "23.45")
c.RemoveClusterNode("", "filer", pb.ServerAddress("111:4"))
c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:5"), "23.45")
c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:6"), "23.45")
c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:4"))
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:1"),
pb.ServerAddress("111:2"),
@@ -36,7 +36,7 @@ func TestClusterAddRemoveNodes(t *testing.T) {
}, c.getFilers("", false).leaders.GetLeaders())
// remove oldest
c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1"))
c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:1"))
assert.Equal(t, []pb.ServerAddress{
pb.ServerAddress("111:6"),
pb.ServerAddress("111:2"),
@@ -44,7 +44,7 @@ func TestClusterAddRemoveNodes(t *testing.T) {
}, c.getFilers("", false).leaders.GetLeaders())
// remove oldest
c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1"))
c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:1"))
}
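
Both updated tests pass empty strings for the new dataCenter and rack parameters, so the broker hierarchy itself is not exercised here. A hypothetical companion test (names and placement values invented; assumes BrokerType is the string "broker") could cover it, though as the AddClusterNode hunk reads above it would fail until the newly created containers are stored back:

func TestClusterAddRemoveBrokerWithPlacement(t *testing.T) {
	c := NewCluster()
	// Registering the same broker twice should only bump its reference count.
	c.AddClusterNode("", "broker", "dc1", "rack1", pb.ServerAddress("111:1"), "23.45")
	c.AddClusterNode("", "broker", "dc1", "rack1", pb.ServerAddress("111:1"), "23.45")
	assert.Equal(t, 1, len(c.ListClusterNode("", "broker")))
	// The first remove only decrements; the node disappears at zero.
	c.RemoveClusterNode("", "broker", "dc1", "rack1", pb.ServerAddress("111:1"))
	assert.Equal(t, 1, len(c.ListClusterNode("", "broker")))
	c.RemoveClusterNode("", "broker", "dc1", "rack1", pb.ServerAddress("111:1"))
	assert.Equal(t, 0, len(c.ListClusterNode("", "broker")))
}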
@@ -56,7 +56,7 @@ func TestConcurrentAddRemoveNodes(t *testing.T) {
go func(i int) {
defer wg.Done()
address := strconv.Itoa(i)
c.AddClusterNode("", "filer", pb.ServerAddress(address), "23.45")
c.AddClusterNode("", "filer", "", "", pb.ServerAddress(address), "23.45")
}(i)
}
wg.Wait()
@@ -66,7 +66,7 @@ func TestConcurrentAddRemoveNodes(t *testing.T) {
go func(i int) {
defer wg.Done()
address := strconv.Itoa(i)
node := c.RemoveClusterNode("", "filer", pb.ServerAddress(address))
node := c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress(address))
if len(node) == 0 {
t.Errorf("TestConcurrentAddRemoveNodes: node[%s] not found", address)


@@ -129,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
defer pprof.StopCPUProfile()
}
b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "", "client", "", "", pb.ServerAddresses(*b.masters).ToAddressMap())
b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "", "client", "", "", "", pb.ServerAddresses(*b.masters).ToAddressMap())
go b.masterClient.KeepConnectedToMaster()
b.masterClient.WaitUntilConnected()


@@ -96,6 +96,8 @@ func (mqBrokerOpt *MessageQueueBrokerOptions) startQueueServer() bool {
qs, err := broker.NewMessageBroker(&broker.MessageQueueBrokerOption{
Masters: pb.ServerAddresses(*mqBrokerOpt.masters).ToAddressMap(),
FilerGroup: *mqBrokerOpt.filerGroup,
DataCenter: *mqBrokerOpt.dataCenter,
Rack: *mqBrokerOpt.rack,
Filers: []pb.ServerAddress{filerAddress},
DefaultReplication: "",
MaxMB: 0,


@@ -52,7 +52,7 @@ type Filer struct {
func NewFiler(masters map[string]pb.ServerAddress, grpcDialOption grpc.DialOption, filerHost pb.ServerAddress,
filerGroup string, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
f := &Filer{
MasterClient: wdclient.NewMasterClient(grpcDialOption, filerGroup, cluster.FilerType, filerHost, dataCenter, masters),
MasterClient: wdclient.NewMasterClient(grpcDialOption, filerGroup, cluster.FilerType, filerHost, dataCenter, "", masters),
fileIdDeletionQueue: util.NewUnboundedQueue(),
GrpcDialOption: grpcDialOption,
FilerConf: NewFilerConf(),


@@ -50,7 +50,7 @@ var s3ApiConfigure IamS3ApiConfig
func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
s3ApiConfigure = IamS3ApiConfigure{
option: option,
masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "", "iam", "", "", option.Masters),
masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "", "iam", "", "", "", option.Masters),
}
s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
iamApiServer = &IamApiServer{


@@ -18,6 +18,8 @@ import (
type MessageQueueBrokerOption struct {
Masters map[string]pb.ServerAddress
FilerGroup string
DataCenter string
Rack string
Filers []pb.ServerAddress
DefaultReplication string
MaxMB int
@@ -39,7 +41,7 @@ func NewMessageBroker(option *MessageQueueBrokerOption, grpcDialOption grpc.Dial
mqBroker = &MessageQueueBroker{
option: option,
grpcDialOption: grpcDialOption,
MasterClient: wdclient.NewMasterClient(grpcDialOption, option.FilerGroup, cluster.BrokerType, pb.NewServerAddress(option.Ip, option.Port, 0), "", option.Masters),
MasterClient: wdclient.NewMasterClient(grpcDialOption, option.FilerGroup, cluster.BrokerType, pb.NewServerAddress(option.Ip, option.Port, 0), option.DataCenter, "", option.Masters),
}
mqBroker.topicManager = NewTopicManager(mqBroker)


@@ -139,6 +139,8 @@ message KeepConnectedRequest {
string client_address = 3;
string version = 4;
string filer_group = 5;
string data_center = 6;
string rack = 7;
}
message VolumeLocation {
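
Because proto3 scalar fields default to the empty string, clients built before this commit simply omit data_center and rack and stay wire-compatible; on the master they all group under the empty data center and rack. A sketch of the server-side extraction (helper name hypothetical; the getters follow standard protoc-gen-go output):

// placementOf pulls the new placement fields out of the heartbeat request;
// both come back as "" for pre-upgrade clients.
func placementOf(req *master_pb.KeepConnectedRequest) (cluster.DataCenter, cluster.Rack) {
	return cluster.DataCenter(req.GetDataCenter()), cluster.Rack(req.GetRack())
}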

File diff suppressed because it is too large


@@ -143,12 +143,12 @@ func file_mount_proto_rawDescGZIP() []byte {
var file_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_mount_proto_goTypes = []interface{}{
(*ConfigureRequest)(nil), // 0: mq_pb.ConfigureRequest
(*ConfigureResponse)(nil), // 1: mq_pb.ConfigureResponse
(*ConfigureRequest)(nil), // 0: messaging_pb.ConfigureRequest
(*ConfigureResponse)(nil), // 1: messaging_pb.ConfigureResponse
}
var file_mount_proto_depIdxs = []int32{
0, // 0: mq_pb.SeaweedMount.Configure:input_type -> mq_pb.ConfigureRequest
1, // 1: mq_pb.SeaweedMount.Configure:output_type -> mq_pb.ConfigureResponse
0, // 0: messaging_pb.SeaweedMount.Configure:input_type -> messaging_pb.ConfigureRequest
1, // 1: messaging_pb.SeaweedMount.Configure:output_type -> messaging_pb.ConfigureResponse
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name


@@ -31,7 +31,7 @@ func NewSeaweedMountClient(cc grpc.ClientConnInterface) SeaweedMountClient {
func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error) {
out := new(ConfigureResponse)
err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMount/Configure", in, out, opts...)
err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMount/Configure", in, out, opts...)
if err != nil {
return nil, err
}
@@ -76,7 +76,7 @@ func _SeaweedMount_Configure_Handler(srv interface{}, ctx context.Context, dec f
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/mq_pb.SeaweedMount/Configure",
FullMethod: "/messaging_pb.SeaweedMount/Configure",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedMountServer).Configure(ctx, req.(*ConfigureRequest))
@@ -88,7 +88,7 @@ func _SeaweedMount_Configure_Handler(srv interface{}, ctx context.Context, dec f
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SeaweedMount_ServiceDesc = grpc.ServiceDesc{
ServiceName: "mq_pb.SeaweedMount",
ServiceName: "messaging_pb.SeaweedMount",
HandlerType: (*SeaweedMountServer)(nil),
Methods: []grpc.MethodDesc{
{


@@ -3,8 +3,8 @@ syntax = "proto3";
package messaging_pb;
option go_package = "github.com/chrislusf/seaweedfs/weed/pb/mq_pb";
option java_package = "seaweedfs.client";
option java_outer_classname = "MessagingProto";
option java_package = "seaweedfs.mq";
option java_outer_classname = "MessagQueueProto";
//////////////////////////////////////////////////

File diff suppressed because it is too large


@@ -35,7 +35,7 @@ func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClie
}
func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], "/mq_pb.SeaweedMessaging/Subscribe", opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...)
if err != nil {
return nil, err
}
@@ -66,7 +66,7 @@ func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) {
}
func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) {
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], "/mq_pb.SeaweedMessaging/Publish", opts...)
stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...)
if err != nil {
return nil, err
}
@@ -98,7 +98,7 @@ func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) {
func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) {
out := new(DeleteTopicResponse)
err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/DeleteTopic", in, out, opts...)
err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...)
if err != nil {
return nil, err
}
@@ -107,7 +107,7 @@ func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopi
func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
out := new(ConfigureTopicResponse)
err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...)
err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...)
if err != nil {
return nil, err
}
@@ -116,7 +116,7 @@ func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *Configu
func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) {
out := new(GetTopicConfigurationResponse)
err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...)
err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...)
if err != nil {
return nil, err
}
@@ -125,7 +125,7 @@ func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *
func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) {
out := new(FindBrokerResponse)
err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/FindBroker", in, out, opts...)
err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...)
if err != nil {
return nil, err
}
@@ -242,7 +242,7 @@ func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/mq_pb.SeaweedMessaging/DeleteTopic",
FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest))
@@ -260,7 +260,7 @@ func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Conte
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/mq_pb.SeaweedMessaging/ConfigureTopic",
FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
@@ -278,7 +278,7 @@ func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/mq_pb.SeaweedMessaging/GetTopicConfiguration",
FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest))
@@ -296,7 +296,7 @@ func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/mq_pb.SeaweedMessaging/FindBroker",
FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest))
@@ -308,7 +308,7 @@ func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context,
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SeaweedMessaging_ServiceDesc = grpc.ServiceDesc{
ServiceName: "mq_pb.SeaweedMessaging",
ServiceName: "messaging_pb.SeaweedMessaging",
HandlerType: (*SeaweedMessagingServer)(nil),
Methods: []grpc.MethodDesc{
{


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.21.1
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: s3.proto
package s3_pb
@@ -283,20 +283,20 @@ func file_s3_proto_rawDescGZIP() []byte {
var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_s3_proto_goTypes = []interface{}{
(*S3ConfigureRequest)(nil), // 0: mq_pb.S3ConfigureRequest
(*S3ConfigureResponse)(nil), // 1: mq_pb.S3ConfigureResponse
(*S3CircuitBreakerConfig)(nil), // 2: mq_pb.S3CircuitBreakerConfig
(*S3CircuitBreakerOptions)(nil), // 3: mq_pb.S3CircuitBreakerOptions
nil, // 4: mq_pb.S3CircuitBreakerConfig.BucketsEntry
nil, // 5: mq_pb.S3CircuitBreakerOptions.ActionsEntry
(*S3ConfigureRequest)(nil), // 0: messaging_pb.S3ConfigureRequest
(*S3ConfigureResponse)(nil), // 1: messaging_pb.S3ConfigureResponse
(*S3CircuitBreakerConfig)(nil), // 2: messaging_pb.S3CircuitBreakerConfig
(*S3CircuitBreakerOptions)(nil), // 3: messaging_pb.S3CircuitBreakerOptions
nil, // 4: messaging_pb.S3CircuitBreakerConfig.BucketsEntry
nil, // 5: messaging_pb.S3CircuitBreakerOptions.ActionsEntry
}
var file_s3_proto_depIdxs = []int32{
3, // 0: mq_pb.S3CircuitBreakerConfig.global:type_name -> mq_pb.S3CircuitBreakerOptions
4, // 1: mq_pb.S3CircuitBreakerConfig.buckets:type_name -> mq_pb.S3CircuitBreakerConfig.BucketsEntry
5, // 2: mq_pb.S3CircuitBreakerOptions.actions:type_name -> mq_pb.S3CircuitBreakerOptions.ActionsEntry
3, // 3: mq_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> mq_pb.S3CircuitBreakerOptions
0, // 4: mq_pb.SeaweedS3.Configure:input_type -> mq_pb.S3ConfigureRequest
1, // 5: mq_pb.SeaweedS3.Configure:output_type -> mq_pb.S3ConfigureResponse
3, // 0: messaging_pb.S3CircuitBreakerConfig.global:type_name -> messaging_pb.S3CircuitBreakerOptions
4, // 1: messaging_pb.S3CircuitBreakerConfig.buckets:type_name -> messaging_pb.S3CircuitBreakerConfig.BucketsEntry
5, // 2: messaging_pb.S3CircuitBreakerOptions.actions:type_name -> messaging_pb.S3CircuitBreakerOptions.ActionsEntry
3, // 3: messaging_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> messaging_pb.S3CircuitBreakerOptions
0, // 4: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest
1, // 5: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse
5, // [5:6] is the sub-list for method output_type
4, // [4:5] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name


@@ -31,7 +31,7 @@ func NewSeaweedS3Client(cc grpc.ClientConnInterface) SeaweedS3Client {
func (c *seaweedS3Client) Configure(ctx context.Context, in *S3ConfigureRequest, opts ...grpc.CallOption) (*S3ConfigureResponse, error) {
out := new(S3ConfigureResponse)
err := c.cc.Invoke(ctx, "/mq_pb.SeaweedS3/Configure", in, out, opts...)
err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedS3/Configure", in, out, opts...)
if err != nil {
return nil, err
}
@@ -76,7 +76,7 @@ func _SeaweedS3_Configure_Handler(srv interface{}, ctx context.Context, dec func
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/mq_pb.SeaweedS3/Configure",
FullMethod: "/messaging_pb.SeaweedS3/Configure",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeaweedS3Server).Configure(ctx, req.(*S3ConfigureRequest))
@@ -88,7 +88,7 @@ func _SeaweedS3_Configure_Handler(srv interface{}, ctx context.Context, dec func
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SeaweedS3_ServiceDesc = grpc.ServiceDesc{
ServiceName: "mq_pb.SeaweedS3",
ServiceName: "messaging_pb.SeaweedS3",
HandlerType: (*SeaweedS3Server)(nil),
Methods: []grpc.MethodDesc{
{


@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"github.com/chrislusf/seaweedfs/weed/cluster"
"net"
"sort"
"time"
@@ -253,12 +254,12 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
stopChan := make(chan bool, 1)
clientName, messageChan := ms.addClient(req.FilerGroup, req.ClientType, peerAddress)
for _, update := range ms.Cluster.AddClusterNode(req.FilerGroup, req.ClientType, peerAddress, req.Version) {
for _, update := range ms.Cluster.AddClusterNode(req.FilerGroup, req.ClientType, cluster.DataCenter(req.DataCenter), cluster.Rack(req.Rack), peerAddress, req.Version) {
ms.broadcastToClients(update)
}
defer func() {
for _, update := range ms.Cluster.RemoveClusterNode(req.FilerGroup, req.ClientType, peerAddress) {
for _, update := range ms.Cluster.RemoveClusterNode(req.FilerGroup, req.ClientType, cluster.DataCenter(req.DataCenter), cluster.Rack(req.Rack), peerAddress) {
ms.broadcastToClients(update)
}
ms.deleteClient(clientName)


@@ -113,7 +113,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers map[string]pb.Se
vgCh: make(chan *topology.VolumeGrowRequest, 1<<6),
clientChans: make(map[string]chan *master_pb.KeepConnectedResponse),
grpcDialOption: grpcDialOption,
MasterClient: wdclient.NewMasterClient(grpcDialOption, "", cluster.MasterType, option.Master, "", peers),
MasterClient: wdclient.NewMasterClient(grpcDialOption, "", cluster.MasterType, option.Master, "", "", peers),
adminLocks: NewAdminLocks(),
Cluster: cluster.NewCluster(),
}


@@ -47,7 +47,7 @@ var (
func NewCommandEnv(options *ShellOptions) *CommandEnv {
ce := &CommandEnv{
env: make(map[string]string),
MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, *options.FilerGroup, pb.AdminShellClient, "", "", pb.ServerAddresses(*options.Masters).ToAddressMap()),
MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, *options.FilerGroup, pb.AdminShellClient, "", "", "", pb.ServerAddresses(*options.Masters).ToAddressMap()),
option: options,
}
ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient, "admin")


@@ -19,6 +19,7 @@ type MasterClient struct {
FilerGroup string
clientType string
clientHost pb.ServerAddress
rack string
currentMaster pb.ServerAddress
masters map[string]pb.ServerAddress
grpcDialOption grpc.DialOption
@@ -29,7 +30,7 @@ type MasterClient struct {
OnPeerUpdate func(update *master_pb.ClusterNodeUpdate, startFrom time.Time)
}
func NewMasterClient(grpcDialOption grpc.DialOption, filerGroup string, clientType string, clientHost pb.ServerAddress, clientDataCenter string, masters map[string]pb.ServerAddress) *MasterClient {
func NewMasterClient(grpcDialOption grpc.DialOption, filerGroup string, clientType string, clientHost pb.ServerAddress, clientDataCenter string, rack string, masters map[string]pb.ServerAddress) *MasterClient {
return &MasterClient{
FilerGroup: filerGroup,
clientType: clientType,
@@ -152,6 +153,8 @@ func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedL
if err = stream.Send(&master_pb.KeepConnectedRequest{
FilerGroup: mc.FilerGroup,
DataCenter: mc.DataCenter,
Rack: mc.rack,
ClientType: mc.clientType,
ClientAddress: string(mc.clientHost),
Version: util.Version(),
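
NewMasterClient now takes the rack between clientDataCenter and the master map. Every call site updated in this commit passes "" for it, and the struct-literal hunk above is truncated, so whether the constructor actually assigns the new rack field is not visible here (note also the mixed naming: exported DataCenter beside unexported rack). A hypothetical fully specified call, with invented placement values:

// Hypothetical: a broker reporting both placement fields to the masters.
mc := wdclient.NewMasterClient(
	grpcDialOption,
	"",                 // filerGroup
	cluster.BrokerType, // clientType
	pb.NewServerAddress("10.0.0.1", 17777, 0), // clientHost
	"dc1",   // clientDataCenter
	"rack1", // rack, the new parameter
	pb.ServerAddresses("localhost:9333").ToAddressMap(), // masters
)
go mc.KeepConnectedToMaster()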