Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

commit 68065128b8 (parent 8d31e73ffd)

    add dc and rack
@@ -24,6 +24,15 @@ type Leaders struct {
 	leaders [3]pb.ServerAddress
 }
 
+type DataCenter string
+type Rack string
+type DataCenterBrokers struct {
+	brokers map[Rack]*RackBrokers
+}
+type RackBrokers struct {
+	brokers map[pb.ServerAddress]*ClusterNode
+}
+
 type ClusterNode struct {
 	Address pb.ServerAddress
 	Version string
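
Note: the hunk above introduces a two-level broker index, DataCenter -> Rack -> ServerAddress. A minimal sketch of how a lookup walks that hierarchy, assuming the package context shown above; the findBroker helper is illustrative only, not part of this commit, and real callers would hold cluster.brokersLock:

	// findBroker resolves a broker through the nested maps added above.
	func (cluster *Cluster) findBroker(dc DataCenter, rack Rack, address pb.ServerAddress) (*ClusterNode, bool) {
		dcBrokers, found := cluster.brokers[dc]
		if !found {
			return nil, false
		}
		rackBrokers, found := dcBrokers.brokers[rack]
		if !found {
			return nil, false
		}
		node, found := rackBrokers.brokers[address]
		return node, found
	}
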
@@ -34,14 +43,14 @@ type ClusterNode struct {
 type Cluster struct {
 	filerGroup2filers map[FilerGroup]*Filers
 	filersLock        sync.RWMutex
-	brokers           map[pb.ServerAddress]*ClusterNode
+	brokers           map[DataCenter]*DataCenterBrokers
 	brokersLock       sync.RWMutex
 }
 
 func NewCluster() *Cluster {
 	return &Cluster{
 		filerGroup2filers: make(map[FilerGroup]*Filers),
-		brokers:           make(map[pb.ServerAddress]*ClusterNode),
+		brokers:           make(map[DataCenter]*DataCenterBrokers),
 	}
 }
 
@@ -57,7 +66,7 @@ func (cluster *Cluster) getFilers(filerGroup FilerGroup, createIfNotFound bool)
 	return filers
 }
 
-func (cluster *Cluster) AddClusterNode(ns, nodeType string, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
+func (cluster *Cluster) AddClusterNode(ns, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
 	filerGroup := FilerGroup(ns)
 	switch nodeType {
 	case FilerType:
@@ -78,11 +87,24 @@ func (cluster *Cluster) AddClusterNode(ns, nodeType string, address pb.ServerAdd
 	case BrokerType:
 		cluster.brokersLock.Lock()
 		defer cluster.brokersLock.Unlock()
-		if existingNode, found := cluster.brokers[address]; found {
-			existingNode.counter++
+		existingDataCenterBrokers, foundDataCenter := cluster.brokers[dataCenter]
+		if !foundDataCenter {
+			existingDataCenterBrokers = &DataCenterBrokers{
+				brokers: make(map[Rack]*RackBrokers),
+			}
+		}
+		existingRackBrokers, foundRack := existingDataCenterBrokers.brokers[rack]
+		if !foundRack {
+			existingRackBrokers = &RackBrokers{
+				brokers: make(map[pb.ServerAddress]*ClusterNode),
+			}
+		}
+
+		if existingBroker, found := existingRackBrokers.brokers[address]; found {
+			existingBroker.counter++
 			return nil
 		}
-		cluster.brokers[address] = &ClusterNode{
+		existingRackBrokers.brokers[address] = &ClusterNode{
 			Address: address,
 			Version: version,
 			counter: 1,
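
Note on the hunk above: when foundDataCenter or foundRack is false, the freshly allocated DataCenterBrokers / RackBrokers do not appear to be stored back into their parent maps, so a broker registering under a new data center or rack lands in a map that nothing references. A hedged sketch of the write-back this likely needs, assuming the surrounding code exactly as shown:

	existingDataCenterBrokers, foundDataCenter := cluster.brokers[dataCenter]
	if !foundDataCenter {
		existingDataCenterBrokers = &DataCenterBrokers{
			brokers: make(map[Rack]*RackBrokers),
		}
		cluster.brokers[dataCenter] = existingDataCenterBrokers // store the new data center entry
	}
	existingRackBrokers, foundRack := existingDataCenterBrokers.brokers[rack]
	if !foundRack {
		existingRackBrokers = &RackBrokers{
			brokers: make(map[pb.ServerAddress]*ClusterNode),
		}
		existingDataCenterBrokers.brokers[rack] = existingRackBrokers // store the new rack entry
	}
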
@@ -111,7 +133,7 @@ func (cluster *Cluster) AddClusterNode(ns, nodeType string, address pb.ServerAdd
 	return nil
 }
 
-func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
+func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
 	filerGroup := FilerGroup(ns)
 	switch nodeType {
 	case FilerType:
@@ -133,23 +155,35 @@ func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, address pb
 	case BrokerType:
 		cluster.brokersLock.Lock()
 		defer cluster.brokersLock.Unlock()
-		if existingNode, found := cluster.brokers[address]; !found {
+		existingDataCenterBrokers, foundDataCenter := cluster.brokers[dataCenter]
+		if !foundDataCenter {
 			return nil
-		} else {
-			existingNode.counter--
-			if existingNode.counter <= 0 {
-				delete(cluster.brokers, address)
-				return []*master_pb.KeepConnectedResponse{
-					{
-						ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
-							NodeType: nodeType,
-							Address:  string(address),
-							IsAdd:    false,
-						},
-					},
-				}
-			}
 		}
+		existingRackBrokers, foundRack := existingDataCenterBrokers.brokers[Rack(rack)]
+		if !foundRack {
+			return nil
+		}
+
+		existingBroker, found := existingRackBrokers.brokers[address]
+		if !found {
+			return nil
+		}
+		existingBroker.counter--
+		if existingBroker.counter <= 0 {
+			delete(existingRackBrokers.brokers, address)
+			return []*master_pb.KeepConnectedResponse{
+				{
+					ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
+						NodeType: nodeType,
+						Address:  string(address),
+						IsAdd:    false,
+					},
+				},
+			}
+		}
+		return nil
 
 	case MasterType:
 		return []*master_pb.KeepConnectedResponse{
 			{
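
A related observation on the removal path above: delete only removes the address from its RackBrokers map, so emptied RackBrokers and DataCenterBrokers entries linger in cluster.brokers. If that leakage matters, a cleanup along these lines could follow the delete (a sketch only, assuming the code as shown; the commit itself does not do this):

	delete(existingRackBrokers.brokers, address)
	if len(existingRackBrokers.brokers) == 0 {
		delete(existingDataCenterBrokers.brokers, Rack(rack))
	}
	if len(existingDataCenterBrokers.brokers) == 0 {
		delete(cluster.brokers, dataCenter)
	}
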
@@ -179,8 +213,12 @@ func (cluster *Cluster) ListClusterNode(filerGroup FilerGroup, nodeType string)
 	case BrokerType:
 		cluster.brokersLock.RLock()
 		defer cluster.brokersLock.RUnlock()
-		for _, node := range cluster.brokers {
-			nodes = append(nodes, node)
+		for _, dcNodes := range cluster.brokers {
+			for _, rackNodes := range dcNodes.brokers {
+				for _, node := range rackNodes.brokers {
+					nodes = append(nodes, node)
+				}
+			}
 		}
 	case MasterType:
 	}
@@ -11,24 +11,24 @@ import (
 func TestClusterAddRemoveNodes(t *testing.T) {
 	c := NewCluster()
 
-	c.AddClusterNode("", "filer", pb.ServerAddress("111:1"), "23.45")
-	c.AddClusterNode("", "filer", pb.ServerAddress("111:2"), "23.45")
+	c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:1"), "23.45")
+	c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:2"), "23.45")
 	assert.Equal(t, []pb.ServerAddress{
 		pb.ServerAddress("111:1"),
 		pb.ServerAddress("111:2"),
 	}, c.getFilers("", false).leaders.GetLeaders())
 
-	c.AddClusterNode("", "filer", pb.ServerAddress("111:3"), "23.45")
-	c.AddClusterNode("", "filer", pb.ServerAddress("111:4"), "23.45")
+	c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:3"), "23.45")
+	c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:4"), "23.45")
 	assert.Equal(t, []pb.ServerAddress{
 		pb.ServerAddress("111:1"),
 		pb.ServerAddress("111:2"),
 		pb.ServerAddress("111:3"),
 	}, c.getFilers("", false).leaders.GetLeaders())
 
-	c.AddClusterNode("", "filer", pb.ServerAddress("111:5"), "23.45")
-	c.AddClusterNode("", "filer", pb.ServerAddress("111:6"), "23.45")
-	c.RemoveClusterNode("", "filer", pb.ServerAddress("111:4"))
+	c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:5"), "23.45")
+	c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:6"), "23.45")
+	c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:4"))
 	assert.Equal(t, []pb.ServerAddress{
 		pb.ServerAddress("111:1"),
 		pb.ServerAddress("111:2"),
@@ -36,7 +36,7 @@ func TestClusterAddRemoveNodes(t *testing.T) {
 	}, c.getFilers("", false).leaders.GetLeaders())
 
 	// remove oldest
-	c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1"))
+	c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:1"))
 	assert.Equal(t, []pb.ServerAddress{
 		pb.ServerAddress("111:6"),
 		pb.ServerAddress("111:2"),
@@ -44,7 +44,7 @@ func TestClusterAddRemoveNodes(t *testing.T) {
 	}, c.getFilers("", false).leaders.GetLeaders())
 
 	// remove oldest
-	c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1"))
+	c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:1"))
 
 }
 
@@ -56,7 +56,7 @@ func TestConcurrentAddRemoveNodes(t *testing.T) {
 		go func(i int) {
 			defer wg.Done()
 			address := strconv.Itoa(i)
-			c.AddClusterNode("", "filer", pb.ServerAddress(address), "23.45")
+			c.AddClusterNode("", "filer", "", "", pb.ServerAddress(address), "23.45")
 		}(i)
 	}
 	wg.Wait()
@@ -66,7 +66,7 @@ func TestConcurrentAddRemoveNodes(t *testing.T) {
 		go func(i int) {
 			defer wg.Done()
 			address := strconv.Itoa(i)
-			node := c.RemoveClusterNode("", "filer", pb.ServerAddress(address))
+			node := c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress(address))
 
 			if len(node) == 0 {
 				t.Errorf("TestConcurrentAddRemoveNodes: node[%s] not found", address)
@@ -129,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 		defer pprof.StopCPUProfile()
 	}
 
-	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "", "client", "", "", pb.ServerAddresses(*b.masters).ToAddressMap())
+	b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "", "client", "", "", "", pb.ServerAddresses(*b.masters).ToAddressMap())
 	go b.masterClient.KeepConnectedToMaster()
 	b.masterClient.WaitUntilConnected()
 
@@ -96,6 +96,8 @@ func (mqBrokerOpt *MessageQueueBrokerOptions) startQueueServer() bool {
 	qs, err := broker.NewMessageBroker(&broker.MessageQueueBrokerOption{
 		Masters:            pb.ServerAddresses(*mqBrokerOpt.masters).ToAddressMap(),
 		FilerGroup:         *mqBrokerOpt.filerGroup,
+		DataCenter:         *mqBrokerOpt.dataCenter,
+		Rack:               *mqBrokerOpt.rack,
 		Filers:             []pb.ServerAddress{filerAddress},
 		DefaultReplication: "",
 		MaxMB:              0,
@@ -52,7 +52,7 @@ type Filer struct {
 func NewFiler(masters map[string]pb.ServerAddress, grpcDialOption grpc.DialOption, filerHost pb.ServerAddress,
 	filerGroup string, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
 	f := &Filer{
-		MasterClient:        wdclient.NewMasterClient(grpcDialOption, filerGroup, cluster.FilerType, filerHost, dataCenter, masters),
+		MasterClient:        wdclient.NewMasterClient(grpcDialOption, filerGroup, cluster.FilerType, filerHost, dataCenter, "", masters),
 		fileIdDeletionQueue: util.NewUnboundedQueue(),
 		GrpcDialOption:      grpcDialOption,
 		FilerConf:           NewFilerConf(),
@@ -50,7 +50,7 @@ var s3ApiConfigure IamS3ApiConfig
 func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
 	s3ApiConfigure = IamS3ApiConfigure{
 		option:       option,
-		masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "", "iam", "", "", option.Masters),
+		masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "", "iam", "", "", "", option.Masters),
 	}
 	s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
 	iamApiServer = &IamApiServer{
@@ -18,6 +18,8 @@ import (
 type MessageQueueBrokerOption struct {
 	Masters            map[string]pb.ServerAddress
 	FilerGroup         string
+	DataCenter         string
+	Rack               string
 	Filers             []pb.ServerAddress
 	DefaultReplication string
 	MaxMB              int
@@ -39,7 +41,7 @@ func NewMessageBroker(option *MessageQueueBrokerOption, grpcDialOption grpc.Dial
 	mqBroker = &MessageQueueBroker{
 		option:         option,
 		grpcDialOption: grpcDialOption,
-		MasterClient:   wdclient.NewMasterClient(grpcDialOption, option.FilerGroup, cluster.BrokerType, pb.NewServerAddress(option.Ip, option.Port, 0), "", option.Masters),
+		MasterClient:   wdclient.NewMasterClient(grpcDialOption, option.FilerGroup, cluster.BrokerType, pb.NewServerAddress(option.Ip, option.Port, 0), option.DataCenter, "", option.Masters),
 	}
 
 	mqBroker.topicManager = NewTopicManager(mqBroker)
@@ -139,6 +139,8 @@ message KeepConnectedRequest {
   string client_address = 3;
   string version = 4;
   string filer_group = 5;
+  string data_center = 6;
+  string rack = 7;
 }
 
 message VolumeLocation {

(File diff suppressed because it is too large)
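
The two new proto3 fields use fresh tag numbers (6 and 7), so the change is wire-compatible: an older master simply ignores them. A minimal sketch of a client populating the regenerated message; the Go field names follow protoc-gen-go's mapping of the proto names above, and the values are placeholders:

	req := &master_pb.KeepConnectedRequest{
		ClientType:    cluster.BrokerType,
		ClientAddress: "127.0.0.1:17777", // placeholder address
		Version:       "placeholder-version",
		FilerGroup:    "",
		DataCenter:    "dc1",   // new in this commit
		Rack:          "rack1", // new in this commit
	}
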
@@ -143,12 +143,12 @@ func file_mount_proto_rawDescGZIP() []byte {
 
 var file_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
 var file_mount_proto_goTypes = []interface{}{
-	(*ConfigureRequest)(nil),  // 0: mq_pb.ConfigureRequest
-	(*ConfigureResponse)(nil), // 1: mq_pb.ConfigureResponse
+	(*ConfigureRequest)(nil),  // 0: messaging_pb.ConfigureRequest
+	(*ConfigureResponse)(nil), // 1: messaging_pb.ConfigureResponse
 }
 var file_mount_proto_depIdxs = []int32{
-	0, // 0: mq_pb.SeaweedMount.Configure:input_type -> mq_pb.ConfigureRequest
-	1, // 1: mq_pb.SeaweedMount.Configure:output_type -> mq_pb.ConfigureResponse
+	0, // 0: messaging_pb.SeaweedMount.Configure:input_type -> messaging_pb.ConfigureRequest
+	1, // 1: messaging_pb.SeaweedMount.Configure:output_type -> messaging_pb.ConfigureResponse
 	1, // [1:2] is the sub-list for method output_type
 	0, // [0:1] is the sub-list for method input_type
 	0, // [0:0] is the sub-list for extension type_name
@@ -31,7 +31,7 @@ func NewSeaweedMountClient(cc grpc.ClientConnInterface) SeaweedMountClient {
 
 func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error) {
 	out := new(ConfigureResponse)
-	err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMount/Configure", in, out, opts...)
+	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMount/Configure", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -76,7 +76,7 @@ func _SeaweedMount_Configure_Handler(srv interface{}, ctx context.Context, dec f
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/mq_pb.SeaweedMount/Configure",
+		FullMethod: "/messaging_pb.SeaweedMount/Configure",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(SeaweedMountServer).Configure(ctx, req.(*ConfigureRequest))
@@ -88,7 +88,7 @@ func _SeaweedMount_Configure_Handler(srv interface{}, ctx context.Context, dec f
 // It's only intended for direct use with grpc.RegisterService,
 // and not to be introspected or modified (even as a copy)
 var SeaweedMount_ServiceDesc = grpc.ServiceDesc{
-	ServiceName: "mq_pb.SeaweedMount",
+	ServiceName: "messaging_pb.SeaweedMount",
 	HandlerType: (*SeaweedMountServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
@@ -3,8 +3,8 @@ syntax = "proto3";
 package messaging_pb;
 
 option go_package = "github.com/chrislusf/seaweedfs/weed/pb/mq_pb";
-option java_package = "seaweedfs.client";
-option java_outer_classname = "MessagingProto";
+option java_package = "seaweedfs.mq";
+option java_outer_classname = "MessagQueueProto";
 
 //////////////////////////////////////////////////

(File diff suppressed because it is too large)
@@ -35,7 +35,7 @@ func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClie
 }
 
 func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) {
-	stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], "/mq_pb.SeaweedMessaging/Subscribe", opts...)
+	stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -66,7 +66,7 @@ func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) {
 }
 
 func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) {
-	stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], "/mq_pb.SeaweedMessaging/Publish", opts...)
+	stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -98,7 +98,7 @@ func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) {
 
 func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) {
 	out := new(DeleteTopicResponse)
-	err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/DeleteTopic", in, out, opts...)
+	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -107,7 +107,7 @@ func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopi
 
 func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
 	out := new(ConfigureTopicResponse)
-	err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...)
+	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -116,7 +116,7 @@ func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *Configu
 
 func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) {
 	out := new(GetTopicConfigurationResponse)
-	err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...)
+	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -125,7 +125,7 @@ func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *
 
 func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) {
 	out := new(FindBrokerResponse)
-	err := c.cc.Invoke(ctx, "/mq_pb.SeaweedMessaging/FindBroker", in, out, opts...)
+	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -242,7 +242,7 @@ func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context,
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/mq_pb.SeaweedMessaging/DeleteTopic",
+		FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest))
@@ -260,7 +260,7 @@ func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Conte
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/mq_pb.SeaweedMessaging/ConfigureTopic",
+		FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
@@ -278,7 +278,7 @@ func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx contex
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/mq_pb.SeaweedMessaging/GetTopicConfiguration",
+		FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest))
@@ -296,7 +296,7 @@ func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context,
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/mq_pb.SeaweedMessaging/FindBroker",
+		FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest))
@@ -308,7 +308,7 @@ func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context,
 // It's only intended for direct use with grpc.RegisterService,
 // and not to be introspected or modified (even as a copy)
 var SeaweedMessaging_ServiceDesc = grpc.ServiceDesc{
-	ServiceName: "mq_pb.SeaweedMessaging",
+	ServiceName: "messaging_pb.SeaweedMessaging",
 	HandlerType: (*SeaweedMessagingServer)(nil),
 	Methods: []grpc.MethodDesc{
 		{
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.28.0
-// protoc        v3.21.1
+// protoc-gen-go v1.26.0
+// protoc        v3.17.3
 // source: s3.proto
 
 package s3_pb
@@ -283,20 +283,20 @@ func file_s3_proto_rawDescGZIP() []byte {
 
 var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
 var file_s3_proto_goTypes = []interface{}{
-	(*S3ConfigureRequest)(nil),      // 0: mq_pb.S3ConfigureRequest
-	(*S3ConfigureResponse)(nil),     // 1: mq_pb.S3ConfigureResponse
-	(*S3CircuitBreakerConfig)(nil),  // 2: mq_pb.S3CircuitBreakerConfig
-	(*S3CircuitBreakerOptions)(nil), // 3: mq_pb.S3CircuitBreakerOptions
-	nil,                             // 4: mq_pb.S3CircuitBreakerConfig.BucketsEntry
-	nil,                             // 5: mq_pb.S3CircuitBreakerOptions.ActionsEntry
+	(*S3ConfigureRequest)(nil),      // 0: messaging_pb.S3ConfigureRequest
+	(*S3ConfigureResponse)(nil),     // 1: messaging_pb.S3ConfigureResponse
+	(*S3CircuitBreakerConfig)(nil),  // 2: messaging_pb.S3CircuitBreakerConfig
+	(*S3CircuitBreakerOptions)(nil), // 3: messaging_pb.S3CircuitBreakerOptions
+	nil,                             // 4: messaging_pb.S3CircuitBreakerConfig.BucketsEntry
+	nil,                             // 5: messaging_pb.S3CircuitBreakerOptions.ActionsEntry
 }
 var file_s3_proto_depIdxs = []int32{
-	3, // 0: mq_pb.S3CircuitBreakerConfig.global:type_name -> mq_pb.S3CircuitBreakerOptions
-	4, // 1: mq_pb.S3CircuitBreakerConfig.buckets:type_name -> mq_pb.S3CircuitBreakerConfig.BucketsEntry
-	5, // 2: mq_pb.S3CircuitBreakerOptions.actions:type_name -> mq_pb.S3CircuitBreakerOptions.ActionsEntry
-	3, // 3: mq_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> mq_pb.S3CircuitBreakerOptions
-	0, // 4: mq_pb.SeaweedS3.Configure:input_type -> mq_pb.S3ConfigureRequest
-	1, // 5: mq_pb.SeaweedS3.Configure:output_type -> mq_pb.S3ConfigureResponse
+	3, // 0: messaging_pb.S3CircuitBreakerConfig.global:type_name -> messaging_pb.S3CircuitBreakerOptions
+	4, // 1: messaging_pb.S3CircuitBreakerConfig.buckets:type_name -> messaging_pb.S3CircuitBreakerConfig.BucketsEntry
+	5, // 2: messaging_pb.S3CircuitBreakerOptions.actions:type_name -> messaging_pb.S3CircuitBreakerOptions.ActionsEntry
+	3, // 3: messaging_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> messaging_pb.S3CircuitBreakerOptions
+	0, // 4: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest
+	1, // 5: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse
 	5, // [5:6] is the sub-list for method output_type
 	4, // [4:5] is the sub-list for method input_type
 	4, // [4:4] is the sub-list for extension type_name
@@ -31,7 +31,7 @@ func NewSeaweedS3Client(cc grpc.ClientConnInterface) SeaweedS3Client {
 
 func (c *seaweedS3Client) Configure(ctx context.Context, in *S3ConfigureRequest, opts ...grpc.CallOption) (*S3ConfigureResponse, error) {
 	out := new(S3ConfigureResponse)
-	err := c.cc.Invoke(ctx, "/mq_pb.SeaweedS3/Configure", in, out, opts...)
+	err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedS3/Configure", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -76,7 +76,7 @@ func _SeaweedS3_Configure_Handler(srv interface{}, ctx context.Context, dec func
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/mq_pb.SeaweedS3/Configure",
+		FullMethod: "/messaging_pb.SeaweedS3/Configure",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
 		return srv.(SeaweedS3Server).Configure(ctx, req.(*S3ConfigureRequest))
@@ -88,7 +88,7 @@ func _SeaweedS3_Configure_Handler(srv interface{}, ctx context.Context, dec func
 // It's only intended for direct use with grpc.RegisterService,
 // and not to be introspected or modified (even as a copy)
 var SeaweedS3_ServiceDesc = grpc.ServiceDesc{
-	ServiceName: "mq_pb.SeaweedS3",
+	ServiceName: "messaging_pb.SeaweedS3",
 	HandlerType: (*SeaweedS3Server)(nil),
 	Methods: []grpc.MethodDesc{
 		{
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/cluster"
 	"net"
 	"sort"
 	"time"
@@ -253,12 +254,12 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
 	stopChan := make(chan bool, 1)
 
 	clientName, messageChan := ms.addClient(req.FilerGroup, req.ClientType, peerAddress)
-	for _, update := range ms.Cluster.AddClusterNode(req.FilerGroup, req.ClientType, peerAddress, req.Version) {
+	for _, update := range ms.Cluster.AddClusterNode(req.FilerGroup, req.ClientType, cluster.DataCenter(req.DataCenter), cluster.Rack(req.Rack), peerAddress, req.Version) {
 		ms.broadcastToClients(update)
 	}
 
 	defer func() {
-		for _, update := range ms.Cluster.RemoveClusterNode(req.FilerGroup, req.ClientType, peerAddress) {
+		for _, update := range ms.Cluster.RemoveClusterNode(req.FilerGroup, req.ClientType, cluster.DataCenter(req.DataCenter), cluster.Rack(req.Rack), peerAddress) {
 			ms.broadcastToClients(update)
 		}
 		ms.deleteClient(clientName)
@@ -113,7 +113,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers map[string]pb.Se
 		vgCh:           make(chan *topology.VolumeGrowRequest, 1<<6),
 		clientChans:    make(map[string]chan *master_pb.KeepConnectedResponse),
 		grpcDialOption: grpcDialOption,
-		MasterClient:   wdclient.NewMasterClient(grpcDialOption, "", cluster.MasterType, option.Master, "", peers),
+		MasterClient:   wdclient.NewMasterClient(grpcDialOption, "", cluster.MasterType, option.Master, "", "", peers),
 		adminLocks:     NewAdminLocks(),
 		Cluster:        cluster.NewCluster(),
 	}
@@ -47,7 +47,7 @@ var (
 func NewCommandEnv(options *ShellOptions) *CommandEnv {
 	ce := &CommandEnv{
 		env:          make(map[string]string),
-		MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, *options.FilerGroup, pb.AdminShellClient, "", "", pb.ServerAddresses(*options.Masters).ToAddressMap()),
+		MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, *options.FilerGroup, pb.AdminShellClient, "", "", "", pb.ServerAddresses(*options.Masters).ToAddressMap()),
 		option:       options,
 	}
 	ce.locker = exclusive_locks.NewExclusiveLocker(ce.MasterClient, "admin")
@@ -19,6 +19,7 @@ type MasterClient struct {
 	FilerGroup     string
 	clientType     string
 	clientHost     pb.ServerAddress
+	rack           string
 	currentMaster  pb.ServerAddress
 	masters        map[string]pb.ServerAddress
 	grpcDialOption grpc.DialOption
@@ -29,7 +30,7 @@ type MasterClient struct {
 	OnPeerUpdate func(update *master_pb.ClusterNodeUpdate, startFrom time.Time)
 }
 
-func NewMasterClient(grpcDialOption grpc.DialOption, filerGroup string, clientType string, clientHost pb.ServerAddress, clientDataCenter string, masters map[string]pb.ServerAddress) *MasterClient {
+func NewMasterClient(grpcDialOption grpc.DialOption, filerGroup string, clientType string, clientHost pb.ServerAddress, clientDataCenter string, rack string, masters map[string]pb.ServerAddress) *MasterClient {
 	return &MasterClient{
 		FilerGroup: filerGroup,
 		clientType: clientType,
@@ -152,6 +153,8 @@ func (mc *MasterClient) tryConnectToMaster(master pb.ServerAddress) (nextHintedL
 
 	if err = stream.Send(&master_pb.KeepConnectedRequest{
 		FilerGroup:    mc.FilerGroup,
+		DataCenter:    mc.DataCenter,
+		Rack:          mc.rack,
 		ClientType:    mc.clientType,
 		ClientAddress: string(mc.clientHost),
 		Version:       util.Version(),