Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)
commit 24291e23eb
parent ef93869009

    refactor
@@ -15,24 +15,13 @@ const (
     BrokerType = "broker"
 )
 
-type FilerGroup string
-type Filers struct {
-    members map[pb.ServerAddress]*ClusterNode
-    leaders *Leaders
-}
+type FilerGroupName string
+type DataCenter string
+type Rack string
 type Leaders struct {
     leaders [3]pb.ServerAddress
 }
 
-type DataCenter string
-type Rack string
-type DataCenterBrokers struct {
-    brokers map[Rack]*RackBrokers
-}
-type RackBrokers struct {
-    brokers map[pb.ServerAddress]*ClusterNode
-}
-
 type ClusterNode struct {
     Address    pb.ServerAddress
     Version    string
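This hunk renames FilerGroup to FilerGroupName and drops the filer- and broker-specific containers (Filers, DataCenterBrokers, RackBrokers); both node types now share the generic membership structures introduced in the next hunk. The surviving identifiers are plain defined string types, converted at call sites as later hunks do with FilerGroupName(ns) or cluster.DataCenter(req.DataCenter). For orientation only, with made-up values:

// Illustrative values only; inside the cluster package these are just
// string-backed types, created by conversion at the call sites shown later.
var (
    group FilerGroupName = "staging" // hypothetical group name
    dc    DataCenter     = "dc1"     // hypothetical data center
    rack  Rack           = "rack1"   // hypothetical rack
)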
@ -41,45 +30,42 @@ type ClusterNode struct {
|
||||||
DataCenter DataCenter
|
DataCenter DataCenter
|
||||||
Rack Rack
|
Rack Rack
|
||||||
}
|
}
|
||||||
|
type GroupMembers struct {
|
||||||
|
members map[pb.ServerAddress]*ClusterNode
|
||||||
|
leaders *Leaders
|
||||||
|
}
|
||||||
|
type ClusterNodeGroups struct {
|
||||||
|
groupMembers map[FilerGroupName]*GroupMembers
|
||||||
|
sync.RWMutex
|
||||||
|
}
|
||||||
type Cluster struct {
|
type Cluster struct {
|
||||||
filerGroup2filers map[FilerGroup]*Filers
|
filerGroups *ClusterNodeGroups
|
||||||
filersLock sync.RWMutex
|
brokerGroups *ClusterNodeGroups
|
||||||
brokers map[DataCenter]*DataCenterBrokers
|
|
||||||
brokersLock sync.RWMutex
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewCluster() *Cluster {
|
func newClusterNodeGroups() *ClusterNodeGroups {
|
||||||
return &Cluster{
|
return &ClusterNodeGroups{
|
||||||
filerGroup2filers: make(map[FilerGroup]*Filers),
|
groupMembers: map[FilerGroupName]*GroupMembers{},
|
||||||
brokers: make(map[DataCenter]*DataCenterBrokers),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func (g *ClusterNodeGroups) getGroupMembers(filerGroup FilerGroupName, createIfNotFound bool) *GroupMembers {
|
||||||
func (cluster *Cluster) getFilers(filerGroup FilerGroup, createIfNotFound bool) *Filers {
|
filers, found := g.groupMembers[filerGroup]
|
||||||
filers, found := cluster.filerGroup2filers[filerGroup]
|
|
||||||
if !found && createIfNotFound {
|
if !found && createIfNotFound {
|
||||||
filers = &Filers{
|
filers = &GroupMembers{
|
||||||
members: make(map[pb.ServerAddress]*ClusterNode),
|
members: make(map[pb.ServerAddress]*ClusterNode),
|
||||||
leaders: &Leaders{},
|
leaders: &Leaders{},
|
||||||
}
|
}
|
||||||
cluster.filerGroup2filers[filerGroup] = filers
|
g.groupMembers[filerGroup] = filers
|
||||||
}
|
}
|
||||||
return filers
|
return filers
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cluster *Cluster) AddClusterNode(ns, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
|
func (m *GroupMembers) addMember(dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) *ClusterNode {
|
||||||
filerGroup := FilerGroup(ns)
|
if existingNode, found := m.members[address]; found {
|
||||||
switch nodeType {
|
|
||||||
case FilerType:
|
|
||||||
cluster.filersLock.Lock()
|
|
||||||
defer cluster.filersLock.Unlock()
|
|
||||||
filers := cluster.getFilers(filerGroup, true)
|
|
||||||
if existingNode, found := filers.members[address]; found {
|
|
||||||
existingNode.counter++
|
existingNode.counter++
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
filers.members[address] = &ClusterNode{
|
t := &ClusterNode{
|
||||||
Address: address,
|
Address: address,
|
||||||
Version: version,
|
Version: version,
|
||||||
counter: 1,
|
counter: 1,
|
||||||
|
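With these structures, membership is nested Cluster -> ClusterNodeGroups -> GroupMembers -> ClusterNode. A hypothetical package-internal lookup, shown only to make the nesting concrete; the commit's own accessors (added in the next hunk) go through getGroupMembers under the exclusive lock instead:

// Hypothetical helper, not part of the commit: walk the new nesting directly.
func findNode(c *Cluster, group FilerGroupName, addr pb.ServerAddress) *ClusterNode {
    g := c.filerGroups // or c.brokerGroups for message queue brokers
    g.RLock()          // ClusterNodeGroups embeds sync.RWMutex
    defer g.RUnlock()
    m, found := g.groupMembers[group]
    if !found {
        return nil
    }
    return m.members[addr] // nil if this address was never added
}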
@@ -87,163 +73,148 @@ func (cluster *Cluster) AddClusterNode(ns, nodeType string, dataCenter DataCente
         DataCenter: dataCenter,
         Rack:       rack,
     }
-        return ensureFilerLeaders(filers, true, filerGroup, nodeType, address)
-    case BrokerType:
-        cluster.brokersLock.Lock()
-        defer cluster.brokersLock.Unlock()
-        existingDataCenterBrokers, foundDataCenter := cluster.brokers[dataCenter]
-        if !foundDataCenter {
-            existingDataCenterBrokers = &DataCenterBrokers{
-                brokers: make(map[Rack]*RackBrokers),
+    m.members[address] = t
+    return t
 }
-            cluster.brokers[dataCenter] = existingDataCenterBrokers
-        }
-        existingRackBrokers, foundRack := existingDataCenterBrokers.brokers[rack]
-        if !foundRack {
-            existingRackBrokers = &RackBrokers{
-                brokers: make(map[pb.ServerAddress]*ClusterNode),
-            }
-            existingDataCenterBrokers.brokers[rack] = existingRackBrokers
-        }
-
-        if existingBroker, found := existingRackBrokers.brokers[address]; found {
-            existingBroker.counter++
-            return nil
-        }
-        existingRackBrokers.brokers[address] = &ClusterNode{
-            Address:    address,
-            Version:    version,
-            counter:    1,
-            CreatedTs:  time.Now(),
-            DataCenter: dataCenter,
-            Rack:       rack,
-        }
-        return []*master_pb.KeepConnectedResponse{
-            {
-                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
-                    NodeType: nodeType,
-                    Address:  string(address),
-                    IsAdd:    true,
-                },
-            },
-        }
-    case MasterType:
-        return []*master_pb.KeepConnectedResponse{
-            {
-                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
-                    NodeType: nodeType,
-                    Address:  string(address),
-                    IsAdd:    true,
-                },
-            },
-        }
-    }
-    return nil
-}
-
-func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
-    filerGroup := FilerGroup(ns)
-    switch nodeType {
-    case FilerType:
-        cluster.filersLock.Lock()
-        defer cluster.filersLock.Unlock()
-        filers := cluster.getFilers(filerGroup, false)
-        if filers == nil {
-            return nil
-        }
-        if existingNode, found := filers.members[address]; !found {
-            return nil
+func (m *GroupMembers) removeMember(address pb.ServerAddress) bool {
+    if existingNode, found := m.members[address]; !found {
+        return false
     } else {
         existingNode.counter--
         if existingNode.counter <= 0 {
-            delete(filers.members, address)
-            return ensureFilerLeaders(filers, false, filerGroup, nodeType, address)
+            delete(m.members, address)
+            return true
         }
     }
-    case BrokerType:
-        cluster.brokersLock.Lock()
-        defer cluster.brokersLock.Unlock()
-
-        existingDataCenterBrokers, foundDataCenter := cluster.brokers[dataCenter]
-        if !foundDataCenter {
-            return nil
-        }
-        existingRackBrokers, foundRack := existingDataCenterBrokers.brokers[Rack(rack)]
-        if !foundRack {
-            return nil
-        }
-
-        existingBroker, found := existingRackBrokers.brokers[address]
-        if !found {
-            return nil
-        }
-        existingBroker.counter--
-        if existingBroker.counter <= 0 {
-            delete(existingRackBrokers.brokers, address)
-            return []*master_pb.KeepConnectedResponse{
-                {
-                    ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
-                        NodeType: nodeType,
-                        Address:  string(address),
-                        IsAdd:    false,
-                    },
-                },
-            }
-        }
-        return nil
-
-    case MasterType:
-        return []*master_pb.KeepConnectedResponse{
-            {
-                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
-                    NodeType: nodeType,
-                    Address:  string(address),
-                    IsAdd:    false,
-                },
-            },
-        }
-    }
-    return nil
-}
-
-func (cluster *Cluster) ListClusterNode(filerGroup FilerGroup, nodeType string) (nodes []*ClusterNode) {
-    switch nodeType {
-    case FilerType:
-        cluster.filersLock.RLock()
-        defer cluster.filersLock.RUnlock()
-        filers := cluster.getFilers(filerGroup, false)
-        if filers == nil {
-            return
-        }
-        for _, node := range filers.members {
-            nodes = append(nodes, node)
-        }
-    case BrokerType:
-        cluster.brokersLock.RLock()
-        defer cluster.brokersLock.RUnlock()
-        for _, dcNodes := range cluster.brokers {
-            for _, rackNodes := range dcNodes.brokers {
-                for _, node := range rackNodes.brokers {
-                    nodes = append(nodes, node)
-                }
-            }
-        }
-    case MasterType:
-    }
-    return
-}
-
-func (cluster *Cluster) IsOneLeader(filerGroup FilerGroup, address pb.ServerAddress) bool {
-    filers := cluster.getFilers(filerGroup, false)
-    if filers == nil {
         return false
     }
-    return filers.leaders.isOneLeader(address)
+
+func (g *ClusterNodeGroups) AddClusterNode(filerGroup FilerGroupName, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
+    g.Lock()
+    defer g.Unlock()
+    m := g.getGroupMembers(filerGroup, true)
+    if t := m.addMember(dataCenter, rack, address, version); t != nil {
+        return ensureGroupLeaders(m, true, filerGroup, nodeType, address)
+    }
+    return nil
+}
+func (g *ClusterNodeGroups) RemoveClusterNode(filerGroup FilerGroupName, nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
+    g.Lock()
+    defer g.Unlock()
+    m := g.getGroupMembers(filerGroup, false)
+    if m == nil {
+        return nil
+    }
+    if m.removeMember(address) {
+        return ensureGroupLeaders(m, false, filerGroup, nodeType, address)
+    }
+    return nil
+}
+func (g *ClusterNodeGroups) ListClusterNode(filerGroup FilerGroupName) (nodes []*ClusterNode) {
+    g.Lock()
+    defer g.Unlock()
+    m := g.getGroupMembers(filerGroup, false)
+    if m == nil {
+        return nil
+    }
+    for _, node := range m.members {
+        nodes = append(nodes, node)
+    }
+    return
+}
+func (g *ClusterNodeGroups) IsOneLeader(filerGroup FilerGroupName, address pb.ServerAddress) bool {
+    g.Lock()
+    defer g.Unlock()
+    m := g.getGroupMembers(filerGroup, false)
+    if m == nil {
+        return false
+    }
+    return m.leaders.isOneLeader(address)
+}
+func NewCluster() *Cluster {
+    return &Cluster{
+        filerGroups:  newClusterNodeGroups(),
+        brokerGroups: newClusterNodeGroups(),
+    }
 }
 
-func ensureFilerLeaders(filers *Filers, isAdd bool, filerGroup FilerGroup, nodeType string, address pb.ServerAddress) (result []*master_pb.KeepConnectedResponse) {
+func (cluster *Cluster) getGroupMembers(filerGroup FilerGroupName, nodeType string, createIfNotFound bool) *GroupMembers {
+    switch nodeType {
+    case FilerType:
+        return cluster.filerGroups.getGroupMembers(filerGroup, createIfNotFound)
+    case BrokerType:
+        return cluster.brokerGroups.getGroupMembers(filerGroup, createIfNotFound)
+    }
+    return nil
+}
+
+func (cluster *Cluster) AddClusterNode(ns, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse {
+    filerGroup := FilerGroupName(ns)
+    switch nodeType {
+    case FilerType:
+        return cluster.filerGroups.AddClusterNode(filerGroup, nodeType, dataCenter, rack, address, version)
+    case BrokerType:
+        return cluster.brokerGroups.AddClusterNode(filerGroup, nodeType, dataCenter, rack, address, version)
+    case MasterType:
+        return []*master_pb.KeepConnectedResponse{
+            {
+                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
+                    NodeType: nodeType,
+                    Address:  string(address),
+                    IsAdd:    true,
+                },
+            },
+        }
+    }
+    return nil
+}
+
+func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse {
+    filerGroup := FilerGroupName(ns)
+    switch nodeType {
+    case FilerType:
+        return cluster.filerGroups.RemoveClusterNode(filerGroup, nodeType, address)
+    case BrokerType:
+        return cluster.brokerGroups.RemoveClusterNode(filerGroup, nodeType, address)
+    case MasterType:
+        return []*master_pb.KeepConnectedResponse{
+            {
+                ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
+                    NodeType: nodeType,
+                    Address:  string(address),
+                    IsAdd:    false,
+                },
+            },
+        }
+    }
+    return nil
+}
+
+func (cluster *Cluster) ListClusterNode(filerGroup FilerGroupName, nodeType string) (nodes []*ClusterNode) {
+    switch nodeType {
+    case FilerType:
+        return cluster.filerGroups.ListClusterNode(filerGroup)
+    case BrokerType:
+        return cluster.brokerGroups.ListClusterNode(filerGroup)
+    case MasterType:
+    }
+    return
+}
+
+func (cluster *Cluster) IsOneLeader(filerGroup FilerGroupName, nodeType string, address pb.ServerAddress) bool {
+    switch nodeType {
+    case FilerType:
+        return cluster.filerGroups.IsOneLeader(filerGroup, address)
+    case BrokerType:
+        return cluster.brokerGroups.IsOneLeader(filerGroup, address)
+    case MasterType:
+    }
+    return false
+}
+
+func ensureGroupLeaders(m *GroupMembers, isAdd bool, filerGroup FilerGroupName, nodeType string, address pb.ServerAddress) (result []*master_pb.KeepConnectedResponse) {
     if isAdd {
-        if filers.leaders.addLeaderIfVacant(address) {
+        if m.leaders.addLeaderIfVacant(address) {
             // has added the address as one leader
             result = append(result, &master_pb.KeepConnectedResponse{
                 ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
@@ -266,7 +237,7 @@ func ensureFilerLeaders(filers *Filers, isAdd bool, filerGroup FilerGroup, nodeT
             })
         }
     } else {
-        if filers.leaders.removeLeaderIfExists(address) {
+        if m.leaders.removeLeaderIfExists(address) {
 
             result = append(result, &master_pb.KeepConnectedResponse{
                 ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
@@ -282,8 +253,8 @@ func ensureFilerLeaders(filers *Filers, isAdd bool, filerGroup FilerGroup, nodeT
             var shortestDuration int64 = math.MaxInt64
             now := time.Now()
             var candidateAddress pb.ServerAddress
-            for _, node := range filers.members {
-                if filers.leaders.isOneLeader(node.Address) {
+            for _, node := range m.members {
+                if m.leaders.isOneLeader(node.Address) {
                     continue
                 }
                 duration := now.Sub(node.CreatedTs).Nanoseconds()
@@ -293,7 +264,7 @@ func ensureFilerLeaders(filers *Filers, isAdd bool, filerGroup FilerGroup, nodeT
                 }
             }
             if candidateAddress != "" {
-                filers.leaders.addLeaderIfVacant(candidateAddress)
+                m.leaders.addLeaderIfVacant(candidateAddress)
                 // added a new leader
                 result = append(result, &master_pb.KeepConnectedResponse{
                     ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{
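ensureGroupLeaders (renamed from ensureFilerLeaders) keeps up to three leaders per group: on add it fills a vacant slot, and when a leader disconnects it appears to promote the non-leader member with the shortest time since CreatedTs, which matches the test below where 111:6 takes over from 111:1. The Leaders helpers it calls are outside this diff, so the slot handling below is an assumed sketch, not the commit's code:

// Assumed behavior of the Leaders helpers, based only on the
// leaders [3]pb.ServerAddress field; not taken from this commit.
func (l *Leaders) isOneLeader(address pb.ServerAddress) bool {
    for _, a := range l.leaders {
        if a == address {
            return true
        }
    }
    return false
}
func (l *Leaders) addLeaderIfVacant(address pb.ServerAddress) bool {
    if l.isOneLeader(address) {
        return false
    }
    for i := range l.leaders {
        if l.leaders[i] == "" { // empty slot
            l.leaders[i] = address
            return true
        }
    }
    return false // all three slots taken
}
func (l *Leaders) removeLeaderIfExists(address pb.ServerAddress) bool {
    for i := range l.leaders {
        if l.leaders[i] == address {
            l.leaders[i] = ""
            return true
        }
    }
    return false
}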
@@ -16,7 +16,7 @@ func TestClusterAddRemoveNodes(t *testing.T) {
     assert.Equal(t, []pb.ServerAddress{
         pb.ServerAddress("111:1"),
         pb.ServerAddress("111:2"),
-    }, c.getFilers("", false).leaders.GetLeaders())
+    }, c.getGroupMembers("", "filer", true).leaders.GetLeaders())
 
     c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:3"), "23.45")
     c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:4"), "23.45")
@@ -24,27 +24,27 @@ func TestClusterAddRemoveNodes(t *testing.T) {
         pb.ServerAddress("111:1"),
         pb.ServerAddress("111:2"),
         pb.ServerAddress("111:3"),
-    }, c.getFilers("", false).leaders.GetLeaders())
+    }, c.getGroupMembers("", "filer", true).leaders.GetLeaders())
 
     c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:5"), "23.45")
     c.AddClusterNode("", "filer", "", "", pb.ServerAddress("111:6"), "23.45")
-    c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:4"))
+    c.RemoveClusterNode("", "filer", pb.ServerAddress("111:4"))
     assert.Equal(t, []pb.ServerAddress{
         pb.ServerAddress("111:1"),
         pb.ServerAddress("111:2"),
         pb.ServerAddress("111:3"),
-    }, c.getFilers("", false).leaders.GetLeaders())
+    }, c.getGroupMembers("", "filer", true).leaders.GetLeaders())
 
     // remove oldest
-    c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:1"))
+    c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1"))
     assert.Equal(t, []pb.ServerAddress{
         pb.ServerAddress("111:6"),
         pb.ServerAddress("111:2"),
         pb.ServerAddress("111:3"),
-    }, c.getFilers("", false).leaders.GetLeaders())
+    }, c.getGroupMembers("", "filer", true).leaders.GetLeaders())
 
     // remove oldest
-    c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress("111:1"))
+    c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1"))
 
 }
 
@@ -66,7 +66,7 @@ func TestConcurrentAddRemoveNodes(t *testing.T) {
         go func(i int) {
             defer wg.Done()
             address := strconv.Itoa(i)
-            node := c.RemoveClusterNode("", "filer", "", "", pb.ServerAddress(address))
+            node := c.RemoveClusterNode("", "filer", pb.ServerAddress(address))
 
             if len(node) == 0 {
                 t.Errorf("TestConcurrentAddRemoveNodes: node[%s] not found", address)
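The tests above exercise the new call shapes: getGroupMembers now takes the node type, and RemoveClusterNode no longer takes the data center and rack, since members are keyed by address alone. A standalone usage sketch, assuming the package still lives under weed/cluster of this module; the group, addresses, and version strings are made up:

// Usage sketch, not part of the commit; import path and values are assumptions.
package main

import (
    "fmt"

    "github.com/seaweedfs/seaweedfs/weed/cluster"
    "github.com/seaweedfs/seaweedfs/weed/pb"
)

func main() {
    c := cluster.NewCluster()
    // register two filers in the default (empty) group
    c.AddClusterNode("", cluster.FilerType, "dc1", "rack1", pb.ServerAddress("filer1:8888"), "3.0")
    c.AddClusterNode("", cluster.FilerType, "dc1", "rack1", pb.ServerAddress("filer2:8888"), "3.0")

    for _, node := range c.ListClusterNode(cluster.FilerGroupName(""), cluster.FilerType) {
        fmt.Println(node.Address, c.IsOneLeader("", cluster.FilerType, node.Address))
    }

    // on disconnect, only group, node type, and address are needed now
    c.RemoveClusterNode("", cluster.FilerType, pb.ServerAddress("filer2:8888"))
}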
@@ -259,7 +259,7 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
     }
 
     defer func() {
-        for _, update := range ms.Cluster.RemoveClusterNode(req.FilerGroup, req.ClientType, cluster.DataCenter(req.DataCenter), cluster.Rack(req.Rack), peerAddress) {
+        for _, update := range ms.Cluster.RemoveClusterNode(req.FilerGroup, req.ClientType, peerAddress) {
             ms.broadcastToClients(update)
         }
         ms.deleteClient(clientName)
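KeepConnected registers the peer on connect and, in the defer above, unregisters it on disconnect. Because GroupMembers keeps a per-address counter, overlapping streams from the same address cancel out cleanly; a sketch of that behavior, under the same assumed import path as the previous sketch:

// Sketch only; shows the reference-counted add/remove semantics.
package main

import (
    "github.com/seaweedfs/seaweedfs/weed/cluster" // assumed module path
    "github.com/seaweedfs/seaweedfs/weed/pb"
)

func main() {
    c := cluster.NewCluster()
    addr := pb.ServerAddress("filer1:8888")
    c.AddClusterNode("", cluster.FilerType, "dc1", "rack1", addr, "3.0") // counter = 1, may emit a leader update
    c.AddClusterNode("", cluster.FilerType, "dc1", "rack1", addr, "3.0") // counter = 2, addMember returns nil, no update
    c.RemoveClusterNode("", cluster.FilerType, addr)                     // counter = 1, node stays registered
    c.RemoveClusterNode("", cluster.FilerType, addr)                     // counter = 0, node deleted, leaders re-balanced
}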
@@ -10,14 +10,14 @@ import (
 
 func (ms *MasterServer) ListClusterNodes(ctx context.Context, req *master_pb.ListClusterNodesRequest) (*master_pb.ListClusterNodesResponse, error) {
     resp := &master_pb.ListClusterNodesResponse{}
-    filerGroup := cluster.FilerGroup(req.FilerGroup)
+    filerGroup := cluster.FilerGroupName(req.FilerGroup)
     clusterNodes := ms.Cluster.ListClusterNode(filerGroup, req.ClientType)
 
     for _, node := range clusterNodes {
         resp.ClusterNodes = append(resp.ClusterNodes, &master_pb.ListClusterNodesResponse_ClusterNode{
             Address:     string(node.Address),
             Version:     node.Version,
-            IsLeader:    ms.Cluster.IsOneLeader(filerGroup, node.Address),
+            IsLeader:    ms.Cluster.IsOneLeader(filerGroup, req.ClientType, node.Address),
             CreatedAtNs: node.CreatedTs.UnixNano(),
             DataCenter:  string(node.DataCenter),
             Rack:        string(node.Rack),
@@ -26,13 +26,13 @@ func (ms *MasterServer) ListClusterNodes(ctx context.Context, req *master_pb.Lis
     return resp, nil
 }
 
-func (ms *MasterServer) GetOneFiler(filerGroup cluster.FilerGroup) pb.ServerAddress {
+func (ms *MasterServer) GetOneFiler(filerGroup cluster.FilerGroupName) pb.ServerAddress {
 
     clusterNodes := ms.Cluster.ListClusterNode(filerGroup, cluster.FilerType)
 
     var filers []pb.ServerAddress
     for _, node := range clusterNodes {
-        if ms.Cluster.IsOneLeader(filerGroup, node.Address) {
+        if ms.Cluster.IsOneLeader(filerGroup, cluster.FilerType, node.Address) {
             filers = append(filers, node.Address)
         }
     }
@@ -287,7 +287,7 @@ func (ms *MasterServer) startAdminScripts() {
         for {
             time.Sleep(time.Duration(sleepMinutes) * time.Minute)
             if ms.Topo.IsLeader() {
-                shellOptions.FilerAddress = ms.GetOneFiler(cluster.FilerGroup(*shellOptions.FilerGroup))
+                shellOptions.FilerAddress = ms.GetOneFiler(cluster.FilerGroupName(*shellOptions.FilerGroup))
                 if shellOptions.FilerAddress == "" {
                     continue
                 }