auto bootstrapping and updating peers

Konstantin Lebedev 2022-04-05 13:17:53 +05:00
parent 622297f1a7
commit b7cdde14ae
2 changed files with 58 additions and 30 deletions
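
In short: the compose commands drop the explicit -raftBootstrap=false flag and the dependent services no longer wait on master2, while the server gains two helpers: it bootstraps itself when it finds no persisted Raft configuration, and a newly elected leader reconciles cluster membership against its -peers list. A minimal, self-contained Go sketch of that reconciliation rule follows; reconcile and its parameter names are illustrative, not part of this commit:

package main

import (
	"fmt"
	"sort"
)

// reconcile computes which peers a newly elected leader should add and
// which it should remove, given the member IDs currently stored in the
// Raft configuration and the set configured via -peers. The rule mirrors
// UpdatePeers below: desired membership is exactly the configured set.
func reconcile(current []string, configured map[string]bool) (add, remove []string) {
	seen := make(map[string]bool)
	for _, id := range current {
		seen[id] = true
		if !configured[id] {
			remove = append(remove, id) // stale member, no longer in -peers
		}
	}
	for id := range configured {
		if !seen[id] {
			add = append(add, id) // configured but not yet a voter
		}
	}
	sort.Strings(add) // map iteration order is random; sort for stable output
	return add, remove
}

func main() {
	current := []string{"master0:9333", "master3:9336"}
	configured := map[string]bool{"master0:9333": true, "master1:9334": true, "master2:9335": true}
	add, remove := reconcile(current, configured)
	fmt.Println("add:", add)       // add: [master1:9334 master2:9335]
	fmt.Println("remove:", remove) // remove: [master3:9336]
}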


@@ -6,7 +6,7 @@ services:
     ports:
       - 9333:9333
      - 19333:19333
-    command: "-v=3 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -raftBootstrap=false -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
+    command: "-v=4 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
     volumes:
       - ./master/0:/data
     environment:
@@ -18,7 +18,7 @@ services:
     ports:
       - 9334:9334
       - 19334:19334
-    command: "-v=3 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -raftBootstrap=false -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
+    command: "-v=4 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
     volumes:
       - ./master/1:/data
     environment:
@@ -30,7 +30,7 @@ services:
     ports:
       - 9335:9335
       - 19335:19335
-    command: "-v=3 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -raftBootstrap=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
+    command: "-v=4 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
     volumes:
       - ./master/2:/data
     environment:
@@ -46,7 +46,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
   volume2:
     image: chrislusf/seaweedfs:local
     ports:
@@ -56,7 +55,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
   volume3:
     image: chrislusf/seaweedfs:local
     ports:
@@ -66,7 +64,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
   filer:
     image: chrislusf/seaweedfs:local
     ports:
@@ -77,7 +74,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
       - volume1
       - volume2
   s3:
@@ -88,7 +84,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
       - volume1
       - volume2
       - filer
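
With -raftBootstrap=false removed from every master command above, the command line no longer distinguishes a first boot from a restart; the Go change below moves that decision into the server. A minimal sketch of the new startup condition, under the assumption that an empty stored configuration means a fresh node (needsBootstrap is an illustrative name, not a function in this commit):

package main

import "fmt"

// needsBootstrap mirrors the startup check this commit adds to
// NewHashicorpRaftServer: bootstrap when explicitly requested, or when
// the node has no persisted Raft configuration (a fresh data dir).
func needsBootstrap(raftBootstrapFlag bool, storedServers int) bool {
	return raftBootstrapFlag || storedServers == 0
}

func main() {
	fmt.Println(needsBootstrap(false, 0)) // fresh start: true -> bootstrap with all peers
	fmt.Println(needsBootstrap(false, 3)) // restart with state: false -> reuse stored membership
}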


@@ -16,6 +16,54 @@ import (
 	"time"
 )
 
+func (s *RaftServer) AddPeersConfiguration() (cfg raft.Configuration) {
+	for _, peer := range s.peers {
+		cfg.Servers = append(cfg.Servers, raft.Server{
+			Suffrage: raft.Voter,
+			ID:       raft.ServerID(peer.String()),
+			Address:  raft.ServerAddress(peer.ToGrpcAddress()),
+		})
+	}
+	return cfg
+}
+
+func (s *RaftServer) UpdatePeers() {
+	for {
+		select {
+		case isLeader := <-s.RaftHashicorp.LeaderCh():
+			if isLeader {
+				peerLeader := s.serverAddr.String()
+				existsPeerName := make(map[string]bool)
+				for _, server := range s.RaftHashicorp.GetConfiguration().Configuration().Servers {
+					if string(server.ID) == peerLeader {
+						continue
+					}
+					existsPeerName[string(server.ID)] = true
+				}
+				for _, peer := range s.peers {
+					if peer.String() == peerLeader || existsPeerName[peer.String()] {
+						continue
+					}
+					glog.V(0).Infof("adding new peer: %s", peer.String())
+					s.RaftHashicorp.AddVoter(
+						raft.ServerID(peer.String()), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0)
+				}
+				for peer := range existsPeerName {
+					if _, found := s.peers[peer]; !found {
+						glog.V(0).Infof("removing old peer: %s", peer)
+						s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0)
+					}
+				}
+				if _, found := s.peers[peerLeader]; !found {
+					glog.V(0).Infof("removing old leader peer: %s", peerLeader)
+					s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0)
+				}
+			}
+		}
+	}
+}
+
 func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
 	s := &RaftServer{
 		peers: option.Peers,
@@ -25,7 +73,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
 	}
 	c := raft.DefaultConfig()
-	c.LocalID = raft.ServerID(s.serverAddr) // TODO maybee the IP:port address will change
+	c.LocalID = raft.ServerID(s.serverAddr.String()) // TODO maybe the IP:port address will change
 	c.NoSnapshotRestoreOnStart = option.RaftResumeState
 	c.HeartbeatTimeout = time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1))
 	c.ElectionTimeout = option.ElectionTimeout
@@ -66,32 +114,17 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
 	if err != nil {
 		return nil, fmt.Errorf("raft.NewRaft: %v", err)
 	}
-	if option.RaftBootstrap {
-		cfg := raft.Configuration{
-			Servers: []raft.Server{
-				{
-					Suffrage: raft.Voter,
-					ID:       c.LocalID,
-					Address:  raft.ServerAddress(s.serverAddr.ToGrpcAddress()),
-				},
-			},
-		}
-		// Add known peers to bootstrap
-		for _, peer := range option.Peers {
-			if peer == option.ServerAddr {
-				continue
-			}
-			cfg.Servers = append(cfg.Servers, raft.Server{
-				Suffrage: raft.Voter,
-				ID:       raft.ServerID(peer),
-				Address:  raft.ServerAddress(peer.ToGrpcAddress()),
-			})
-		}
+	if option.RaftBootstrap || len(s.RaftHashicorp.GetConfiguration().Configuration().Servers) == 0 {
+		cfg := s.AddPeersConfiguration()
+		glog.V(0).Infof("Bootstrapping new cluster %+v", cfg)
 		f := s.RaftHashicorp.BootstrapCluster(cfg)
 		if err := f.Error(); err != nil {
 			return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
 		}
-	}
+	} else {
+		go s.UpdatePeers()
+	}
 	ticker := time.NewTicker(c.HeartbeatTimeout * 10)
 	if glog.V(4) {
 		go func() {
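
A note on the design: hashicorp/raft reports leadership changes on LeaderCh(), and membership calls such as AddVoter and RemoveServer succeed only on the current leader, which is why UpdatePeers reconciles only after winning an election. The standalone sketch below models that watch loop with a plain bool channel standing in for the real raft instance; all names are illustrative:

package main

import "fmt"

// watchLeadership models the loop in UpdatePeers above: block on
// leadership notifications and reconcile membership only while leader.
// leaderCh stands in for (*raft.Raft).LeaderCh(); reconcile stands in
// for the AddVoter/RemoveServer walk over the configured peers.
func watchLeadership(leaderCh <-chan bool, reconcile func()) {
	for isLeader := range leaderCh {
		if isLeader {
			reconcile() // add missing -peers entries, drop stale members
		}
	}
}

func main() {
	ch := make(chan bool, 2)
	ch <- true  // elected leader
	ch <- false // lost leadership
	close(ch)
	watchLeadership(ch, func() { fmt.Println("reconciling peers") })
}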