seaweedfs/weed/server/raft_hashicorp.go
2022-04-04 17:51:51 +05:00

114 lines
3.3 KiB
Go

package weed_server
// https://yusufs.medium.com/creating-distributed-kv-database-by-implementing-raft-consensus-using-golang-d0884eef2e28
// https://github.com/Jille/raft-grpc-example/blob/cd5bcab0218f008e044fbeee4facdd01b06018ad/application.go#L18
import (
"fmt"
transport "github.com/Jille/raft-grpc-transport"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/hashicorp/raft"
boltdb "github.com/hashicorp/raft-boltdb"
"google.golang.org/grpc"
"math/rand"
"os"
"path/filepath"
"time"
)
// NewHashicorpRaftServer creates a RaftServer backed by hashicorp/raft, using
// BoltDB files under option.DataDir for the log and stable stores, a file-based
// snapshot store (keeping 3 snapshots), and a gRPC transport. When
// option.RaftBootstrap is set, the cluster is bootstrapped with this server
// plus all known peers as voters. Returns the initialized server or an error
// if any store or the raft node fails to initialize.
func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
	s := &RaftServer{
		peers:      option.Peers,
		serverAddr: option.ServerAddr,
		dataDir:    option.DataDir,
		topo:       option.Topo,
	}

	c := raft.DefaultConfig()
	c.LocalID = raft.ServerID(s.serverAddr) // TODO maybe the IP:port address will change
	c.NoSnapshotRestoreOnStart = option.RaftResumeState
	// Randomize the heartbeat timeout by up to +25% so peers started at the
	// same moment do not all trigger elections simultaneously.
	c.HeartbeatTimeout = time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1))
	c.ElectionTimeout = option.ElectionTimeout
	// The leader lease must not exceed the heartbeat timeout, or
	// raft.NewRaft rejects the configuration.
	if c.LeaderLeaseTimeout > c.HeartbeatTimeout {
		c.LeaderLeaseTimeout = c.HeartbeatTimeout
	}
	// Map glog verbosity onto the raft library's log levels.
	if glog.V(4) {
		c.LogLevel = "Debug"
	} else if glog.V(2) {
		c.LogLevel = "Info"
	} else if glog.V(1) {
		c.LogLevel = "Warn"
	} else if glog.V(0) {
		c.LogLevel = "Error"
	}

	baseDir := s.dataDir

	ldb, err := boltdb.NewBoltStore(filepath.Join(baseDir, "logs.dat"))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, "logs.dat"), err)
	}

	sdb, err := boltdb.NewBoltStore(filepath.Join(baseDir, "stable.dat"))
	if err != nil {
		return nil, fmt.Errorf(`boltdb.NewBoltStore(%q): %v`, filepath.Join(baseDir, "stable.dat"), err)
	}

	fss, err := raft.NewFileSnapshotStore(baseDir, 3, os.Stderr)
	if err != nil {
		return nil, fmt.Errorf(`raft.NewFileSnapshotStore(%q, ...): %v`, baseDir, err)
	}

	s.TransportManager = transport.New(raft.ServerAddress(s.serverAddr), []grpc.DialOption{option.GrpcDialOption})

	stateMachine := StateMachine{topo: option.Topo}
	s.RaftHashicorp, err = raft.NewRaft(c, &stateMachine, ldb, sdb, fss, s.TransportManager.Transport())
	if err != nil {
		return nil, fmt.Errorf("raft.NewRaft: %v", err)
	}

	if option.RaftBootstrap {
		cfg := raft.Configuration{
			Servers: []raft.Server{
				{
					Suffrage: raft.Voter,
					ID:       c.LocalID,
					Address:  raft.ServerAddress(s.serverAddr.ToGrpcAddress()),
				},
			},
		}
		// Add known peers to bootstrap
		for _, peer := range option.Peers {
			if peer == option.ServerAddr {
				continue
			}
			cfg.Servers = append(cfg.Servers, raft.Server{
				Suffrage: raft.Voter,
				ID:       raft.ServerID(peer),
				Address:  raft.ServerAddress(peer.ToGrpcAddress()),
			})
		}
		f := s.RaftHashicorp.BootstrapCluster(cfg)
		if err := f.Error(); err != nil {
			return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
		}
	}

	// At high verbosity, periodically log the peers this node knows about.
	// The ticker is created only when the goroutine will actually consume it,
	// so nothing is allocated (or left ticking forever) at lower verbosity.
	if glog.V(4) {
		ticker := time.NewTicker(c.HeartbeatTimeout * 10)
		go func() {
			for range ticker.C {
				cfuture := s.RaftHashicorp.GetConfiguration()
				// Use a goroutine-local error variable: assigning to the
				// enclosing function's err here would race with the function
				// returning that same variable.
				if cfgErr := cfuture.Error(); cfgErr != nil {
					glog.Fatalf("error getting config: %s", cfgErr)
				}
				configuration := cfuture.Configuration()
				glog.V(4).Infof("Showing peers known by %s:\n%+v", s.RaftHashicorp.String(), configuration.Servers)
			}
		}()
	}

	return s, nil
}