package weed_server

import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"

	"github.com/chrislusf/raft"
	"google.golang.org/grpc/peer"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/backend"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/topology"
	"github.com/chrislusf/seaweedfs/weed/util/log"
)

// SendHeartbeat receives streaming heartbeats from a volume server, keeps the
// topology in sync with the reported volumes and EC shards, and broadcasts the
// resulting volume location changes to connected master clients.
func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
	var dn *topology.DataNode

	defer func() {
		if dn != nil {
			// if the volume server disconnects and reconnects quickly,
			// the unregister and register can race with each other
			ms.Topo.UnRegisterDataNode(dn)
			log.Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)

			message := &master_pb.VolumeLocation{
				Url:       dn.Url(),
				PublicUrl: dn.PublicUrl,
			}
			for _, v := range dn.GetVolumes() {
				message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
			}
			for _, s := range dn.GetEcShards() {
				message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))
			}

			if len(message.DeletedVids) > 0 {
				ms.clientChansLock.RLock()
				for _, ch := range ms.clientChans {
					ch <- message
				}
				ms.clientChansLock.RUnlock()
			}
		}
	}()

	for {
		heartbeat, err := stream.Recv()
		if err != nil {
			if dn != nil {
				log.Warnf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err)
			} else {
				log.Warnf("SendHeartbeat.Recv: %v", err)
			}
			return err
		}

		ms.Topo.Sequence.SetMax(heartbeat.MaxFileKey)

		if dn == nil {
			dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
			dc := ms.Topo.GetOrCreateDataCenter(dcName)
			rack := dc.GetOrCreateRack(rackName)
			dn = rack.GetOrCreateDataNode(heartbeat.Ip,
				int(heartbeat.Port), heartbeat.PublicUrl,
				int64(heartbeat.MaxVolumeCount))
			log.Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort())
			if err := stream.Send(&master_pb.HeartbeatResponse{
				VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
			}); err != nil {
				log.Warnf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err)
				return err
			}
		}

		if heartbeat.MaxVolumeCount != 0 && dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) {
			delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount()
			dn.UpAdjustMaxVolumeCountDelta(delta)
		}

		log.Tracef("master received heartbeat %s", heartbeat.String())
		message := &master_pb.VolumeLocation{
			Url:        dn.Url(),
			PublicUrl:  dn.PublicUrl,
			DataCenter: string(dn.GetDataCenter().Id()),
		}
		if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 {
			// process delta volume ids, if any, for fast volume id updates
			for _, volInfo := range heartbeat.NewVolumes {
				message.NewVids = append(message.NewVids, volInfo.Id)
			}
			for _, volInfo := range heartbeat.DeletedVolumes {
				message.DeletedVids = append(message.DeletedVids, volInfo.Id)
			}
			// update master internal volume layouts
			ms.Topo.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn)
		}

		if len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes {
			// process heartbeat.Volumes
			newVolumes, deletedVolumes := ms.Topo.SyncDataNodeRegistration(heartbeat.Volumes, dn)

			for _, v := range newVolumes {
				log.Infof("master sees new volume %d from %s", uint32(v.Id), dn.Url())
				message.NewVids = append(message.NewVids, uint32(v.Id))
			}
			for _, v := range deletedVolumes {
				log.Infof("master sees deleted volume %d from %s", uint32(v.Id), dn.Url())
				message.DeletedVids = append(message.DeletedVids, uint32(v.Id))
			}
		}

		if len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 {
			// update master internal volume layouts
			ms.Topo.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn)

			for _, s := range heartbeat.NewEcShards {
				message.NewVids = append(message.NewVids, s.Id)
			}
			for _, s := range heartbeat.DeletedEcShards {
				if dn.HasVolumesById(needle.VolumeId(s.Id)) {
					continue
				}
				message.DeletedVids = append(message.DeletedVids, s.Id)
			}
		}

		if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {
			log.Debugf("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards)
			newShards, deletedShards := ms.Topo.SyncDataNodeEcShards(heartbeat.EcShards, dn)

			// broadcast the ec vid changes to master clients
			for _, s := range newShards {
				message.NewVids = append(message.NewVids, uint32(s.VolumeId))
			}
			for _, s := range deletedShards {
				if dn.HasVolumesById(s.VolumeId) {
					continue
				}
				message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))
			}
		}

		if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {
			ms.clientChansLock.RLock()
			for host, ch := range ms.clientChans {
				log.Infof("master send to %s: %s", host, message.String())
				ch <- message
			}
			ms.clientChansLock.RUnlock()
		}

		// tell the volume server about the current leader
		newLeader, err := ms.Topo.Leader()
		if err != nil {
			log.Warnf("SendHeartbeat find leader: %v", err)
			return err
		}
		if err := stream.Send(&master_pb.HeartbeatResponse{
			Leader: newLeader,
		}); err != nil {
			log.Warnf("SendHeartbeat.Send response to %s:%d %v", dn.Ip, dn.Port, err)
			return err
		}
	}
}
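
// The sketch below is illustrative only and is not called anywhere in this
// package: assuming the generated master_pb client follows the usual gRPC-Go
// naming (master_pb.SeaweedClient, master_pb.Heartbeat,
// master_pb.HeartbeatResponse), it shows how a volume server might drive the
// SendHeartbeat stream handled above. The function name and its parameters are
// hypothetical; only fields the server actually reads are set.
func sendHeartbeatClientSketch(ctx context.Context, client master_pb.SeaweedClient, ip string, port uint32, publicUrl string) error {
	stream, err := client.SendHeartbeat(ctx)
	if err != nil {
		return err
	}
	// a first heartbeat registers this data node with the master topology;
	// HasNoVolumes marks this as a full (here, empty) volume report
	if err := stream.Send(&master_pb.Heartbeat{
		Ip:           ip,
		Port:         port,
		PublicUrl:    publicUrl,
		HasNoVolumes: true,
	}); err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err
		}
		if resp.VolumeSizeLimit > 0 {
			log.Infof("volume size limit: %d bytes", resp.VolumeSizeLimit)
		}
		if resp.Leader != "" {
			log.Infof("current master leader: %s", resp.Leader)
		}
	}
}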

// KeepConnected keeps a streaming gRPC call to the master open. Clients use it
// to know that the master is up, and to receive the up-to-date list of volume
// locations.
func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServer) error {

	req, err := stream.Recv()
	if err != nil {
		return err
	}

	if !ms.Topo.IsLeader() {
		return ms.informNewLeader(stream)
	}

	peerAddress := findClientAddress(stream.Context(), req.GrpcPort)

	// buffer by 1 so we don't end up getting stuck writing to stopChan forever
	stopChan := make(chan bool, 1)

	clientName, messageChan := ms.addClient(req.Name, peerAddress)

	defer ms.deleteClient(clientName)

	for _, message := range ms.Topo.ToVolumeLocations() {
		if err := stream.Send(message); err != nil {
			return err
		}
	}

	go func() {
		for {
			_, err := stream.Recv()
			if err != nil {
				log.Debugf("- client %v: %v", clientName, err)
				stopChan <- true
				break
			}
		}
	}()

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop() // release the ticker when the stream ends
	for {
		select {
		case message := <-messageChan:
			if err := stream.Send(message); err != nil {
				log.Infof("=> client %v: %+v", clientName, message)
				return err
			}
		case <-ticker.C:
			if !ms.Topo.IsLeader() {
				return ms.informNewLeader(stream)
			}
		case <-stopChan:
			return nil
		}
	}
}
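
// The sketch below is illustrative only and is not called anywhere in this
// package: assuming the generated master_pb client follows the usual gRPC-Go
// naming (master_pb.SeaweedClient, master_pb.KeepConnectedRequest), it shows
// how a client might consume the KeepConnected stream handled above. The
// function name and its parameters are hypothetical.
func keepConnectedClientSketch(ctx context.Context, client master_pb.SeaweedClient, clientName string, grpcPort uint32) error {
	stream, err := client.KeepConnected(ctx)
	if err != nil {
		return err
	}
	// identify this client; the master keys its broadcast channel by "type@address"
	if err := stream.Send(&master_pb.KeepConnectedRequest{Name: clientName, GrpcPort: grpcPort}); err != nil {
		return err
	}
	for {
		loc, err := stream.Recv()
		if err != nil {
			return err
		}
		if loc.Leader != "" {
			// this master is not the leader; a real client would reconnect to loc.Leader
			log.Infof("redirected to leader %s", loc.Leader)
			return nil
		}
		log.Infof("volume location update: %+v", loc)
	}
}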

func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {
	leader, err := ms.Topo.Leader()
	if err != nil {
		log.Errorf("topo leader: %v", err)
		return raft.NotLeaderError
	}
	if err := stream.Send(&master_pb.VolumeLocation{
		Leader: leader,
	}); err != nil {
		return err
	}
	return nil
}

func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) {
	clientName = clientType + "@" + clientAddress
	log.Infof("+ client %v", clientName)

	// We buffer this channel because otherwise we end up in a potential deadlock:
	// the KeepConnected loop is no longer listening on this channel while we're
	// trying to send to it in SendHeartbeat, so we can't take clientChansLock
	// to remove the channel and we're stuck writing to it.
	// A capacity of 100 is probably overkill.
	messageChan = make(chan *master_pb.VolumeLocation, 100)

	ms.clientChansLock.Lock()
	ms.clientChans[clientName] = messageChan
	ms.clientChansLock.Unlock()
	return
}

func (ms *MasterServer) deleteClient(clientName string) {
	log.Infof("- client %v", clientName)
	ms.clientChansLock.Lock()
	delete(ms.clientChans, clientName)
	ms.clientChansLock.Unlock()
}

// findClientAddress returns the connecting client's address as "<peer IP>:<grpcPort>",
// or the raw peer address when no gRPC port is reported.
func findClientAddress(ctx context.Context, grpcPort uint32) string {
	// fmt.Printf("FromContext %+v\n", ctx)
	pr, ok := peer.FromContext(ctx)
	if !ok {
		log.Error("failed to get peer from ctx")
		return ""
	}
	if pr.Addr == net.Addr(nil) {
		log.Error("failed to get peer address")
		return ""
	}
	if grpcPort == 0 {
		return pr.Addr.String()
	}
	if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok {
		externalIP := tcpAddr.IP
		return fmt.Sprintf("%s:%d", externalIP, grpcPort)
	}
	return pr.Addr.String()
}

func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) {
	resp := &master_pb.ListMasterClientsResponse{}
	ms.clientChansLock.RLock()
	defer ms.clientChansLock.RUnlock()

	for k := range ms.clientChans {
		if strings.HasPrefix(k, req.ClientType+"@") {
			resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:])
		}
	}
	return resp, nil
}

func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) {

	// include the current leader so clients can find it
	leader, _ := ms.Topo.Leader()

	resp := &master_pb.GetMasterConfigurationResponse{
		MetricsAddress:         ms.option.MetricsAddress,
		MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),
		StorageBackends:        backend.ToPbStorageBackends(),
		DefaultReplication:     ms.option.DefaultReplicaPlacement,
		Leader:                 leader,
	}

	return resp, nil
}
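
// The sketch below is illustrative only and is not called anywhere in this
// package: assuming the usual gRPC-Go generated client (master_pb.SeaweedClient),
// it shows how a client might read the master configuration served above. The
// function name is hypothetical; only fields populated by the server are read.
func getMasterConfigurationSketch(ctx context.Context, client master_pb.SeaweedClient) error {
	resp, err := client.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{})
	if err != nil {
		return err
	}
	log.Infof("metrics address: %s, interval: %ds, default replication: %s, leader: %s",
		resp.MetricsAddress, resp.MetricsIntervalSeconds, resp.DefaultReplication, resp.Leader)
	return nil
}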