fix naming convention

notify volume server of duplicate directories
improve searching efficiency
guol-fnst 2022-05-17 14:51:01 +08:00 committed by guol-fnst
parent 076595fbdd
commit b12944f9c6
7 changed files with 648 additions and 637 deletions
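
The change works roughly as follows: every volume directory gets a persistent UUID (vol_dir.uuid), the volume server reports those UUIDs in its heartbeat, the master keeps a map of volume server "ip:port" keys to their registered UUIDs and collects any UUID it has already seen, and the volume server shuts itself down when the heartbeat response lists duplicated UUIDs. Below is a minimal standalone sketch of the master-side check, modeled on the RegisterUuids change in this commit; the package layout, sample map contents, and the registerUuids helper are illustrative only, not part of the commit.

// Illustrative sketch of the duplicate-directory check; names and data here
// are hypothetical, only the sort + binary-search pattern mirrors the commit.
package main

import (
	"fmt"
	"sort"
)

// uuidMap plays the role of Topology.UuidMap: volume server "ip:port" ->
// UUIDs of the volume directories that server has registered.
var uuidMap = map[string][]string{
	"10.0.0.1:8080": {"uuid-a", "uuid-b"},
}

// registerUuids returns the incoming UUIDs that another volume server has
// already registered; only when none are duplicated is the new entry stored.
func registerUuids(key string, locationUuids []string) (duplicated []string) {
	for k, v := range uuidMap {
		sort.Strings(v) // sort each stored slice once, then binary-search it
		for _, id := range locationUuids {
			if i := sort.SearchStrings(v, id); i < len(v) && v[i] == id {
				duplicated = append(duplicated, id)
				fmt.Printf("directory of %s on %s has been loaded\n", id, k)
			}
		}
	}
	if len(duplicated) > 0 {
		// the master would put these into HeartbeatResponse.duplicated_uuids,
		// and the reporting volume server is expected to shut itself down
		return duplicated
	}
	uuidMap[key] = locationUuids
	return nil
}

func main() {
	// a second volume server pointing at an already-registered directory
	fmt.Println(registerUuids("10.0.0.2:8080", []string{"uuid-a", "uuid-c"}))
}
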


@@ -70,7 +70,7 @@ message Heartbeat {
     map<string, uint32> max_volume_counts = 4;
     uint32 grpc_port = 20;
-    repeated string LocationUUIDs = 21;
+    repeated string location_uuids = 21;
 }

 message HeartbeatResponse {
@@ -79,7 +79,7 @@ message HeartbeatResponse {
     string metrics_address = 3;
     uint32 metrics_interval_seconds = 4;
     repeated StorageBackend storage_backends = 5;
-    bool has_duplicated_directory = 6;
+    repeated string duplicated_uuids = 6;
 }

 message VolumeInformationMessage {

File diff suppressed because it is too large


@@ -22,35 +22,39 @@ import (
     "github.com/chrislusf/seaweedfs/weed/topology"
 )

-func (ms *MasterServer) RegisterUUIDs(heartbeat *master_pb.Heartbeat) error {
-    ms.Topo.UUIDAccessLock.Lock()
-    defer ms.Topo.UUIDAccessLock.Unlock()
+func (ms *MasterServer) RegisterUuids(heartbeat *master_pb.Heartbeat) (duplicated_uuids []string, err error) {
+    ms.Topo.UuidAccessLock.Lock()
+    defer ms.Topo.UuidAccessLock.Unlock()
     key := fmt.Sprintf("%s:%d", heartbeat.Ip, heartbeat.Port)
-    if ms.Topo.UUIDMap == nil {
-        ms.Topo.UUIDMap = make(map[string][]string)
+    if ms.Topo.UuidMap == nil {
+        ms.Topo.UuidMap = make(map[string][]string)
     }
-    // find whether new UUID exists
-    for k, v := range ms.Topo.UUIDMap {
-        for _, id := range heartbeat.LocationUUIDs {
-            sort.Strings(v)
+    // find whether new uuid exists
+    for k, v := range ms.Topo.UuidMap {
+        sort.Strings(v)
+        for _, id := range heartbeat.LocationUuids {
             index := sort.SearchStrings(v, id)
             if index < len(v) && v[index] == id {
-                glog.Error("directory of ", id, " on ", k, " has been loaded")
-                return errors.New("volume: Duplicated volume directories were loaded")
+                duplicated_uuids = append(duplicated_uuids, id)
+                glog.Errorf("directory of %s on %s has been loaded", id, k)
             }
         }
     }
-    ms.Topo.UUIDMap[key] = heartbeat.LocationUUIDs
-    glog.V(0).Infof("found new UUID:%v %v , %v", key, heartbeat.LocationUUIDs, ms.Topo.UUIDMap)
-    return nil
+    if len(duplicated_uuids) > 0 {
+        return duplicated_uuids, errors.New("volume: Duplicated volume directories were loaded")
+    }
+    ms.Topo.UuidMap[key] = heartbeat.LocationUuids
+    glog.V(0).Infof("found new uuid:%v %v , %v", key, heartbeat.LocationUuids, ms.Topo.UuidMap)
+    return nil, nil
 }

-func (ms *MasterServer) UnRegisterUUIDs(ip string, port int) {
-    ms.Topo.UUIDAccessLock.Lock()
-    defer ms.Topo.UUIDAccessLock.Unlock()
+func (ms *MasterServer) UnRegisterUuids(ip string, port int) {
+    ms.Topo.UuidAccessLock.Lock()
+    defer ms.Topo.UuidAccessLock.Unlock()
     key := fmt.Sprintf("%s:%d", ip, port)
-    delete(ms.Topo.UUIDMap, key)
-    glog.V(0).Infof("remove volume server %v, online volume server: %v", key, ms.Topo.UUIDMap)
+    delete(ms.Topo.UuidMap, key)
+    glog.V(0).Infof("remove volume server %v, online volume server: %v", key, ms.Topo.UuidMap)
 }

 func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {
@@ -67,7 +71,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
         // the unregister and register can race with each other
         ms.Topo.UnRegisterDataNode(dn)
         glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port)
-        ms.UnRegisterUUIDs(dn.Ip, dn.Port)
+        ms.UnRegisterUuids(dn.Ip, dn.Port)

         message := &master_pb.VolumeLocation{
             Url: dn.Url(),
@@ -105,11 +109,11 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
         dc := ms.Topo.GetOrCreateDataCenter(dcName)
         rack := dc.GetOrCreateRack(rackName)
         dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), int(heartbeat.GrpcPort), heartbeat.PublicUrl, heartbeat.MaxVolumeCounts)
-        glog.V(0).Infof("added volume server %d: %v:%d %v", dn.Counter, heartbeat.GetIp(), heartbeat.GetPort(), heartbeat.LocationUUIDs)
-        err := ms.RegisterUUIDs(heartbeat)
+        glog.V(0).Infof("added volume server %d: %v:%d %v", dn.Counter, heartbeat.GetIp(), heartbeat.GetPort(), heartbeat.LocationUuids)
+        uuidlist, err := ms.RegisterUuids(heartbeat)
         if err != nil {
             if stream_err := stream.Send(&master_pb.HeartbeatResponse{
-                HasDuplicatedDirectory: true,
+                DuplicatedUuids: uuidlist,
             }); stream_err != nil {
                 glog.Warningf("SendHeartbeat.Send DuplicatedDirectory response to %s:%d %v", dn.Ip, dn.Port, stream_err)
                 return stream_err


@@ -118,8 +118,16 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
             doneChan <- err
             return
         }
-        if in.HasDuplicatedDirectory {
-            glog.Error("Shut Down Volume Server due to duplicated volume directory")
+        if len(in.DuplicatedUuids) > 0 {
+            var duplictedDir []string
+            for _, loc := range vs.store.Locations {
+                for _, uuid := range in.DuplicatedUuids {
+                    if uuid == loc.DirectoryUuid {
+                        duplictedDir = append(duplictedDir, loc.Directory)
+                    }
+                }
+            }
+            glog.Errorf("Shut down Volume Server due to duplicated volume directories: %v", duplictedDir)
             os.Exit(1)
         }
         if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() {


@@ -19,7 +19,7 @@ import (
 type DiskLocation struct {
     Directory      string
-    DirectoryUUID  string
+    DirectoryUuid  string
     IdxDirectory   string
     DiskType       types.DiskType
     MaxVolumeCount int
@@ -35,27 +35,27 @@ type DiskLocation struct {
     isDiskSpaceLow bool
 }

-func GenerateDirUUID(dir string) (dirUUIDString string, err error) {
-    glog.V(1).Infof("Getting UUID of volume directory:%s", dir)
-    dirUUIDString = ""
+func GenerateDirUuid(dir string) (dirUuidString string, err error) {
+    glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
+    dirUuidString = ""
     fileName := dir + "/vol_dir.uuid"
     if !util.FileExists(fileName) {
-        dirUUID, _ := uuid.NewRandom()
-        dirUUIDString = dirUUID.String()
-        writeErr := util.WriteFile(fileName, []byte(dirUUIDString), 0644)
+        dirUuid, _ := uuid.NewRandom()
+        dirUuidString = dirUuid.String()
+        writeErr := util.WriteFile(fileName, []byte(dirUuidString), 0644)
         if writeErr != nil {
-            glog.Warningf("failed to write UUID to %s : %v", fileName, writeErr)
-            return "", fmt.Errorf("failed to write UUID to %s : %v", fileName, writeErr)
+            glog.Warningf("failed to write uuid to %s : %v", fileName, writeErr)
+            return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
         }
     } else {
         uuidData, readErr := os.ReadFile(fileName)
         if readErr != nil {
-            glog.Warningf("failed to read UUID from %s : %v", fileName, readErr)
-            return "", fmt.Errorf("failed to read UUID from %s : %v", fileName, readErr)
+            glog.Warningf("failed to read uuid from %s : %v", fileName, readErr)
+            return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
         }
-        dirUUIDString = string(uuidData)
+        dirUuidString = string(uuidData)
     }
-    return dirUUIDString, nil
+    return dirUuidString, nil
 }

 func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
@@ -65,10 +65,10 @@ func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpace util.MinFreeSp
     } else {
         idxDir = util.ResolvePath(idxDir)
     }
-    dirUUID, _ := GenerateDirUUID(dir)
+    dirUuid, _ := GenerateDirUuid(dir)
     location := &DiskLocation{
         Directory:      dir,
-        DirectoryUUID:  dirUUID,
+        DirectoryUuid:  dirUuid,
         IdxDirectory:   idxDir,
         DiskType:       diskType,
         MaxVolumeCount: maxVolumeCount,


@@ -301,9 +301,9 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
         }
     }

-    var UUIDList []string
+    var uuidList []string
     for _, loc := range s.Locations {
-        UUIDList = append(UUIDList, loc.DirectoryUUID)
+        uuidList = append(uuidList, loc.DirectoryUuid)
     }

     for col, size := range collectionVolumeSize {
@@ -327,7 +327,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
         Rack:          s.rack,
         Volumes:       volumeMessages,
         HasNoVolumes:  len(volumeMessages) == 0,
-        LocationUUIDs: UUIDList,
+        LocationUuids: uuidList,
     }
 }


@ -45,8 +45,8 @@ type Topology struct {
RaftServer raft.Server RaftServer raft.Server
HashicorpRaft *hashicorpRaft.Raft HashicorpRaft *hashicorpRaft.Raft
UUIDAccessLock sync.RWMutex UuidAccessLock sync.RWMutex
UUIDMap map[string][]string UuidMap map[string][]string
} }
func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology { func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, pulse int, replicationAsMin bool) *Topology {