Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

commit 9f4630736d
parent 9bde067f16

    rename Server to DataNode
    formatting
@@ -1,17 +1,17 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-	<name>weed-fs</name>
-	<comment></comment>
-	<projects>
-	</projects>
-	<buildSpec>
-		<buildCommand>
-			<name>com.googlecode.goclipse.goBuilder</name>
-			<arguments>
-			</arguments>
-		</buildCommand>
-	</buildSpec>
-	<natures>
-		<nature>goclipse.goNature</nature>
-	</natures>
+	<name>weed-fs</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>com.googlecode.goclipse.goBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>goclipse.goNature</nature>
+	</natures>
 </projectDescription>
@@ -29,7 +29,7 @@ var (
 	chunkFolder = cmdVolume.Flag.String("dir", "/tmp", "data directory to store files")
 	volumes     = cmdVolume.Flag.String("volumes", "0,1-3,4", "comma-separated list of volume ids or range of ids")
 	publicUrl   = cmdVolume.Flag.String("publicUrl", "localhost:8080", "public url to serve data read")
-	metaServer  = cmdVolume.Flag.String("mserver", "localhost:9333", "master directory server to store mappings")
+	masterNode  = cmdVolume.Flag.String("mserver", "localhost:9333", "master directory server to store mappings")
 	vpulse      = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
 
 	store *storage.Store
@@ -161,11 +161,11 @@ func runVolume(cmd *Command, args []string) bool {
 
 	go func() {
 		for {
-			store.Join(*metaServer)
+			store.Join(*masterNode)
 			time.Sleep(time.Duration(float32(*vpulse*1e3)*(1+rand.Float32())) * time.Millisecond)
 		}
 	}()
-	log.Println("store joined at", *metaServer)
+	log.Println("store joined at", *masterNode)
 
 	log.Println("Start storage service at http://127.0.0.1:"+strconv.Itoa(*vport), "public url", *publicUrl)
 	e := http.ListenAndServe(":"+strconv.Itoa(*vport), nil)
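Note on the heartbeat loop above: each pass calls store.Join on the master and then sleeps float32(*vpulse*1e3)*(1+rand.Float32()) milliseconds, i.e. somewhere between the pulse interval and twice the pulse interval, so a fleet of volume servers does not heartbeat the master in lockstep. A minimal standalone sketch of just that jitter calculation (the pulse value here is the flag's default, used purely for illustration):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	vpulse := 5 // mirrors the -pulseSeconds default above
	for i := 0; i < 3; i++ {
		// Same expression as in runVolume: the factor (1+rand.Float32())
		// is in [1, 2), so the sleep lands in [vpulse, 2*vpulse) seconds.
		d := time.Duration(float32(vpulse*1e3)*(1+rand.Float32())) * time.Millisecond
		fmt.Println("next heartbeat in", d)
	}
}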
@@ -11,7 +11,7 @@ import (
 )
 
 type Machine struct {
-	Volumes   []storage.VolumeInfo
+	C1Volumes []storage.VolumeInfo
 	Url       string //<server name/ip>[:port]
 	PublicUrl string
 	LastSeen  int64 // unix time in seconds
@@ -29,7 +29,7 @@ type Mapper struct {
 }
 
 func NewMachine(server, publicUrl string, volumes []storage.VolumeInfo, lastSeen int64) *Machine {
-	return &Machine{Url: server, PublicUrl: publicUrl, Volumes: volumes, LastSeen: lastSeen}
+	return &Machine{Url: server, PublicUrl: publicUrl, C1Volumes: volumes, LastSeen: lastSeen}
 }
 
 func NewMapper(dirname string, filename string, volumeSizeLimit uint64, pulse int) (m *Mapper) {
@@ -72,7 +72,7 @@ func (m *Mapper) Get(vid storage.VolumeId) ([]*Machine, error) {
 func (m *Mapper) Add(machine *Machine) {
 	m.Machines[machine.Url] = machine
 	//add to vid2machine map, and Writers array
-	for _, v := range machine.Volumes {
+	for _, v := range machine.C1Volumes {
 		list := m.vid2machines[v.Id]
 		found := false
 		for index, entry := range list {
@@ -89,7 +89,7 @@ func (m *Mapper) Add(machine *Machine) {
 }
 func (m *Mapper) remove(machine *Machine) {
 	delete(m.Machines, machine.Url)
-	for _, v := range machine.Volumes {
+	for _, v := range machine.C1Volumes {
 		list := m.vid2machines[v.Id]
 		foundIndex := -1
 		for index, entry := range list {
@@ -125,13 +125,13 @@ func (m *Mapper) refreshWritableVolumes() {
 	var writers []storage.VolumeId
 	for _, machine_entry := range m.Machines {
 		if machine_entry.LastSeen > freshThreshHold {
-			for _, v := range machine_entry.Volumes {
+			for _, v := range machine_entry.C1Volumes {
 				if uint64(v.Size) < m.volumeSizeLimit {
 					writers = append(writers, v.Id)
 				}
 			}
 		} else {
-			log.Println("Warning! Server", machine_entry.Url, "last seen is", time.Now().Unix()-machine_entry.LastSeen, "seconds ago!")
+			log.Println("Warning! DataNode", machine_entry.Url, "last seen is", time.Now().Unix()-machine_entry.LastSeen, "seconds ago!")
 			m.remove(machine_entry)
 		}
 	}
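Note on refreshWritableVolumes above: a machine whose LastSeen is newer than freshThreshHold contributes every volume still below volumeSizeLimit to the writable list; a stale machine is warned about and removed. A self-contained toy of that filter follows; the type names echo the hunk, but the size limit, the freshness window, and the sample data are invented for illustration, and the hunk does not show how freshThreshHold is actually derived.

package main

import (
	"fmt"
	"time"
)

// Reduced stand-ins for the mapper's types; only the fields used here.
type volumeInfo struct {
	Id   int
	Size uint64
}

type machine struct {
	Url       string
	LastSeen  int64 // unix seconds
	C1Volumes []volumeInfo
}

func main() {
	const volumeSizeLimit = uint64(32 * 1024) // illustrative limit
	now := time.Now().Unix()
	freshThreshHold := now - 3*5 // assume "fresh" = seen within three 5-second pulses

	machines := []machine{
		{Url: "dn1:8080", LastSeen: now - 2, C1Volumes: []volumeInfo{{1, 100}, {2, volumeSizeLimit}}},
		{Url: "dn2:8080", LastSeen: now - 60, C1Volumes: []volumeInfo{{3, 100}}},
	}

	var writers []int
	for _, m := range machines {
		if m.LastSeen > freshThreshHold {
			for _, v := range m.C1Volumes {
				if v.Size < volumeSizeLimit { // full volumes are no longer writable
					writers = append(writers, v.Id)
				}
			}
		} else {
			fmt.Println("Warning! DataNode", m.Url, "last seen is", now-m.LastSeen, "seconds ago!")
		}
	}
	fmt.Println("writable volumes:", writers) // volume 1 only: 2 is full, dn2 is stale
}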
@@ -37,7 +37,7 @@ func (vg *VolumeGrowth) GrowVolumeCopy(copyLevel int, topo *topology.Topology) {
 		picked, ret := nl.RandomlyPickN(2)
 		vid := topo.NextVolumeId()
 		if ret {
-			var servers []*topology.Server
+			var servers []*topology.DataNode
 			for _, n := range picked {
 				if ok, server := n.ReserveOneVolume(rand.Intn(n.FreeSpace()), vid); ok {
 					servers = append(servers, server)
@@ -54,7 +54,7 @@ func (vg *VolumeGrowth) GrowVolumeCopy(copyLevel int, topo *topology.Topology) {
 		picked, ret := nl.RandomlyPickN(3)
 		vid := topo.NextVolumeId()
 		if ret {
-			var servers []*topology.Server
+			var servers []*topology.DataNode
 			for _, n := range picked {
 				if ok, server := n.ReserveOneVolume(rand.Intn(n.FreeSpace()), vid); ok {
 					servers = append(servers, server)
@@ -86,7 +86,7 @@ func (vg *VolumeGrowth) GrowVolumeCopy(copyLevel int, topo *topology.Topology) {
 	}
 
 }
-func (vg *VolumeGrowth) Grow(vid storage.VolumeId, servers ...*topology.Server) {
+func (vg *VolumeGrowth) Grow(vid storage.VolumeId, servers ...*topology.DataNode) {
 	for _, server := range servers {
 		vi := &storage.VolumeInfo{Id: vid, Size: 0}
 		server.AddVolume(vi)
@@ -91,7 +91,7 @@ func setup(topologyLayout string) *topology.Topology {
 			rackMap := rackValue.(map[string]interface{})
 			dc.LinkChildNode(rack)
 			for serverKey, serverValue := range rackMap {
-				server := topology.NewServer(serverKey)
+				server := topology.NewDataNode(serverKey)
 				serverMap := serverValue.(map[string]interface{})
 				rack.LinkChildNode(server)
 				for _, v := range serverMap["volumes"].([]interface{}) {
@@ -10,7 +10,7 @@ type Node interface {
 	Id() NodeId
 	String() string
 	FreeSpace() int
-	ReserveOneVolume(r int, vid storage.VolumeId) (bool, *Server)
+	ReserveOneVolume(r int, vid storage.VolumeId) (bool, *DataNode)
 	UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int)
 	UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int)
 	UpAdjustMaxVolumeId(vid storage.VolumeId)
@@ -21,7 +21,7 @@ type Node interface {
 	LinkChildNode(node Node)
 	UnlinkChildNode(nodeId NodeId)
 
-	IsServer() bool
+	IsDataNode() bool
 	Children() map[NodeId]Node
 	Parent() Node
 }
@@ -37,8 +37,8 @@ type NodeImpl struct {
 	nodeType string
 }
 
-func (n *NodeImpl) IsServer() bool {
-	return n.nodeType == "Server"
+func (n *NodeImpl) IsDataNode() bool {
+	return n.nodeType == "DataNode"
 }
 func (n *NodeImpl) IsRack() bool {
 	return n.nodeType == "Rack"
@@ -67,9 +67,9 @@ func (n *NodeImpl) Children() map[NodeId]Node {
 func (n *NodeImpl) Parent() Node {
 	return n.parent
 }
-func (n *NodeImpl) ReserveOneVolume(r int, vid storage.VolumeId) (bool, *Server) {
+func (n *NodeImpl) ReserveOneVolume(r int, vid storage.VolumeId) (bool, *DataNode) {
 	ret := false
-	var assignedNode *Server
+	var assignedNode *DataNode
 	for _, node := range n.children {
 		freeSpace := node.FreeSpace()
 		//fmt.Println("r =", r, ", node =", node, ", freeSpace =", freeSpace)
@@ -79,9 +79,9 @@ func (n *NodeImpl) ReserveOneVolume(r int, vid storage.VolumeId) (bool, *Server)
 		if r >= freeSpace {
 			r -= freeSpace
 		} else {
-			if node.IsServer() && node.FreeSpace() > 0 {
+			if node.IsDataNode() && node.FreeSpace() > 0 {
 				//fmt.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
-				return true, node.(*Server)
+				return true, node.(*DataNode)
 			}
 			ret, assignedNode = node.ReserveOneVolume(r, vid)
 			if ret {
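Note on ReserveOneVolume above (the NodeList variant below works the same way): r starts as a random index in [0, FreeSpace()), and the loop subtracts each child's free space until the index falls inside one child's share, then returns that child if it is a DataNode or recurses into it otherwise. The result is a random pick weighted by free space. A self-contained toy of the same selection idea, with names and capacities invented:

package main

import (
	"fmt"
	"math/rand"
)

// A toy stand-in for topology children: each name has some free volume slots.
type child struct {
	name      string
	freeSpace int
}

// pickByFreeSpace mirrors the walk in ReserveOneVolume: each child "owns" a
// slice of [0, total free space) proportional to its free space, so emptier
// nodes are picked more often.
func pickByFreeSpace(r int, children []child) (child, bool) {
	for _, c := range children {
		if r >= c.freeSpace {
			r -= c.freeSpace
			continue
		}
		if c.freeSpace > 0 {
			return c, true
		}
	}
	return child{}, false
}

func main() {
	children := []child{{"dn1", 3}, {"dn2", 1}, {"dn3", 6}}
	total := 0
	for _, c := range children {
		total += c.freeSpace
	}
	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		if c, ok := pickByFreeSpace(rand.Intn(total), children); ok {
			counts[c.name]++
		}
	}
	fmt.Println(counts) // roughly proportional to 3:1:6
}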
@@ -49,15 +49,15 @@ func (nl *NodeList) RandomlyPickN(n int) ([]Node, bool) {
 	return list[len(list)-n:], true
 }
 
-func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId) (bool, *Server) {
+func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId) (bool, *DataNode) {
 	for _, node := range nl.nodes {
 		freeSpace := node.FreeSpace()
 		if randomVolumeIndex >= freeSpace {
 			randomVolumeIndex -= freeSpace
 		} else {
-			if node.IsServer() && node.FreeSpace() > 0 {
+			if node.IsDataNode() && node.FreeSpace() > 0 {
 				fmt.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
-				return true, node.(*Server)
+				return true, node.(*DataNode)
 			}
 			children := node.Children()
 			newNodeList := NewNodeList(children, nl.except)
@@ -5,26 +5,23 @@ import (
 	"pkg/storage"
 )
 
-type Server struct {
+type DataNode struct {
 	NodeImpl
 	volumes   map[storage.VolumeId]*storage.VolumeInfo
 	Ip        NodeId
 	Port      int
 	PublicUrl string
 }
 
-func NewServer(id string) *Server {
-	s := &Server{}
+func NewDataNode(id string) *DataNode {
+	s := &DataNode{}
 	s.id = NodeId(id)
-	s.nodeType = "Server"
+	s.nodeType = "DataNode"
 	s.volumes = make(map[storage.VolumeId]*storage.VolumeInfo)
 	return s
 }
-func (s *Server) CreateOneVolume(r int, vid storage.VolumeId) storage.VolumeId {
+func (s *DataNode) CreateOneVolume(r int, vid storage.VolumeId) storage.VolumeId {
 	s.AddVolume(&storage.VolumeInfo{Id: vid, Size: 32 * 1024 * 1024 * 1024})
 	return vid
 }
-func (s *Server) AddVolume(v *storage.VolumeInfo) {
+func (s *DataNode) AddVolume(v *storage.VolumeInfo) {
 	s.volumes[v.Id] = v
 	s.UpAdjustActiveVolumeCountDelta(1)
 	s.UpAdjustMaxVolumeId(v.Id)
@@ -91,7 +91,7 @@ func setup(topologyLayout string) *Topology {
 			rackMap := rackValue.(map[string]interface{})
 			dc.LinkChildNode(rack)
 			for serverKey, serverValue := range rackMap {
-				server := NewServer(serverKey)
+				server := NewDataNode(serverKey)
 				serverMap := serverValue.(map[string]interface{})
 				rack.LinkChildNode(server)
 				for _, v := range serverMap["volumes"].([]interface{}) {
@@ -17,13 +17,13 @@ func NewTopology(id string) *Topology {
 	t.children = make(map[NodeId]Node)
 	return t
 }
-func (t *Topology) RandomlyReserveOneVolume() (bool, *Server, storage.VolumeId) {
+func (t *Topology) RandomlyReserveOneVolume() (bool, *DataNode, storage.VolumeId) {
 	vid := t.NextVolumeId()
 	ret, node := t.ReserveOneVolume(rand.Intn(t.FreeSpace()), vid)
 	return ret, node, vid
 }
 
-func (t *Topology) RandomlyReserveOneVolumeExcept(except []Node) (bool, *Server, storage.VolumeId) {
+func (t *Topology) RandomlyReserveOneVolumeExcept(except []Node) (bool, *DataNode, storage.VolumeId) {
 	freeSpace := t.FreeSpace()
 	for _, node := range except {
 		freeSpace -= node.FreeSpace()