Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
rename Server to DataNode
formatting
This commit is contained in:
parent 9bde067f16
commit 9f4630736d

@@ -1,17 +1,17 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
 	<name>weed-fs</name>
 	<comment></comment>
 	<projects>
 	</projects>
 	<buildSpec>
 		<buildCommand>
 			<name>com.googlecode.goclipse.goBuilder</name>
 			<arguments>
 			</arguments>
 		</buildCommand>
 	</buildSpec>
 	<natures>
 		<nature>goclipse.goNature</nature>
 	</natures>
 </projectDescription>

@@ -29,7 +29,7 @@ var (
 	chunkFolder = cmdVolume.Flag.String("dir", "/tmp", "data directory to store files")
 	volumes = cmdVolume.Flag.String("volumes", "0,1-3,4", "comma-separated list of volume ids or range of ids")
 	publicUrl = cmdVolume.Flag.String("publicUrl", "localhost:8080", "public url to serve data read")
-	metaServer = cmdVolume.Flag.String("mserver", "localhost:9333", "master directory server to store mappings")
+	masterNode = cmdVolume.Flag.String("mserver", "localhost:9333", "master directory server to store mappings")
 	vpulse = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")

 	store *storage.Store

@@ -161,11 +161,11 @@ func runVolume(cmd *Command, args []string) bool {

 	go func() {
 		for {
-			store.Join(*metaServer)
+			store.Join(*masterNode)
 			time.Sleep(time.Duration(float32(*vpulse*1e3)*(1+rand.Float32())) * time.Millisecond)
 		}
 	}()
-	log.Println("store joined at", *metaServer)
+	log.Println("store joined at", *masterNode)

 	log.Println("Start storage service at http://127.0.0.1:"+strconv.Itoa(*vport), "public url", *publicUrl)
 	e := http.ListenAndServe(":"+strconv.Itoa(*vport), nil)

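A note on the heartbeat loop above: the sleep interval is the pulse period scaled by a random factor in [1, 2), so heartbeats from many volume servers spread out instead of arriving at the master in lockstep. A minimal, self-contained sketch of that jitter calculation (the pulse value is just the flag's default; nothing beyond the formula is taken from this commit):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	pulse := 5 // seconds, mirrors the -pulseSeconds default above

	// Same formula as the store.Join loop: pulse*1000 ms scaled by a
	// random factor in [1, 2), giving a wait between pulse and 2*pulse.
	wait := time.Duration(float32(pulse*1e3)*(1+rand.Float32())) * time.Millisecond
	fmt.Println("next heartbeat in", wait)
}
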
@@ -11,7 +11,7 @@ import (
 )

 type Machine struct {
-	Volumes []storage.VolumeInfo
+	C1Volumes []storage.VolumeInfo
 	Url string //<server name/ip>[:port]
 	PublicUrl string
 	LastSeen int64 // unix time in seconds

@@ -29,7 +29,7 @@ type Mapper struct {
 }

 func NewMachine(server, publicUrl string, volumes []storage.VolumeInfo, lastSeen int64) *Machine {
-	return &Machine{Url: server, PublicUrl: publicUrl, Volumes: volumes, LastSeen: lastSeen}
+	return &Machine{Url: server, PublicUrl: publicUrl, C1Volumes: volumes, LastSeen: lastSeen}
 }

 func NewMapper(dirname string, filename string, volumeSizeLimit uint64, pulse int) (m *Mapper) {

@@ -72,7 +72,7 @@ func (m *Mapper) Get(vid storage.VolumeId) ([]*Machine, error) {
 func (m *Mapper) Add(machine *Machine) {
 	m.Machines[machine.Url] = machine
 	//add to vid2machine map, and Writers array
-	for _, v := range machine.Volumes {
+	for _, v := range machine.C1Volumes {
 		list := m.vid2machines[v.Id]
 		found := false
 		for index, entry := range list {

@@ -89,7 +89,7 @@ func (m *Mapper) Add(machine *Machine) {
 }
 func (m *Mapper) remove(machine *Machine) {
 	delete(m.Machines, machine.Url)
-	for _, v := range machine.Volumes {
+	for _, v := range machine.C1Volumes {
 		list := m.vid2machines[v.Id]
 		foundIndex := -1
 		for index, entry := range list {

@@ -125,13 +125,13 @@ func (m *Mapper) refreshWritableVolumes() {
 	var writers []storage.VolumeId
 	for _, machine_entry := range m.Machines {
 		if machine_entry.LastSeen > freshThreshHold {
-			for _, v := range machine_entry.Volumes {
+			for _, v := range machine_entry.C1Volumes {
 				if uint64(v.Size) < m.volumeSizeLimit {
 					writers = append(writers, v.Id)
 				}
 			}
 		} else {
-			log.Println("Warning! Server", machine_entry.Url, "last seen is", time.Now().Unix()-machine_entry.LastSeen, "seconds ago!")
+			log.Println("Warning! DataNode", machine_entry.Url, "last seen is", time.Now().Unix()-machine_entry.LastSeen, "seconds ago!")
 			m.remove(machine_entry)
 		}
 	}

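The refreshWritableVolumes hunk encodes the write-eligibility rule: a volume stays writable only while its machine's last heartbeat is newer than the freshness threshold and the volume is still under the configured size limit; stale machines are warned about and removed. A simplified sketch of that rule with stand-in types (the 3-pulse freshness window is an assumption suggested by the freshThreshHold name, not something this diff confirms):

package main

import (
	"fmt"
	"time"
)

// Stand-in types for illustration; the real code uses Machine and
// storage.VolumeInfo from this repository.
type volInfo struct {
	Id   uint32
	Size uint64
}

type machine struct {
	Url      string
	LastSeen int64 // unix seconds of the last heartbeat
	Volumes  []volInfo
}

// writableVolumes mirrors refreshWritableVolumes: keep volumes whose
// machine heartbeated recently and whose size is under the limit.
// ASSUMPTION: the freshness window is 3 pulse periods.
func writableVolumes(machines []machine, pulse int, sizeLimit uint64) []uint32 {
	freshThreshold := time.Now().Unix() - int64(3*pulse)
	var writers []uint32
	for _, m := range machines {
		if m.LastSeen > freshThreshold {
			for _, v := range m.Volumes {
				if v.Size < sizeLimit {
					writers = append(writers, v.Id)
				}
			}
		} else {
			fmt.Println("Warning! DataNode", m.Url, "looks stale")
		}
	}
	return writers
}

func main() {
	ms := []machine{{Url: "127.0.0.1:8080", LastSeen: time.Now().Unix(), Volumes: []volInfo{{Id: 1, Size: 10}}}}
	fmt.Println(writableVolumes(ms, 5, 32*1024*1024*1024))
}
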
@@ -37,7 +37,7 @@ func (vg *VolumeGrowth) GrowVolumeCopy(copyLevel int, topo *topology.Topology) {
 		picked, ret := nl.RandomlyPickN(2)
 		vid := topo.NextVolumeId()
 		if ret {
-			var servers []*topology.Server
+			var servers []*topology.DataNode
 			for _, n := range picked {
 				if ok, server := n.ReserveOneVolume(rand.Intn(n.FreeSpace()), vid); ok {
 					servers = append(servers, server)

@@ -54,7 +54,7 @@ func (vg *VolumeGrowth) GrowVolumeCopy(copyLevel int, topo *topology.Topology) {
 		picked, ret := nl.RandomlyPickN(3)
 		vid := topo.NextVolumeId()
 		if ret {
-			var servers []*topology.Server
+			var servers []*topology.DataNode
 			for _, n := range picked {
 				if ok, server := n.ReserveOneVolume(rand.Intn(n.FreeSpace()), vid); ok {
 					servers = append(servers, server)

@@ -86,7 +86,7 @@ func (vg *VolumeGrowth) GrowVolumeCopy(copyLevel int, topo *topology.Topology) {
 		}

 	}
-func (vg *VolumeGrowth) Grow(vid storage.VolumeId, servers ...*topology.Server) {
+func (vg *VolumeGrowth) Grow(vid storage.VolumeId, servers ...*topology.DataNode) {
 	for _, server := range servers {
 		vi := &storage.VolumeInfo{Id: vid, Size: 0}
 		server.AddVolume(vi)

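Grow itself is a plain loop: every picked node receives the same volume id with an initial size of zero, so all replica locations of a logical volume start from an identical VolumeInfo. A stand-alone sketch of that loop with simplified types (illustrative only, not the replication package's API):

package main

import "fmt"

// volumeInfo and dataNode are illustrative stand-ins for
// storage.VolumeInfo and topology.DataNode.
type volumeInfo struct {
	Id   uint32
	Size uint64
}

type dataNode struct {
	name    string
	volumes map[uint32]*volumeInfo
}

func (d *dataNode) addVolume(v *volumeInfo) { d.volumes[v.Id] = v }

// grow registers the same (empty) volume on every picked data node,
// mirroring VolumeGrowth.Grow after the Server -> DataNode rename.
func grow(vid uint32, nodes ...*dataNode) {
	for _, n := range nodes {
		n.addVolume(&volumeInfo{Id: vid, Size: 0})
	}
}

func main() {
	a := &dataNode{name: "dn1", volumes: map[uint32]*volumeInfo{}}
	b := &dataNode{name: "dn2", volumes: map[uint32]*volumeInfo{}}
	grow(7, a, b)
	fmt.Println(len(a.volumes), len(b.volumes)) // 1 1
}
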
@@ -91,7 +91,7 @@ func setup(topologyLayout string) *topology.Topology {
 		rackMap := rackValue.(map[string]interface{})
 		dc.LinkChildNode(rack)
 		for serverKey, serverValue := range rackMap {
-			server := topology.NewServer(serverKey)
+			server := topology.NewDataNode(serverKey)
 			serverMap := serverValue.(map[string]interface{})
 			rack.LinkChildNode(server)
 			for _, v := range serverMap["volumes"].([]interface{}) {

@@ -10,7 +10,7 @@ type Node interface {
 	Id() NodeId
 	String() string
 	FreeSpace() int
-	ReserveOneVolume(r int, vid storage.VolumeId) (bool, *Server)
+	ReserveOneVolume(r int, vid storage.VolumeId) (bool, *DataNode)
 	UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int)
 	UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int)
 	UpAdjustMaxVolumeId(vid storage.VolumeId)

@@ -21,7 +21,7 @@ type Node interface {
 	LinkChildNode(node Node)
 	UnlinkChildNode(nodeId NodeId)

-	IsServer() bool
+	IsDataNode() bool
 	Children() map[NodeId]Node
 	Parent() Node
 }

@@ -37,8 +37,8 @@ type NodeImpl struct {
 	nodeType string
 }

-func (n *NodeImpl) IsServer() bool {
-	return n.nodeType == "Server"
+func (n *NodeImpl) IsDataNode() bool {
+	return n.nodeType == "DataNode"
 }
 func (n *NodeImpl) IsRack() bool {
 	return n.nodeType == "Rack"

@@ -67,9 +67,9 @@ func (n *NodeImpl) Children() map[NodeId]Node {
 func (n *NodeImpl) Parent() Node {
 	return n.parent
 }
-func (n *NodeImpl) ReserveOneVolume(r int, vid storage.VolumeId) (bool, *Server) {
+func (n *NodeImpl) ReserveOneVolume(r int, vid storage.VolumeId) (bool, *DataNode) {
 	ret := false
-	var assignedNode *Server
+	var assignedNode *DataNode
 	for _, node := range n.children {
 		freeSpace := node.FreeSpace()
 		//fmt.Println("r =", r, ", node =", node, ", freeSpace =", freeSpace)

@@ -79,9 +79,9 @@ func (n *NodeImpl) ReserveOneVolume(r int, vid storage.VolumeId) (bool, *Server)
 		if r >= freeSpace {
 			r -= freeSpace
 		} else {
-			if node.IsServer() && node.FreeSpace() > 0 {
+			if node.IsDataNode() && node.FreeSpace() > 0 {
 				//fmt.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
-				return true, node.(*Server)
+				return true, node.(*DataNode)
 			}
 			ret, assignedNode = node.ReserveOneVolume(r, vid)
 			if ret {

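The two ReserveOneVolume hunks above are a weighted random descent: a random index r is drawn from [0, FreeSpace()) at the top, each child either absorbs its share of the index (r -= freeSpace) or is entered, and the walk bottoms out at a data node with spare capacity. A self-contained sketch of the same idea with simplified types (not the repository's Node interface):

package main

import (
	"fmt"
	"math/rand"
)

// node is a simplified stand-in for the topology Node interface.
type node struct {
	name      string
	freeSpace int // free volume slots in this subtree
	children  []*node
}

func (n *node) isDataNode() bool { return len(n.children) == 0 }

// reserveOne walks the tree with a random index r in [0, freeSpace):
// children whose free space is <= r are skipped (r shrinks by that
// amount); otherwise the walk descends until it reaches a leaf.
func reserveOne(n *node, r int) (*node, bool) {
	if n.isDataNode() {
		if n.freeSpace > 0 {
			return n, true
		}
		return nil, false
	}
	for _, c := range n.children {
		if r >= c.freeSpace {
			r -= c.freeSpace
			continue
		}
		return reserveOne(c, r)
	}
	return nil, false
}

func main() {
	dc := &node{name: "dc1", freeSpace: 5, children: []*node{
		{name: "server1", freeSpace: 2},
		{name: "server2", freeSpace: 3},
	}}
	if picked, ok := reserveOne(dc, rand.Intn(dc.freeSpace)); ok {
		fmt.Println("reserved a slot on", picked.name)
	}
}
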
@@ -49,15 +49,15 @@ func (nl *NodeList) RandomlyPickN(n int) ([]Node, bool) {
 	return list[len(list)-n:], true
 }

-func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId) (bool, *Server) {
+func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId) (bool, *DataNode) {
 	for _, node := range nl.nodes {
 		freeSpace := node.FreeSpace()
 		if randomVolumeIndex >= freeSpace {
 			randomVolumeIndex -= freeSpace
 		} else {
-			if node.IsServer() && node.FreeSpace() > 0 {
+			if node.IsDataNode() && node.FreeSpace() > 0 {
 				fmt.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
-				return true, node.(*Server)
+				return true, node.(*DataNode)
 			}
 			children := node.Children()
 			newNodeList := NewNodeList(children, nl.except)

@@ -5,26 +5,23 @@ import (
 	"pkg/storage"
 )

-type Server struct {
+type DataNode struct {
 	NodeImpl
 	volumes map[storage.VolumeId]*storage.VolumeInfo
-	Ip NodeId
-	Port int
-	PublicUrl string
 }

-func NewServer(id string) *Server {
-	s := &Server{}
+func NewDataNode(id string) *DataNode {
+	s := &DataNode{}
 	s.id = NodeId(id)
-	s.nodeType = "Server"
+	s.nodeType = "DataNode"
 	s.volumes = make(map[storage.VolumeId]*storage.VolumeInfo)
 	return s
 }
-func (s *Server) CreateOneVolume(r int, vid storage.VolumeId) storage.VolumeId {
+func (s *DataNode) CreateOneVolume(r int, vid storage.VolumeId) storage.VolumeId {
 	s.AddVolume(&storage.VolumeInfo{Id: vid, Size: 32 * 1024 * 1024 * 1024})
 	return vid
 }
-func (s *Server) AddVolume(v *storage.VolumeInfo) {
+func (s *DataNode) AddVolume(v *storage.VolumeInfo) {
 	s.volumes[v.Id] = v
 	s.UpAdjustActiveVolumeCountDelta(1)
 	s.UpAdjustMaxVolumeId(v.Id)

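Besides the type rename, this file drops the Ip, Port and PublicUrl fields and keeps the shared NodeImpl pattern, where a nodeType string tag lets IsDataNode() distinguish leaf nodes from racks and data centers. A stripped-down sketch of that pattern with stand-in types (not the topology package itself):

package main

import "fmt"

// nodeImpl is a stand-in for the embedded NodeImpl: every node kind
// shares it, and a string tag records which concrete kind this node is.
type nodeImpl struct {
	id       string
	nodeType string
}

func (n *nodeImpl) IsDataNode() bool { return n.nodeType == "DataNode" }
func (n *nodeImpl) IsRack() bool     { return n.nodeType == "Rack" }

// dataNode mirrors the renamed DataNode: a leaf node that owns volumes.
type dataNode struct {
	nodeImpl
	volumes map[uint32]uint64 // volume id -> size
}

func newDataNode(id string) *dataNode {
	d := &dataNode{volumes: make(map[uint32]uint64)}
	d.id = id
	d.nodeType = "DataNode" // was "Server" before this commit
	return d
}

func main() {
	d := newDataNode("127.0.0.1:8080")
	fmt.Println(d.id, "is a data node:", d.IsDataNode())
}
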
@@ -91,7 +91,7 @@ func setup(topologyLayout string) *Topology {
 		rackMap := rackValue.(map[string]interface{})
 		dc.LinkChildNode(rack)
 		for serverKey, serverValue := range rackMap {
-			server := NewServer(serverKey)
+			server := NewDataNode(serverKey)
 			serverMap := serverValue.(map[string]interface{})
 			rack.LinkChildNode(server)
 			for _, v := range serverMap["volumes"].([]interface{}) {

@@ -17,13 +17,13 @@ func NewTopology(id string) *Topology {
 	t.children = make(map[NodeId]Node)
 	return t
 }
-func (t *Topology) RandomlyReserveOneVolume() (bool, *Server, storage.VolumeId) {
+func (t *Topology) RandomlyReserveOneVolume() (bool, *DataNode, storage.VolumeId) {
 	vid := t.NextVolumeId()
 	ret, node := t.ReserveOneVolume(rand.Intn(t.FreeSpace()), vid)
 	return ret, node, vid
 }

-func (t *Topology) RandomlyReserveOneVolumeExcept(except []Node) (bool, *Server, storage.VolumeId) {
+func (t *Topology) RandomlyReserveOneVolumeExcept(except []Node) (bool, *DataNode, storage.VolumeId) {
 	freeSpace := t.FreeSpace()
 	for _, node := range except {
 		freeSpace -= node.FreeSpace()

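The Except variant subtracts each excluded node's free space before drawing the random index, so a rack or data node that already holds a copy cannot be picked again for the same replica set. A small illustrative sketch of that adjustment (names and numbers here are made up):

package main

import (
	"fmt"
	"math/rand"
)

// freeSpaceExcluding mirrors the loop in RandomlyReserveOneVolumeExcept:
// the random index is drawn only from capacity outside the excluded nodes.
func freeSpaceExcluding(total int, excluded []int) int {
	for _, f := range excluded {
		total -= f
	}
	return total
}

func main() {
	total := 10             // topology-wide free volume slots
	excluded := []int{2, 3} // slots on nodes already holding a copy
	usable := freeSpaceExcluding(total, excluded)
	fmt.Println("drawing index from", usable, "slots:", rand.Intn(usable))
}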