mirror of https://github.com/seaweedfs/seaweedfs.git
27c74a7e66
change replication_type to ReplicaPlacement, hopefully cleaner code

Works for 9 possible ReplicaPlacement values "xyz":
x : number of copies on other data centers
y : number of copies on other racks
z : number of copies on the current rack
x, y, z can each be 0, 1, or 2.

Minor: weed server "-mdir" defaults to "-dir" if empty
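The xyz encoding described above lends itself to a tiny parser. Below is a minimal sketch of how such a placement string could be decoded; the type, field, and function names here are illustrative assumptions, not necessarily the actual weed-fs API:

package main

import "fmt"

// ReplicaPlacement is a hypothetical decoded form of the "xyz" string.
type ReplicaPlacement struct {
	DiffDataCenterCount int // x: copies on other data centers
	DiffRackCount       int // y: copies on other racks
	SameRackCount       int // z: copies on the current rack
}

// parseReplicaPlacement decodes a three-digit string such as "001".
func parseReplicaPlacement(s string) (*ReplicaPlacement, error) {
	if len(s) != 3 {
		return nil, fmt.Errorf("replica placement %q must have exactly 3 digits", s)
	}
	var digits [3]int
	for i, c := range s {
		if c < '0' || c > '2' {
			return nil, fmt.Errorf("digit %q in %q must be 0, 1, or 2", c, s)
		}
		digits[i] = int(c - '0')
	}
	return &ReplicaPlacement{
		DiffDataCenterCount: digits[0],
		DiffRackCount:       digits[1],
		SameRackCount:       digits[2],
	}, nil
}

func main() {
	rp, err := parseReplicaPlacement("110")
	if err != nil {
		panic(err)
	}
	// "110": one copy on another data center, one on another rack, none
	// extra on the current rack (assuming z excludes the original copy)
	fmt.Printf("%+v\n", *rp)
}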
125 lines
2.6 KiB
Go
package topology

import (
	"code.google.com/p/weed-fs/go/sequence"
	"code.google.com/p/weed-fs/go/storage"
	"encoding/json"
	"fmt"
	"testing"
)

var topologyLayout = `
{
  "dc1":{
    "rack1":{
      "server1":{
        "volumes":[
          {"id":1, "size":12312},
          {"id":2, "size":12312},
          {"id":3, "size":12312}
        ],
        "limit":3
      },
      "server2":{
        "volumes":[
          {"id":4, "size":12312},
          {"id":5, "size":12312},
          {"id":6, "size":12312}
        ],
        "limit":10
      }
    },
    "rack2":{
      "server1":{
        "volumes":[
          {"id":4, "size":12312},
          {"id":5, "size":12312},
          {"id":6, "size":12312}
        ],
        "limit":4
      },
      "server2":{
        "volumes":[],
        "limit":4
      },
      "server3":{
        "volumes":[
          {"id":2, "size":12312},
          {"id":3, "size":12312},
          {"id":4, "size":12312}
        ],
        "limit":2
      }
    }
  },
  "dc2":{
  },
  "dc3":{
    "rack2":{
      "server1":{
        "volumes":[
          {"id":1, "size":12312},
          {"id":3, "size":12312},
          {"id":5, "size":12312}
        ],
        "limit":4
      }
    }
  }
}
`
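
// The fixture above defines 15 volume replicas in total: dc1 holds 12
// (3+3 on rack1, 3+0+3 on rack2) and dc3 holds 3, while dc2 is empty.
// Duplicate volume ids on different servers count separately, which is
// what the expected counts of 15 and 12 in TestRemoveDataCenter rely on.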

// setup builds a Topology from the JSON fixture above:
// data center -> rack -> data node, with each node's volumes and limit.
func setup(topologyLayout string) *Topology {
	var data interface{}
	err := json.Unmarshal([]byte(topologyLayout), &data)
	if err != nil {
		fmt.Println("error:", err)
	}

	// need to connect all nodes first before the servers add volumes
	topo, err := NewTopology("mynetwork", "/etc/weed.conf", sequence.NewMemorySequencer(), 234, 5)
	if err != nil {
		fmt.Println("error:", err)
	}
	mTopology := data.(map[string]interface{})
	for dcKey, dcValue := range mTopology {
		dc := NewDataCenter(dcKey)
		dcMap := dcValue.(map[string]interface{})
		topo.LinkChildNode(dc)
		for rackKey, rackValue := range dcMap {
			rack := NewRack(rackKey)
			rackMap := rackValue.(map[string]interface{})
			dc.LinkChildNode(rack)
			for serverKey, serverValue := range rackMap {
				server := NewDataNode(serverKey)
				serverMap := serverValue.(map[string]interface{})
				rack.LinkChildNode(server)
				// register each volume; JSON numbers arrive as float64
				for _, v := range serverMap["volumes"].([]interface{}) {
					m := v.(map[string]interface{})
					vi := storage.VolumeInfo{
						Id:         storage.VolumeId(int64(m["id"].(float64))),
						Size:       uint64(m["size"].(float64)),
						Collection: "testingCollection",
						Version:    storage.CurrentVersion}
					server.AddOrUpdateVolume(vi)
				}
				// "limit" is the server's maximum volume count
				server.UpAdjustMaxVolumeCountDelta(int(serverMap["limit"].(float64)))
			}
		}
	}

	return topo
}

func TestRemoveDataCenter(t *testing.T) {
	topo := setup(topologyLayout)
	// removing the empty dc2 should leave all 15 volumes active
	topo.UnlinkChildNode(NodeId("dc2"))
	if topo.GetActiveVolumeCount() != 15 {
		t.Fail()
	}
	// removing dc3 drops its 3 volumes, leaving the 12 in dc1
	topo.UnlinkChildNode(NodeId("dc3"))
	if topo.GetActiveVolumeCount() != 12 {
		t.Fail()
	}
}