Merge branch 'extend_to_disk_type'
This commit is contained in: commit 632c94d438
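Overview, as read from the diff below: this merge generalizes the capacity accounting from a fixed hdd/ssd split to arbitrary disk types. The heartbeat drops max_volume_count and max_ssd_volume_count in favor of a map<string, uint32> max_volume_counts, the topology messages (DataNodeInfo, RackInfo, DataCenterInfo, TopologyInfo) gain a map<string, DiskInfo> diskInfos, DiskType moves from the storage package into storage/types, EC shard messages and EC volume structs now carry a disk type, and the shell commands are rewritten to iterate per-disk DiskInfos. A new shell command file, weed/shell/command_volume_tier_move.go, is added as a first cut at moving volumes between disk tiers.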
@@ -5,7 +5,7 @@ package command
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"os"
 	"os/user"
 	"path"
@@ -169,7 +169,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		mountRoot = mountRoot[0 : len(mountRoot)-1]
 	}
 
-	diskType := storage.ToDiskType(*option.diskType)
+	diskType := types.ToDiskType(*option.diskType)
 
 	seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
 		MountDirectory: dir,

@@ -2,6 +2,7 @@ package command
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"net/http"
 	httppprof "net/http/pprof"
 	"os"
@@ -170,10 +171,10 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 	}
 
 	// set disk types
-	var diskTypes []storage.DiskType
+	var diskTypes []types.DiskType
 	diskTypeStrings := strings.Split(*v.diskType, ",")
 	for _, diskTypeString := range diskTypeStrings {
-		diskTypes = append(diskTypes, storage.ToDiskType(diskTypeString))
+		diskTypes = append(diskTypes, types.ToDiskType(diskTypeString))
 	}
 	if len(diskTypes) == 1 && len(v.folders) > 1 {
 		for i := 0; i < len(v.folders)-1; i++ {

@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"math"
 	"os"
@@ -35,7 +35,7 @@ type Option struct {
 	Collection         string
 	Replication        string
 	TtlSec             int32
-	DiskType           storage.DiskType
+	DiskType           types.DiskType
 	ChunkSizeLimit     int64
 	ConcurrentWriters  int
 	CacheDir           string
@@ -61,8 +61,7 @@ message Heartbeat {
     repeated VolumeEcShardInformationMessage deleted_ec_shards = 18;
     bool has_no_ec_shards = 19;
 
-    uint32 max_volume_count = 4;
-    uint32 max_ssd_volume_count = 20;
+    map<string, uint32> max_volume_counts = 4;
 
 }
@@ -105,6 +104,7 @@ message VolumeEcShardInformationMessage {
     uint32 id = 1;
     string collection = 2;
     uint32 ec_index_bits = 3;
+    string disk_type = 4;
 }
 
 message StorageBackend {
@@ -213,8 +213,8 @@ message CollectionDeleteResponse {
 //
 // volume related
 //
-message DataNodeInfo {
-    string id = 1;
+message DiskInfo {
+    string type = 1;
     uint64 volume_count = 2;
     uint64 max_volume_count = 3;
     uint64 free_volume_count = 4;
@@ -222,41 +222,25 @@ message DataNodeInfo {
     repeated VolumeInformationMessage volume_infos = 6;
     repeated VolumeEcShardInformationMessage ec_shard_infos = 7;
     uint64 remote_volume_count = 8;
-    uint64 max_ssd_volume_count = 9;
-    uint64 ssd_volume_count = 10;
 }
+message DataNodeInfo {
+    string id = 1;
+    map<string, DiskInfo> diskInfos = 2;
+}
 message RackInfo {
     string id = 1;
-    uint64 volume_count = 2;
-    uint64 max_volume_count = 3;
-    uint64 free_volume_count = 4;
-    uint64 active_volume_count = 5;
-    repeated DataNodeInfo data_node_infos = 6;
-    uint64 remote_volume_count = 7;
-    uint64 max_ssd_volume_count = 8;
-    uint64 ssd_volume_count = 9;
+    repeated DataNodeInfo data_node_infos = 2;
+    map<string, DiskInfo> diskInfos = 3;
 }
 message DataCenterInfo {
     string id = 1;
-    uint64 volume_count = 2;
-    uint64 max_volume_count = 3;
-    uint64 free_volume_count = 4;
-    uint64 active_volume_count = 5;
-    repeated RackInfo rack_infos = 6;
-    uint64 remote_volume_count = 7;
-    uint64 max_ssd_volume_count = 8;
-    uint64 ssd_volume_count = 9;
+    repeated RackInfo rack_infos = 2;
+    map<string, DiskInfo> diskInfos = 3;
 }
 message TopologyInfo {
     string id = 1;
-    uint64 volume_count = 2;
-    uint64 max_volume_count = 3;
-    uint64 free_volume_count = 4;
-    uint64 active_volume_count = 5;
-    repeated DataCenterInfo data_center_infos = 6;
-    uint64 remote_volume_count = 7;
-    uint64 max_ssd_volume_count = 8;
-    uint64 ssd_volume_count = 9;
+    repeated DataCenterInfo data_center_infos = 2;
+    map<string, DiskInfo> diskInfos = 3;
 }
 message VolumeListRequest {
 }
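The topology messages above replace the flat per-node counters with a per-disk-type DiskInfo map. Below is a minimal, self-contained sketch of how a consumer reads that map. The struct names mirror the proto fields but are local stand-ins, not the generated master_pb code, and the empty-string key standing for plain hard drives is an assumption taken from the "" -> "hdd" handling in the volume.list command further down in this diff.

package main

import "fmt"

// Local stand-ins for the generated master_pb types (illustration only).
type DiskInfo struct {
    Type           string
    VolumeCount    uint64
    MaxVolumeCount uint64
}

type DataNodeInfo struct {
    Id        string
    DiskInfos map[string]*DiskInfo
}

// freeVolumeSlots mirrors how the shell commands in this diff read per-disk
// capacity: look up the disk type key; a missing key means zero capacity.
func freeVolumeSlots(dn *DataNodeInfo, diskType string) int {
    diskInfo, found := dn.DiskInfos[diskType]
    if !found {
        return 0
    }
    return int(diskInfo.MaxVolumeCount - diskInfo.VolumeCount)
}

func main() {
    dn := &DataNodeInfo{
        Id: "127.0.0.1:8080",
        DiskInfos: map[string]*DiskInfo{
            "":    {Type: "", VolumeCount: 5, MaxVolumeCount: 7}, // "" is rendered as "hdd"
            "ssd": {Type: "ssd", VolumeCount: 1, MaxVolumeCount: 3},
        },
    }
    fmt.Println(freeVolumeSlots(dn, ""), freeVolumeSlots(dn, "ssd"), freeVolumeSlots(dn, "nvme"))
}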
File diff suppressed because it is too large
@@ -67,7 +67,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
 			dcName, rackName := ms.Topo.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)
 			dc := ms.Topo.GetOrCreateDataCenter(dcName)
 			rack := dc.GetOrCreateRack(rackName)
-			dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), heartbeat.PublicUrl, int64(heartbeat.MaxVolumeCount), int64(heartbeat.MaxSsdVolumeCount))
+			dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), heartbeat.PublicUrl, heartbeat.MaxVolumeCounts)
 			glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort())
 			if err := stream.Send(&master_pb.HeartbeatResponse{
 				VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
@@ -77,14 +77,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
 			}
 		}
 
-		if heartbeat.MaxVolumeCount != 0 && dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) {
-			delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount()
-			dn.UpAdjustMaxVolumeCountDelta(delta)
-		}
-		if heartbeat.MaxSsdVolumeCount != 0 && dn.GetMaxSsdVolumeCount() != int64(heartbeat.MaxSsdVolumeCount) {
-			delta := int64(heartbeat.MaxSsdVolumeCount) - dn.GetMaxSsdVolumeCount()
-			dn.UpAdjustMaxSsdVolumeCountDelta(delta)
-		}
+		dn.AdjustMaxVolumeCounts(heartbeat.MaxVolumeCounts)
 
 		glog.V(4).Infof("master received heartbeat %s", heartbeat.String())
 		message := &master_pb.VolumeLocation{
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/chrislusf/raft"
-	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
@@ -61,7 +61,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
 	if err != nil {
 		return nil, err
 	}
-	diskType := storage.ToDiskType(req.DiskType)
+	diskType := types.ToDiskType(req.DiskType)
 
 	option := &topology.VolumeGrowOption{
 		Collection: req.Collection,
@@ -120,10 +120,10 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic
 		return nil, err
 	}
 
-	volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl, storage.DiskType(req.DiskType))
+	volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl, types.ToDiskType(req.DiskType))
 	stats := volumeLayout.Stats()
 
-	totalSize := (ms.Topo.GetMaxVolumeCount() + ms.Topo.GetMaxSsdVolumeCount()) * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024
+	totalSize := ms.Topo.GetDiskUsages().GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024
 
 	resp := &master_pb.StatisticsResponse{
 		TotalSize: uint64(totalSize),
@@ -3,7 +3,7 @@ package weed_server
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"math/rand"
 	"net/http"
 	"strconv"
@@ -158,7 +158,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr
 	if err != nil {
 		return nil, err
 	}
-	diskType := storage.ToDiskType(r.FormValue("disk"))
+	diskType := types.ToDiskType(r.FormValue("disk"))
 
 	preallocate := ms.preallocateSize
 	if r.FormValue("preallocate") != "" {
@@ -3,7 +3,6 @@ package weed_server
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/storage"
 	"path/filepath"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -42,7 +41,7 @@ func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_p
 		req.Ttl,
 		req.Preallocate,
 		req.MemoryMapMaxSizeMb,
-		storage.DiskType(req.DiskType),
+		types.ToDiskType(req.DiskType),
 	)
 
 	if err != nil {
@@ -222,7 +222,6 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi
 			Ip:             vs.store.Ip,
 			Port:           uint32(vs.store.Port),
 			PublicUrl:      vs.store.PublicUrl,
-			MaxVolumeCount: uint32(0),
 			MaxFileKey:     uint64(0),
 			DataCenter:     vs.store.GetDataCenter(),
 			Rack:           vs.store.GetRack(),
@@ -3,6 +3,7 @@ package weed_server
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
 	"io/ioutil"
 	"math"
@@ -58,7 +59,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 		if req.DiskType != "" {
 			diskType = req.DiskType
 		}
-		location := vs.store.FindFreeLocation(storage.DiskType(diskType))
+		location := vs.store.FindFreeLocation(types.ToDiskType(diskType))
 		if location == nil {
 			return fmt.Errorf("no space left")
 		}
@@ -105,7 +105,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
 
 	glog.V(0).Infof("VolumeEcShardsCopy: %v", req)
 
-	location := vs.store.FindFreeLocation(storage.HardDriveType)
+	location := vs.store.FindFreeLocation(types.HardDriveType)
 	if location == nil {
 		return nil, fmt.Errorf("no space left")
 	}
@@ -2,6 +2,7 @@ package weed_server
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"net/http"
 
 	"google.golang.org/grpc"
@@ -37,7 +38,7 @@ type VolumeServer struct {
 
 func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	port int, publicUrl string,
-	folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []storage.DiskType,
+	folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []types.DiskType,
 	idxFolder string,
 	needleMapKind storage.NeedleMapKind,
 	masterNodes []string, pulseSeconds int,
@@ -3,6 +3,7 @@ package shell
 import (
 	"flag"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
 	"sort"
 
@@ -325,7 +326,9 @@ func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, ra
 
 		var possibleDestinationEcNodes []*EcNode
 		for _, n := range racks[RackId(rackId)].ecNodes {
-			possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
+			if _, found := n.info.DiskInfos[string(types.HardDriveType)]; found {
+				possibleDestinationEcNodes = append(possibleDestinationEcNodes, n)
+			}
 		}
 		sourceEcNodes := rackEcNodesWithVid[rackId]
 		averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes))
@@ -386,11 +389,15 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
 		rackEcNodes = append(rackEcNodes, node)
 	}
 
-	ecNodeIdToShardCount := groupByCount(rackEcNodes, func(node *EcNode) (id string, count int) {
-		for _, ecShardInfo := range node.info.EcShardInfos {
+	ecNodeIdToShardCount := groupByCount(rackEcNodes, func(ecNode *EcNode) (id string, count int) {
+		diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+		if !found {
+			return
+		}
+		for _, ecShardInfo := range diskInfo.EcShardInfos {
 			count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount()
 		}
-		return node.info.Id, count
+		return ecNode.info.Id, count
 	})
 
 	var totalShardCount int
@@ -411,26 +418,30 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
 		if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 <= averageShardCount {
 
 			emptyNodeIds := make(map[uint32]bool)
-			for _, shards := range emptyNode.info.EcShardInfos {
-				emptyNodeIds[shards.Id] = true
+			if emptyDiskInfo, found := emptyNode.info.DiskInfos[string(types.HardDriveType)]; found {
+				for _, shards := range emptyDiskInfo.EcShardInfos {
+					emptyNodeIds[shards.Id] = true
+				}
 			}
-			for _, shards := range fullNode.info.EcShardInfos {
-				if _, found := emptyNodeIds[shards.Id]; !found {
-					for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {
-
-						fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)
-
-						err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
-						if err != nil {
-							return err
-						}
-
-						ecNodeIdToShardCount[emptyNode.info.Id]++
-						ecNodeIdToShardCount[fullNode.info.Id]--
-						hasMove = true
-						break
+			if fullDiskInfo, found := fullNode.info.DiskInfos[string(types.HardDriveType)]; found {
+				for _, shards := range fullDiskInfo.EcShardInfos {
+					if _, found := emptyNodeIds[shards.Id]; !found {
+						for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() {
+
+							fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id)
+
+							err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing)
+							if err != nil {
+								return err
+							}
+
+							ecNodeIdToShardCount[emptyNode.info.Id]++
+							ecNodeIdToShardCount[fullNode.info.Id]--
+							hasMove = true
+							break
+						}
 					}
 					break
 				}
 			}
 		}
@@ -511,7 +522,11 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[
 func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode {
 	vidLocations := make(map[needle.VolumeId][]*EcNode)
 	for _, ecNode := range allEcNodes {
-		for _, shardInfo := range ecNode.info.EcShardInfos {
+		diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+		if !found {
+			continue
+		}
+		for _, shardInfo := range diskInfo.EcShardInfos {
 			vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode)
 		}
 	}
@@ -3,6 +3,7 @@ package shell
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"math"
 	"sort"
 
@@ -159,8 +160,15 @@ func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (cou
 	return
 }
 
-func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) {
-	return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos)
+func countFreeShardSlots(dn *master_pb.DataNodeInfo, diskType types.DiskType) (count int) {
+	if dn.DiskInfos == nil {
+		return 0
+	}
+	diskInfo := dn.DiskInfos[string(diskType)]
+	if diskInfo == nil {
+		return 0
+	}
+	return int(diskInfo.MaxVolumeCount-diskInfo.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(diskInfo.EcShardInfos)
 }
 
 type RackId string
@@ -174,10 +182,12 @@ type EcNode struct {
 }
 
 func (ecNode *EcNode) localShardIdCount(vid uint32) int {
-	for _, ecShardInfo := range ecNode.info.EcShardInfos {
-		if vid == ecShardInfo.Id {
-			shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
-			return shardBits.ShardIdCount()
+	for _, diskInfo := range ecNode.info.DiskInfos {
+		for _, ecShardInfo := range diskInfo.EcShardInfos {
+			if vid == ecShardInfo.Id {
+				shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+				return shardBits.ShardIdCount()
+			}
 		}
 	}
 	return 0
@@ -214,7 +224,7 @@ func collectEcVolumeServersByDc(topo *master_pb.TopologyInfo, selectedDataCenter
 			return
 		}
 
-		freeEcSlots := countFreeShardSlots(dn)
+		freeEcSlots := countFreeShardSlots(dn, types.HardDriveType)
 		ecNodes = append(ecNodes, &EcNode{
 			info: dn,
 			dc:   dc,
@@ -278,9 +288,11 @@ func ceilDivide(total, n int) int {
 
 func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits {
 
-	for _, shardInfo := range ecNode.info.EcShardInfos {
-		if needle.VolumeId(shardInfo.Id) == vid {
-			return erasure_coding.ShardBits(shardInfo.EcIndexBits)
+	if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
+		for _, shardInfo := range diskInfo.EcShardInfos {
+			if needle.VolumeId(shardInfo.Id) == vid {
+				return erasure_coding.ShardBits(shardInfo.EcIndexBits)
+			}
 		}
 	}
 
@@ -290,18 +302,26 @@ func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.Shar
 func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode {
 
 	foundVolume := false
-	for _, shardInfo := range ecNode.info.EcShardInfos {
-		if needle.VolumeId(shardInfo.Id) == vid {
-			oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
-			newShardBits := oldShardBits
-			for _, shardId := range shardIds {
-				newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
+	diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]
+	if found {
+		for _, shardInfo := range diskInfo.EcShardInfos {
+			if needle.VolumeId(shardInfo.Id) == vid {
+				oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
+				newShardBits := oldShardBits
+				for _, shardId := range shardIds {
+					newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
+				}
+				shardInfo.EcIndexBits = uint32(newShardBits)
+				ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
+				foundVolume = true
+				break
 			}
-			shardInfo.EcIndexBits = uint32(newShardBits)
-			ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
-			foundVolume = true
-			break
 		}
+	} else {
+		diskInfo = &master_pb.DiskInfo{
+			Type: string(types.HardDriveType),
+		}
+		ecNode.info.DiskInfos[string(types.HardDriveType)] = diskInfo
 	}
 
 	if !foundVolume {
@@ -309,10 +329,11 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,
 		for _, shardId := range shardIds {
 			newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId))
 		}
-		ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
+		diskInfo.EcShardInfos = append(diskInfo.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{
 			Id:          uint32(vid),
 			Collection:  collection,
 			EcIndexBits: uint32(newShardBits),
+			DiskType:    string(types.HardDriveType),
 		})
 		ecNode.freeEcSlot -= len(shardIds)
 	}
@@ -322,15 +343,17 @@ func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string,
 
 func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode {
 
-	for _, shardInfo := range ecNode.info.EcShardInfos {
-		if needle.VolumeId(shardInfo.Id) == vid {
-			oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
-			newShardBits := oldShardBits
-			for _, shardId := range shardIds {
-				newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
+	if diskInfo, found := ecNode.info.DiskInfos[string(types.HardDriveType)]; found {
+		for _, shardInfo := range diskInfo.EcShardInfos {
+			if needle.VolumeId(shardInfo.Id) == vid {
+				oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits)
+				newShardBits := oldShardBits
+				for _, shardId := range shardIds {
+					newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId))
+				}
+				shardInfo.EcIndexBits = uint32(newShardBits)
+				ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
 			}
-			shardInfo.EcIndexBits = uint32(newShardBits)
-			ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount()
 		}
 	}
 
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
 
 	"google.golang.org/grpc"
@@ -225,9 +226,11 @@ func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyIn
 func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) {
 
 	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-		for _, v := range dn.EcShardInfos {
-			if v.Collection == selectedCollection && v.Id == uint32(vid) {
-				ecShardInfos = append(ecShardInfos, v)
+		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
+			for _, v := range diskInfo.EcShardInfos {
+				if v.Collection == selectedCollection && v.Id == uint32(vid) {
+					ecShardInfos = append(ecShardInfos, v)
+				}
 			}
 		}
 	})
@@ -239,9 +242,11 @@ func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection stri
 
 	vidMap := make(map[uint32]bool)
 	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-		for _, v := range dn.EcShardInfos {
-			if v.Collection == selectedCollection {
-				vidMap[v.Id] = true
+		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
+			for _, v := range diskInfo.EcShardInfos {
+				if v.Collection == selectedCollection {
+					vidMap[v.Id] = true
+				}
 			}
 		}
 	})
@@ -257,9 +262,11 @@ func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeI
 
 	nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits)
 	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-		for _, v := range dn.EcShardInfos {
-			if v.Id == uint32(vid) {
-				nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
+		if diskInfo, found := dn.DiskInfos[string(types.HardDriveType)]; found {
+			for _, v := range diskInfo.EcShardInfos {
+				if v.Id == uint32(vid) {
+					nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits)
+				}
 			}
 		}
 	})
@@ -281,10 +281,12 @@ func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection stri
 
 	vidMap := make(map[uint32]bool)
 	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-		for _, v := range dn.VolumeInfos {
-			if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
-				if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
-					vidMap[v.Id] = true
+		for _, diskInfo := range dn.DiskInfos {
+			for _, v := range diskInfo.VolumeInfos {
+				if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
+					if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
+						vidMap[v.Id] = true
+					}
 				}
 			}
 		}
@@ -188,10 +188,12 @@ func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection
 
 	needEcxFile := true
 	var localShardBits erasure_coding.ShardBits
-	for _, ecShardInfo := range rebuilder.info.EcShardInfos {
-		if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
-			needEcxFile = false
-			localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+	for _, diskInfo := range rebuilder.info.DiskInfos {
+		for _, ecShardInfo := range diskInfo.EcShardInfos {
+			if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId {
+				needEcxFile = false
+				localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
+			}
 		}
 	}
 
@@ -247,15 +249,17 @@ type EcShardMap map[needle.VolumeId]EcShardLocations
 type EcShardLocations [][]*EcNode
 
 func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) {
-	for _, shardInfo := range ecNode.info.EcShardInfos {
-		if shardInfo.Collection == collection {
-			existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
-			if !found {
-				existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
-				ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
-			}
-			for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
-				existing[shardId] = append(existing[shardId], ecNode)
+	for _, diskInfo := range ecNode.info.DiskInfos {
+		for _, shardInfo := range diskInfo.EcShardInfos {
+			if shardInfo.Collection == collection {
+				existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)]
+				if !found {
+					existing = make([][]*EcNode, erasure_coding.TotalShardsCount)
+					ecShardMap[needle.VolumeId(shardInfo.Id)] = existing
+				}
+				for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() {
+					existing[shardId] = append(existing[shardId], ecNode)
+				}
 			}
 		}
 	}
 }
@@ -127,6 +127,7 @@ func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNod
 	return &EcNode{
 		info: &master_pb.DataNodeInfo{
 			Id:        dataNodeId,
+			DiskInfos: make(map[string]*master_pb.DiskInfo),
 		},
 		dc:   dc,
 		rack: RackId(rack),
@@ -4,8 +4,8 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
 	"os"
 	"sort"
@@ -111,7 +111,7 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
 	return nil
 }
 
-func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []storage.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
+func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
 
 	for _, diskType := range diskTypes {
 		if err := balanceVolumeServersByDiskType(commandEnv, diskType, volumeReplicas, nodes, volumeSizeLimit, collection, applyBalancing); err != nil {
@@ -122,7 +122,7 @@ func balanceVolumeServers(commandEnv *CommandEnv, diskTypes []storage.DiskType,
 
 }
 
-func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType storage.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
+func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType types.DiskType, volumeReplicas map[uint32][]*VolumeReplica, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error {
 
 	// balance writable volumes
 	for _, n := range nodes {
@@ -135,7 +135,7 @@ func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType storage.Dis
 			return v.DiskType == string(diskType) && (!v.ReadOnly && v.Size < volumeSizeLimit)
 		})
 	}
-	if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount, sortWritableVolumes, applyBalancing); err != nil {
+	if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortWritableVolumes, applyBalancing); err != nil {
 		return err
 	}
 
@@ -150,7 +150,7 @@ func balanceVolumeServersByDiskType(commandEnv *CommandEnv, diskType storage.Dis
 			return v.DiskType == string(diskType) && (v.ReadOnly || v.Size >= volumeSizeLimit)
 		})
 	}
-	if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount, sortReadOnlyVolumes, applyBalancing); err != nil {
+	if err := balanceSelectedVolume(commandEnv, volumeReplicas, nodes, capacityByMaxVolumeCount(diskType), sortReadOnlyVolumes, applyBalancing); err != nil {
 		return err
 	}
 
@@ -175,21 +175,21 @@ func collectVolumeServersByDc(t *master_pb.TopologyInfo, selectedDataCenter stri
 	return
 }
 
-func collectVolumeDiskTypes(t *master_pb.TopologyInfo) (diskTypes []storage.DiskType) {
+func collectVolumeDiskTypes(t *master_pb.TopologyInfo) (diskTypes []types.DiskType) {
 	knownTypes := make(map[string]bool)
 	for _, dc := range t.DataCenterInfos {
 		for _, r := range dc.RackInfos {
 			for _, dn := range r.DataNodeInfos {
-				for _, vi := range dn.VolumeInfos {
-					if _, found := knownTypes[vi.DiskType]; !found {
-						knownTypes[vi.DiskType] = true
+				for diskType, _ := range dn.DiskInfos {
+					if _, found := knownTypes[diskType]; !found {
+						knownTypes[diskType] = true
					}
 				}
 			}
 		}
 	}
 	for diskType, _ := range knownTypes {
-		diskTypes = append(diskTypes, storage.ToDiskType(diskType))
+		diskTypes = append(diskTypes, types.ToDiskType(diskType))
 	}
 	return
 }
@@ -203,11 +203,24 @@ type Node struct {
 
 type CapacityFunc func(*master_pb.DataNodeInfo) int
 
-func capacityByMaxSsdVolumeCount(info *master_pb.DataNodeInfo) int {
-	return int(info.MaxSsdVolumeCount)
+func capacityByMaxVolumeCount(diskType types.DiskType) CapacityFunc {
+	return func(info *master_pb.DataNodeInfo) int {
+		diskInfo, found := info.DiskInfos[string(diskType)]
+		if !found {
+			return 0
+		}
+		return int(diskInfo.MaxVolumeCount)
+	}
 }
-func capacityByMaxVolumeCount(info *master_pb.DataNodeInfo) int {
-	return int(info.MaxVolumeCount)
+
+func capacityByFreeVolumeCount(diskType types.DiskType) CapacityFunc {
+	return func(info *master_pb.DataNodeInfo) int {
+		diskInfo, found := info.DiskInfos[string(diskType)]
+		if !found {
+			return 0
+		}
+		return int(diskInfo.MaxVolumeCount - diskInfo.VolumeCount)
+	}
 }
 
 func (n *Node) localVolumeRatio(capacityFunc CapacityFunc) float64 {
@@ -220,9 +233,11 @@ func (n *Node) localVolumeNextRatio(capacityFunc CapacityFunc) float64 {
 
 func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool) {
 	n.selectedVolumes = make(map[uint32]*master_pb.VolumeInformationMessage)
-	for _, v := range n.info.VolumeInfos {
-		if fn(v) {
-			n.selectedVolumes[v.Id] = v
+	for _, diskInfo := range n.info.DiskInfos {
+		for _, v := range diskInfo.VolumeInfos {
+			if fn(v) {
+				n.selectedVolumes[v.Id] = v
+			}
 		}
 	}
 }
@@ -329,7 +344,7 @@ func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, f
 	}
 	fmt.Fprintf(os.Stdout, "  moving %s volume %s%d %s => %s\n", v.DiskType, collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id)
 	if applyChange {
-		return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second, "")
+		return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second, v.DiskType)
 	}
 	return nil
 }
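The balance command above turns capacityByMaxVolumeCount and capacityByFreeVolumeCount into factories that bind a disk type and return a CapacityFunc closure, so the same balancing loop can run once per disk type. A standalone sketch of that pattern follows; the types are simplified stand-ins, not the committed code.

package main

import "fmt"

type DiskInfo struct{ VolumeCount, MaxVolumeCount int }
type DataNode struct{ DiskInfos map[string]*DiskInfo }

// CapacityFunc mirrors the shape used above: a per-node capacity metric.
type CapacityFunc func(*DataNode) int

// capacityByMaxVolumeCount binds the metric to one disk type, so callers can
// pass capacityByMaxVolumeCount("ssd") wherever a CapacityFunc is expected.
func capacityByMaxVolumeCount(diskType string) CapacityFunc {
    return func(n *DataNode) int {
        d, found := n.DiskInfos[diskType]
        if !found {
            return 0
        }
        return d.MaxVolumeCount
    }
}

func main() {
    n := &DataNode{DiskInfos: map[string]*DiskInfo{"ssd": {VolumeCount: 1, MaxVolumeCount: 3}}}
    fn := capacityByMaxVolumeCount("ssd")
    fmt.Println(fn(n)) // 3
}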
@@ -71,10 +71,12 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
 	var allLocations []location
 	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 		loc := newLocation(dc, string(rack), dn)
-		for _, v := range dn.VolumeInfos {
-			if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
-				allLocations = append(allLocations, loc)
-				continue
+		for _, diskInfo := range dn.DiskInfos {
+			for _, v := range diskInfo.VolumeInfos {
+				if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 {
+					allLocations = append(allLocations, loc)
+					continue
+				}
 			}
 		}
 	})
@@ -5,6 +5,7 @@ import (
 	"flag"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
 	"path/filepath"
 	"sort"
@@ -102,8 +103,6 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
 	}
 
-	// find the most under populated data nodes
-	keepDataNodesSorted(allLocations)
 
 	return c.fixUnderReplicatedVolumes(commandEnv, writer, takeAction, underReplicatedVolumeIds, volumeReplicas, allLocations)
 
 }
@@ -113,11 +112,13 @@ func collectVolumeReplicaLocations(resp *master_pb.VolumeListResponse) (map[uint
 	var allLocations []location
 	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 		loc := newLocation(dc, string(rack), dn)
-		for _, v := range dn.VolumeInfos {
-			volumeReplicas[v.Id] = append(volumeReplicas[v.Id], &VolumeReplica{
-				location: &loc,
-				info:     v,
-			})
+		for _, diskInfo := range dn.DiskInfos {
+			for _, v := range diskInfo.VolumeInfos {
+				volumeReplicas[v.Id] = append(volumeReplicas[v.Id], &VolumeReplica{
+					location: &loc,
+					info:     v,
+				})
+			}
 		}
 		allLocations = append(allLocations, loc)
 	})
@@ -157,15 +158,18 @@ func (c *commandVolumeFixReplication) fixOverReplicatedVolumes(commandEnv *Comma
 }
 
 func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *CommandEnv, writer io.Writer, takeAction bool, underReplicatedVolumeIds []uint32, volumeReplicas map[uint32][]*VolumeReplica, allLocations []location) error {
 
 	for _, vid := range underReplicatedVolumeIds {
 		replicas := volumeReplicas[vid]
 		replica := pickOneReplicaToCopyFrom(replicas)
 		replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(replica.info.ReplicaPlacement))
 		foundNewLocation := false
 		hasSkippedCollection := false
+		keepDataNodesSorted(allLocations, replica.info.DiskType)
 		for _, dst := range allLocations {
 			// check whether data nodes satisfy the constraints
-			if dst.dataNode.FreeVolumeCount > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
+			fn := capacityByFreeVolumeCount(types.ToDiskType(replica.info.DiskType))
+			if fn(dst.dataNode) > 0 && satisfyReplicaPlacement(replicaPlacement, replicas, dst) {
 				// check collection name pattern
 				if *c.collectionPattern != "" {
 					matched, err := filepath.Match(*c.collectionPattern, replica.info.Collection)
@@ -202,11 +206,11 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
 				}
 
 				// adjust free volume count
-				dst.dataNode.FreeVolumeCount--
-				keepDataNodesSorted(allLocations)
+				dst.dataNode.DiskInfos[replica.info.DiskType].FreeVolumeCount--
 				break
 			}
 		}
 
 		if !foundNewLocation && !hasSkippedCollection {
 			fmt.Fprintf(writer, "failed to place volume %d replica as %s, existing:%+v\n", replica.info.Id, replicaPlacement, len(replicas))
 		}
@@ -215,9 +219,10 @@ func (c *commandVolumeFixReplication) fixUnderReplicatedVolumes(commandEnv *Comm
 	return nil
 }
 
-func keepDataNodesSorted(dataNodes []location) {
+func keepDataNodesSorted(dataNodes []location, diskType string) {
+	fn := capacityByFreeVolumeCount(types.ToDiskType(diskType))
 	sort.Slice(dataNodes, func(i, j int) bool {
-		return dataNodes[i].dataNode.FreeVolumeCount > dataNodes[j].dataNode.FreeVolumeCount
+		return fn(dataNodes[i].dataNode) > fn(dataNodes[j].dataNode)
 	})
 }
 
@@ -285,18 +285,20 @@ func (c *commandVolumeFsck) collectVolumeIds(verbose bool, writer io.Writer) (vo
 	}
 
 	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
-		for _, vi := range t.VolumeInfos {
-			volumeIdToServer[vi.Id] = VInfo{
-				server:     t.Id,
-				collection: vi.Collection,
-				isEcVolume: false,
+		for _, diskInfo := range t.DiskInfos{
+			for _, vi := range diskInfo.VolumeInfos {
+				volumeIdToServer[vi.Id] = VInfo{
+					server:     t.Id,
+					collection: vi.Collection,
+					isEcVolume: false,
+				}
 			}
-		}
-		for _, ecShardInfo := range t.EcShardInfos {
-			volumeIdToServer[ecShardInfo.Id] = VInfo{
-				server:     t.Id,
-				collection: ecShardInfo.Collection,
-				isEcVolume: true,
+			for _, ecShardInfo := range diskInfo.EcShardInfos {
+				volumeIdToServer[ecShardInfo.Id] = VInfo{
+					server:     t.Id,
+					collection: ecShardInfo.Collection,
+					isEcVolume: true,
+				}
 			}
 		}
 	})
@@ -1,6 +1,7 @@
 package shell
 
 import (
+	"bytes"
 	"context"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -44,8 +45,25 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
 	return nil
 }
 
+func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
+	var buf bytes.Buffer
+	for diskType, diskInfo := range diskInfos {
+		if diskType == "" {
+			diskType = "hdd"
+		}
+		fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
+	}
+	return buf.String()
+}
+
+func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
+	return buf.String()
+}
+
 func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics {
-	fmt.Fprintf(writer, "Topology volume:%d/%d active:%d free:%d remote:%d volumeSizeLimit:%d MB\n", t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount, volumeSizeLimitMb)
+	fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
 	sort.Slice(t.DataCenterInfos, func(i, j int) bool {
 		return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id
 	})
@@ -57,7 +75,7 @@ func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLi
 	return s
 }
 func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics {
-	fmt.Fprintf(writer, "  DataCenter %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+	fmt.Fprintf(writer, "  DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
 	sort.Slice(t.RackInfos, func(i, j int) bool {
 		return t.RackInfos[i].Id < t.RackInfos[j].Id
@@ -69,7 +87,7 @@ func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statisti
 	return s
 }
 func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
-	fmt.Fprintf(writer, "    Rack %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+	fmt.Fprintf(writer, "    Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
 	sort.Slice(t.DataNodeInfos, func(i, j int) bool {
 		return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id
@@ -81,8 +99,22 @@ func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
 	return s
 }
 func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
-	fmt.Fprintf(writer, "      DataNode %s volume:%d/%d active:%d free:%d remote:%d\n", t.Id, t.VolumeCount, t.MaxVolumeCount, t.ActiveVolumeCount, t.FreeVolumeCount, t.RemoteVolumeCount)
+	fmt.Fprintf(writer, "      DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
+	for _, diskInfo := range t.DiskInfos {
+		s = s.plus(writeDiskInfo(writer, diskInfo))
+	}
+	fmt.Fprintf(writer, "      DataNode %s %+v \n", t.Id, s)
+	return s
+}
+
+func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo) statistics {
+	var s statistics
+	diskType := t.Type
+	if diskType == "" {
+		diskType = "hdd"
+	}
+	fmt.Fprintf(writer, "        Disk %s(%s)\n", diskType, diskInfoToString(t))
 	sort.Slice(t.VolumeInfos, func(i, j int) bool {
 		return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id
 	})
@@ -90,13 +122,14 @@ func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
 		s = s.plus(writeVolumeInformationMessage(writer, vi))
 	}
 	for _, ecShardInfo := range t.EcShardInfos {
-		fmt.Fprintf(writer, "        ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
+		fmt.Fprintf(writer, "          ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
 	}
-	fmt.Fprintf(writer, "      DataNode %s %+v \n", t.Id, s)
+	fmt.Fprintf(writer, "        Disk %s %+v \n", diskType, s)
 	return s
 }
 
 func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics {
-	fmt.Fprintf(writer, "        volume %+v \n", t)
+	fmt.Fprintf(writer, "          volume %+v \n", t)
 	return newStatistics(t)
 }
@@ -8,6 +8,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
 	"os"
 	"sort"
@@ -100,17 +101,19 @@ func evacuateNormalVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListRes
 
 	// move away normal volumes
 	volumeReplicas, _ := collectVolumeReplicaLocations(resp)
-	for _, vol := range thisNode.info.VolumeInfos {
-		hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange)
-		if err != nil {
-			return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err)
-		}
-		if !hasMoved {
-			if skipNonMoveable {
-				replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement))
-				fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String())
-			} else {
-				return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer)
+	for _, diskInfo := range thisNode.info.DiskInfos {
+		for _, vol := range diskInfo.VolumeInfos {
+			hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange)
+			if err != nil {
+				return fmt.Errorf("move away volume %d from %s: %v", vol.Id, volumeServer, err)
+			}
+			if !hasMoved {
+				if skipNonMoveable {
+					replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(vol.ReplicaPlacement))
+					fmt.Fprintf(writer, "skipping non moveable volume %d replication:%s\n", vol.Id, replicaPlacement.String())
+				} else {
+					return fmt.Errorf("failed to move volume %d from %s", vol.Id, volumeServer)
+				}
 			}
 		}
 	}
@@ -126,16 +129,18 @@ func evacuateEcVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListRespons
 	}
 
 	// move away ec volumes
-	for _, ecShardInfo := range thisNode.info.EcShardInfos {
-		hasMoved, err := moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange)
-		if err != nil {
-			return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err)
-		}
-		if !hasMoved {
-			if skipNonMoveable {
-				fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer)
-			} else {
-				return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer)
+	for _, diskInfo := range thisNode.info.DiskInfos {
+		for _, ecShardInfo := range diskInfo.EcShardInfos {
+			hasMoved, err := moveAwayOneEcVolume(commandEnv, ecShardInfo, thisNode, otherNodes, applyChange)
+			if err != nil {
+				return fmt.Errorf("move away volume %d from %s: %v", ecShardInfo.Id, volumeServer, err)
+			}
+			if !hasMoved {
+				if skipNonMoveable {
+					fmt.Fprintf(writer, "failed to move away ec volume %d from %s\n", ecShardInfo.Id, volumeServer)
+				} else {
+					return fmt.Errorf("failed to move away ec volume %d from %s", ecShardInfo.Id, volumeServer)
+				}
 			}
 		}
 	}
@@ -174,8 +179,9 @@ func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEc
 }
 
 func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, vol *master_pb.VolumeInformationMessage, thisNode *Node, otherNodes []*Node, applyChange bool) (hasMoved bool, err error) {
+	fn := capacityByFreeVolumeCount(types.ToDiskType(vol.DiskType))
 	sort.Slice(otherNodes, func(i, j int) bool {
-		return otherNodes[i].localVolumeRatio(capacityByMaxVolumeCount)+otherNodes[i].localVolumeRatio(capacityByMaxSsdVolumeCount) < otherNodes[j].localVolumeRatio(capacityByMaxVolumeCount)+otherNodes[j].localVolumeRatio(capacityByMaxSsdVolumeCount)
+		return otherNodes[i].localVolumeRatio(fn) > otherNodes[j].localVolumeRatio(fn)
 	})
 
 	for i := 0; i < len(otherNodes); i++ {
@@ -86,9 +86,11 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s
 
 	vidMap := make(map[uint32]bool)
 	eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
-		for _, v := range dn.VolumeInfos {
-			if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" {
-				vidMap[v.Id] = true
+		for _, diskInfo := range dn.DiskInfos {
+			for _, v := range diskInfo.VolumeInfos {
+				if v.Collection == selectedCollection && v.RemoteStorageKey != "" && v.RemoteStorageName != "" {
+					vidMap[v.Id] = true
+				}
 			}
 		}
 	})
weed/shell/command_volume_tier_move.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package shell

import (
	"context"
	"flag"
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
	"io"
	"time"

	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

func init() {
	Commands = append(Commands, &commandVolumeTierMove{})
}

type commandVolumeTierMove struct {
}

func (c *commandVolumeTierMove) Name() string {
	return "volume.tier.upload"
}

func (c *commandVolumeTierMove) Help() string {
	return `change a volume from one disk type to another

	volume.tier.move -source=hdd -target=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
	volume.tier.move -target=hdd [-collection=""] -volumeId=<volume_id>

`
}

func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	if err = commandEnv.confirmIsLocked(); err != nil {
		return
	}

	tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	volumeId := tierCommand.Int("volumeId", 0, "the volume id")
	collection := tierCommand.String("collection", "", "the collection name")
	fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
	quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes without no writes for this period")
	source := tierCommand.String("fromDiskType", "", "the source disk type")
	target := tierCommand.String("toDiskType", "", "the target disk type")
	if err = tierCommand.Parse(args); err != nil {
		return nil
	}

	if *source == *target {
		return fmt.Errorf("source tier %s is the same as target tier %s", *source, *target)
	}

	vid := needle.VolumeId(*volumeId)

	// volumeId is provided
	if vid != 0 {
		// return doVolumeTierMove(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
	}

	// apply to all volumes in the collection
	// reusing collectVolumeIdsForEcEncode for now
	volumeIds, err := collectVolumeIdsForTierChange(commandEnv, *source, *collection, *fullPercentage, *quietPeriod)
	if err != nil {
		return err
	}
	fmt.Printf("tier move volumes: %v\n", volumeIds)

	return nil
}

func collectVolumeIdsForTierChange(commandEnv *CommandEnv, sourceTier string, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {

	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return
	}

	quietSeconds := int64(quietPeriod / time.Second)
	nowUnixSeconds := time.Now().Unix()

	fmt.Printf("collect %s volumes quiet for: %d seconds\n", sourceTier, quietSeconds)

	vidMap := make(map[uint32]bool)
	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
		for _, diskInfo := range dn.DiskInfos {
			for _, v := range diskInfo.VolumeInfos {
				if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == types.ToDiskType(sourceTier) {
					if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
						vidMap[v.Id] = true
					}
				}
			}
		}
	})

	for vid := range vidMap {
		vids = append(vids, needle.VolumeId(vid))
	}

	return
}
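Usage note on the new command, as committed in this merge: the flags it actually defines are -fromDiskType and -toDiskType, while its help text still shows -source/-target, its Name() still returns "volume.tier.upload", and the Do method only collects candidate volume ids (the doVolumeTierMove call is commented out). Going by the flag definitions, an invocation would look roughly like:

	volume.tier.move -fromDiskType=hdd -toDiskType=ssd -collection="" -fullPercent=95 -quietFor=24h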
@@ -2,6 +2,7 @@ package storage
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -19,7 +20,7 @@ import (
 type DiskLocation struct {
 	Directory              string
 	IdxDirectory           string
-	DiskType               DiskType
+	DiskType               types.DiskType
 	MaxVolumeCount         int
 	OriginalMaxVolumeCount int
 	MinFreeSpacePercent    float32
@@ -33,7 +34,7 @@ type DiskLocation struct {
 	isDiskSpaceLow bool
 }
 
-func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32, idxDir string, diskType DiskType) *DiskLocation {
+func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32, idxDir string, diskType types.DiskType) *DiskLocation {
 	dir = util.ResolvePath(dir)
 	if idxDir == "" {
 		idxDir = dir
@@ -57,7 +57,7 @@ func (l *DiskLocation) FindEcShard(vid needle.VolumeId, shardId erasure_coding.S
 
 func (l *DiskLocation) LoadEcShard(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) (err error) {
 
-	ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.Directory, collection, vid, shardId)
+	ecVolumeShard, err := erasure_coding.NewEcVolumeShard(l.DiskType, l.Directory, collection, vid, shardId)
 	if err != nil {
 		if err == os.ErrNotExist {
 			return os.ErrNotExist
@@ -68,7 +68,7 @@ func (l *DiskLocation) LoadEcShard(collection string, vid needle.VolumeId, shard
 	defer l.ecVolumesLock.Unlock()
 	ecVolume, found := l.ecVolumes[vid]
 	if !found {
-		ecVolume, err = erasure_coding.NewEcVolume(l.Directory, l.IdxDirectory, collection, vid)
+		ecVolume, err = erasure_coding.NewEcVolume(l.DiskType, l.Directory, l.IdxDirectory, collection, vid)
 		if err != nil {
 			return fmt.Errorf("failed to create ec volume %d: %v", vid, err)
 		}
@@ -2,6 +2,7 @@ package erasure_coding
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"os"
 	"path"
 	"strconv"
@@ -20,11 +21,12 @@ type EcVolumeShard struct {
 	dir         string
 	ecdFile     *os.File
 	ecdFileSize int64
+	DiskType    types.DiskType
 }
 
-func NewEcVolumeShard(dirname string, collection string, id needle.VolumeId, shardId ShardId) (v *EcVolumeShard, e error) {
+func NewEcVolumeShard(diskType types.DiskType, dirname string, collection string, id needle.VolumeId, shardId ShardId) (v *EcVolumeShard, e error) {
 
-	v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: shardId}
+	v = &EcVolumeShard{dir: dirname, Collection: collection, VolumeId: id, ShardId: shardId, DiskType: diskType}
 
 	baseFileName := v.FileName()
 
@@ -36,10 +36,11 @@ type EcVolume struct {
 	Version           needle.Version
 	ecjFile           *os.File
 	ecjFileAccessLock sync.Mutex
+	diskType          types.DiskType
 }
 
-func NewEcVolume(dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) {
-	ev = &EcVolume{dir: dir, dirIdx: dirIdx, Collection: collection, VolumeId: vid}
+func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection string, vid needle.VolumeId) (ev *EcVolume, err error) {
+	ev = &EcVolume{dir: dir, dirIdx: dirIdx, Collection: collection, VolumeId: vid, diskType: diskType}
 
 	dataBaseFileName := EcShardFileName(collection, dir, int(vid))
 	indexBaseFileName := EcShardFileName(collection, dirIdx, int(vid))
@@ -191,6 +192,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V
 			m = &master_pb.VolumeEcShardInformationMessage{
 				Id:         uint32(s.VolumeId),
 				Collection: s.Collection,
+				DiskType:   string(ev.diskType),
 			}
 			messages = append(messages, m)
 		}
@@ -10,13 +10,15 @@ type EcVolumeInfo struct {
VolumeId needle.VolumeId
Collection string
ShardBits ShardBits
DiskType string
}

func NewEcVolumeInfo(collection string, vid needle.VolumeId, shardBits ShardBits) *EcVolumeInfo {
func NewEcVolumeInfo(diskType string, collection string, vid needle.VolumeId, shardBits ShardBits) *EcVolumeInfo {
return &EcVolumeInfo{
Collection: collection,
VolumeId: vid,
ShardBits: shardBits,
DiskType: diskType,
}
}

@@ -45,6 +47,7 @@ func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo {
VolumeId: ecInfo.VolumeId,
Collection: ecInfo.Collection,
ShardBits: ecInfo.ShardBits.Minus(other.ShardBits),
DiskType: ecInfo.DiskType,
}

return ret

@@ -55,6 +58,7 @@ func (ecInfo *EcVolumeInfo) ToVolumeEcShardInformationMessage() (ret *master_pb.
Id: uint32(ecInfo.VolumeId),
EcIndexBits: uint32(ecInfo.ShardBits),
Collection: ecInfo.Collection,
DiskType: ecInfo.DiskType,
}
}

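EcIndexBits / ShardBits above is a bitmask of which erasure-coded shards a volume server holds, and the new DiskType field only tags where those shards live; it passes through Minus and the protobuf message untouched. A minimal, self-contained sketch of the bitmask operations this relies on (toy types, not the erasure_coding package):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // shardBits is a toy version of erasure_coding.ShardBits: bit i set means shard i is held.
    type shardBits uint32

    func (b shardBits) addShardId(id uint) shardBits    { return b | (1 << id) }
    func (b shardBits) minus(other shardBits) shardBits { return b &^ other }
    func (b shardBits) shardIdCount() int               { return bits.OnesCount32(uint32(b)) }

    func main() {
    	before := shardBits(0).addShardId(0).addShardId(3).addShardId(7)
    	after := shardBits(0).addShardId(0).addShardId(7)

    	fmt.Println(before.shardIdCount())              // 3 shards reported last time
    	fmt.Println(before.minus(after).shardIdCount()) // 1: shard 3 has been dropped
    }
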
@@ -207,19 +207,13 @@ func (s *Store) GetRack() string {

func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
var volumeMessages []*master_pb.VolumeInformationMessage
maxVolumeCount := 0
maxSsdVolumeCount := 0
maxVolumeCounts := make(map[string]uint32)
var maxFileKey NeedleId
collectionVolumeSize := make(map[string]uint64)
collectionVolumeReadOnlyCount := make(map[string]map[string]uint8)
for _, location := range s.Locations {
var deleteVids []needle.VolumeId
switch location.DiskType {
case SsdType:
maxSsdVolumeCount = maxSsdVolumeCount + location.MaxVolumeCount
case HardDriveType:
maxVolumeCount = maxVolumeCount + location.MaxVolumeCount
}
maxVolumeCounts[string(location.DiskType)] += uint32(location.MaxVolumeCount)
location.volumesLock.RLock()
for _, v := range location.volumes {
curMaxFileKey, volumeMessage := v.ToVolumeInformationMessage()

@@ -294,8 +288,7 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
Ip: s.Ip,
Port: uint32(s.Port),
PublicUrl: s.PublicUrl,
MaxVolumeCount: uint32(maxVolumeCount),
MaxSsdVolumeCount: uint32(maxSsdVolumeCount),
MaxVolumeCounts: maxVolumeCounts,
MaxFileKey: NeedleIdToUint64(maxFileKey),
DataCenter: s.dataCenter,
Rack: s.rack,

@@ -478,6 +471,9 @@ func (s *Store) GetVolumeSizeLimit() uint64 {

func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
volumeSizeLimit := s.GetVolumeSizeLimit()
if volumeSizeLimit == 0 {
return
}
for _, diskLocation := range s.Locations {
if diskLocation.OriginalMaxVolumeCount == 0 {
currentMaxVolumeCount := diskLocation.MaxVolumeCount

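The hunks above replace the two hard-coded counters (hdd and ssd) with one map keyed by the disk type string, which is what the heartbeat's max_volume_counts field now carries. A small standalone sketch of that aggregation pattern, with illustrative types rather than the real Store/DiskLocation:

    package main

    import "fmt"

    // DiskType mirrors the string-based disk type used in the heartbeat ("" for hdd, "ssd" for ssd).
    type DiskType string

    // Location is a stand-in for one configured volume folder on a volume server.
    type Location struct {
    	DiskType       DiskType
    	MaxVolumeCount int
    }

    // collectMaxVolumeCounts folds per-location limits into one map, the shape
    // sent in the heartbeat's max_volume_counts field.
    func collectMaxVolumeCounts(locations []Location) map[string]uint32 {
    	maxVolumeCounts := make(map[string]uint32)
    	for _, loc := range locations {
    		maxVolumeCounts[string(loc.DiskType)] += uint32(loc.MaxVolumeCount)
    	}
    	return maxVolumeCounts
    }

    func main() {
    	locations := []Location{
    		{DiskType: "", MaxVolumeCount: 7},    // hdd folder
    		{DiskType: "", MaxVolumeCount: 8},    // second hdd folder
    		{DiskType: "ssd", MaxVolumeCount: 3}, // ssd folder
    	}
    	fmt.Println(collectMaxVolumeCounts(locations)) // map[:15 ssd:3]
    }
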
@@ -58,6 +58,7 @@ func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId er
Id: uint32(vid),
Collection: collection,
EcIndexBits: uint32(shardBits.AddShardId(shardId)),
DiskType: string(location.DiskType),
}
return nil
} else if err == os.ErrNotExist {

@@ -82,6 +83,7 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar
Id: uint32(vid),
Collection: ecShard.Collection,
EcIndexBits: uint32(shardBits.AddShardId(shardId)),
DiskType: string(ecShard.DiskType),
}

for _, location := range s.Locations {

@@ -1,4 +1,4 @@
package storage
package types

import (
"strings"

@@ -27,7 +27,7 @@ func ToDiskType(vt string) (diskType DiskType) {

func (diskType DiskType) String() string{
if diskType == "" {
return "hdd"
return ""
}
return string(diskType)
}

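Only the String() hunk of the relocated volume_disk_type.go is shown above. The sketch below illustrates the kind of normalization ToDiskType performs, treating the empty string as the default hard-drive tier; the accepted spellings here are an assumption for illustration, not copied from the SeaweedFS source:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // toDiskType is an illustrative stand-in for types.ToDiskType: it normalizes the
    // -disk flag / heartbeat string into a canonical disk type, with the empty string
    // standing for the default hard-drive tier.
    func toDiskType(v string) string {
    	switch strings.ToLower(v) {
    	case "", "hdd":
    		return "" // default hard-drive tier
    	case "ssd":
    		return "ssd"
    	default:
    		return strings.ToLower(v) // other tiers keep their own name
    	}
    }

    func main() {
    	fmt.Printf("%q %q %q\n", toDiskType("HDD"), toDiskType(""), toDiskType("ssd")) // "" "" "ssd"
    }
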
@@ -171,7 +171,7 @@ func (v *Volume) IndexFileSize() uint64 {
return v.nm.IndexFileSize()
}

func (v *Volume) DiskType() DiskType {
func (v *Volume) DiskType() types.DiskType {
return v.location.DiskType
}

@@ -2,7 +2,7 @@ package topology

import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/types"

"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"

@@ -30,12 +30,12 @@ func (c *Collection) String() string {
return fmt.Sprintf("Name:%s, volumeSizeLimit:%d, storageType2VolumeLayout:%v", c.Name, c.volumeSizeLimit, c.storageType2VolumeLayout)
}

func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) *VolumeLayout {
keyString := rp.String()
if ttl != nil {
keyString += ttl.String()
}
if diskType != storage.HardDriveType {
if diskType != types.HardDriveType {
keyString += string(diskType)
}
vl := c.storageType2VolumeLayout.Get(keyString, func() interface{} {

@@ -44,12 +44,12 @@ func (c *Collection) GetOrCreateVolumeLayout(rp *super_block.ReplicaPlacement, t
return vl.(*VolumeLayout)
}

func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
func (c *Collection) DeleteVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) {
keyString := rp.String()
if ttl != nil {
keyString += ttl.String()
}
if diskType != storage.HardDriveType {
if diskType != types.HardDriveType {
keyString += string(diskType)
}
c.storageType2VolumeLayout.Delete(keyString)

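The layout key built in GetOrCreateVolumeLayout/DeleteVolumeLayout is the replica placement string, plus the TTL string when set, plus the disk type only when it is not the default hard drive. A tiny sketch of the resulting keys, using plain strings instead of the real ReplicaPlacement/TTL types:

    package main

    import "fmt"

    // layoutKey mirrors how a volume layout is keyed inside a collection.
    // (Illustrative only; the real code uses super_block.ReplicaPlacement and needle.TTL.)
    func layoutKey(replicaPlacement, ttl, diskType string) string {
    	key := replicaPlacement
    	if ttl != "" {
    		key += ttl
    	}
    	if diskType != "" && diskType != "hdd" {
    		key += diskType
    	}
    	return key
    }

    func main() {
    	fmt.Println(layoutKey("000", "", ""))    // "000"    -> default hdd layout
    	fmt.Println(layoutKey("001", "3d", ""))  // "0013d"  -> hdd layout with TTL
    	fmt.Println(layoutKey("001", "", "ssd")) // "001ssd" -> separate ssd layout
    }
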
@@ -1,6 +1,8 @@
package topology

import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
import (
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

type DataCenter struct {
NodeImpl

@@ -10,6 +12,7 @@ func NewDataCenter(id string) *DataCenter {
dc := &DataCenter{}
dc.id = NodeId(id)
dc.nodeType = "DataCenter"
dc.diskUsages = newDiskUsages()
dc.children = make(map[NodeId]Node)
dc.NodeImpl.value = dc
return dc

@@ -30,9 +33,6 @@ func (dc *DataCenter) GetOrCreateRack(rackName string) *Rack {
func (dc *DataCenter) ToMap() interface{} {
m := make(map[string]interface{})
m["Id"] = dc.Id()
m["Max"] = dc.GetMaxVolumeCount()
m["MaxSsd"] = dc.GetMaxSsdVolumeCount()
m["Free"] = dc.FreeSpace()
var racks []interface{}
for _, c := range dc.Children() {
rack := c.(*Rack)

@@ -44,14 +44,8 @@ func (dc *DataCenter) ToMap() interface{} {

func (dc *DataCenter) ToDataCenterInfo() *master_pb.DataCenterInfo {
m := &master_pb.DataCenterInfo{
Id: string(dc.Id()),
VolumeCount: uint64(dc.GetVolumeCount()),
MaxVolumeCount: uint64(dc.GetMaxVolumeCount()),
MaxSsdVolumeCount: uint64(dc.GetMaxSsdVolumeCount()),
SsdVolumeCount: uint64(dc.GetSsdVolumeCount()),
FreeVolumeCount: uint64(dc.FreeSpace()),
ActiveVolumeCount: uint64(dc.GetActiveVolumeCount()),
RemoteVolumeCount: uint64(dc.GetRemoteVolumeCount()),
Id: string(dc.Id()),
DiskInfos: dc.diskUsages.ToDiskInfo(),
}
for _, c := range dc.Children() {
rack := c.(*Rack)

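Across DataCenterInfo, RackInfo and DataNodeInfo the flat volume counters give way to a per-disk-type DiskInfos map fed by diskUsages. The sketch below shows the shape of that bookkeeping with toy types; the field names in the nested map are illustrative, not the master_pb message:

    package main

    import "fmt"

    // diskUsage is a stand-in for the per-disk-type counters that replace the
    // old flat VolumeCount / SsdVolumeCount / MaxVolumeCount fields.
    type diskUsage struct {
    	volumeCount    int64
    	maxVolumeCount int64
    }

    type diskUsages struct {
    	usages map[string]*diskUsage // keyed by disk type ("", "ssd", ...)
    }

    func newDiskUsages() *diskUsages {
    	return &diskUsages{usages: make(map[string]*diskUsage)}
    }

    func (d *diskUsages) getOrCreate(diskType string) *diskUsage {
    	u, ok := d.usages[diskType]
    	if !ok {
    		u = &diskUsage{}
    		d.usages[diskType] = u
    	}
    	return u
    }

    // toDiskInfo flattens the counters into a per-disk-type map, the same idea as
    // diskUsages.ToDiskInfo() feeding the DiskInfos field above.
    func (d *diskUsages) toDiskInfo() map[string]map[string]int64 {
    	ret := make(map[string]map[string]int64)
    	for diskType, u := range d.usages {
    		ret[diskType] = map[string]int64{
    			"volumeCount":     u.volumeCount,
    			"maxVolumeCount":  u.maxVolumeCount,
    			"freeVolumeCount": u.maxVolumeCount - u.volumeCount,
    		}
    	}
    	return ret
    }

    func main() {
    	du := newDiskUsages()
    	du.getOrCreate("").volumeCount = 7
    	du.getOrCreate("").maxVolumeCount = 25
    	du.getOrCreate("ssd").volumeCount = 3
    	du.getOrCreate("ssd").maxVolumeCount = 12
    	fmt.Println(du.toDiskInfo())
    }
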
@ -2,13 +2,11 @@ package topology
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage"
|
||||
|
@ -16,29 +14,26 @@ import (
|
|||
|
||||
type DataNode struct {
|
||||
NodeImpl
|
||||
volumes map[needle.VolumeId]storage.VolumeInfo
|
||||
Ip string
|
||||
Port int
|
||||
PublicUrl string
|
||||
LastSeen int64 // unix time in seconds
|
||||
ecShards map[needle.VolumeId]*erasure_coding.EcVolumeInfo
|
||||
ecShardsLock sync.RWMutex
|
||||
Ip string
|
||||
Port int
|
||||
PublicUrl string
|
||||
LastSeen int64 // unix time in seconds
|
||||
}
|
||||
|
||||
func NewDataNode(id string) *DataNode {
|
||||
s := &DataNode{}
|
||||
s.id = NodeId(id)
|
||||
s.nodeType = "DataNode"
|
||||
s.volumes = make(map[needle.VolumeId]storage.VolumeInfo)
|
||||
s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
|
||||
s.NodeImpl.value = s
|
||||
return s
|
||||
dn := &DataNode{}
|
||||
dn.id = NodeId(id)
|
||||
dn.nodeType = "DataNode"
|
||||
dn.diskUsages = newDiskUsages()
|
||||
dn.children = make(map[NodeId]Node)
|
||||
dn.NodeImpl.value = dn
|
||||
return dn
|
||||
}
|
||||
|
||||
func (dn *DataNode) String() string {
|
||||
dn.RLock()
|
||||
defer dn.RUnlock()
|
||||
return fmt.Sprintf("Node:%s, volumes:%v, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl)
|
||||
return fmt.Sprintf("Node:%s, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.Ip, dn.Port, dn.PublicUrl)
|
||||
}
|
||||
|
||||
func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
|
||||
|
@ -47,37 +42,23 @@ func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO
|
|||
return dn.doAddOrUpdateVolume(v)
|
||||
}
|
||||
|
||||
func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
|
||||
if oldV, ok := dn.volumes[v.Id]; !ok {
|
||||
dn.volumes[v.Id] = v
|
||||
if v.DiskType == storage.SsdType {
|
||||
dn.UpAdjustSsdVolumeCountDelta(1)
|
||||
} else {
|
||||
dn.UpAdjustVolumeCountDelta(1)
|
||||
}
|
||||
if v.IsRemote() {
|
||||
dn.UpAdjustRemoteVolumeCountDelta(1)
|
||||
}
|
||||
if !v.ReadOnly {
|
||||
dn.UpAdjustActiveVolumeCountDelta(1)
|
||||
}
|
||||
dn.UpAdjustMaxVolumeId(v.Id)
|
||||
isNew = true
|
||||
} else {
|
||||
if oldV.IsRemote() != v.IsRemote() {
|
||||
if v.IsRemote() {
|
||||
dn.UpAdjustRemoteVolumeCountDelta(1)
|
||||
}
|
||||
if oldV.IsRemote() {
|
||||
dn.UpAdjustRemoteVolumeCountDelta(-1)
|
||||
}
|
||||
}
|
||||
isChangedRO = dn.volumes[v.Id].ReadOnly != v.ReadOnly
|
||||
dn.volumes[v.Id] = v
|
||||
func (dn *DataNode) getOrCreateDisk(diskType string) *Disk {
|
||||
c, found := dn.children[NodeId(diskType)]
|
||||
if !found {
|
||||
c = NewDisk(diskType)
|
||||
dn.doLinkChildNode(c)
|
||||
}
|
||||
return
|
||||
disk := c.(*Disk)
|
||||
return disk
|
||||
}
|
||||
|
||||
func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
|
||||
disk := dn.getOrCreateDisk(v.DiskType)
|
||||
return disk.AddOrUpdateVolume(v)
|
||||
}
|
||||
|
||||
// UpdateVolumes detects new/deleted/changed volumes on a volume server
|
||||
// used in master to notify master clients of these changes.
|
||||
func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changeRO []storage.VolumeInfo) {
|
||||
|
||||
actualVolumeMap := make(map[needle.VolumeId]storage.VolumeInfo)
|
||||
|
@ -88,22 +69,26 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
|
|||
dn.Lock()
|
||||
defer dn.Unlock()
|
||||
|
||||
for vid, v := range dn.volumes {
|
||||
existingVolumes := dn.getVolumes()
|
||||
|
||||
for _, v := range existingVolumes {
|
||||
vid := v.Id
|
||||
if _, ok := actualVolumeMap[vid]; !ok {
|
||||
glog.V(0).Infoln("Deleting volume id:", vid)
|
||||
delete(dn.volumes, vid)
|
||||
disk := dn.getOrCreateDisk(v.DiskType)
|
||||
delete(disk.volumes, vid)
|
||||
deletedVolumes = append(deletedVolumes, v)
|
||||
if v.DiskType == storage.SsdType {
|
||||
dn.UpAdjustSsdVolumeCountDelta(-1)
|
||||
} else {
|
||||
dn.UpAdjustVolumeCountDelta(-1)
|
||||
}
|
||||
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType))
|
||||
deltaDiskUsage.volumeCount = -1
|
||||
if v.IsRemote() {
|
||||
dn.UpAdjustRemoteVolumeCountDelta(-1)
|
||||
deltaDiskUsage.remoteVolumeCount = -1
|
||||
}
|
||||
if !v.ReadOnly {
|
||||
dn.UpAdjustActiveVolumeCountDelta(-1)
|
||||
deltaDiskUsage.activeVolumeCount = -1
|
||||
}
|
||||
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
}
|
||||
}
|
||||
for _, v := range actualVolumes {
|
||||
|
@ -123,18 +108,19 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
|
|||
defer dn.Unlock()
|
||||
|
||||
for _, v := range deletedVolumes {
|
||||
delete(dn.volumes, v.Id)
|
||||
if v.DiskType == storage.SsdType {
|
||||
dn.UpAdjustSsdVolumeCountDelta(-1)
|
||||
} else {
|
||||
dn.UpAdjustVolumeCountDelta(-1)
|
||||
}
|
||||
disk := dn.getOrCreateDisk(v.DiskType)
|
||||
delete(disk.volumes, v.Id)
|
||||
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType))
|
||||
deltaDiskUsage.volumeCount = -1
|
||||
if v.IsRemote() {
|
||||
dn.UpAdjustRemoteVolumeCountDelta(-1)
|
||||
deltaDiskUsage.remoteVolumeCount = -1
|
||||
}
|
||||
if !v.ReadOnly {
|
||||
dn.UpAdjustActiveVolumeCountDelta(-1)
|
||||
deltaDiskUsage.activeVolumeCount = -1
|
||||
}
|
||||
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
}
|
||||
for _, v := range newVolumes {
|
||||
dn.doAddOrUpdateVolume(v)
|
||||
|
@ -142,20 +128,47 @@ func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.Volu
|
|||
return
|
||||
}
|
||||
|
||||
func (dn *DataNode) AdjustMaxVolumeCounts(maxVolumeCounts map[string]uint32) {
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
for diskType, maxVolumeCount := range maxVolumeCounts {
|
||||
if maxVolumeCount == 0 {
|
||||
// the volume server may have set the max to zero
|
||||
continue
|
||||
}
|
||||
dt := types.ToDiskType(diskType)
|
||||
currentDiskUsage := dn.diskUsages.getOrCreateDisk(dt)
|
||||
if currentDiskUsage.maxVolumeCount == int64(maxVolumeCount) {
|
||||
continue
|
||||
}
|
||||
disk := dn.getOrCreateDisk(dt.String())
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(dt)
|
||||
deltaDiskUsage.maxVolumeCount = int64(maxVolumeCount) - currentDiskUsage.maxVolumeCount
|
||||
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
}
|
||||
}
|
||||
|
||||
func (dn *DataNode) GetVolumes() (ret []storage.VolumeInfo) {
|
||||
dn.RLock()
|
||||
for _, v := range dn.volumes {
|
||||
ret = append(ret, v)
|
||||
for _, c := range dn.children {
|
||||
disk := c.(*Disk)
|
||||
ret = append(ret, disk.GetVolumes()...)
|
||||
}
|
||||
dn.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (dn *DataNode) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) {
|
||||
func (dn *DataNode) GetVolumesById(id needle.VolumeId) (vInfo storage.VolumeInfo, err error) {
|
||||
dn.RLock()
|
||||
defer dn.RUnlock()
|
||||
vInfo, ok := dn.volumes[id]
|
||||
if ok {
|
||||
found := false
|
||||
for _, c := range dn.children {
|
||||
disk := c.(*Disk)
|
||||
vInfo, found = disk.volumes[id]
|
||||
if found {
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
return vInfo, nil
|
||||
} else {
|
||||
return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found")
|
||||
|
@ -193,31 +206,19 @@ func (dn *DataNode) Url() string {
|
|||
func (dn *DataNode) ToMap() interface{} {
|
||||
ret := make(map[string]interface{})
|
||||
ret["Url"] = dn.Url()
|
||||
ret["Volumes"] = dn.GetVolumeCount() + dn.GetSsdVolumeCount()
|
||||
ret["VolumeIds"] = dn.GetVolumeIds()
|
||||
ret["EcShards"] = dn.GetEcShardCount()
|
||||
ret["Max"] = dn.GetMaxVolumeCount() + dn.GetMaxSsdVolumeCount()
|
||||
ret["Free"] = dn.FreeSpace()
|
||||
ret["PublicUrl"] = dn.PublicUrl
|
||||
ret["Disks"] = dn.diskUsages.ToMap()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
|
||||
m := &master_pb.DataNodeInfo{
|
||||
Id: string(dn.Id()),
|
||||
VolumeCount: uint64(dn.GetVolumeCount()),
|
||||
MaxVolumeCount: uint64(dn.GetMaxVolumeCount()),
|
||||
MaxSsdVolumeCount: uint64(dn.GetMaxSsdVolumeCount()),
|
||||
SsdVolumeCount: uint64(dn.GetSsdVolumeCount()),
|
||||
FreeVolumeCount: uint64(dn.FreeSpace()),
|
||||
ActiveVolumeCount: uint64(dn.GetActiveVolumeCount()),
|
||||
RemoteVolumeCount: uint64(dn.GetRemoteVolumeCount()),
|
||||
Id: string(dn.Id()),
|
||||
DiskInfos: make(map[string]*master_pb.DiskInfo),
|
||||
}
|
||||
for _, v := range dn.GetVolumes() {
|
||||
m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage())
|
||||
}
|
||||
for _, ecv := range dn.GetEcShards() {
|
||||
m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage())
|
||||
for _, c := range dn.Children() {
|
||||
disk := c.(*Disk)
|
||||
m.DiskInfos[string(disk.Id())] = disk.ToDiskInfo()
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
@ -226,11 +227,21 @@ func (dn *DataNode) ToDataNodeInfo() *master_pb.DataNodeInfo {
|
|||
func (dn *DataNode) GetVolumeIds() string {
|
||||
dn.RLock()
|
||||
defer dn.RUnlock()
|
||||
ids := make([]int, 0, len(dn.volumes))
|
||||
existingVolumes := dn.getVolumes()
|
||||
ids := make([]int, 0, len(existingVolumes))
|
||||
|
||||
for k := range dn.volumes {
|
||||
for k := range existingVolumes {
|
||||
ids = append(ids, int(k))
|
||||
}
|
||||
|
||||
return util.HumanReadableIntsMax(100, ids...)
|
||||
}
|
||||
|
||||
func (dn *DataNode) getVolumes() []storage.VolumeInfo {
|
||||
var existingVolumes []storage.VolumeInfo
|
||||
for _, c := range dn.children {
|
||||
disk := c.(*Disk)
|
||||
existingVolumes = append(existingVolumes, disk.GetVolumes()...)
|
||||
}
|
||||
return existingVolumes
|
||||
}
|
||||
|
|
|
@ -3,12 +3,14 @@ package topology
|
|||
import (
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
)
|
||||
|
||||
func (dn *DataNode) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) {
|
||||
dn.RLock()
|
||||
for _, ecVolumeInfo := range dn.ecShards {
|
||||
ret = append(ret, ecVolumeInfo)
|
||||
for _, c := range dn.children {
|
||||
disk := c.(*Disk)
|
||||
ret = append(ret, disk.GetEcShards()...)
|
||||
}
|
||||
dn.RUnlock()
|
||||
return ret
|
||||
|
@ -21,10 +23,17 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo)
|
|||
actualEcShardMap[ecShards.VolumeId] = ecShards
|
||||
}
|
||||
|
||||
existingEcShards := dn.GetEcShards()
|
||||
|
||||
// found out the newShards and deletedShards
|
||||
var newShardCount, deletedShardCount int
|
||||
dn.ecShardsLock.RLock()
|
||||
for vid, ecShards := range dn.ecShards {
|
||||
for _, ecShards := range existingEcShards {
|
||||
|
||||
disk := dn.getOrCreateDisk(ecShards.DiskType)
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(ecShards.DiskType))
|
||||
|
||||
vid := ecShards.VolumeId
|
||||
if actualEcShards, ok := actualEcShardMap[vid]; !ok {
|
||||
// dn registered ec shards not found in the new set of ec shards
|
||||
deletedShards = append(deletedShards, ecShards)
|
||||
|
@ -42,26 +51,61 @@ func (dn *DataNode) UpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo)
|
|||
deletedShardCount += d.ShardIdCount()
|
||||
}
|
||||
}
|
||||
|
||||
deltaDiskUsage.ecShardCount = int64(newShardCount - deletedShardCount)
|
||||
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
|
||||
}
|
||||
for _, ecShards := range actualShards {
|
||||
if _, found := dn.ecShards[ecShards.VolumeId]; !found {
|
||||
|
||||
disk := dn.getOrCreateDisk(ecShards.DiskType)
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(ecShards.DiskType))
|
||||
|
||||
if !dn.hasEcShards(ecShards.VolumeId) {
|
||||
newShards = append(newShards, ecShards)
|
||||
newShardCount += ecShards.ShardIdCount()
|
||||
}
|
||||
|
||||
deltaDiskUsage.ecShardCount = int64(newShardCount)
|
||||
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
|
||||
}
|
||||
dn.ecShardsLock.RUnlock()
|
||||
|
||||
if len(newShards) > 0 || len(deletedShards) > 0 {
|
||||
// if changed, set to the new ec shard map
|
||||
dn.ecShardsLock.Lock()
|
||||
dn.ecShards = actualEcShardMap
|
||||
dn.UpAdjustEcShardCountDelta(int64(newShardCount - deletedShardCount))
|
||||
dn.ecShardsLock.Unlock()
|
||||
dn.doUpdateEcShards(actualShards)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (dn *DataNode) hasEcShards(volumeId needle.VolumeId) (found bool) {
|
||||
dn.RLock()
|
||||
defer dn.RUnlock()
|
||||
for _, c := range dn.children {
|
||||
disk := c.(*Disk)
|
||||
_, found = disk.ecShards[volumeId]
|
||||
if found {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (dn *DataNode) doUpdateEcShards(actualShards []*erasure_coding.EcVolumeInfo) {
|
||||
dn.Lock()
|
||||
for _, c := range dn.children {
|
||||
disk := c.(*Disk)
|
||||
disk.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo)
|
||||
}
|
||||
for _, shard := range actualShards {
|
||||
disk := dn.getOrCreateDisk(shard.DiskType)
|
||||
disk.ecShards[shard.VolumeId] = shard
|
||||
}
|
||||
dn.Unlock()
|
||||
}
|
||||
|
||||
func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_coding.EcVolumeInfo) {
|
||||
|
||||
for _, newShard := range newShards {
|
||||
|
@ -75,61 +119,25 @@ func (dn *DataNode) DeltaUpdateEcShards(newShards, deletedShards []*erasure_codi
|
|||
}
|
||||
|
||||
func (dn *DataNode) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
|
||||
dn.ecShardsLock.Lock()
|
||||
defer dn.ecShardsLock.Unlock()
|
||||
|
||||
delta := 0
|
||||
if existing, ok := dn.ecShards[s.VolumeId]; !ok {
|
||||
dn.ecShards[s.VolumeId] = s
|
||||
delta = s.ShardBits.ShardIdCount()
|
||||
} else {
|
||||
oldCount := existing.ShardBits.ShardIdCount()
|
||||
existing.ShardBits = existing.ShardBits.Plus(s.ShardBits)
|
||||
delta = existing.ShardBits.ShardIdCount() - oldCount
|
||||
}
|
||||
|
||||
dn.UpAdjustEcShardCountDelta(int64(delta))
|
||||
|
||||
disk := dn.getOrCreateDisk(s.DiskType)
|
||||
disk.AddOrUpdateEcShard(s)
|
||||
}
|
||||
|
||||
func (dn *DataNode) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
|
||||
dn.ecShardsLock.Lock()
|
||||
defer dn.ecShardsLock.Unlock()
|
||||
disk := dn.getOrCreateDisk(s.DiskType)
|
||||
disk.DeleteEcShard(s)
|
||||
}
|
||||
|
||||
if existing, ok := dn.ecShards[s.VolumeId]; ok {
|
||||
oldCount := existing.ShardBits.ShardIdCount()
|
||||
existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
|
||||
delta := existing.ShardBits.ShardIdCount() - oldCount
|
||||
dn.UpAdjustEcShardCountDelta(int64(delta))
|
||||
if existing.ShardBits.ShardIdCount() == 0 {
|
||||
delete(dn.ecShards, s.VolumeId)
|
||||
func (dn *DataNode) HasVolumesById(volumeId needle.VolumeId) (hasVolumeId bool) {
|
||||
|
||||
dn.RLock()
|
||||
defer dn.RUnlock()
|
||||
for _, c := range dn.children {
|
||||
disk := c.(*Disk)
|
||||
if disk.HasVolumesById(volumeId) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (dn *DataNode) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {
|
||||
|
||||
// check whether normal volumes has this volume id
|
||||
dn.RLock()
|
||||
_, ok := dn.volumes[id]
|
||||
if ok {
|
||||
hasVolumeId = true
|
||||
}
|
||||
dn.RUnlock()
|
||||
|
||||
if hasVolumeId {
|
||||
return
|
||||
}
|
||||
|
||||
// check whether ec shards has this volume id
|
||||
dn.ecShardsLock.RLock()
|
||||
_, ok = dn.ecShards[id]
|
||||
if ok {
|
||||
hasVolumeId = true
|
||||
}
|
||||
dn.ecShardsLock.RUnlock()
|
||||
|
||||
return
|
||||
return false
|
||||
|
||||
}
|
||||
|
|
weed/topology/disk.go (new file, 289 lines)

@@ -0,0 +1,289 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
"sync"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/storage"
|
||||
)
|
||||
|
||||
type Disk struct {
|
||||
NodeImpl
|
||||
volumes map[needle.VolumeId]storage.VolumeInfo
|
||||
ecShards map[needle.VolumeId]*erasure_coding.EcVolumeInfo
|
||||
ecShardsLock sync.RWMutex
|
||||
}
|
||||
|
||||
func NewDisk(diskType string) *Disk {
|
||||
s := &Disk{}
|
||||
s.id = NodeId(diskType)
|
||||
s.nodeType = "Disk"
|
||||
s.diskUsages = newDiskUsages()
|
||||
s.volumes = make(map[needle.VolumeId]storage.VolumeInfo, 2)
|
||||
s.ecShards = make(map[needle.VolumeId]*erasure_coding.EcVolumeInfo, 2)
|
||||
s.NodeImpl.value = s
|
||||
return s
|
||||
}
|
||||
|
||||
type DiskUsages struct {
|
||||
sync.RWMutex
|
||||
usages map[types.DiskType]*DiskUsageCounts
|
||||
}
|
||||
|
||||
func newDiskUsages() *DiskUsages {
|
||||
return &DiskUsages{
|
||||
usages: make(map[types.DiskType]*DiskUsageCounts),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DiskUsages) negative() *DiskUsages {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
t := newDiskUsages()
|
||||
for diskType, b := range d.usages {
|
||||
a := t.getOrCreateDisk(diskType)
|
||||
a.volumeCount = - b.volumeCount
|
||||
a.remoteVolumeCount = - b.remoteVolumeCount
|
||||
a.activeVolumeCount = - b.activeVolumeCount
|
||||
a.ecShardCount = - b.ecShardCount
|
||||
a.maxVolumeCount = - b.maxVolumeCount
|
||||
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (d *DiskUsages) ToMap() interface{} {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
ret := make(map[string]interface{})
|
||||
for diskType, diskUsage := range d.usages {
|
||||
ret[diskType.String()] = diskUsage.ToMap()
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (d *DiskUsages) ToDiskInfo() (map[string]*master_pb.DiskInfo) {
|
||||
ret := make(map[string]*master_pb.DiskInfo)
|
||||
for diskType, diskUsageCounts := range d.usages {
|
||||
m := &master_pb.DiskInfo{
|
||||
VolumeCount: uint64(diskUsageCounts.volumeCount),
|
||||
MaxVolumeCount: uint64(diskUsageCounts.maxVolumeCount),
|
||||
FreeVolumeCount: uint64(diskUsageCounts.maxVolumeCount - diskUsageCounts.volumeCount),
|
||||
ActiveVolumeCount: uint64(diskUsageCounts.activeVolumeCount),
|
||||
RemoteVolumeCount: uint64(diskUsageCounts.remoteVolumeCount),
|
||||
}
|
||||
ret[string(diskType)] = m
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (d *DiskUsages) FreeSpace() (freeSpace int64) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
for _, diskUsage := range d.usages {
|
||||
freeSpace += diskUsage.FreeSpace()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *DiskUsages) GetMaxVolumeCount() (maxVolumeCount int64) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
for _, diskUsage := range d.usages {
|
||||
maxVolumeCount += diskUsage.maxVolumeCount
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type DiskUsageCounts struct {
|
||||
volumeCount int64
|
||||
remoteVolumeCount int64
|
||||
activeVolumeCount int64
|
||||
ecShardCount int64
|
||||
maxVolumeCount int64
|
||||
}
|
||||
|
||||
func (a *DiskUsageCounts) addDiskUsageCounts(b *DiskUsageCounts) {
|
||||
a.volumeCount += b.volumeCount
|
||||
a.remoteVolumeCount += b.remoteVolumeCount
|
||||
a.activeVolumeCount += b.activeVolumeCount
|
||||
a.ecShardCount += b.ecShardCount
|
||||
a.maxVolumeCount += b.maxVolumeCount
|
||||
}
|
||||
|
||||
func (a *DiskUsageCounts) FreeSpace() int64 {
|
||||
freeVolumeSlotCount := a.maxVolumeCount + a.remoteVolumeCount - a.volumeCount
|
||||
if a.ecShardCount > 0 {
|
||||
freeVolumeSlotCount = freeVolumeSlotCount - a.ecShardCount/erasure_coding.DataShardsCount - 1
|
||||
}
|
||||
return freeVolumeSlotCount
|
||||
}
|
||||
|
||||
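FreeSpace above charges roughly one volume slot per DataShardsCount erasure-coded shards, plus one slot of headroom, against max + remote - volumes. A quick standalone check of that arithmetic, with DataShardsCount assumed to be 10 (the usual SeaweedFS data-shard count per EC volume):

    package main

    import "fmt"

    func main() {
    	// Free-slot formula from DiskUsageCounts.FreeSpace.
    	const dataShardsCount = 10
    	var maxVolumeCount, remoteVolumeCount, volumeCount, ecShardCount int64 = 100, 0, 60, 25

    	free := maxVolumeCount + remoteVolumeCount - volumeCount
    	if ecShardCount > 0 {
    		free = free - ecShardCount/dataShardsCount - 1
    	}
    	fmt.Println(free) // 37: 25 EC shards cost 25/10 = 2 slots, plus 1 slot of headroom
    }
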
func (a *DiskUsageCounts) minus(b *DiskUsageCounts) *DiskUsageCounts {
|
||||
return &DiskUsageCounts{
|
||||
volumeCount: a.volumeCount - b.volumeCount,
|
||||
remoteVolumeCount: a.remoteVolumeCount - b.remoteVolumeCount,
|
||||
activeVolumeCount: a.activeVolumeCount - b.activeVolumeCount,
|
||||
ecShardCount: a.ecShardCount - b.ecShardCount,
|
||||
maxVolumeCount: a.maxVolumeCount - b.maxVolumeCount,
|
||||
}
|
||||
}
|
||||
|
||||
func (diskUsage *DiskUsageCounts) ToMap() interface{} {
|
||||
ret := make(map[string]interface{})
|
||||
ret["Volumes"] = diskUsage.volumeCount
|
||||
ret["EcShards"] = diskUsage.ecShardCount
|
||||
ret["Max"] = diskUsage.maxVolumeCount
|
||||
ret["Free"] = diskUsage.FreeSpace()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (du *DiskUsages) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
|
||||
du.Lock()
|
||||
defer du.Unlock()
|
||||
t, found := du.usages[diskType]
|
||||
if found {
|
||||
return t
|
||||
}
|
||||
t = &DiskUsageCounts{}
|
||||
du.usages[diskType] = t
|
||||
return t
|
||||
}
|
||||
|
||||
func (d *Disk) String() string {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
return fmt.Sprintf("Disk:%s, volumes:%v, ecShards:%v", d.NodeImpl.String(), d.volumes, d.ecShards)
|
||||
}
|
||||
|
||||
func (d *Disk) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
return d.doAddOrUpdateVolume(v)
|
||||
}
|
||||
|
||||
func (d *Disk) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(v.DiskType))
|
||||
if oldV, ok := d.volumes[v.Id]; !ok {
|
||||
d.volumes[v.Id] = v
|
||||
deltaDiskUsage.volumeCount = 1
|
||||
if v.IsRemote() {
|
||||
deltaDiskUsage.remoteVolumeCount = 1
|
||||
}
|
||||
if !v.ReadOnly {
|
||||
deltaDiskUsage.activeVolumeCount = 1
|
||||
}
|
||||
d.UpAdjustMaxVolumeId(v.Id)
|
||||
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
isNew = true
|
||||
} else {
|
||||
if oldV.IsRemote() != v.IsRemote() {
|
||||
if v.IsRemote() {
|
||||
deltaDiskUsage.remoteVolumeCount = 1
|
||||
}
|
||||
if oldV.IsRemote() {
|
||||
deltaDiskUsage.remoteVolumeCount = -1
|
||||
}
|
||||
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
}
|
||||
isChangedRO = d.volumes[v.Id].ReadOnly != v.ReadOnly
|
||||
d.volumes[v.Id] = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *Disk) GetVolumes() (ret []storage.VolumeInfo) {
|
||||
d.RLock()
|
||||
for _, v := range d.volumes {
|
||||
ret = append(ret, v)
|
||||
}
|
||||
d.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (d *Disk) GetVolumesById(id needle.VolumeId) (storage.VolumeInfo, error) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
vInfo, ok := d.volumes[id]
|
||||
if ok {
|
||||
return vInfo, nil
|
||||
} else {
|
||||
return storage.VolumeInfo{}, fmt.Errorf("volumeInfo not found")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Disk) GetDataCenter() *DataCenter {
|
||||
dn := d.Parent()
|
||||
rack := dn.Parent()
|
||||
dcNode := rack.Parent()
|
||||
dcValue := dcNode.GetValue()
|
||||
return dcValue.(*DataCenter)
|
||||
}
|
||||
|
||||
func (d *Disk) GetRack() *Rack {
|
||||
return d.Parent().Parent().(*NodeImpl).value.(*Rack)
|
||||
}
|
||||
|
||||
func (d *Disk) GetTopology() *Topology {
|
||||
p := d.Parent()
|
||||
for p.Parent() != nil {
|
||||
p = p.Parent()
|
||||
}
|
||||
t := p.(*Topology)
|
||||
return t
|
||||
}
|
||||
|
||||
func (d *Disk) ToMap() interface{} {
|
||||
ret := make(map[string]interface{})
|
||||
diskUsage := d.diskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
|
||||
ret["Volumes"] = diskUsage.volumeCount
|
||||
ret["VolumeIds"] = d.GetVolumeIds()
|
||||
ret["EcShards"] = diskUsage.ecShardCount
|
||||
ret["Max"] = diskUsage.maxVolumeCount
|
||||
ret["Free"] = d.FreeSpace()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (d *Disk) FreeSpace() int64 {
|
||||
t := d.diskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
|
||||
return t.FreeSpace()
|
||||
}
|
||||
|
||||
func (d *Disk) ToDiskInfo() *master_pb.DiskInfo {
|
||||
diskUsage := d.diskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
|
||||
m := &master_pb.DiskInfo{
|
||||
Type: string(d.Id()),
|
||||
VolumeCount: uint64(diskUsage.volumeCount),
|
||||
MaxVolumeCount: uint64(diskUsage.maxVolumeCount),
|
||||
FreeVolumeCount: uint64(diskUsage.maxVolumeCount - diskUsage.volumeCount),
|
||||
ActiveVolumeCount: uint64(diskUsage.activeVolumeCount),
|
||||
RemoteVolumeCount: uint64(diskUsage.remoteVolumeCount),
|
||||
}
|
||||
for _, v := range d.GetVolumes() {
|
||||
m.VolumeInfos = append(m.VolumeInfos, v.ToVolumeInformationMessage())
|
||||
}
|
||||
for _, ecv := range d.GetEcShards() {
|
||||
m.EcShardInfos = append(m.EcShardInfos, ecv.ToVolumeEcShardInformationMessage())
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// GetVolumeIds returns the human readable volume ids limited to count of max 100.
|
||||
func (d *Disk) GetVolumeIds() string {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
ids := make([]int, 0, len(d.volumes))
|
||||
|
||||
for k := range d.volumes {
|
||||
ids = append(ids, int(k))
|
||||
}
|
||||
|
||||
return util.HumanReadableIntsMax(100, ids...)
|
||||
}
|
weed/topology/disk_ec.go (new file, 84 lines)

@@ -0,0 +1,84 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
)
|
||||
|
||||
func (d *Disk) GetEcShards() (ret []*erasure_coding.EcVolumeInfo) {
|
||||
d.RLock()
|
||||
for _, ecVolumeInfo := range d.ecShards {
|
||||
ret = append(ret, ecVolumeInfo)
|
||||
}
|
||||
d.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (d *Disk) AddOrUpdateEcShard(s *erasure_coding.EcVolumeInfo) {
|
||||
d.ecShardsLock.Lock()
|
||||
defer d.ecShardsLock.Unlock()
|
||||
|
||||
delta := 0
|
||||
if existing, ok := d.ecShards[s.VolumeId]; !ok {
|
||||
d.ecShards[s.VolumeId] = s
|
||||
delta = s.ShardBits.ShardIdCount()
|
||||
} else {
|
||||
oldCount := existing.ShardBits.ShardIdCount()
|
||||
existing.ShardBits = existing.ShardBits.Plus(s.ShardBits)
|
||||
delta = existing.ShardBits.ShardIdCount() - oldCount
|
||||
}
|
||||
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
|
||||
deltaDiskUsage.ecShardCount = int64(delta)
|
||||
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
|
||||
}
|
||||
|
||||
func (d *Disk) DeleteEcShard(s *erasure_coding.EcVolumeInfo) {
|
||||
d.ecShardsLock.Lock()
|
||||
defer d.ecShardsLock.Unlock()
|
||||
|
||||
if existing, ok := d.ecShards[s.VolumeId]; ok {
|
||||
oldCount := existing.ShardBits.ShardIdCount()
|
||||
existing.ShardBits = existing.ShardBits.Minus(s.ShardBits)
|
||||
delta := existing.ShardBits.ShardIdCount() - oldCount
|
||||
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(string(d.Id())))
|
||||
deltaDiskUsage.ecShardCount = int64(delta)
|
||||
d.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
|
||||
if existing.ShardBits.ShardIdCount() == 0 {
|
||||
delete(d.ecShards, s.VolumeId)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (d *Disk) HasVolumesById(id needle.VolumeId) (hasVolumeId bool) {
|
||||
|
||||
// check whether normal volumes has this volume id
|
||||
d.RLock()
|
||||
_, ok := d.volumes[id]
|
||||
if ok {
|
||||
hasVolumeId = true
|
||||
}
|
||||
d.RUnlock()
|
||||
|
||||
if hasVolumeId {
|
||||
return
|
||||
}
|
||||
|
||||
// check whether ec shards has this volume id
|
||||
d.ecShardsLock.RLock()
|
||||
_, ok = d.ecShards[id]
|
||||
if ok {
|
||||
hasVolumeId = true
|
||||
}
|
||||
d.ecShardsLock.RUnlock()
|
||||
|
||||
return
|
||||
|
||||
}
|
|
@ -2,40 +2,25 @@ package topology
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type NodeId string
|
||||
type Node interface {
|
||||
Id() NodeId
|
||||
String() string
|
||||
FreeSpace() int64
|
||||
AvailableSpaceFor(option *VolumeGrowOption) int64
|
||||
ReserveOneVolume(r int64, option *VolumeGrowOption) (*DataNode, error)
|
||||
UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64)
|
||||
UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64)
|
||||
UpAdjustVolumeCountDelta(volumeCountDelta int64)
|
||||
UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64)
|
||||
UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64)
|
||||
UpAdjustEcShardCountDelta(ecShardCountDelta int64)
|
||||
UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64)
|
||||
UpAdjustDiskUsageDelta(deltaDiskUsages *DiskUsages)
|
||||
UpAdjustMaxVolumeId(vid needle.VolumeId)
|
||||
GetDiskUsages() *DiskUsages
|
||||
|
||||
GetVolumeCount() int64
|
||||
GetSsdVolumeCount() int64
|
||||
GetEcShardCount() int64
|
||||
GetActiveVolumeCount() int64
|
||||
GetRemoteVolumeCount() int64
|
||||
GetMaxVolumeCount() int64
|
||||
GetMaxSsdVolumeCount() int64
|
||||
GetMaxVolumeId() needle.VolumeId
|
||||
SetParent(Node)
|
||||
LinkChildNode(node Node)
|
||||
|
@ -51,24 +36,22 @@ type Node interface {
|
|||
GetValue() interface{} //get reference to the topology,dc,rack,datanode
|
||||
}
|
||||
type NodeImpl struct {
|
||||
volumeCount int64
|
||||
remoteVolumeCount int64
|
||||
ssdVolumeCount int64
|
||||
activeVolumeCount int64
|
||||
ecShardCount int64
|
||||
maxVolumeCount int64
|
||||
maxSsdVolumeCount int64
|
||||
id NodeId
|
||||
parent Node
|
||||
sync.RWMutex // lock children
|
||||
children map[NodeId]Node
|
||||
maxVolumeId needle.VolumeId
|
||||
diskUsages *DiskUsages
|
||||
id NodeId
|
||||
parent Node
|
||||
sync.RWMutex // lock children
|
||||
children map[NodeId]Node
|
||||
maxVolumeId needle.VolumeId
|
||||
|
||||
//for rack, data center, topology
|
||||
nodeType string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (n *NodeImpl) GetDiskUsages() *DiskUsages {
|
||||
return n.diskUsages
|
||||
}
|
||||
|
||||
// the first node must satisfy filterFirstNodeFn(), the rest nodes must have one free slot
|
||||
func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, option *VolumeGrowOption, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) {
|
||||
var totalWeights int64
|
||||
|
@ -150,20 +133,14 @@ func (n *NodeImpl) String() string {
|
|||
func (n *NodeImpl) Id() NodeId {
|
||||
return n.id
|
||||
}
|
||||
func (n *NodeImpl) AvailableSpaceFor(option *VolumeGrowOption) int64 {
|
||||
freeVolumeSlotCount := n.maxVolumeCount + n.remoteVolumeCount - n.volumeCount
|
||||
if option.DiskType == storage.SsdType {
|
||||
freeVolumeSlotCount = n.maxSsdVolumeCount - n.ssdVolumeCount
|
||||
}
|
||||
if n.ecShardCount > 0 {
|
||||
freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
|
||||
}
|
||||
return freeVolumeSlotCount
|
||||
func (n *NodeImpl) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
|
||||
return n.diskUsages.getOrCreateDisk(diskType)
|
||||
}
|
||||
func (n *NodeImpl) FreeSpace() int64 {
|
||||
freeVolumeSlotCount := n.maxVolumeCount + n.maxSsdVolumeCount + n.remoteVolumeCount - n.volumeCount - n.ssdVolumeCount
|
||||
if n.ecShardCount > 0 {
|
||||
freeVolumeSlotCount = freeVolumeSlotCount - n.ecShardCount/erasure_coding.DataShardsCount - 1
|
||||
func (n *NodeImpl) AvailableSpaceFor(option *VolumeGrowOption) int64 {
|
||||
t := n.getOrCreateDisk(option.DiskType)
|
||||
freeVolumeSlotCount := t.maxVolumeCount + t.remoteVolumeCount - t.volumeCount
|
||||
if t.ecShardCount > 0 {
|
||||
freeVolumeSlotCount = freeVolumeSlotCount - t.ecShardCount/erasure_coding.DataShardsCount - 1
|
||||
}
|
||||
return freeVolumeSlotCount
|
||||
}
|
||||
|
@ -209,67 +186,13 @@ func (n *NodeImpl) ReserveOneVolume(r int64, option *VolumeGrowOption) (assigned
|
|||
return nil, errors.New("No free volume slot found!")
|
||||
}
|
||||
|
||||
func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative
|
||||
if maxVolumeCountDelta == 0 {
|
||||
return
|
||||
func (n *NodeImpl) UpAdjustDiskUsageDelta(deltaDiskUsages *DiskUsages) { //can be negative
|
||||
for diskType, diskUsage := range deltaDiskUsages.usages {
|
||||
existingDisk := n.getOrCreateDisk(diskType)
|
||||
existingDisk.addDiskUsageCounts(diskUsage)
|
||||
}
|
||||
atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta)
|
||||
if n.parent != nil {
|
||||
n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta)
|
||||
}
|
||||
}
|
||||
func (n *NodeImpl) UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta int64) { //can be negative
|
||||
if maxSsdVolumeCountDelta == 0 {
|
||||
return
|
||||
}
|
||||
atomic.AddInt64(&n.maxSsdVolumeCount, maxSsdVolumeCountDelta)
|
||||
if n.parent != nil {
|
||||
n.parent.UpAdjustMaxSsdVolumeCountDelta(maxSsdVolumeCountDelta)
|
||||
}
|
||||
}
|
||||
func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative
|
||||
if volumeCountDelta == 0 {
|
||||
return
|
||||
}
|
||||
atomic.AddInt64(&n.volumeCount, volumeCountDelta)
|
||||
if n.parent != nil {
|
||||
n.parent.UpAdjustVolumeCountDelta(volumeCountDelta)
|
||||
}
|
||||
}
|
||||
func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative
|
||||
if remoteVolumeCountDelta == 0 {
|
||||
return
|
||||
}
|
||||
atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta)
|
||||
if n.parent != nil {
|
||||
n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta)
|
||||
}
|
||||
}
|
||||
func (n *NodeImpl) UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta int64) { //can be negative
|
||||
if ssdVolumeCountDelta == 0 {
|
||||
return
|
||||
}
|
||||
atomic.AddInt64(&n.ssdVolumeCount, ssdVolumeCountDelta)
|
||||
if n.parent != nil {
|
||||
n.parent.UpAdjustSsdVolumeCountDelta(ssdVolumeCountDelta)
|
||||
}
|
||||
}
|
||||
func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative
|
||||
if ecShardCountDelta == 0 {
|
||||
return
|
||||
}
|
||||
atomic.AddInt64(&n.ecShardCount, ecShardCountDelta)
|
||||
if n.parent != nil {
|
||||
n.parent.UpAdjustEcShardCountDelta(ecShardCountDelta)
|
||||
}
|
||||
}
|
||||
func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) { //can be negative
|
||||
if activeVolumeCountDelta == 0 {
|
||||
return
|
||||
}
|
||||
atomic.AddInt64(&n.activeVolumeCount, activeVolumeCountDelta)
|
||||
if n.parent != nil {
|
||||
n.parent.UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta)
|
||||
n.parent.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
}
|
||||
}
|
||||
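All of the per-counter UpAdjust*Delta methods collapse into UpAdjustDiskUsageDelta, which applies the per-disk-type deltas locally and then recurses to the parent, so a change on one disk bubbles up through data node, rack, data center and topology. A self-contained sketch of that propagation with toy nodes:

    package main

    import "fmt"

    // node is a stand-in for NodeImpl: per-disk-type counters plus a parent pointer.
    type node struct {
    	name   string
    	parent *node
    	usages map[string]int64 // diskType -> accumulated volume count
    }

    func newNode(name string, parent *node) *node {
    	return &node{name: name, parent: parent, usages: make(map[string]int64)}
    }

    // upAdjust applies the delta locally, then recurses to the parent, which is the
    // propagation pattern UpAdjustDiskUsageDelta uses (disk -> data node -> rack -> dc -> topology).
    func (n *node) upAdjust(deltas map[string]int64) {
    	for diskType, d := range deltas {
    		n.usages[diskType] += d
    	}
    	if n.parent != nil {
    		n.parent.upAdjust(deltas)
    	}
    }

    func main() {
    	topo := newNode("topology", nil)
    	dc := newNode("dc1", topo)
    	rack := newNode("rack1", dc)
    	dn := newNode("dn1", rack)
    	disk := newNode("ssd", dn)

    	disk.upAdjust(map[string]int64{"ssd": 1}) // one new ssd volume
    	fmt.Println(topo.usages["ssd"], dc.usages["ssd"], rack.usages["ssd"]) // 1 1 1
    }
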
func (n *NodeImpl) UpAdjustMaxVolumeId(vid needle.VolumeId) { //can be negative
|
||||
|
@ -283,41 +206,18 @@ func (n *NodeImpl) UpAdjustMaxVolumeId(vid needle.VolumeId) { //can be negative
|
|||
func (n *NodeImpl) GetMaxVolumeId() needle.VolumeId {
|
||||
return n.maxVolumeId
|
||||
}
|
||||
func (n *NodeImpl) GetVolumeCount() int64 {
|
||||
return n.volumeCount
|
||||
}
|
||||
func (n *NodeImpl) GetSsdVolumeCount() int64 {
|
||||
return n.ssdVolumeCount
|
||||
}
|
||||
func (n *NodeImpl) GetEcShardCount() int64 {
|
||||
return n.ecShardCount
|
||||
}
|
||||
func (n *NodeImpl) GetRemoteVolumeCount() int64 {
|
||||
return n.remoteVolumeCount
|
||||
}
|
||||
func (n *NodeImpl) GetActiveVolumeCount() int64 {
|
||||
return n.activeVolumeCount
|
||||
}
|
||||
func (n *NodeImpl) GetMaxVolumeCount() int64 {
|
||||
return n.maxVolumeCount
|
||||
}
|
||||
func (n *NodeImpl) GetMaxSsdVolumeCount() int64 {
|
||||
return n.maxSsdVolumeCount
|
||||
}
|
||||
|
||||
func (n *NodeImpl) LinkChildNode(node Node) {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
n.doLinkChildNode(node)
|
||||
}
|
||||
|
||||
func (n *NodeImpl) doLinkChildNode(node Node) {
|
||||
if n.children[node.Id()] == nil {
|
||||
n.children[node.Id()] = node
|
||||
n.UpAdjustMaxVolumeCountDelta(node.GetMaxVolumeCount())
|
||||
n.UpAdjustMaxSsdVolumeCountDelta(node.GetMaxSsdVolumeCount())
|
||||
n.UpAdjustDiskUsageDelta(node.GetDiskUsages())
|
||||
n.UpAdjustMaxVolumeId(node.GetMaxVolumeId())
|
||||
n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
|
||||
n.UpAdjustSsdVolumeCountDelta(node.GetSsdVolumeCount())
|
||||
n.UpAdjustRemoteVolumeCountDelta(node.GetRemoteVolumeCount())
|
||||
n.UpAdjustEcShardCountDelta(node.GetEcShardCount())
|
||||
n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
|
||||
node.SetParent(n)
|
||||
glog.V(0).Infoln(n, "adds child", node.Id())
|
||||
}
|
||||
|
@ -330,13 +230,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
|
|||
if node != nil {
|
||||
node.SetParent(nil)
|
||||
delete(n.children, node.Id())
|
||||
n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
|
||||
n.UpAdjustSsdVolumeCountDelta(-node.GetSsdVolumeCount())
|
||||
n.UpAdjustRemoteVolumeCountDelta(-node.GetRemoteVolumeCount())
|
||||
n.UpAdjustEcShardCountDelta(-node.GetEcShardCount())
|
||||
n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
|
||||
n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
|
||||
n.UpAdjustMaxSsdVolumeCountDelta(-node.GetMaxSsdVolumeCount())
|
||||
n.UpAdjustDiskUsageDelta(node.GetDiskUsages().negative())
|
||||
glog.V(0).Infoln(n, "removes", node.Id())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package topology
|
|||
|
||||
import (
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
@ -14,6 +15,7 @@ func NewRack(id string) *Rack {
|
|||
r := &Rack{}
|
||||
r.id = NodeId(id)
|
||||
r.nodeType = "Rack"
|
||||
r.diskUsages = newDiskUsages()
|
||||
r.children = make(map[NodeId]Node)
|
||||
r.NodeImpl.value = r
|
||||
return r
|
||||
|
@ -28,7 +30,7 @@ func (r *Rack) FindDataNode(ip string, port int) *DataNode {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCount int64, maxSsdVolumeCount int64) *DataNode {
|
||||
func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVolumeCounts map[string]uint32) *DataNode {
|
||||
for _, c := range r.Children() {
|
||||
dn := c.(*DataNode)
|
||||
if dn.MatchLocation(ip, port) {
|
||||
|
@ -40,19 +42,19 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol
|
|||
dn.Ip = ip
|
||||
dn.Port = port
|
||||
dn.PublicUrl = publicUrl
|
||||
dn.maxVolumeCount = maxVolumeCount
|
||||
dn.maxSsdVolumeCount = maxSsdVolumeCount
|
||||
dn.LastSeen = time.Now().Unix()
|
||||
r.LinkChildNode(dn)
|
||||
for diskType, maxVolumeCount := range maxVolumeCounts {
|
||||
disk := NewDisk(diskType)
|
||||
disk.diskUsages.getOrCreateDisk(types.ToDiskType(diskType)).maxVolumeCount = int64(maxVolumeCount)
|
||||
dn.LinkChildNode(disk)
|
||||
}
|
||||
return dn
|
||||
}
|
||||
|
||||
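GetOrCreateDataNode now takes the heartbeat's max_volume_counts map and materializes one Disk child per disk type under the data node. A simplified sketch of that registration step (get-or-create instead of the LinkChildNode call used above, toy types throughout):

    package main

    import "fmt"

    // disk is a toy stand-in for the new topology Disk node: one child per disk type
    // under a data node, each carrying its own max volume count.
    type disk struct {
    	diskType       string
    	maxVolumeCount int64
    }

    type dataNode struct {
    	children map[string]*disk
    }

    // registerHeartbeat mirrors the loop above: every entry in the heartbeat's
    // max_volume_counts map becomes (or updates) one Disk child.
    func (dn *dataNode) registerHeartbeat(maxVolumeCounts map[string]uint32) {
    	for diskType, limit := range maxVolumeCounts {
    		d, ok := dn.children[diskType]
    		if !ok {
    			d = &disk{diskType: diskType}
    			dn.children[diskType] = d
    		}
    		d.maxVolumeCount = int64(limit)
    	}
    }

    func main() {
    	dn := &dataNode{children: make(map[string]*disk)}
    	dn.registerHeartbeat(map[string]uint32{"": 25, "ssd": 12})
    	fmt.Println(len(dn.children), dn.children[""].maxVolumeCount, dn.children["ssd"].maxVolumeCount) // 2 25 12
    }
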
func (r *Rack) ToMap() interface{} {
|
||||
m := make(map[string]interface{})
|
||||
m["Id"] = r.Id()
|
||||
m["Max"] = r.GetMaxVolumeCount()
|
||||
m["MaxSsd"] = r.GetMaxSsdVolumeCount()
|
||||
m["Free"] = r.FreeSpace()
|
||||
var dns []interface{}
|
||||
for _, c := range r.Children() {
|
||||
dn := c.(*DataNode)
|
||||
|
@ -64,14 +66,8 @@ func (r *Rack) ToMap() interface{} {
|
|||
|
||||
func (r *Rack) ToRackInfo() *master_pb.RackInfo {
|
||||
m := &master_pb.RackInfo{
|
||||
Id: string(r.Id()),
|
||||
VolumeCount: uint64(r.GetVolumeCount()),
|
||||
MaxVolumeCount: uint64(r.GetMaxVolumeCount()),
|
||||
MaxSsdVolumeCount: uint64(r.GetMaxSsdVolumeCount()),
|
||||
SsdVolumeCount: uint64(r.GetSsdVolumeCount()),
|
||||
FreeVolumeCount: uint64(r.FreeSpace()),
|
||||
ActiveVolumeCount: uint64(r.GetActiveVolumeCount()),
|
||||
RemoteVolumeCount: uint64(r.GetRemoteVolumeCount()),
|
||||
Id: string(r.Id()),
|
||||
DiskInfos: r.diskUsages.ToDiskInfo(),
|
||||
}
|
||||
for _, c := range r.Children() {
|
||||
dn := c.(*DataNode)
|
||||
|
|
|
@ -3,6 +3,7 @@ package topology
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -45,6 +46,7 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls
|
|||
t.id = NodeId(id)
|
||||
t.nodeType = "Topology"
|
||||
t.NodeImpl.value = t
|
||||
t.diskUsages = newDiskUsages()
|
||||
t.children = make(map[NodeId]Node)
|
||||
t.collectionMap = util.NewConcurrentReadMap()
|
||||
t.ecShardMap = make(map[needle.VolumeId]*EcShardLocations)
|
||||
|
@ -137,7 +139,7 @@ func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string,
|
|||
return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil
|
||||
}
|
||||
|
||||
func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) *VolumeLayout {
|
||||
func (t *Topology) GetVolumeLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) *VolumeLayout {
|
||||
return t.collectionMap.Get(collectionName, func() interface{} {
|
||||
return NewCollection(collectionName, t.volumeSizeLimit, t.replicationAsMin)
|
||||
}).(*Collection).GetOrCreateVolumeLayout(rp, ttl, diskType)
|
||||
|
@ -176,7 +178,7 @@ func (t *Topology) DeleteCollection(collectionName string) {
|
|||
t.collectionMap.Delete(collectionName)
|
||||
}
|
||||
|
||||
func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType) {
|
||||
func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType) {
|
||||
collection, found := t.FindCollection(collectionName)
|
||||
if !found {
|
||||
return
|
||||
|
@ -188,14 +190,14 @@ func (t *Topology) DeleteLayout(collectionName string, rp *super_block.ReplicaPl
|
|||
}
|
||||
|
||||
func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
|
||||
diskType := storage.ToDiskType(v.DiskType)
|
||||
diskType := types.ToDiskType(v.DiskType)
|
||||
vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
|
||||
vl.RegisterVolume(&v, dn)
|
||||
vl.EnsureCorrectWritables(&v)
|
||||
}
|
||||
func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) {
|
||||
glog.Infof("removing volume info: %+v", v)
|
||||
diskType := storage.ToDiskType(v.DiskType)
|
||||
diskType := types.ToDiskType(v.DiskType)
|
||||
volumeLayout := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
|
||||
volumeLayout.UnRegisterVolume(&v, dn)
|
||||
if volumeLayout.isEmpty() {
|
||||
|
@ -235,7 +237,7 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati
|
|||
t.UnRegisterVolumeLayout(v, dn)
|
||||
}
|
||||
for _, v := range changedVolumes {
|
||||
diskType := storage.ToDiskType(v.DiskType)
|
||||
diskType := types.ToDiskType(v.DiskType)
|
||||
vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
|
||||
vl.EnsureCorrectWritables(&v)
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@ func (t *Topology) SyncDataNodeEcShards(shardInfos []*master_pb.VolumeEcShardInf
|
|||
for _, shardInfo := range shardInfos {
|
||||
shards = append(shards,
|
||||
erasure_coding.NewEcVolumeInfo(
|
||||
shardInfo.DiskType,
|
||||
shardInfo.Collection,
|
||||
needle.VolumeId(shardInfo.Id),
|
||||
erasure_coding.ShardBits(shardInfo.EcIndexBits)))
|
||||
|
@ -39,6 +40,7 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
|
|||
for _, shardInfo := range newEcShards {
|
||||
newShards = append(newShards,
|
||||
erasure_coding.NewEcVolumeInfo(
|
||||
shardInfo.DiskType,
|
||||
shardInfo.Collection,
|
||||
needle.VolumeId(shardInfo.Id),
|
||||
erasure_coding.ShardBits(shardInfo.EcIndexBits)))
|
||||
|
@ -46,6 +48,7 @@ func (t *Topology) IncrementalSyncDataNodeEcShards(newEcShards, deletedEcShards
|
|||
for _, shardInfo := range deletedEcShards {
|
||||
deletedShards = append(deletedShards,
|
||||
erasure_coding.NewEcVolumeInfo(
|
||||
shardInfo.DiskType,
|
||||
shardInfo.Collection,
|
||||
needle.VolumeId(shardInfo.Id),
|
||||
erasure_coding.ShardBits(shardInfo.EcIndexBits)))
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package topology
|
||||
|
||||
import (
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"google.golang.org/grpc"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
@ -37,7 +38,7 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g
|
|||
}()
|
||||
}
|
||||
func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
|
||||
diskType := storage.ToDiskType(volumeInfo.DiskType)
|
||||
diskType := types.ToDiskType(volumeInfo.DiskType)
|
||||
vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
|
||||
if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
|
||||
return false
|
||||
|
@ -48,7 +49,13 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
|
|||
|
||||
for _, dn := range vl.vid2location[volumeInfo.Id].list {
|
||||
if !volumeInfo.ReadOnly {
|
||||
dn.UpAdjustActiveVolumeCountDelta(-1)
|
||||
|
||||
disk := dn.getOrCreateDisk(volumeInfo.DiskType)
|
||||
deltaDiskUsages := newDiskUsages()
|
||||
deltaDiskUsage := deltaDiskUsages.getOrCreateDisk(types.ToDiskType(volumeInfo.DiskType))
|
||||
deltaDiskUsage.activeVolumeCount = -1
|
||||
disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
|
||||
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
@ -56,16 +63,14 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
|
|||
func (t *Topology) UnRegisterDataNode(dn *DataNode) {
|
||||
for _, v := range dn.GetVolumes() {
|
||||
glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
|
||||
diskType := storage.ToDiskType(v.DiskType)
|
||||
diskType := types.ToDiskType(v.DiskType)
|
||||
vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType)
|
||||
vl.SetVolumeUnavailable(dn, v.Id)
|
||||
}
|
||||
dn.UpAdjustVolumeCountDelta(-dn.GetVolumeCount())
|
||||
dn.UpAdjustSsdVolumeCountDelta(-dn.GetSsdVolumeCount())
|
||||
dn.UpAdjustRemoteVolumeCountDelta(-dn.GetRemoteVolumeCount())
|
||||
dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount())
|
||||
dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount())
|
||||
dn.UpAdjustMaxSsdVolumeCountDelta(-dn.GetMaxSsdVolumeCount())
|
||||
|
||||
negativeUsages := dn.GetDiskUsages().negative()
|
||||
dn.UpAdjustDiskUsageDelta(negativeUsages)
|
||||
|
||||
if dn.Parent() != nil {
|
||||
dn.Parent().UnlinkChildNode(dn.Id())
|
||||
}
|
||||
|
|
|
@ -4,8 +4,8 @@ import "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
|||
|
||||
func (t *Topology) ToMap() interface{} {
|
||||
m := make(map[string]interface{})
|
||||
m["Max"] = t.GetMaxVolumeCount() + t.GetMaxSsdVolumeCount()
|
||||
m["Free"] = t.FreeSpace()
|
||||
m["Max"] = t.diskUsages.GetMaxVolumeCount()
|
||||
m["Free"] = t.diskUsages.FreeSpace()
|
||||
var dcs []interface{}
|
||||
for _, c := range t.Children() {
|
||||
dc := c.(*DataCenter)
|
||||
|
@ -29,8 +29,8 @@ func (t *Topology) ToMap() interface{} {
|
|||
|
||||
func (t *Topology) ToVolumeMap() interface{} {
|
||||
m := make(map[string]interface{})
|
||||
m["Max"] = t.GetMaxVolumeCount() + t.GetMaxSsdVolumeCount()
|
||||
m["Free"] = t.FreeSpace()
|
||||
m["Max"] = t.diskUsages.GetMaxVolumeCount()
|
||||
m["Free"] = t.diskUsages.FreeSpace()
|
||||
dcs := make(map[NodeId]interface{})
|
||||
for _, c := range t.Children() {
|
||||
dc := c.(*DataCenter)
|
||||
|
@@ -80,14 +80,8 @@ func (t *Topology) ToVolumeLocations() (volumeLocations []*master_pb.VolumeLocat
 
 func (t *Topology) ToTopologyInfo() *master_pb.TopologyInfo {
 	m := &master_pb.TopologyInfo{
-		Id:                string(t.Id()),
-		VolumeCount:       uint64(t.GetVolumeCount()),
-		MaxVolumeCount:    uint64(t.GetMaxVolumeCount()),
-		MaxSsdVolumeCount: uint64(t.GetMaxSsdVolumeCount()),
-		FreeVolumeCount:   uint64(t.FreeSpace()),
-		ActiveVolumeCount: uint64(t.GetActiveVolumeCount()),
-		RemoteVolumeCount: uint64(t.GetRemoteVolumeCount()),
-		SsdVolumeCount:    uint64(t.GetSsdVolumeCount()),
+		Id:        string(t.Id()),
+		DiskInfos: t.diskUsages.ToDiskInfo(),
 	}
 	for _, c := range t.Children() {
 		dc := c.(*DataCenter)
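ToMap, ToVolumeMap, and ToTopologyInfo now derive their totals from the per-disk map instead of summing dedicated hdd and ssd fields. Below is a rough sketch of that roll-up, assuming a trimmed-down DiskInfo that keeps only the two counters needed here; the real FreeSpace accounting also considers remote volumes and other factors.

package main

import "fmt"

// DiskInfo is a trimmed-down stand-in for the per-disk-type summary:
// only the counters needed to derive "Max" and "Free" are kept.
type DiskInfo struct {
	MaxVolumeCount int64
	VolumeCount    int64
}

// maxVolumeCount sums capacity over every disk type, roughly what
// t.diskUsages.GetMaxVolumeCount() replaces the old
// GetMaxVolumeCount()+GetMaxSsdVolumeCount() pair with.
func maxVolumeCount(disks map[string]*DiskInfo) (total int64) {
	for _, d := range disks {
		total += d.MaxVolumeCount
	}
	return
}

// freeSpace is an assumed simplification: capacity minus used volumes,
// again summed across disk types.
func freeSpace(disks map[string]*DiskInfo) (free int64) {
	for _, d := range disks {
		free += d.MaxVolumeCount - d.VolumeCount
	}
	return
}

func main() {
	disks := map[string]*DiskInfo{
		"":    {MaxVolumeCount: 25, VolumeCount: 7}, // hdd, the default disk type
		"ssd": {MaxVolumeCount: 12, VolumeCount: 7},
	}
	fmt.Println("Max:", maxVolumeCount(disks)) // 37
	fmt.Println("Free:", freeSpace(disks))     // 23
}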
@@ -6,6 +6,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 
 	"testing"
 )
@@ -13,11 +14,11 @@ import (
 func TestRemoveDataCenter(t *testing.T) {
 	topo := setup(topologyLayout)
 	topo.UnlinkChildNode(NodeId("dc2"))
-	if topo.GetActiveVolumeCount() != 15 {
+	if topo.diskUsages.usages[types.HardDriveType].activeVolumeCount != 15 {
 		t.Fail()
 	}
 	topo.UnlinkChildNode(NodeId("dc3"))
-	if topo.GetActiveVolumeCount() != 12 {
+	if topo.diskUsages.usages[types.HardDriveType].activeVolumeCount != 12 {
 		t.Fail()
 	}
 }
@@ -27,7 +28,10 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 
 	dc := topo.GetOrCreateDataCenter("dc1")
 	rack := dc.GetOrCreateRack("rack1")
-	dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25, 12)
+	maxVolumeCounts := make(map[string]uint32)
+	maxVolumeCounts[""] = 25
+	maxVolumeCounts["ssd"] = 12
+	dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", maxVolumeCounts)
 
 	{
 		volumeCount := 7
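The test registers capacity under the raw strings "" and "ssd" and then reads counters back under types.HardDriveType and types.SsdType, so a normalization from string to DiskType has to happen in between. Here is a hedged sketch of that mapping; the case-insensitive handling and the fallback for unknown values are assumptions, not taken from the source.

package main

import (
	"fmt"
	"strings"
)

type DiskType string

const (
	HardDriveType DiskType = ""    // the default disk type
	SsdType       DiskType = "ssd"
)

// toDiskType mimics the assumed behavior of types.ToDiskType: the empty
// string and "hdd" map to the default hard-drive type, "ssd" maps to SSD.
func toDiskType(s string) DiskType {
	switch strings.ToLower(strings.TrimSpace(s)) {
	case "ssd":
		return SsdType
	case "", "hdd":
		return HardDriveType
	default:
		return HardDriveType // unknown values fall back to hdd in this sketch
	}
}

func main() {
	// The test registers max volume counts under raw string keys...
	maxVolumeCounts := map[string]uint32{"": 25, "ssd": 12}

	// ...and the topology then looks counters up by normalized DiskType.
	for k, v := range maxVolumeCounts {
		fmt.Printf("%q -> DiskType(%q): %d volumes max\n", k, string(toDiskType(k)), v)
	}
}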
@@ -67,9 +71,11 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 
 		topo.SyncDataNodeRegistration(volumeMessages, dn)
 
-		assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount*2)
-		assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
-		assert(t, "ssdVolumeCount", int(topo.ssdVolumeCount), volumeCount)
+		usageCounts := topo.diskUsages.usages[types.HardDriveType]
+
+		assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount)
+		assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount)
+		assert(t, "ssdVolumeCount", int(topo.diskUsages.usages[types.SsdType].volumeCount), volumeCount)
 	}
 
 	{
@@ -96,8 +102,10 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 		//layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL)
 		//assert(t, "writables", len(layout.writables), volumeCount)
 
-		assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
-		assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
+		usageCounts := topo.diskUsages.usages[types.HardDriveType]
+
+		assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount)
+		assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount)
 	}
 
 	{
@@ -114,19 +122,21 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 			nil,
 			dn)
 		rp, _ := super_block.NewReplicaPlacementFromString("000")
-		layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, storage.HardDriveType)
+		layout := topo.GetVolumeLayout("", rp, needle.EMPTY_TTL, types.HardDriveType)
 		assert(t, "writables after repeated add", len(layout.writables), volumeCount)
 
-		assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount)
-		assert(t, "volumeCount", int(topo.volumeCount), volumeCount)
+		usageCounts := topo.diskUsages.usages[types.HardDriveType]
+
+		assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount)
+		assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount)
 
 		topo.IncrementalSyncDataNodeRegistration(
 			nil,
 			[]*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
 			dn)
 		assert(t, "writables after deletion", len(layout.writables), volumeCount-1)
-		assert(t, "activeVolumeCount1", int(topo.activeVolumeCount), volumeCount-1)
-		assert(t, "volumeCount", int(topo.volumeCount), volumeCount-1)
+		assert(t, "activeVolumeCount1", int(usageCounts.activeVolumeCount), volumeCount-1)
+		assert(t, "volumeCount", int(usageCounts.volumeCount), volumeCount-1)
 
 		topo.IncrementalSyncDataNodeRegistration(
 			[]*master_pb.VolumeShortInformationMessage{newVolumeShortMessage},
@@ -146,7 +156,9 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
 
 	topo.UnRegisterDataNode(dn)
 
-	assert(t, "activeVolumeCount2", int(topo.activeVolumeCount), 0)
+	usageCounts := topo.diskUsages.usages[types.HardDriveType]
+
+	assert(t, "activeVolumeCount2", int(usageCounts.activeVolumeCount), 0)
 
 }
@@ -162,7 +174,10 @@ func TestAddRemoveVolume(t *testing.T) {
 
 	dc := topo.GetOrCreateDataCenter("dc1")
 	rack := dc.GetOrCreateRack("rack1")
-	dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", 25, 12)
+	maxVolumeCounts := make(map[string]uint32)
+	maxVolumeCounts[""] = 25
+	maxVolumeCounts["ssd"] = 12
+	dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, "127.0.0.1", maxVolumeCounts)
 
 	v := storage.VolumeInfo{
 		Id: needle.VolumeId(1),
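Data-node registration in these tests now takes a map keyed by disk-type string rather than two separate hdd/ssd integers. The sketch below only illustrates the shape of that call; registerMaxVolumeCounts and the fold into per-disk totals are assumptions for illustration, not the real GetOrCreateDataNode logic.

package main

import "fmt"

type DiskType string

// registerMaxVolumeCounts folds the string-keyed capacity map into
// per-disk-type totals (an assumed simplification of what a data node
// does with the map it receives at registration time).
func registerMaxVolumeCounts(maxVolumeCounts map[string]uint32) map[DiskType]int64 {
	perDisk := make(map[DiskType]int64)
	for diskTypeString, count := range maxVolumeCounts {
		perDisk[DiskType(diskTypeString)] += int64(count)
	}
	return perDisk
}

func main() {
	// Same values as the tests: 25 default (hdd) volumes, 12 ssd volumes.
	maxVolumeCounts := map[string]uint32{"": 25, "ssd": 12}
	fmt.Println(registerMaxVolumeCounts(maxVolumeCounts))
}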
@@ -2,6 +2,7 @@ package topology
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"math/rand"
 	"sync"
@@ -27,7 +28,7 @@ type VolumeGrowOption struct {
 	Collection       string
 	ReplicaPlacement *super_block.ReplicaPlacement
 	Ttl              *needle.TTL
-	DiskType         storage.DiskType
+	DiskType         types.DiskType
 	Prealloacte      int64
 	DataCenter       string
 	Rack             string
@@ -219,6 +220,7 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid
 			ReplicaPlacement: option.ReplicaPlacement,
 			Ttl:              option.Ttl,
 			Version:          needle.CurrentVersion,
+			DiskType:         string(option.DiskType),
 		}
 		server.AddOrUpdateVolume(vi)
 		topo.RegisterVolumeLayout(vi, server)
@@ -103,7 +103,13 @@ func setup(topologyLayout string) *Topology {
 				Version:    needle.CurrentVersion}
 			server.AddOrUpdateVolume(vi)
 		}
-		server.UpAdjustMaxVolumeCountDelta(int64(serverMap["limit"].(float64)))
+
+		disk := server.getOrCreateDisk("")
+		deltaDiskUsages := newDiskUsages()
+		deltaDiskUsage := deltaDiskUsages.getOrCreateDisk("")
+		deltaDiskUsage.maxVolumeCount = int64(serverMap["limit"].(float64))
+		disk.UpAdjustDiskUsageDelta(deltaDiskUsages)
+
 		}
 	}
 }
@@ -3,6 +3,7 @@ package topology
 import (
 	"errors"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"math/rand"
 	"sync"
 	"time"
@@ -103,7 +104,7 @@ func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
 type VolumeLayout struct {
 	rp              *super_block.ReplicaPlacement
 	ttl             *needle.TTL
-	diskType        storage.DiskType
+	diskType        types.DiskType
 	vid2location    map[needle.VolumeId]*VolumeLocationList
 	writables       []needle.VolumeId // transient array of writable volume id
 	readonlyVolumes *volumesBinaryState // readonly volumes
@@ -119,7 +120,7 @@ type VolumeLayoutStats struct {
 	FileCount uint64
 }
 
-func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType storage.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
+func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
 	return &VolumeLayout{
 		rp:       rp,
 		ttl:      ttl,
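With the disk type added to VolumeGrowOption and VolumeLayout, a layout is now identified by collection, replica placement, TTL, and disk type together. The sketch below illustrates that lookup with a simplified string-keyed registry; the real GetVolumeLayout takes *super_block.ReplicaPlacement and *needle.TTL and keeps nested maps rather than a flat key.

package main

import "fmt"

type DiskType string

// VolumeLayout is trimmed down to the identifying fields; the real struct
// also tracks volume locations, writables, and readonly volumes.
type VolumeLayout struct {
	collection  string
	replication string
	ttl         string
	diskType    DiskType
}

// layoutKey is an assumed composite key used only for this sketch.
func layoutKey(collection, replication, ttl string, diskType DiskType) string {
	return fmt.Sprintf("%s/%s/%s/%s", collection, replication, ttl, diskType)
}

type Topology struct {
	layouts map[string]*VolumeLayout
}

// GetVolumeLayout mirrors the extended lookup in the diff: the disk type is
// now part of what identifies a layout, so hdd and ssd volumes with the same
// collection/replication/ttl land in different layouts.
func (t *Topology) GetVolumeLayout(collection, replication, ttl string, diskType DiskType) *VolumeLayout {
	k := layoutKey(collection, replication, ttl, diskType)
	if vl, ok := t.layouts[k]; ok {
		return vl
	}
	vl := &VolumeLayout{collection: collection, replication: replication, ttl: ttl, diskType: diskType}
	t.layouts[k] = vl
	return vl
}

func main() {
	topo := &Topology{layouts: make(map[string]*VolumeLayout)}
	hdd := topo.GetVolumeLayout("", "000", "", DiskType(""))
	ssd := topo.GetVolumeLayout("", "000", "", DiskType("ssd"))
	fmt.Println(hdd == ssd) // false: same collection/rp/ttl, different disk type
}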