refactoring

Chris Lu 2021-02-22 00:28:42 -08:00
parent 151c281f36
commit 1c233ad986
11 changed files with 63 additions and 98 deletions

View file

@@ -201,17 +201,14 @@ type EcRack struct {
 func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) {
 	// list all possible locations
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
 	if err != nil {
-		return nil, 0, err
+		return
 	}
 	// find out all volume servers with one slot left.
-	ecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(resp.TopologyInfo, selectedDataCenter)
+	ecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter)
 	sortEcNodesByFreeslotsDecending(ecNodes)
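
The same refactoring repeats across every file in this commit: instead of each shell command issuing its own VolumeList gRPC call and reading fields off the raw response, the commands now ask the shared collectTopologyInfo helper for the topology and, where needed, the volume size limit. A minimal sketch of the new call-site shape, using only names that appear in this diff:

```go
// Sketch of the refactored call site (names taken from the hunk above).
// Callers that do not need the size limit discard it with the blank identifier.
topologyInfo, _, err := collectTopologyInfo(commandEnv)
if err != nil {
	return err
}
ecNodes, totalFreeEcSlots = collectEcVolumeServersByDc(topologyInfo, selectedDataCenter)
```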

View file

@@ -51,7 +51,7 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 	vid := needle.VolumeId(*volumeId)
 	// collect topology information
-	topologyInfo, err := collectTopologyInfo(commandEnv)
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}
@@ -208,7 +208,7 @@ func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasur
 }
-func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) {
+func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {
 	var resp *master_pb.VolumeListResponse
 	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
@@ -219,7 +219,7 @@ func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyIn
 		return
 	}
-	return resp.TopologyInfo, nil
+	return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil
 }
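
Pulled together from the fragments in this hunk, the widened helper now returns the master's volume size limit alongside the topology, so no caller needs to keep the raw VolumeListResponse around. This is a sketch assembled from the diff above, not a verbatim copy of the file:

```go
// Sketch of collectTopologyInfo after this commit, assembled from the diff fragments above.
func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, err error) {
	var resp *master_pb.VolumeListResponse
	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
		return err
	})
	if err != nil {
		return
	}
	return resp.TopologyInfo, resp.VolumeSizeLimitMb, nil
}
```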

View file

@@ -265,11 +265,8 @@ func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) {
 func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return
 	}
@@ -280,11 +277,11 @@ func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection stri
 	fmt.Printf("ec encode volumes quiet for: %d seconds\n", quietSeconds)
 	vidMap := make(map[uint32]bool)
-	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 		for _, diskInfo := range dn.DiskInfos {
 			for _, v := range diskInfo.VolumeInfos {
 				if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds {
-					if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
+					if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 {
 						vidMap[v.Id] = true
 					}
 				}
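
The fullness check converts the limit from megabytes to bytes before comparing. For example, assuming a 30000 MB volume size limit and the default fullPercent of 95, a volume qualifies once its size exceeds 95/100 * 30000 * 1024 * 1024 = 29,884,416,000 bytes (roughly 29.9 GB).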

View file

@@ -1,7 +1,6 @@
 package shell
 import (
-	"context"
 	"flag"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -75,18 +74,15 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
 		return nil
 	}
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}
-	volumeServers := collectVolumeServersByDc(resp.TopologyInfo, *dc)
-	volumeReplicas, _ := collectVolumeReplicaLocations(resp)
-	diskTypes := collectVolumeDiskTypes(resp.TopologyInfo)
+	volumeServers := collectVolumeServersByDc(topologyInfo, *dc)
+	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
+	diskTypes := collectVolumeDiskTypes(topologyInfo)
 	if *collection == "EACH_COLLECTION" {
 		collections, err := ListCollectionNames(commandEnv, true, false)
@@ -94,16 +90,16 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer
 			return err
 		}
 		for _, c := range collections {
-			if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
+			if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, c, *applyBalancing); err != nil {
 				return err
 			}
 		}
 	} else if *collection == "ALL_COLLECTIONS" {
-		if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
+		if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, "ALL_COLLECTIONS", *applyBalancing); err != nil {
 			return err
 		}
 	} else {
-		if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, resp.VolumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
+		if err = balanceVolumeServers(commandEnv, diskTypes, volumeReplicas, volumeServers, volumeSizeLimitMb*1024*1024, *collection, *applyBalancing); err != nil {
 			return err
 		}
 	}

View file

@@ -56,11 +56,8 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
 	}
 	replicaPlacementInt32 := uint32(replicaPlacement.Byte())
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}
@@ -69,7 +66,7 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman
 	// find all data nodes with volumes that needs replication change
 	var allLocations []location
-	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 		loc := newLocation(dc, string(rack), dn)
 		for _, diskInfo := range dn.DiskInfos {
 			for _, v := range diskInfo.VolumeInfos {

View file

@@ -64,18 +64,15 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
 	takeAction := !*skipChange
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}
 	// find all volumes that needs replication
 	// collect all data nodes
-	volumeReplicas, allLocations := collectVolumeReplicaLocations(resp)
+	volumeReplicas, allLocations := collectVolumeReplicaLocations(topologyInfo)
 	if len(allLocations) == 0 {
 		return fmt.Errorf("no data nodes at all")
@@ -107,10 +104,10 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
 }
-func collectVolumeReplicaLocations(resp *master_pb.VolumeListResponse) (map[uint32][]*VolumeReplica, []location) {
+func collectVolumeReplicaLocations(topologyInfo *master_pb.TopologyInfo) (map[uint32][]*VolumeReplica, []location) {
 	volumeReplicas := make(map[uint32][]*VolumeReplica)
 	var allLocations []location
-	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 		loc := newLocation(dc, string(rack), dn)
 		for _, diskInfo := range dn.DiskInfos {
 			for _, v := range diskInfo.VolumeInfos {

View file

@@ -73,7 +73,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	defer os.RemoveAll(tempFolder)
 	// collect all volume id locations
-	volumeIdToVInfo, err := c.collectVolumeIds(*verbose, writer)
+	volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
 	if err != nil {
 		return fmt.Errorf("failed to collect all volume locations: %v", err)
 	}
@@ -268,23 +268,20 @@ type VInfo struct {
 	isEcVolume bool
 }
-func (c *commandVolumeFsck) collectVolumeIds(verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
+func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
 	if verbose {
 		fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
 	}
 	volumeIdToServer = make(map[uint32]VInfo)
-	var resp *master_pb.VolumeListResponse
-	err = c.env.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return
 	}
-	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
+	eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
 		for _, diskInfo := range t.DiskInfos {
 			for _, vi := range diskInfo.VolumeInfos {
 				volumeIdToServer[vi.Id] = VInfo{

View file

@@ -2,7 +2,6 @@ package shell
 import (
 	"bytes"
-	"context"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
@@ -32,16 +31,13 @@ func (c *commandVolumeList) Help() string {
 func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}
-	writeTopologyInfo(writer, resp.TopologyInfo, resp.VolumeSizeLimitMb)
+	writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb)
 	return nil
 }

View file

@@ -1,7 +1,6 @@
 package shell
 import (
-	"context"
 	"flag"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -71,36 +70,33 @@ func volumeServerEvacuate(commandEnv *CommandEnv, volumeServer string, skipNonMo
 	// 3. move to any other volume server as long as it satisfy the replication requirements
 	// list all the volumes
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
+	// collect topology information
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}
-	if err := evacuateNormalVolumes(commandEnv, resp, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
+	if err := evacuateNormalVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
 		return err
 	}
-	if err := evacuateEcVolumes(commandEnv, resp, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
+	if err := evacuateEcVolumes(commandEnv, topologyInfo, volumeServer, skipNonMoveable, applyChange, writer); err != nil {
 		return err
 	}
 	return nil
 }
-func evacuateNormalVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListResponse, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
+func evacuateNormalVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
 	// find this volume server
-	volumeServers := collectVolumeServersByDc(resp.TopologyInfo, "")
+	volumeServers := collectVolumeServersByDc(topologyInfo, "")
 	thisNode, otherNodes := nodesOtherThan(volumeServers, volumeServer)
 	if thisNode == nil {
 		return fmt.Errorf("%s is not found in this cluster", volumeServer)
 	}
 	// move away normal volumes
-	volumeReplicas, _ := collectVolumeReplicaLocations(resp)
+	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)
 	for _, diskInfo := range thisNode.info.DiskInfos {
 		for _, vol := range diskInfo.VolumeInfos {
 			hasMoved, err := moveAwayOneNormalVolume(commandEnv, volumeReplicas, vol, thisNode, otherNodes, applyChange)
@@ -120,9 +116,9 @@ func evacuateNormalVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListRes
 	return nil
 }
-func evacuateEcVolumes(commandEnv *CommandEnv, resp *master_pb.VolumeListResponse, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
+func evacuateEcVolumes(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeServer string, skipNonMoveable, applyChange bool, writer io.Writer) error {
 	// find this ec volume server
-	ecNodes, _ := collectEcVolumeServersByDc(resp.TopologyInfo, "")
+	ecNodes, _ := collectEcVolumeServersByDc(topologyInfo, "")
 	thisNode, otherNodes := ecNodesOtherThan(ecNodes, volumeServer)
 	if thisNode == nil {
 		return fmt.Errorf("%s is not found in this cluster\n", volumeServer)

View file

@@ -56,7 +56,7 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr
 	vid := needle.VolumeId(*volumeId)
 	// collect topology information
-	topologyInfo, err := collectTopologyInfo(commandEnv)
+	topologyInfo, _, err := collectTopologyInfo(commandEnv)
 	if err != nil {
 		return err
 	}

View file

@@ -1,7 +1,6 @@
 package shell
 import (
-	"context"
 	"flag"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -26,8 +25,10 @@ func (c *commandVolumeTierMove) Name() string {
 func (c *commandVolumeTierMove) Help() string {
 	return `<WIP> change a volume from one disk type to another
-	volume.tier.move -source=hdd -target=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
-	volume.tier.move -target=hdd [-collection=""] -volumeId=<volume_id>
+	volume.tier.move -fromDiskType=hdd -toDiskType=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
+	Even if the volume is replicated, only one replica will be changed and the rest replicas will be dropped.
+	So "volume.fix.replication" and "volume.balance" should be followed.
 `
 }
@@ -39,7 +40,6 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer
 	}
 	tierCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
-	volumeId := tierCommand.Int("volumeId", 0, "the volume id")
 	collection := tierCommand.String("collection", "", "the collection name")
 	fullPercentage := tierCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
 	quietPeriod := tierCommand.Duration("quietFor", 24*time.Hour, "select volumes without no writes for this period")
@@ -49,20 +49,21 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer
 		return nil
 	}
-	if *source == *target {
-		return fmt.Errorf("source tier %s is the same as target tier %s", *source, *target)
+	fromDiskType := types.ToDiskType(*source)
+	toDiskType := types.ToDiskType(*target)
+	if fromDiskType == toDiskType {
+		return fmt.Errorf("source tier %s is the same as target tier %s", fromDiskType, toDiskType)
 	}
-	vid := needle.VolumeId(*volumeId)
-	// volumeId is provided
-	if vid != 0 {
-		// return doVolumeTierMove(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile)
+	// collect topology information
+	topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv)
+	if err != nil {
+		return err
 	}
 	// apply to all volumes in the collection
-	// reusing collectVolumeIdsForEcEncode for now
-	volumeIds, err := collectVolumeIdsForTierChange(commandEnv, *source, *collection, *fullPercentage, *quietPeriod)
+	volumeIds, err := collectVolumeIdsForTierChange(commandEnv, topologyInfo, volumeSizeLimitMb, fromDiskType, *collection, *fullPercentage, *quietPeriod)
 	if err != nil {
 		return err
 	}
@@ -71,16 +72,7 @@ func (c *commandVolumeTierMove) Do(args []string, commandEnv *CommandEnv, writer
 	return nil
 }
-func collectVolumeIdsForTierChange(commandEnv *CommandEnv, sourceTier string, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
-	var resp *master_pb.VolumeListResponse
-	err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
-		resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
-		return err
-	})
-	if err != nil {
-		return
-	}
+func collectVolumeIdsForTierChange(commandEnv *CommandEnv, topologyInfo *master_pb.TopologyInfo, volumeSizeLimitMb uint64, sourceTier types.DiskType, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) {
 	quietSeconds := int64(quietPeriod / time.Second)
 	nowUnixSeconds := time.Now().Unix()
@@ -88,11 +80,11 @@ func collectVolumeIdsForTierChange(commandEnv *CommandEnv, sourceTier string, se
 	fmt.Printf("collect %s volumes quiet for: %d seconds\n", sourceTier, quietSeconds)
 	vidMap := make(map[uint32]bool)
-	eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+	eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 		for _, diskInfo := range dn.DiskInfos {
 			for _, v := range diskInfo.VolumeInfos {
-				if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == types.ToDiskType(sourceTier) {
-					if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 {
+				if v.Collection == selectedCollection && v.ModifiedAtSecond+quietSeconds < nowUnixSeconds && types.ToDiskType(v.DiskType) == sourceTier {
+					if float64(v.Size) > fullPercentage/100*float64(volumeSizeLimitMb)*1024*1024 {
 						vidMap[v.Id] = true
 					}
 				}