seaweedfs/weed/shell/command_volume_list.go

package shell

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"path/filepath"
	"strings"

	"golang.org/x/exp/slices"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)
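
// init registers the volume.list command with the shell's command list.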
func init() {
Commands = append(Commands, &commandVolumeList{})
}
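
// commandVolumeList holds the filter flags for volume.list; any flag that is
// set restricts the printed topology to matching collections, locations,
// read-only volumes, or a single volume id.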
type commandVolumeList struct {
collectionPattern *string
dataCenter *string
rack *string
dataNode *string
readonly *bool
volumeId *uint64
}
func (c *commandVolumeList) Name() string {
return "volume.list"
}
func (c *commandVolumeList) Help() string {
return `list all volumes
	This command lists all volumes as a tree of dataCenter > rack > dataNode > volume.
`
}
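
// Do parses the filter flags, collects the current topology from the master,
// and prints it as a tree at the requested verbosity level.
//
// Example invocations (the values shown are hypothetical):
//
//	volume.list -v=3
//	volume.list -collectionPattern=pvc* -readonly
//	volume.list -dataCenter=dc1 -rack=rack1 -volumeId=27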
func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
volumeListCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	verbosityLevel := volumeListCommand.Int("v", 5, "verbosity level: 0=topology only, 1=+data centers, 2=+racks, 3=+data nodes, 4=+disks, 5=+volumes")
c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
	c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly volumes")
	c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only the volume with this id")
c.dataCenter = volumeListCommand.String("dataCenter", "", "show volumes only from the specified data center")
c.rack = volumeListCommand.String("rack", "", "show volumes only from the specified rack")
c.dataNode = volumeListCommand.String("dataNode", "", "show volumes only from the specified data node")
if err = volumeListCommand.Parse(args); err != nil {
return nil
}
// collect topology information
topologyInfo, volumeSizeLimitMb, err := collectTopologyInfo(commandEnv, 0)
if err != nil {
return err
}
c.writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
return nil
}
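
// diskInfosToString summarizes every disk type on a node as
// "type(volume:count/max active:n free:n remote:n)"; an empty disk type is
// reported as hdd.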
func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
var buf bytes.Buffer
for diskType, diskInfo := range diskInfos {
if diskType == "" {
diskType = "hdd"
}
fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
}
return buf.String()
}
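
// diskInfoToString renders the volume counters of a single disk.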
func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
return buf.String()
}
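
// writeTopologyInfo prints the topology header, walks the data centers in
// sorted order while honoring the -dataCenter filter, and returns the
// accumulated statistics. The verbosity level gates each tree level:
// 0 topology, 1 data centers, 2 racks, 3 data nodes, 4 disks, 5 volumes.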
func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) int {
return strings.Compare(a.Id, b.Id)
})
var s statistics
for _, dc := range t.DataCenterInfos {
if *c.dataCenter != "" && *c.dataCenter != dc.Id {
continue
}
s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
}
output(verbosityLevel >= 0, writer, "%+v \n", s)
return s
}
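
// writeDataCenterInfo prints one data center and its racks, honoring the
// -rack filter, and returns the statistics summed over the racks it visited.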
func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) int {
return strings.Compare(a.Id, b.Id)
})
for _, r := range t.RackInfos {
if *c.rack != "" && *c.rack != r.Id {
continue
}
s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
}
output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s)
return s
}
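
// writeRackInfo prints one rack and its data nodes, honoring the -dataNode
// filter, and returns the statistics summed over the nodes it visited.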
func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) int {
return strings.Compare(a.Id, b.Id)
})
for _, dn := range t.DataNodeInfos {
if *c.dataNode != "" && *c.dataNode != dn.Id {
continue
}
s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
}
output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s)
return s
}
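
// writeDataNodeInfo prints one data node and each of its disks, and returns
// the statistics summed over all disks.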
func (c *commandVolumeList) writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
output(verbosityLevel >= 3, writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
for _, diskInfo := range t.DiskInfos {
s = s.plus(c.writeDiskInfo(writer, diskInfo, verbosityLevel))
}
output(verbosityLevel >= 3, writer, " DataNode %s %+v \n", t.Id, s)
return s
}
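
// isNotMatchDiskInfo reports whether a volume should be skipped because it
// fails the -readonly, -collectionPattern, or -volumeId filter.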
func (c *commandVolumeList) isNotMatchDiskInfo(readOnly bool, collection string, volumeId uint32) bool {
if *c.readonly && !readOnly {
return true
}
if *c.collectionPattern != "" {
if matched, _ := filepath.Match(*c.collectionPattern, collection); !matched {
return true
}
}
if *c.volumeId > 0 && *c.volumeId != uint64(volumeId) {
return true
}
return false
}
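
// writeDiskInfo prints one disk, its volumes sorted by id, and its
// erasure-coded shards, applying the volume filters to both.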
func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
var s statistics
diskType := t.Type
if diskType == "" {
diskType = "hdd"
}
output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
	slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) int {
		// compare through int64 so the difference cannot wrap around as uint32
		return int(int64(a.Id) - int64(b.Id))
	})
for _, vi := range t.VolumeInfos {
if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
continue
}
s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
}
for _, ecShardInfo := range t.EcShardInfos {
if c.isNotMatchDiskInfo(false, ecShardInfo.Collection, ecShardInfo.Id) {
continue
}
output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
}
output(verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)
return s
}
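
// writeVolumeInformationMessage prints a single volume at the highest
// verbosity level and converts its counters into a statistics value.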
func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage, verbosityLevel int) statistics {
output(verbosityLevel >= 5, writer, " volume %+v \n", t)
return newStatistics(t)
}
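
// output writes the formatted line only when the verbosity condition holds.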
func output(condition bool, w io.Writer, format string, a ...interface{}) {
if condition {
fmt.Fprintf(w, format, a...)
}
}
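
// statistics accumulates size and file counters while walking the topology tree.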
type statistics struct {
Size uint64
FileCount uint64
DeletedFileCount uint64
DeletedBytes uint64
}
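
// newStatistics builds a statistics value from one volume's counters.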
func newStatistics(t *master_pb.VolumeInformationMessage) statistics {
return statistics{
Size: t.Size,
FileCount: t.FileCount,
DeletedFileCount: t.DeleteCount,
DeletedBytes: t.DeletedByteCount,
}
}
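
// plus returns the field-wise sum of two statistics values.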
func (s statistics) plus(t statistics) statistics {
return statistics{
Size: s.Size + t.Size,
FileCount: s.FileCount + t.FileCount,
DeletedFileCount: s.DeletedFileCount + t.DeletedFileCount,
DeletedBytes: s.DeletedBytes + t.DeletedBytes,
}
}
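
// String omits the deletion counters when nothing has been deleted.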
func (s statistics) String() string {
if s.DeletedFileCount > 0 {
return fmt.Sprintf("total size:%d file_count:%d deleted_file:%d deleted_bytes:%d", s.Size, s.FileCount, s.DeletedFileCount, s.DeletedBytes)
}
return fmt.Sprintf("total size:%d file_count:%d", s.Size, s.FileCount)
}