// command_volume_list.go implements the "volume.list" shell command,
// which prints the cluster topology as a dataCenter > rack > dataNode > disk > volume tree.

package shell
import (
	"bytes"
	"context"
	"fmt"
	"io"
	"sort"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
)
// init registers the volume.list command in the shell's global command
// table so it is available when the shell starts.
func init() {
	Commands = append(Commands, &commandVolumeList{})
}
// commandVolumeList implements the "volume.list" shell command.
// It holds no state; all the work happens in Do.
type commandVolumeList struct {
}
// Name returns the command name used to invoke it from the shell.
func (c *commandVolumeList) Name() string {
	return "volume.list"
}
func (c *commandVolumeList) Help() string {
2019-03-23 18:54:26 +00:00
return `list all volumes
This command list all volumes as a tree of dataCenter > rack > dataNode > volume.
`
2019-03-18 03:27:08 +00:00
}
func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
2019-03-18 03:27:08 +00:00
2019-03-19 12:19:37 +00:00
var resp *master_pb.VolumeListResponse
err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
2019-03-19 12:19:37 +00:00
return err
})
2019-03-18 03:27:08 +00:00
if err != nil {
return err
}
writeTopologyInfo(writer, resp.TopologyInfo, resp.VolumeSizeLimitMb)
2019-03-18 03:27:08 +00:00
return nil
}
2021-02-16 10:47:02 +00:00
func diskInfosToString(diskInfos map[string]*master_pb.DiskInfo) string {
var buf bytes.Buffer
for diskType, diskInfo := range diskInfos {
2021-02-16 18:55:30 +00:00
if diskType == "" {
diskType = "hdd"
}
2021-02-16 10:47:02 +00:00
fmt.Fprintf(&buf, " %s(volume:%d/%d active:%d free:%d remote:%d)", diskType, diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
}
return buf.String()
}
// diskInfoToString renders one disk's counters as
// "volume:used/max active:n free:n remote:n".
// A single formatted string needs no bytes.Buffer; fmt.Sprintf is the
// idiomatic (and equivalent) form.
func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
	return fmt.Sprintf("volume:%d/%d active:%d free:%d remote:%d", diskInfo.VolumeCount, diskInfo.MaxVolumeCount, diskInfo.ActiveVolumeCount, diskInfo.FreeVolumeCount, diskInfo.RemoteVolumeCount)
}
func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64) statistics {
2021-02-16 10:47:02 +00:00
fmt.Fprintf(writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
sort.Slice(t.DataCenterInfos, func(i, j int) bool {
return t.DataCenterInfos[i].Id < t.DataCenterInfos[j].Id
})
var s statistics
2019-03-18 03:27:08 +00:00
for _, dc := range t.DataCenterInfos {
s = s.plus(writeDataCenterInfo(writer, dc))
2019-03-18 03:27:08 +00:00
}
fmt.Fprintf(writer, "%+v \n", s)
return s
2019-03-18 03:27:08 +00:00
}
2019-04-06 18:12:35 +00:00
func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo) statistics {
2021-02-16 10:47:02 +00:00
fmt.Fprintf(writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
sort.Slice(t.RackInfos, func(i, j int) bool {
return t.RackInfos[i].Id < t.RackInfos[j].Id
})
2019-03-18 03:27:08 +00:00
for _, r := range t.RackInfos {
s = s.plus(writeRackInfo(writer, r))
2019-03-18 03:27:08 +00:00
}
fmt.Fprintf(writer, " DataCenter %s %+v \n", t.Id, s)
return s
2019-03-18 03:27:08 +00:00
}
2019-04-06 18:12:35 +00:00
func writeRackInfo(writer io.Writer, t *master_pb.RackInfo) statistics {
2021-02-16 10:47:02 +00:00
fmt.Fprintf(writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
sort.Slice(t.DataNodeInfos, func(i, j int) bool {
return t.DataNodeInfos[i].Id < t.DataNodeInfos[j].Id
})
2019-03-18 03:27:08 +00:00
for _, dn := range t.DataNodeInfos {
s = s.plus(writeDataNodeInfo(writer, dn))
2019-03-18 03:27:08 +00:00
}
fmt.Fprintf(writer, " Rack %s %+v \n", t.Id, s)
return s
2019-03-18 03:27:08 +00:00
}
2019-04-06 18:12:35 +00:00
func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
2021-02-16 10:47:02 +00:00
fmt.Fprintf(writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
var s statistics
2021-02-16 10:47:02 +00:00
for _, diskInfo := range t.DiskInfos {
s = s.plus(writeDiskInfo(writer, diskInfo))
}
fmt.Fprintf(writer, " DataNode %s %+v \n", t.Id, s)
return s
}
func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo) statistics {
var s statistics
2021-02-16 18:55:30 +00:00
diskType := t.Type
if diskType == "" {
diskType = "hdd"
}
fmt.Fprintf(writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
sort.Slice(t.VolumeInfos, func(i, j int) bool {
return t.VolumeInfos[i].Id < t.VolumeInfos[j].Id
})
2019-03-18 03:27:08 +00:00
for _, vi := range t.VolumeInfos {
s = s.plus(writeVolumeInformationMessage(writer, vi))
2019-03-18 03:27:08 +00:00
}
2019-05-24 18:52:23 +00:00
for _, ecShardInfo := range t.EcShardInfos {
2021-02-16 11:03:00 +00:00
fmt.Fprintf(writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
2019-05-24 18:52:23 +00:00
}
2021-02-16 18:55:30 +00:00
fmt.Fprintf(writer, " Disk %s %+v \n", diskType, s)
return s
2019-03-18 03:27:08 +00:00
}
// writeVolumeInformationMessage prints one volume line and converts the
// volume's counters into a statistics value for aggregation upstream.
func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics {
	fmt.Fprintf(writer, "          volume %+v \n", t)
	return newStatistics(t)
}
// statistics accumulates per-volume counters while walking the
// topology tree; values are summed level by level via plus.
type statistics struct {
	Size             uint64 // total bytes stored
	FileCount        uint64 // number of files
	DeletedFileCount uint64 // number of deleted files
	DeletedBytes     uint64 // bytes occupied by deleted files
}
// newStatistics converts a volume's protobuf counters into a statistics
// value. Note the field-name mismatch: protobuf DeleteCount maps to
// DeletedFileCount.
func newStatistics(t *master_pb.VolumeInformationMessage) statistics {
	return statistics{
		Size:             t.Size,
		FileCount:        t.FileCount,
		DeletedFileCount: t.DeleteCount,
		DeletedBytes:     t.DeletedByteCount,
	}
}
// plus returns the element-wise sum of s and t. The receiver is a
// value copy, so mutating it here never affects the caller's value.
func (s statistics) plus(t statistics) statistics {
	s.Size += t.Size
	s.FileCount += t.FileCount
	s.DeletedFileCount += t.DeletedFileCount
	s.DeletedBytes += t.DeletedBytes
	return s
}
func (s statistics) String() string {
2019-04-06 18:12:35 +00:00
if s.DeletedFileCount > 0 {
return fmt.Sprintf("total size:%d file_count:%d deleted_file:%d deleted_bytes:%d", s.Size, s.FileCount, s.DeletedFileCount, s.DeletedBytes)
}
return fmt.Sprintf("total size:%d file_count:%d", s.Size, s.FileCount)
2019-03-18 03:27:08 +00:00
}