Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
Merge branch 'master' of https://github.com/chrislusf/seaweedfs
Commit f3fdfb07b8
.github/workflows/depsreview.yml (vendored, normal file): 14 lines added
@@ -0,0 +1,14 @@
+name: 'Dependency Review'
+on: [pull_request]
+
+permissions:
+  contents: read
+
+jobs:
+  dependency-review:
+    runs-on: ubuntu-latest
+    steps:
+      - name: 'Checkout Repository'
+        uses: actions/checkout@dcd71f646680f2efd8db4afa5ad64fdcba30e748
+      - name: 'Dependency Review'
+        uses: actions/dependency-review-action@3f943b86c9a289f4e632c632695e2e0898d9d67d
@@ -377,6 +377,18 @@ func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, valu
     return resp
 }
 
+// handleImplicitUsername adds username who signs the request to values if 'username' is not specified
+// According to https://awscli.amazonaws.com/v2/documentation/api/latest/reference/iam/create-access-key.html/
+// "If you do not specify a user name, IAM determines the user name implicitly based on the Amazon Web
+// Services access key ID signing the request."
+func handleImplicitUsername(r *http.Request, values url.Values) {
+    if values.Get("UserName") == "" {
+        // get username who signs the request
+        userName := strings.Split(r.Header["Authorization"][0], "/")[2]
+        values.Set("UserName", userName)
+    }
+}
+
 func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
     if err := r.ParseForm(); err != nil {
         s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
@@ -401,6 +413,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
         response = iama.ListUsers(s3cfg, values)
         changed = false
     case "ListAccessKeys":
+        handleImplicitUsername(r, values)
         response = iama.ListAccessKeys(s3cfg, values)
         changed = false
     case "CreateUser":
@@ -428,8 +441,10 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
             return
         }
     case "CreateAccessKey":
+        handleImplicitUsername(r, values)
         response = iama.CreateAccessKey(s3cfg, values)
     case "DeleteAccessKey":
+        handleImplicitUsername(r, values)
         response = iama.DeleteAccessKey(s3cfg, values)
     case "CreatePolicy":
        response, err = iama.CreatePolicy(s3cfg, values)
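For reference, the fallback in handleImplicitUsername can be exercised on its own. The standalone sketch below is not part of the commit; the Authorization value is made up and only its "/"-separated layout matters, mirroring the index-2 split used in the patch.

// Standalone sketch, not from the commit. The Authorization header value is
// hypothetical; the point is only that an empty UserName is filled from the
// third "/"-separated field of the header, as handleImplicitUsername does.
package main

import (
    "fmt"
    "net/http"
    "net/url"
    "strings"
)

func handleImplicitUsername(r *http.Request, values url.Values) {
    if values.Get("UserName") == "" {
        userName := strings.Split(r.Header["Authorization"][0], "/")[2]
        values.Set("UserName", userName)
    }
}

func main() {
    r, _ := http.NewRequest("POST", "http://localhost:8111/", nil)
    // Made-up SigV4-style credential scope; only the "/" layout is relevant here.
    r.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20220426/alice/iam/aws4_request")

    values := url.Values{}
    handleImplicitUsername(r, values)
    fmt.Println(values.Get("UserName")) // prints the third "/"-separated field: "alice"
}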
@@ -5,16 +5,6 @@ import (
     "context"
     "flag"
     "fmt"
-    "io"
-    "io/ioutil"
-    "math"
-    "net/http"
-    "net/url"
-    "os"
-    "path/filepath"
-    "strings"
-    "sync"
-
     "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/operation"
     "github.com/chrislusf/seaweedfs/weed/pb"
@@ -25,6 +15,17 @@ import (
     "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
     "github.com/chrislusf/seaweedfs/weed/storage/types"
     "github.com/chrislusf/seaweedfs/weed/util"
+    "io"
+    "io/ioutil"
+    "math"
+    "net/http"
+    "net/url"
+    "os"
+    "path"
+    "path/filepath"
+    "strings"
+    "sync"
+    "time"
 )
 
 func init() {
@@ -65,8 +66,11 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
     verbose := fsckCommand.Bool("v", false, "verbose mode")
     findMissingChunksInFiler := fsckCommand.Bool("findMissingChunksInFiler", false, "see \"help volume.fsck\"")
     findMissingChunksInFilerPath := fsckCommand.String("findMissingChunksInFilerPath", "/", "used together with findMissingChunksInFiler")
+    findMissingChunksInVolumeId := fsckCommand.Int("findMissingChunksInVolumeId", 0, "used together with findMissingChunksInFiler")
     applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only!> after detection, delete missing data from volumes / delete missing file entries from filer")
     purgeAbsent := fsckCommand.Bool("reallyDeleteFilerEntries", false, "<expert only!> delete missing file entries from filer if the corresponding volume is missing for any reason, please ensure all still existing/expected volumes are connected! used together with findMissingChunksInFiler")
+    tempPath := fsckCommand.String("tempPath", path.Join(os.TempDir()), "path for temporary idx files")
+
     if err = fsckCommand.Parse(args); err != nil {
         return nil
     }
@@ -78,7 +82,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
     c.env = commandEnv
 
     // create a temp folder
-    tempFolder, err := os.MkdirTemp("", "sw_fsck")
+    tempFolder, err := os.MkdirTemp(*tempPath, "sw_fsck")
     if err != nil {
         return fmt.Errorf("failed to create temp folder: %v", err)
     }
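A quick note on the new -tempPath flag: os.MkdirTemp treats its first argument as the parent directory and only falls back to os.TempDir() when that argument is empty, which is why pointing tempPath at a larger disk works. A small sketch, not from the commit, with a hypothetical /var/tmp path:

// Sketch only: shows where the temporary idx files would be staged when a
// parent directory is supplied, analogous to passing *tempPath above.
package main

import (
    "fmt"
    "os"
)

func main() {
    dir, err := os.MkdirTemp("/var/tmp", "sw_fsck") // "/var/tmp" is a hypothetical tempPath
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)
    fmt.Println("idx files would be staged under:", dir)
}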
@@ -88,14 +92,14 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
     defer os.RemoveAll(tempFolder)
 
     // collect all volume id locations
-    volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
+    dataNodeVolumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
     if err != nil {
         return fmt.Errorf("failed to collect all volume locations: %v", err)
     }
 
     isBucketsPath := false
     var fillerBucketsPath string
-    if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "" {
+    if *findMissingChunksInFiler && *findMissingChunksInFilerPath != "/" {
         fillerBucketsPath, err = readFilerBucketsPath(commandEnv)
         if err != nil {
             return fmt.Errorf("read filer buckets path: %v", err)
@@ -108,34 +112,43 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
         return fmt.Errorf("read filer buckets path: %v", err)
     }
 
+    collectMtime := time.Now().Unix()
     // collect each volume file ids
-    for volumeId, vinfo := range volumeIdToVInfo {
-        if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, fillerBucketsPath+"/"+vinfo.collection) {
-            delete(volumeIdToVInfo, volumeId)
-            continue
-        }
-        err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
-        if err != nil {
-            return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+    for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+        for volumeId, vinfo := range volumeIdToVInfo {
+            if *findMissingChunksInVolumeId > 0 && uint32(*findMissingChunksInVolumeId) != volumeId {
+                delete(volumeIdToVInfo, volumeId)
+                continue
+            }
+            if isBucketsPath && !strings.HasPrefix(*findMissingChunksInFilerPath, fillerBucketsPath+"/"+vinfo.collection) {
+                delete(volumeIdToVInfo, volumeId)
+                continue
+            }
+            err = c.collectOneVolumeFileIds(tempFolder, dataNodeId, volumeId, vinfo, *verbose, writer)
+            if err != nil {
+                return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
+            }
         }
     }
 
     if *findMissingChunksInFiler {
         // collect all filer file ids and paths
-        if err = c.collectFilerFileIdAndPaths(volumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent); err != nil {
+        if err = c.collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, *purgeAbsent, collectMtime); err != nil {
             return fmt.Errorf("collectFilerFileIdAndPaths: %v", err)
         }
-        // for each volume, check filer file ids
-        if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
-            return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
+        for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+            // for each volume, check filer file ids
+            if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, dataNodeId, writer, *verbose, *applyPurging); err != nil {
+                return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
+            }
         }
     } else {
         // collect all filer file ids
-        if err = c.collectFilerFileIds(volumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
+        if err = c.collectFilerFileIds(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose); err != nil {
             return fmt.Errorf("failed to collect file ids from filer: %v", err)
         }
         // volume file ids subtract filer file ids
-        if err = c.findExtraChunksInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
+        if err = c.findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo, tempFolder, writer, *verbose, *applyPurging); err != nil {
             return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
         }
     }
@@ -143,19 +156,24 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
     return nil
 }
 
-func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool) error {
+func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, purgeAbsent bool, collectMtime int64) error {
 
     if verbose {
         fmt.Fprintf(writer, "checking each file from filer ...\n")
     }
 
     files := make(map[uint32]*os.File)
-    for vid := range volumeIdToServer {
-        dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-        if openErr != nil {
-            return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+    for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
+        for vid := range volumeIdToServer {
+            if _, ok := files[vid]; ok {
+                continue
+            }
+            dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+            if openErr != nil {
+                return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+            }
+            files[vid] = dst
         }
-        files[vid] = dst
     }
     defer func() {
         for _, f := range files {
@@ -179,6 +197,9 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint
         }
         dataChunks = append(dataChunks, manifestChunks...)
         for _, chunk := range dataChunks {
+            if chunk.Mtime > collectMtime {
+                continue
+            }
             outputChan <- &Item{
                 vid: chunk.Fid.VolumeId,
                 fileKey: chunk.Fid.FileKey,
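The chunk.Mtime guard appears to make the filer scan ignore chunks written after collectMtime, that is, after the volume file ids were snapshotted, so files created while volume.fsck is running are not flagged. A reduced sketch, not from the commit; the types are simplified and the file ids are made up:

// Sketch only: the collectMtime cutoff in isolation. Chunks whose mtime is
// newer than the snapshot time are skipped instead of being checked.
package main

import (
    "fmt"
    "time"
)

type chunk struct {
    fid   string
    mtime int64
}

func main() {
    collectMtime := time.Now().Unix()
    chunks := []chunk{
        {fid: "3,0144df5bab", mtime: collectMtime - 60}, // existed before collection started: checked
        {fid: "3,0199aa01cd", mtime: collectMtime + 60}, // written afterwards: skipped
    }
    for _, c := range chunks {
        if c.mtime > collectMtime {
            continue
        }
        fmt.Println("checking", c.fid)
    }
}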
@@ -210,10 +231,10 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint
 
 }
 
-func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
+func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, dataNodeId string, writer io.Writer, verbose bool, applyPurging bool) error {
 
     for volumeId, vinfo := range volumeIdToVInfo {
-        checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, volumeId, writer, verbose, applyPurging)
+        checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, dataNodeId, volumeId, writer, verbose, applyPurging)
         if checkErr != nil {
             return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
         }
@@ -221,55 +242,93 @@ func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInf
     return nil
 }
 
-func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
+func (c *commandVolumeFsck) findExtraChunksInVolumeServers(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging bool) error {
 
     var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
-    for volumeId, vinfo := range volumeIdToVInfo {
-        inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, verbose)
-        if checkErr != nil {
-            return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
-        }
-        totalInUseCount += inUseCount
-        totalOrphanChunkCount += uint64(len(orphanFileIds))
-        totalOrphanDataSize += orphanDataSize
-
-        if verbose {
-            for _, fid := range orphanFileIds {
-                fmt.Fprintf(writer, "%s\n", fid)
-            }
-        }
-
-        if applyPurging && len(orphanFileIds) > 0 {
-            if verbose {
-                fmt.Fprintf(writer, "purging process for volume %d", volumeId)
-            }
-
-            if vinfo.isEcVolume {
-                fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
-                continue
-            }
-
-            needleVID := needle.VolumeId(volumeId)
-
-            if vinfo.isReadOnly {
-                err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, true)
-                if err != nil {
-                    return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
-                }
-
-                fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
-                defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, vinfo.server, false)
-
-                fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, vinfo.server)
-            }
-
-            if verbose {
-                fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
-            }
-
-            if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
-                return fmt.Errorf("purging volume %d: %v", volumeId, err)
-            }
-        }
-    }
+    volumeIdOrphanFileIds := make(map[uint32]map[string]bool)
+    isSeveralReplicas := make(map[uint32]bool)
+    isEcVolumeReplicas := make(map[uint32]bool)
+    isReadOnlyReplicas := make(map[uint32]bool)
+    serverReplicas := make(map[uint32][]pb.ServerAddress)
+    for dataNodeId, volumeIdToVInfo := range dataNodeVolumeIdToVInfo {
+        for volumeId, vinfo := range volumeIdToVInfo {
+            inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, dataNodeId, volumeId, writer, verbose)
+            if checkErr != nil {
+                return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
+            }
+            isSeveralReplicas[volumeId] = false
+            if _, found := volumeIdOrphanFileIds[volumeId]; !found {
+                volumeIdOrphanFileIds[volumeId] = make(map[string]bool)
+            } else {
+                isSeveralReplicas[volumeId] = true
+            }
+            for _, fid := range orphanFileIds {
+                if isSeveralReplicas[volumeId] {
+                    if _, found := volumeIdOrphanFileIds[volumeId][fid]; !found {
+                        continue
+                    }
+                }
+                volumeIdOrphanFileIds[volumeId][fid] = isSeveralReplicas[volumeId]
+            }
+
+            totalInUseCount += inUseCount
+            totalOrphanChunkCount += uint64(len(orphanFileIds))
+            totalOrphanDataSize += orphanDataSize
+
+            if verbose {
+                for _, fid := range orphanFileIds {
+                    fmt.Fprintf(writer, "%s\n", fid)
+                }
+            }
+            isEcVolumeReplicas[volumeId] = vinfo.isEcVolume
+            if isReadOnly, found := isReadOnlyReplicas[volumeId]; !(found && isReadOnly) {
+                isReadOnlyReplicas[volumeId] = vinfo.isReadOnly
+            }
+            serverReplicas[volumeId] = append(serverReplicas[volumeId], vinfo.server)
+        }
+
+        for volumeId, orphanReplicaFileIds := range volumeIdOrphanFileIds {
+            if !(applyPurging && len(orphanReplicaFileIds) > 0) {
+                continue
+            }
+            orphanFileIds := []string{}
+            for fid, foundInAllReplicas := range orphanReplicaFileIds {
+                if !isSeveralReplicas[volumeId] || (isSeveralReplicas[volumeId] && foundInAllReplicas) {
+                    orphanFileIds = append(orphanFileIds, fid)
+                }
+            }
+            if !(len(orphanFileIds) > 0) {
+                continue
+            }
+            if verbose {
+                fmt.Fprintf(writer, "purging process for volume %d", volumeId)
+            }
+
+            if isEcVolumeReplicas[volumeId] {
+                fmt.Fprintf(writer, "skip purging for Erasure Coded volume %d.\n", volumeId)
+                continue
+            }
+            for _, server := range serverReplicas[volumeId] {
+                needleVID := needle.VolumeId(volumeId)
+
+                if isReadOnlyReplicas[volumeId] {
+                    err := markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, true)
+                    if err != nil {
+                        return fmt.Errorf("mark volume %d read/write: %v", volumeId, err)
+                    }
+
+                    fmt.Fprintf(writer, "temporarily marked %d on server %v writable for forced purge\n", volumeId, server)
+                    defer markVolumeWritable(c.env.option.GrpcDialOption, needleVID, server, false)
+
+                    fmt.Fprintf(writer, "marked %d on server %v writable for forced purge\n", volumeId, server)
+                }
+                if verbose {
+                    fmt.Fprintf(writer, "purging files from volume %d\n", volumeId)
+                }
+
+                if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
+                    return fmt.Errorf("purging volume %d: %v", volumeId, err)
+                }
+            }
+        }
+    }
@@ -290,7 +349,7 @@ func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[u
     return nil
 }
 
-func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
+func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, dataNodeId string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {
 
     if verbose {
         fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
@@ -316,7 +375,7 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
         return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
     }
 
-    err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
+    err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, dataNodeId, volumeId))
     if err != nil {
         return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
     }
@@ -327,19 +386,21 @@ func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId
 
 }
 
-func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
+func (c *commandVolumeFsck) collectFilerFileIds(dataNodeVolumeIdToVInfo map[string]map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool) error {
 
     if verbose {
         fmt.Fprintf(writer, "collecting file ids from filer ...\n")
     }
 
     files := make(map[uint32]*os.File)
-    for vid := range volumeIdToServer {
-        dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-        if openErr != nil {
-            return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+    for _, volumeIdToServer := range dataNodeVolumeIdToVInfo {
+        for vid := range volumeIdToServer {
+            dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+            if openErr != nil {
+                return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
+            }
+            files[vid] = dst
         }
-        files[vid] = dst
     }
     defer func() {
         for _, f := range files {
@@ -377,16 +438,16 @@ func (c *commandVolumeFsck) collectFilerFileIds(volumeIdToServer map[uint32]VInf
     })
 }
 
-func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
+func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool, applyPurging bool) (err error) {
 
     if verbose {
-        fmt.Fprintf(writer, "find missing file chunks in volume %d ...\n", volumeId)
+        fmt.Fprintf(writer, "find missing file chunks in dataNodeId %s volume %d ...\n", dataNodeId, volumeId)
     }
 
     db := needle_map.NewMemDb()
     defer db.Close()
 
-    if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
+    if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
         return
     }
 
|
@ -473,12 +534,12 @@ func (c *commandVolumeFsck) httpDelete(path util.FullPath, verbose bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
|
func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, dataNodeId string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {
|
||||||
|
|
||||||
db := needle_map.NewMemDb()
|
db := needle_map.NewMemDb()
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
|
if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, dataNodeId, volumeId)); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -509,8 +570,8 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder stri
|
||||||
|
|
||||||
if orphanFileCount > 0 {
|
if orphanFileCount > 0 {
|
||||||
pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
|
pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
|
||||||
fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
|
fmt.Fprintf(writer, "dataNode:%s\tvolume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
|
||||||
volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
|
dataNodeId, volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
|
@@ -524,13 +585,13 @@ type VInfo struct {
     isReadOnly bool
 }
 
-func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {
+func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[string]map[uint32]VInfo, err error) {
 
     if verbose {
         fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
     }
 
-    volumeIdToServer = make(map[uint32]VInfo)
+    volumeIdToServer = make(map[string]map[uint32]VInfo)
     // collect topology information
     topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
     if err != nil {
|
@ -539,8 +600,10 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
|
||||||
|
|
||||||
eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
|
eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
|
||||||
for _, diskInfo := range t.DiskInfos {
|
for _, diskInfo := range t.DiskInfos {
|
||||||
|
dataNodeId := t.GetId()
|
||||||
|
volumeIdToServer[dataNodeId] = make(map[uint32]VInfo)
|
||||||
for _, vi := range diskInfo.VolumeInfos {
|
for _, vi := range diskInfo.VolumeInfos {
|
||||||
volumeIdToServer[vi.Id] = VInfo{
|
volumeIdToServer[dataNodeId][vi.Id] = VInfo{
|
||||||
server: pb.NewServerAddressFromDataNode(t),
|
server: pb.NewServerAddressFromDataNode(t),
|
||||||
collection: vi.Collection,
|
collection: vi.Collection,
|
||||||
isEcVolume: false,
|
isEcVolume: false,
|
||||||
|
@ -548,7 +611,7 @@ func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose boo
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, ecShardInfo := range diskInfo.EcShardInfos {
|
for _, ecShardInfo := range diskInfo.EcShardInfos {
|
||||||
volumeIdToServer[ecShardInfo.Id] = VInfo{
|
volumeIdToServer[dataNodeId][ecShardInfo.Id] = VInfo{
|
||||||
server: pb.NewServerAddressFromDataNode(t),
|
server: pb.NewServerAddressFromDataNode(t),
|
||||||
collection: ecShardInfo.Collection,
|
collection: ecShardInfo.Collection,
|
||||||
isEcVolume: true,
|
isEcVolume: true,
|
||||||
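collectVolumeIds now returns a two-level map keyed by data node id first and volume id second, so the same volume id can appear once per replica. A simplified sketch of that shape, not from the commit; the VInfo fields are trimmed and the addresses are made up:

// Sketch only: the lookup structure used by the reworked volume.fsck walk.
package main

import "fmt"

type VInfo struct {
    collection string
    isEcVolume bool
    isReadOnly bool
}

func main() {
    dataNodeVolumeIdToVInfo := map[string]map[uint32]VInfo{
        "10.0.0.1:8080": {1: {collection: "pictures"}, 2: {collection: "pictures"}},
        "10.0.0.2:8080": {1: {collection: "pictures"}}, // replica of volume 1 on another node
    }
    for dataNodeId, volumes := range dataNodeVolumeIdToVInfo {
        for vid, vinfo := range volumes {
            fmt.Println(dataNodeId, vid, vinfo.collection)
        }
    }
}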
|
@ -600,8 +663,8 @@ func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func getVolumeFileIdFile(tempFolder string, vid uint32) string {
|
func getVolumeFileIdFile(tempFolder string, dataNodeid string, vid uint32) string {
|
||||||
return filepath.Join(tempFolder, fmt.Sprintf("%d.idx", vid))
|
return filepath.Join(tempFolder, fmt.Sprintf("%s_%d.idx", dataNodeid, vid))
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFilerFileIdFile(tempFolder string, vid uint32) string {
|
func getFilerFileIdFile(tempFolder string, vid uint32) string {
|
||||||
|
|
|
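Because the temporary .idx files are now named per data node as well as per volume, two replicas of the same volume no longer overwrite each other's file. A standalone sketch, not from the commit; the paths and node ids are hypothetical:

// Sketch only: the per-data-node file naming keeps replica idx files apart.
package main

import (
    "fmt"
    "path/filepath"
)

func getVolumeFileIdFile(tempFolder string, dataNodeId string, vid uint32) string {
    return filepath.Join(tempFolder, fmt.Sprintf("%s_%d.idx", dataNodeId, vid))
}

func main() {
    fmt.Println(getVolumeFileIdFile("/tmp/sw_fsck", "10.0.0.1:8080", 7)) // replica on node A
    fmt.Println(getVolumeFileIdFile("/tmp/sw_fsck", "10.0.0.2:8080", 7)) // a different file for the replica on node B
}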
@@ -7,6 +7,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
     "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
     "golang.org/x/exp/slices"
+    "path/filepath"
 
     "io"
 )
@@ -16,6 +17,9 @@ func init() {
 }
 
 type commandVolumeList struct {
+    collectionPattern *string
+    readonly *bool
+    volumeId *uint64
 }
 
 func (c *commandVolumeList) Name() string {
@@ -34,6 +38,10 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
 
     volumeListCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
     verbosityLevel := volumeListCommand.Int("v", 5, "verbose mode: 0, 1, 2, 3, 4, 5")
+    c.collectionPattern = volumeListCommand.String("collectionPattern", "", "match with wildcard characters '*' and '?'")
+    c.readonly = volumeListCommand.Bool("readonly", false, "show only readonly")
+    c.volumeId = volumeListCommand.Uint64("volumeId", 0, "show only volume id")
+
     if err = volumeListCommand.Parse(args); err != nil {
         return nil
     }
@@ -44,7 +52,7 @@ func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.
         return err
     }
 
-    writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
+    c.writeTopologyInfo(writer, topologyInfo, volumeSizeLimitMb, *verbosityLevel)
     return nil
 }
 
@@ -65,53 +73,71 @@ func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
     return buf.String()
 }
 
-func writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
+func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
     output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
     slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
         return a.Id < b.Id
     })
     var s statistics
     for _, dc := range t.DataCenterInfos {
-        s = s.plus(writeDataCenterInfo(writer, dc, verbosityLevel))
+        s = s.plus(c.writeDataCenterInfo(writer, dc, verbosityLevel))
     }
     output(verbosityLevel >= 0, writer, "%+v \n", s)
     return s
 }
-func writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
     output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
     var s statistics
     slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
         return a.Id < b.Id
     })
     for _, r := range t.RackInfos {
-        s = s.plus(writeRackInfo(writer, r, verbosityLevel))
+        s = s.plus(c.writeRackInfo(writer, r, verbosityLevel))
     }
     output(verbosityLevel >= 1, writer, " DataCenter %s %+v \n", t.Id, s)
     return s
 }
-func writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
     output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
     var s statistics
     slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
         return a.Id < b.Id
     })
    for _, dn := range t.DataNodeInfos {
-        s = s.plus(writeDataNodeInfo(writer, dn, verbosityLevel))
+        s = s.plus(c.writeDataNodeInfo(writer, dn, verbosityLevel))
     }
     output(verbosityLevel >= 2, writer, " Rack %s %+v \n", t.Id, s)
     return s
 }
-func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
+
+func (c *commandVolumeList) writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo, verbosityLevel int) statistics {
     output(verbosityLevel >= 3, writer, " DataNode %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
     var s statistics
     for _, diskInfo := range t.DiskInfos {
-        s = s.plus(writeDiskInfo(writer, diskInfo, verbosityLevel))
+        s = s.plus(c.writeDiskInfo(writer, diskInfo, verbosityLevel))
     }
     output(verbosityLevel >= 3, writer, " DataNode %s %+v \n", t.Id, s)
     return s
 }
 
-func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
+func (c *commandVolumeList) isNotMatchDiskInfo(readOnly bool, collection string, volumeId uint32) bool {
+    if *c.readonly && !readOnly {
+        return true
+    }
+    if *c.collectionPattern != "" {
+        if matched, _ := filepath.Match(*c.collectionPattern, collection); !matched {
+            return true
+        }
+    }
+    if *c.volumeId > 0 && *c.volumeId != uint64(volumeId) {
+        return true
+    }
+    return false
+}
+
+func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int) statistics {
     var s statistics
     diskType := t.Type
     if diskType == "" {
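The -collectionPattern filter relies on filepath.Match, which supports the '*' and '?' wildcards mentioned in the flag help. A small demonstration, not from the commit; the collection names are made up:

// Sketch only: wildcard matching as used by isNotMatchDiskInfo above.
package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    for _, c := range []string{"pictures", "pics", "backup"} {
        matched, _ := filepath.Match("pic*", c)
        fmt.Printf("collection %q matches pattern %q: %v\n", c, "pic*", matched)
    }
}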
@@ -122,9 +148,15 @@ func writeDiskInfo(writer io.Writer, t *master_pb.DiskInfo, verbosityLevel int)
         return a.Id < b.Id
     })
     for _, vi := range t.VolumeInfos {
+        if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
+            continue
+        }
         s = s.plus(writeVolumeInformationMessage(writer, vi, verbosityLevel))
     }
     for _, ecShardInfo := range t.EcShardInfos {
+        if c.isNotMatchDiskInfo(false, ecShardInfo.Collection, ecShardInfo.Id) {
+            continue
+        }
         output(verbosityLevel >= 5, writer, " ec volume id:%v collection:%v shards:%v\n", ecShardInfo.Id, ecShardInfo.Collection, erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds())
     }
     output(verbosityLevel >= 4, writer, " Disk %s %+v \n", diskType, s)