2020-03-24 09:18:13 +00:00
|
|
|
package shell
|
|
|
|
|
|
|
|
import (
|
2021-06-25 06:56:24 +00:00
|
|
|
"bufio"
|
2020-03-24 09:18:13 +00:00
|
|
|
"context"
|
2020-03-25 05:38:33 +00:00
|
|
|
"flag"
|
2020-03-24 09:18:13 +00:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"math"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2020-03-25 09:21:15 +00:00
|
|
|
"sync"
|
2020-03-24 09:18:13 +00:00
|
|
|
|
2020-09-01 07:21:19 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
2020-03-24 09:18:13 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/operation"
|
2021-10-14 04:27:58 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb"
|
2020-03-24 09:18:13 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
|
2021-10-14 04:27:58 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
2020-03-24 09:18:13 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
|
|
|
)
|
|
|
|
|
|
|
|
// init registers the volume.fsck command into the shell's global command list.
func init() {
	Commands = append(Commands, &commandVolumeFsck{})
}
|
|
|
|
|
|
|
|
// commandVolumeFsck cross-checks the chunk references held by the filer
// against the needle data actually stored on the volume servers, in either
// direction (orphans on volumes, or chunks missing from volumes).
type commandVolumeFsck struct {
	env *CommandEnv // shell environment, captured in Do() for use by the helper methods
}
|
|
|
|
|
|
|
|
// Name returns the shell command name used to invoke this command.
func (c *commandVolumeFsck) Name() string {
	return "volume.fsck"
}
|
|
|
|
|
|
|
|
// Help returns the usage text shown by "help volume.fsck".
func (c *commandVolumeFsck) Help() string {
	return `check all volumes to find entries not used by the filer

	Important assumption!!!
		the system is all used by one filer.

	This command works this way:
	1. collect all file ids from all volumes, as set A
	2. collect all file ids from the filer, as set B
	3. find out the set A subtract B

	If -findMissingChunksInFiler is enabled, this works
	in a reverse way:
	1. collect all file ids from all volumes, as set A
	2. collect all file ids from the filer, as set B
	3. find out the set B subtract A

`
}
|
|
|
|
|
|
|
|
// Do implements the volume.fsck command.
//
// It parses flags, creates a temporary working folder, copies every
// volume's needle index locally, and then runs one of two checks:
//   - default: find chunks present on volume servers but not referenced
//     by the filer (orphans), optionally purging them when
//     -reallyDeleteFromVolume is set;
//   - -findMissingChunksInFiler: the reverse check, reporting
//     filer-referenced chunks that are missing from volume servers.
func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {

	fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	verbose := fsckCommand.Bool("v", false, "verbose mode")
	findMissingChunksInFiler := fsckCommand.Bool("findMissingChunksInFiler", false, "see \"help volume.fsck\"")
	findMissingChunksInFilerPath := fsckCommand.String("findMissingChunksInFilerPath", "/", "used together with findMissingChunksInFiler")
	applyPurging := fsckCommand.Bool("reallyDeleteFromVolume", false, "<expert only> delete data not referenced by the filer")
	if err = fsckCommand.Parse(args); err != nil {
		// the FlagSet (ContinueOnError) reports the problem itself; do not propagate
		return nil
	}

	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}

	c.env = commandEnv

	// create a temp folder to hold the collected .idx/.fid files
	tempFolder, err := os.MkdirTemp("", "sw_fsck")
	if err != nil {
		return fmt.Errorf("failed to create temp folder: %v", err)
	}
	if *verbose {
		fmt.Fprintf(writer, "working directory: %s\n", tempFolder)
	}
	defer os.RemoveAll(tempFolder)

	// collect all volume id locations
	volumeIdToVInfo, err := c.collectVolumeIds(commandEnv, *verbose, writer)
	if err != nil {
		return fmt.Errorf("failed to collect all volume locations: %v", err)
	}

	// collect each volume's file ids (needle index) from its server
	for volumeId, vinfo := range volumeIdToVInfo {
		err = c.collectOneVolumeFileIds(tempFolder, volumeId, vinfo, *verbose, writer)
		if err != nil {
			return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, err)
		}
	}

	if *findMissingChunksInFiler {
		// collect all filer file ids and paths
		if err = c.collectFilerFileIdAndPaths(volumeIdToVInfo, tempFolder, writer, *findMissingChunksInFilerPath, *verbose, applyPurging); err != nil {
			return fmt.Errorf("collectFilerFileIdAndPaths: %v", err)
		}
		// for each volume, check filer file ids
		if err = c.findFilerChunksMissingInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, applyPurging); err != nil {
			return fmt.Errorf("findFilerChunksMissingInVolumeServers: %v", err)
		}
	} else {
		// collect all filer file ids
		if err = c.collectFilerFileIds(tempFolder, volumeIdToVInfo, *verbose, writer); err != nil {
			return fmt.Errorf("failed to collect file ids from filer: %v", err)
		}
		// volume file ids subtract filer file ids -> orphan chunks
		if err = c.findExtraChunksInVolumeServers(volumeIdToVInfo, tempFolder, writer, *verbose, applyPurging); err != nil {
			return fmt.Errorf("findExtraChunksInVolumeServers: %v", err)
		}
	}

	return nil
}
|
|
|
|
|
2021-06-28 06:32:57 +00:00
|
|
|
// collectFilerFileIdAndPaths walks the filer tree rooted at filerPath and
// records, per volume id, every chunk the filer references together with
// its full path. For each known volume id a "<vid>.fid" file is created in
// tempFolder holding fixed records: a 16-byte header (8-byte fileKey,
// 4-byte cookie, 4-byte path length) followed by the path bytes. Chunks
// referencing an unknown volume id are reported to writer instead.
//
// NOTE(review): the applyPurging parameter is currently unused here.
func (c *commandVolumeFsck) collectFilerFileIdAndPaths(volumeIdToServer map[uint32]VInfo, tempFolder string, writer io.Writer, filerPath string, verbose bool, applyPurging *bool) error {

	if verbose {
		fmt.Fprintf(writer, "checking each file from filer ...\n")
	}

	// open one output file per known volume id
	files := make(map[uint32]*os.File)
	for vid := range volumeIdToServer {
		dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if openErr != nil {
			return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
		}
		files[vid] = dst
	}
	defer func() {
		for _, f := range files {
			f.Close()
		}
	}()

	// Item is the unit passed from the traversal callback to the saver.
	type Item struct {
		vid     uint32
		fileKey uint64
		cookie  uint32
		path    util.FullPath
	}
	return doTraverseBfsAndSaving(c.env, nil, filerPath, false, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
		if verbose && entry.Entry.IsDirectory {
			fmt.Fprintf(writer, "checking directory %s\n", util.NewFullPath(entry.Dir, entry.Entry.Name))
		}
		// resolve manifest chunks so both data and manifest chunks are counted
		dChunks, mChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks, 0, math.MaxInt64)
		if resolveErr != nil {
			// skip unresolvable entries rather than failing the whole scan
			return nil
		}
		dChunks = append(dChunks, mChunks...)
		for _, chunk := range dChunks {
			outputChan <- &Item{
				vid:     chunk.Fid.VolumeId,
				fileKey: chunk.Fid.FileKey,
				cookie:  chunk.Fid.Cookie,
				path:    util.NewFullPath(entry.Dir, entry.Entry.Name),
			}
		}
		return nil
	}, func(outputChan chan interface{}) {
		buffer := make([]byte, 16)
		for item := range outputChan {
			i := item.(*Item)
			if f, ok := files[i.vid]; ok {
				// 16-byte header: fileKey, cookie, path length; then the path
				util.Uint64toBytes(buffer, i.fileKey)
				util.Uint32toBytes(buffer[8:], i.cookie)
				util.Uint32toBytes(buffer[12:], uint32(len(i.path)))
				f.Write(buffer)
				f.Write([]byte(i.path))
				// fmt.Fprintf(writer, "%d,%x%08x %d %s\n", i.vid, i.fileKey, i.cookie, len(i.path), i.path)
			} else {
				fmt.Fprintf(writer, "%d,%x%08x %s volume not found\n", i.vid, i.fileKey, i.cookie, i.path)
			}
		}
	})

}
|
|
|
|
|
|
|
|
func (c *commandVolumeFsck) findFilerChunksMissingInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging *bool) error {
|
2021-06-25 00:22:53 +00:00
|
|
|
|
2021-06-25 06:56:24 +00:00
|
|
|
for volumeId, vinfo := range volumeIdToVInfo {
|
|
|
|
checkErr := c.oneVolumeFileIdsCheckOneVolume(tempFolder, volumeId, writer, verbose)
|
|
|
|
if checkErr != nil {
|
|
|
|
return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
2021-06-25 00:22:53 +00:00
|
|
|
}
|
|
|
|
|
2021-06-25 06:56:24 +00:00
|
|
|
// findExtraChunksInVolumeServers finds, per volume, needles that exist on
// the volume server but are not referenced by the filer (orphans), prints
// a summary, and — when applyPurging is set — deletes them. Purging is
// skipped for erasure-coded and read-only volumes; a volume with zero
// in-use entries is deleted outright instead of purged needle by needle.
func (c *commandVolumeFsck) findExtraChunksInVolumeServers(volumeIdToVInfo map[uint32]VInfo, tempFolder string, writer io.Writer, verbose bool, applyPurging *bool) error {
	var totalInUseCount, totalOrphanChunkCount, totalOrphanDataSize uint64
	for volumeId, vinfo := range volumeIdToVInfo {
		inUseCount, orphanFileIds, orphanDataSize, checkErr := c.oneVolumeFileIdsSubtractFilerFileIds(tempFolder, volumeId, writer, verbose)
		if checkErr != nil {
			return fmt.Errorf("failed to collect file ids from volume %d on %s: %v", volumeId, vinfo.server, checkErr)
		}
		totalInUseCount += inUseCount
		totalOrphanChunkCount += uint64(len(orphanFileIds))
		totalOrphanDataSize += orphanDataSize

		if verbose {
			// list every orphan file id
			for _, fid := range orphanFileIds {
				fmt.Fprintf(writer, "%s\n", fid)
			}
		}

		if *applyPurging && len(orphanFileIds) > 0 {
			if vinfo.isEcVolume {
				fmt.Fprintf(writer, "Skip purging for Erasure Coded volume %d.\n", volumeId)
				continue
			}
			if vinfo.isReadOnly {
				fmt.Fprintf(writer, "Skip purging for read only volume %d.\n", volumeId)
				continue
			}
			if inUseCount == 0 {
				// nothing on this volume is referenced: drop the whole volume
				if err := deleteVolume(c.env.option.GrpcDialOption, needle.VolumeId(volumeId), vinfo.server); err != nil {
					return fmt.Errorf("delete volume %d: %v", volumeId, err)
				}
			} else {
				if err := c.purgeFileIdsForOneVolume(volumeId, orphanFileIds, writer); err != nil {
					return fmt.Errorf("purge for volume %d: %v", volumeId, err)
				}
			}
		}
	}

	if totalOrphanChunkCount == 0 {
		fmt.Fprintf(writer, "no orphan data\n")
		return nil
	}

	if !*applyPurging {
		// report-only mode: print the overall orphan percentage and size
		pct := float64(totalOrphanChunkCount*100) / (float64(totalOrphanChunkCount + totalInUseCount))
		fmt.Fprintf(writer, "\nTotal\t\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
			totalOrphanChunkCount+totalInUseCount, totalOrphanChunkCount, pct, totalOrphanDataSize)
		fmt.Fprintf(writer, "This could be normal if multiple filers or no filers are used.\n")
	}
	return nil
}
|
|
|
|
|
2020-03-25 07:56:47 +00:00
|
|
|
// collectOneVolumeFileIds streams one volume's needle index file from its
// volume server (via the CopyFile gRPC) into tempFolder, where it is saved
// under "<vid>.idx" by getVolumeFileIdFile.
func (c *commandVolumeFsck) collectOneVolumeFileIds(tempFolder string, volumeId uint32, vinfo VInfo, verbose bool, writer io.Writer) error {

	if verbose {
		fmt.Fprintf(writer, "collecting volume %d file ids from %s ...\n", volumeId, vinfo.server)
	}

	return operation.WithVolumeServerClient(false, vinfo.server, c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {

		// erasure-coded volumes keep their index in a .ecx file
		ext := ".idx"
		if vinfo.isEcVolume {
			ext = ".ecx"
		}

		// MaxUint32/MaxInt64 request the full file at any compaction revision
		copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
			VolumeId:                 volumeId,
			Ext:                      ext,
			CompactionRevision:       math.MaxUint32,
			StopOffset:               math.MaxInt64,
			Collection:               vinfo.collection,
			IsEcVolume:               vinfo.isEcVolume,
			IgnoreSourceFileNotFound: false,
		})
		if err != nil {
			return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
		}

		err = writeToFile(copyFileClient, getVolumeFileIdFile(tempFolder, volumeId))
		if err != nil {
			return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, vinfo.server, err)
		}

		return nil

	})

}
|
|
|
|
|
2020-03-25 07:56:47 +00:00
|
|
|
func (c *commandVolumeFsck) collectFilerFileIds(tempFolder string, volumeIdToServer map[uint32]VInfo, verbose bool, writer io.Writer) error {
|
|
|
|
|
|
|
|
if verbose {
|
|
|
|
fmt.Fprintf(writer, "collecting file ids from filer ...\n")
|
|
|
|
}
|
2020-03-24 09:18:13 +00:00
|
|
|
|
|
|
|
files := make(map[uint32]*os.File)
|
|
|
|
for vid := range volumeIdToServer {
|
|
|
|
dst, openErr := os.OpenFile(getFilerFileIdFile(tempFolder, vid), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
|
|
|
if openErr != nil {
|
|
|
|
return fmt.Errorf("failed to create file %s: %v", getFilerFileIdFile(tempFolder, vid), openErr)
|
|
|
|
}
|
|
|
|
files[vid] = dst
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
for _, f := range files {
|
|
|
|
f.Close()
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
type Item struct {
|
|
|
|
vid uint32
|
|
|
|
fileKey uint64
|
|
|
|
}
|
2022-01-21 20:08:58 +00:00
|
|
|
return doTraverseBfsAndSaving(c.env, nil, "/", false, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
|
2021-07-20 06:07:22 +00:00
|
|
|
dChunks, mChunks, resolveErr := filer.ResolveChunkManifest(filer.LookupFn(c.env), entry.Entry.Chunks, 0, math.MaxInt64)
|
2020-07-21 05:02:05 +00:00
|
|
|
if resolveErr != nil {
|
2021-08-13 09:57:14 +00:00
|
|
|
if verbose {
|
|
|
|
fmt.Fprintf(writer, "resolving manifest chunks in %s: %v\n", util.NewFullPath(entry.Dir, entry.Entry.Name), resolveErr)
|
|
|
|
}
|
2020-07-21 05:02:05 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
dChunks = append(dChunks, mChunks...)
|
|
|
|
for _, chunk := range dChunks {
|
2020-03-24 09:18:13 +00:00
|
|
|
outputChan <- &Item{
|
|
|
|
vid: chunk.Fid.VolumeId,
|
|
|
|
fileKey: chunk.Fid.FileKey,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
2022-01-21 20:08:58 +00:00
|
|
|
}, func(outputChan chan interface{}) {
|
|
|
|
buffer := make([]byte, 8)
|
|
|
|
for item := range outputChan {
|
|
|
|
i := item.(*Item)
|
|
|
|
util.Uint64toBytes(buffer, i.fileKey)
|
|
|
|
files[i.vid].Write(buffer)
|
|
|
|
}
|
2020-03-24 09:18:13 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2021-06-25 06:56:24 +00:00
|
|
|
func (c *commandVolumeFsck) oneVolumeFileIdsCheckOneVolume(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (err error) {
|
|
|
|
|
2021-06-28 06:32:57 +00:00
|
|
|
if verbose {
|
|
|
|
fmt.Fprintf(writer, "find missing file chuns in volume %d ...\n", volumeId)
|
|
|
|
}
|
|
|
|
|
2021-06-25 06:56:24 +00:00
|
|
|
db := needle_map.NewMemDb()
|
|
|
|
defer db.Close()
|
|
|
|
|
|
|
|
if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
file := getFilerFileIdFile(tempFolder, volumeId)
|
|
|
|
fp, err := os.Open(file)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defer fp.Close()
|
|
|
|
|
|
|
|
type Item struct {
|
|
|
|
fileKey uint64
|
|
|
|
cookie uint32
|
|
|
|
path util.FullPath
|
|
|
|
}
|
|
|
|
|
|
|
|
br := bufio.NewReader(fp)
|
|
|
|
buffer := make([]byte, 16)
|
|
|
|
item := &Item{}
|
|
|
|
var readSize int
|
|
|
|
for {
|
2021-06-28 06:32:57 +00:00
|
|
|
readSize, err = io.ReadFull(br, buffer)
|
2021-06-25 06:56:24 +00:00
|
|
|
if err != nil || readSize != 16 {
|
|
|
|
if err == io.EOF {
|
|
|
|
return nil
|
|
|
|
} else {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
item.fileKey = util.BytesToUint64(buffer[:8])
|
|
|
|
item.cookie = util.BytesToUint32(buffer[8:12])
|
|
|
|
pathSize := util.BytesToUint32(buffer[12:16])
|
|
|
|
pathBytes := make([]byte, int(pathSize))
|
2021-06-28 06:32:57 +00:00
|
|
|
n, err := io.ReadFull(br, pathBytes)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Fprintf(writer, "%d,%x%08x in unexpected error: %v\n", volumeId, item.fileKey, item.cookie, err)
|
|
|
|
}
|
|
|
|
if n != int(pathSize) {
|
|
|
|
fmt.Fprintf(writer, "%d,%x%08x %d unexpected file name size %d\n", volumeId, item.fileKey, item.cookie, pathSize, n)
|
|
|
|
}
|
2021-06-25 06:56:24 +00:00
|
|
|
item.path = util.FullPath(string(pathBytes))
|
|
|
|
|
|
|
|
if _, found := db.Get(types.NeedleId(item.fileKey)); !found {
|
2021-06-28 06:32:57 +00:00
|
|
|
fmt.Fprintf(writer, "%d,%x%08x in %s %d not found\n", volumeId, item.fileKey, item.cookie, item.path, pathSize)
|
2021-06-25 06:56:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-03-25 09:21:15 +00:00
|
|
|
// oneVolumeFileIdsSubtractFilerFileIds computes, for one volume, the
// needles present in the volume index but not referenced by the filer.
//
// It loads "<vid>.idx" into an in-memory needle db, deletes every 8-byte
// file key recorded in "<vid>.fid", and treats whatever remains as orphan
// data. It returns the in-use entry count, the orphan file ids (formatted
// "<vid>,<key>00000000" with a zeroed cookie), and the total orphan size
// in bytes.
func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder string, volumeId uint32, writer io.Writer, verbose bool) (inUseCount uint64, orphanFileIds []string, orphanDataSize uint64, err error) {

	db := needle_map.NewMemDb()
	defer db.Close()

	if err = db.LoadFromIdx(getVolumeFileIdFile(tempFolder, volumeId)); err != nil {
		return
	}

	filerFileIdsData, err := os.ReadFile(getFilerFileIdFile(tempFolder, volumeId))
	if err != nil {
		return
	}

	// the .fid file must be a whole number of 8-byte file keys
	dataLen := len(filerFileIdsData)
	if dataLen%8 != 0 {
		return 0, nil, 0, fmt.Errorf("filer data is corrupted")
	}

	// remove every filer-referenced key from the volume's needle set
	for i := 0; i < len(filerFileIdsData); i += 8 {
		fileKey := util.BytesToUint64(filerFileIdsData[i : i+8])
		db.Delete(types.NeedleId(fileKey))
		inUseCount++
	}

	// whatever is left in the db has no filer reference: orphan data
	var orphanFileCount uint64
	db.AscendingVisit(func(n needle_map.NeedleValue) error {
		// fmt.Printf("%d,%x\n", volumeId, n.Key)
		orphanFileIds = append(orphanFileIds, fmt.Sprintf("%d,%s00000000", volumeId, n.Key.String()))
		orphanFileCount++
		orphanDataSize += uint64(n.Size)
		return nil
	})

	if orphanFileCount > 0 {
		pct := float64(orphanFileCount*100) / (float64(orphanFileCount + inUseCount))
		fmt.Fprintf(writer, "volume:%d\tentries:%d\torphan:%d\t%.2f%%\t%dB\n",
			volumeId, orphanFileCount+inUseCount, orphanFileCount, pct, orphanDataSize)
	}

	return

}
|
|
|
|
|
|
|
|
// VInfo describes where a volume lives and how fsck may treat it.
type VInfo struct {
	server     pb.ServerAddress // volume server holding this volume (or EC shard)
	collection string           // collection the volume belongs to
	isEcVolume bool             // true for entries built from EC shard info
	isReadOnly bool             // purging is skipped for read-only volumes
}
|
|
|
|
|
2021-02-22 08:28:42 +00:00
|
|
|
// collectVolumeIds queries the master's topology and returns a map from
// volume id to its location and attributes. Regular volumes carry their
// reported read-only flag; EC shard entries are always marked EC and
// read-only.
func (c *commandVolumeFsck) collectVolumeIds(commandEnv *CommandEnv, verbose bool, writer io.Writer) (volumeIdToServer map[uint32]VInfo, err error) {

	if verbose {
		fmt.Fprintf(writer, "collecting volume id and locations from master ...\n")
	}

	volumeIdToServer = make(map[uint32]VInfo)
	// collect topology information
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return
	}

	// walk every data node and index its volumes and EC shards by id
	eachDataNode(topologyInfo, func(dc string, rack RackId, t *master_pb.DataNodeInfo) {
		for _, diskInfo := range t.DiskInfos {
			for _, vi := range diskInfo.VolumeInfos {
				volumeIdToServer[vi.Id] = VInfo{
					server:     pb.NewServerAddressFromDataNode(t),
					collection: vi.Collection,
					isEcVolume: false,
					isReadOnly: vi.ReadOnly,
				}
			}
			for _, ecShardInfo := range diskInfo.EcShardInfos {
				volumeIdToServer[ecShardInfo.Id] = VInfo{
					server:     pb.NewServerAddressFromDataNode(t),
					collection: ecShardInfo.Collection,
					isEcVolume: true,
					isReadOnly: true,
				}
			}
		}
	})

	if verbose {
		fmt.Fprintf(writer, "collected %d volumes and locations.\n", len(volumeIdToServer))
	}
	return
}
|
|
|
|
|
2020-03-25 09:21:15 +00:00
|
|
|
func (c *commandVolumeFsck) purgeFileIdsForOneVolume(volumeId uint32, fileIds []string, writer io.Writer) (err error) {
|
|
|
|
fmt.Fprintf(writer, "purging orphan data for volume %d...\n", volumeId)
|
|
|
|
locations, found := c.env.MasterClient.GetLocations(volumeId)
|
|
|
|
if !found {
|
|
|
|
return fmt.Errorf("failed to find volume %d locations", volumeId)
|
|
|
|
}
|
|
|
|
|
|
|
|
resultChan := make(chan []*volume_server_pb.DeleteResult, len(locations))
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for _, location := range locations {
|
|
|
|
wg.Add(1)
|
2021-09-13 05:47:52 +00:00
|
|
|
go func(server pb.ServerAddress, fidList []string) {
|
2020-03-25 09:21:15 +00:00
|
|
|
defer wg.Done()
|
|
|
|
|
|
|
|
if deleteResults, deleteErr := operation.DeleteFilesAtOneVolumeServer(server, c.env.option.GrpcDialOption, fidList, false); deleteErr != nil {
|
|
|
|
err = deleteErr
|
|
|
|
} else if deleteResults != nil {
|
|
|
|
resultChan <- deleteResults
|
|
|
|
}
|
|
|
|
|
2021-09-13 05:47:52 +00:00
|
|
|
}(location.ServerAddress(), fileIds)
|
2020-03-25 09:21:15 +00:00
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
close(resultChan)
|
|
|
|
|
|
|
|
for results := range resultChan {
|
|
|
|
for _, result := range results {
|
|
|
|
if result.Error != "" {
|
|
|
|
fmt.Fprintf(writer, "purge error: %s\n", result.Error)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-03-24 09:18:13 +00:00
|
|
|
// getVolumeFileIdFile returns the path, inside tempFolder, of the ".idx"
// file that holds the needle index collected from volume vid.
func getVolumeFileIdFile(tempFolder string, vid uint32) string {
	name := fmt.Sprintf("%d.idx", vid)
	return filepath.Join(tempFolder, name)
}
|
|
|
|
|
|
|
|
// getFilerFileIdFile returns the path, inside tempFolder, of the ".fid"
// file that holds the filer-referenced file keys for volume vid.
func getFilerFileIdFile(tempFolder string, vid uint32) string {
	name := fmt.Sprintf("%d.fid", vid)
	return filepath.Join(tempFolder, name)
}
|
|
|
|
|
|
|
|
func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string) error {
|
|
|
|
flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
|
|
|
|
dst, err := os.OpenFile(fileName, flags, 0644)
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
defer dst.Close()
|
|
|
|
|
|
|
|
for {
|
|
|
|
resp, receiveErr := client.Recv()
|
|
|
|
if receiveErr == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if receiveErr != nil {
|
|
|
|
return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
|
|
|
|
}
|
|
|
|
dst.Write(resp.FileContent)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|