seaweedfs/weed/command/fix.go

package command

import (
	"fmt"
	"io/fs"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func init() {
	cmdFix.Run = runFix // break init cycle
}

var cmdFix = &Command{
	UsageLine: "fix [-volumeId=234] [-collection=bigData] /tmp",
	Short:     "run weed tool fix on files or whole folders to recreate index file(s) if corrupted",
	Long: `Fix runs the SeaweedFS fix command on .dat files or whole folders to re-create the .idx index file.
`,
}

var (
	fixVolumeCollection = cmdFix.Flag.String("collection", "", "an optional volume collection name; if specified, only volumes in this collection are processed")
	fixVolumeId         = cmdFix.Flag.Int64("volumeId", 0, "an optional volume id; if not 0 (the default), only this volume is processed")
)
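
// VolumeFileScanner4Fix implements the storage.VolumeFileScanner interface:
// it replays every needle found in a .dat file so the needle map can be
// rebuilt in memory and saved back out as a fresh .idx file.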
type VolumeFileScanner4Fix struct {
	version needle.Version
	nm      *needle_map.MemDb
}
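
// VisitSuperBlock records the volume version from the super block; the
// version is needed to compute each needle's on-disk size.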
func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock super_block.SuperBlock) error {
	scanner.version = superBlock.Version
	return nil
}
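
// ReadNeedleBody returns false: rebuilding the index only needs each
// needle's header (id, offset, size), so needle bodies are never read.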
func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
	return false
}
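
// VisitNeedle records a live needle's offset and size in the needle map,
// and deletes the map entry for a needle whose size marks it as deleted.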
func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
	glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
	if n.Size.IsValid() {
		pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size)
		glog.V(2).Infof("saved %d with error %v", n.Size, pe)
	} else {
		glog.V(2).Infof("skipping deleted file ...")
		return scanner.nm.Delete(n.Id)
	}
	return nil
}
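
// runFix walks each argument: a directory is scanned for .dat files, while a
// plain file path is handled on its own. Candidates are then filtered by the
// optional -collection and -volumeId flags before being fixed.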
func runFix(cmd *Command, args []string) bool {
	for _, arg := range args {
		basePath, f := path.Split(util.ResolvePath(arg))

		files := []fs.DirEntry{}
		if f == "" {
			dirEntries, err := os.ReadDir(basePath)
			if err != nil {
				fmt.Println(err)
				return false
			}
			files = dirEntries
		} else {
			fileInfo, err := os.Stat(basePath + f)
			if err != nil {
				fmt.Println(err)
				return false
			}
			files = []fs.DirEntry{fs.FileInfoToDirEntry(fileInfo)}
		}

		for _, file := range files {
			if !strings.HasSuffix(file.Name(), ".dat") {
				continue
			}
			if *fixVolumeCollection != "" {
				if !strings.HasPrefix(file.Name(), *fixVolumeCollection+"_") {
					continue
				}
			}
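			// a volume file is named either "<volumeId>.dat" or
			// "<collection>_<volumeId>.dat"; split on the last "_"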
			baseFileName := file.Name()[:len(file.Name())-4]
			collection, volumeIdStr := "", baseFileName
			if sepIndex := strings.LastIndex(baseFileName, "_"); sepIndex > 0 {
				collection = baseFileName[:sepIndex]
				volumeIdStr = baseFileName[sepIndex+1:]
			}
			volumeId, parseErr := strconv.ParseInt(volumeIdStr, 10, 64)
			if parseErr != nil {
				fmt.Printf("Failed to parse volume id from %s: %v\n", baseFileName, parseErr)
				return false
			}
			if *fixVolumeId != 0 && *fixVolumeId != volumeId {
				continue
			}
			doFixOneVolume(basePath, baseFileName, collection, volumeId)
		}
	}
	return true
}
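
// doFixOneVolume scans one volume's .dat file and writes a fresh .idx file
// next to it. A scan or save failure is fatal; the possibly partial index
// file is removed first so a corrupt index is never left behind.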
func doFixOneVolume(basepath string, baseFileName string, collection string, volumeId int64) {
	indexFileName := path.Join(basepath, baseFileName+".idx")

	nm := needle_map.NewMemDb()
	defer nm.Close()

	vid := needle.VolumeId(volumeId)
	scanner := &VolumeFileScanner4Fix{
		nm: nm,
	}

	if err := storage.ScanVolumeFile(basepath, collection, vid, storage.NeedleMapInMemory, scanner); err != nil {
		// remove any partial index before exiting: glog.Fatalf never returns,
		// so cleanup placed after it would be unreachable
		os.Remove(indexFileName)
		glog.Fatalf("scan .dat File: %v", err)
	}

	if err := nm.SaveToIdx(indexFileName); err != nil {
		os.Remove(indexFileName)
		glog.Fatalf("save to .idx File: %v", err)
	}
}