Mirror of https://github.com/seaweedfs/seaweedfs.git
one-off fix for issues/93
https://github.com/chrislusf/weed-fs/issues/93

commit 981c981fa8 (parent 62f5e35cbe)
unmaintained/README.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
Here is the folder of unmaintained Go code.

Although its content is sometimes useful,
it will not be released,
and the code can be ugly, e.g., not checking errors.

If any code is used often, it will be productionized.
So it is helpful if anyone wants to clean it up a bit.
unmaintained/fix_dat/fix_dat.go (new file, 130 lines)
@@ -0,0 +1,130 @@
package main

import (
	"flag"
	"fmt"
	"io"
	"os"
	"path"
	"strconv"

	"github.com/chrislusf/weed-fs/go/glog"
	"github.com/chrislusf/weed-fs/go/storage"
	"github.com/chrislusf/weed-fs/go/util"
)

var (
	fixVolumePath       = flag.String("dir", "/tmp", "data directory to store files")
	fixVolumeCollection = flag.String("collection", "", "the volume collection name")
	fixVolumeId         = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
)

/*
This is to resolve a one-time issue that caused inconsistency between the .dat and .idx files.
1. fix the .dat file; a new .dat_fixed file will be generated.
	go run fix_dat.go -volumeId=9 -dir=/Users/chrislu/Downloads
2. move the original .dat and .idx files to some backup folder, and rename the .dat_fixed file to .dat
	mv 9.dat_fixed 9.dat
3. rebuild the .idx file with "weed fix"
	weed fix -volumeId=9 -dir=/Users/chrislu/Downloads
*/
func main() {
	flag.Parse()
	fileName := strconv.Itoa(*fixVolumeId)
	if *fixVolumeCollection != "" {
		fileName = *fixVolumeCollection + "_" + fileName
	}
	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
	if err != nil {
		glog.Fatalf("Read Volume Index [ERROR] %s\n", err)
	}
	defer indexFile.Close()
	datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDONLY, 0644)
	if err != nil {
		glog.Fatalf("Read Volume Data [ERROR] %s\n", err)
	}
	defer datFile.Close()

	newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
	if err != nil {
		glog.Fatalf("Write New Volume Data [ERROR] %s\n", err)
	}
	defer newDatFile.Close()

	// copy the volume super block over unchanged
	header := make([]byte, storage.SuperBlockSize)
	datFile.Read(header)
	newDatFile.Write(header)

	// walk the .dat file guided by the .idx file, appending each recovered needle to the new .dat
	iterateEntries(datFile, indexFile, func(n *storage.Needle, offset int64) {
		fmt.Printf("file id=%d name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize)
		s, e := n.Append(newDatFile, storage.Version2)
		fmt.Printf("size %d error %v\n", s, e)
	})

}

func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needle, offset int64)) {
	// start to read index file
	var readerOffset int64
	bytes := make([]byte, 16)
	count, _ := idxFile.ReadAt(bytes, readerOffset)
	readerOffset += int64(count)

	// start to read dat file
	offset := int64(storage.SuperBlockSize)
	version := storage.Version2
	n, rest, err := storage.ReadNeedleHeader(datFile, version, offset)
	if err != nil {
		fmt.Printf("cannot read needle header: %v", err)
		return
	}
	fmt.Printf("Needle %+v, rest %d\n", n, rest)
	for n != nil && count > 0 {
		// parse index file entry: 8-byte key, 4-byte offset, 4-byte size
		key := util.BytesToUint64(bytes[0:8])
		offsetFromIndex := util.BytesToUint32(bytes[8:12])
		sizeFromIndex := util.BytesToUint32(bytes[12:16])
		count, _ = idxFile.ReadAt(bytes, readerOffset)
		readerOffset += int64(count)

		// the index stores offsets in units of 8 bytes; trust it when it disagrees with the running offset
		if offsetFromIndex != 0 && offset != int64(offsetFromIndex)*8 {
			//t := offset
			offset = int64(offsetFromIndex) * 8
			//fmt.Printf("Offset change %d => %d\n", t, offset)
		}

		fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex)

		// recompute the body length from the size recorded in the index, including checksum and padding
		padding := storage.NeedlePaddingSize - ((sizeFromIndex + storage.NeedleHeaderSize + storage.NeedleChecksumSize) % storage.NeedlePaddingSize)
		rest = sizeFromIndex + storage.NeedleChecksumSize + padding

		func() {
			defer func() {
				if r := recover(); r != nil {
					fmt.Println("Recovered in f", r)
				}
			}()
			if err = n.ReadNeedleBody(datFile, version, offset+int64(storage.NeedleHeaderSize), rest); err != nil {
				fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err)
			}
		}()

		if n.Size <= n.DataSize {
			continue
		}
		visitNeedle(n, offset)

		offset += int64(storage.NeedleHeaderSize) + int64(rest)
		//fmt.Printf("==> new entry offset %d\n", offset)
		if n, rest, err = storage.ReadNeedleHeader(datFile, version, offset); err != nil {
			if err == io.EOF {
				return
			}
			fmt.Printf("cannot read needle header: %v\n", err)
			return
		}
		//fmt.Printf("new entry needle size:%d rest:%d\n", n.Size, rest)
	}

}
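For reference, here is a minimal standalone sketch of how the fixer interprets each 16-byte .idx record (8-byte needle key, 4-byte offset stored in units of 8 bytes, 4-byte size). It assumes the big-endian layout implied by util.BytesToUint64/BytesToUint32; decodeIndexEntry and the record contents below are made up for illustration, not part of the commit.

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeIndexEntry splits one 16-byte .idx record into its three fields.
// The stored offset is the byte offset into the .dat file divided by 8,
// which is why the loop above multiplies offsetFromIndex by 8.
func decodeIndexEntry(b []byte) (key uint64, offset uint32, size uint32) {
	key = binary.BigEndian.Uint64(b[0:8])
	offset = binary.BigEndian.Uint32(b[8:12])
	size = binary.BigEndian.Uint32(b[12:16])
	return
}

func main() {
	// hypothetical record: key=42, offset=1 (byte offset 8), size=100
	entry := make([]byte, 16)
	binary.BigEndian.PutUint64(entry[0:8], 42)
	binary.BigEndian.PutUint32(entry[8:12], 1)
	binary.BigEndian.PutUint32(entry[12:16], 100)

	key, off, size := decodeIndexEntry(entry)
	fmt.Printf("key=%d byte offset=%d size=%d\n", key, int64(off)*8, size)
}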