Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
Merge pull request #1397 from levenlabs/diff
Added diffing of multiple volume servers
Commit: 2c06298169

unmaintained/diff_volume_servers/diff_volume_servers.go (new file, 194 lines)
@@ -0,0 +1,194 @@
package main

import (
    "bytes"
    "context"
    "errors"
    "flag"
    "fmt"
    "io"
    "math"
    "os"
    "strings"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/storage/idx"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
    "github.com/chrislusf/seaweedfs/weed/util"
    "google.golang.org/grpc"
)

var (
    serversStr       = flag.String("volumeServers", "", "comma-delimited list of volume servers to diff the volume against")
    volumeId         = flag.Int("volumeId", -1, "a volume id to diff from servers")
    volumeCollection = flag.String("collection", "", "the volume collection name")
    grpcDialOption   grpc.DialOption
)

/*
Diff the volume's files across multiple volume servers.
diff_volume_servers -volumeServers 127.0.0.1:8080,127.0.0.1:8081 -volumeId 5

Example Output:
reference 127.0.0.1:8081
fileId volumeServer message
5,01617c3f61 127.0.0.1:8080 wrongSize
*/
func main() {
    flag.Parse()

    util.LoadConfiguration("security", false)
    grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

    vid := uint32(*volumeId)
    servers := strings.Split(*serversStr, ",")
    if len(servers) < 2 {
        glog.Fatalf("You must specify more than 1 server\n")
    }
    var referenceServer string
    var maxOffset int64
    allFiles := map[string]map[types.NeedleId]needleState{}
    for _, addr := range servers {
        files, offset, err := getVolumeFiles(vid, addr)
        if err != nil {
            glog.Fatalf("Failed to copy idx from volume server %s\n", err)
        }
        allFiles[addr] = files
        if offset > maxOffset {
            referenceServer = addr
        }
    }

    same := true
    fmt.Println("reference", referenceServer)
    fmt.Println("fileId volumeServer message")
    for nid, n := range allFiles[referenceServer] {
        for addr, files := range allFiles {
            if addr == referenceServer {
                continue
            }
            var diffMsg string
            n2, ok := files[nid]
            if !ok {
                if n.state == stateDeleted {
                    continue
                }
                diffMsg = "missing"
            } else if n2.state != n.state {
                switch n.state {
                case stateDeleted:
                    diffMsg = "notDeleted"
                case statePresent:
                    diffMsg = "deleted"
                }
            } else if n2.size != n.size {
                diffMsg = "wrongSize"
            } else {
                continue
            }
            same = false

            // fetch the needle details
            var id string
            var err error
            if n.state == statePresent {
                id, err = getNeedleFileId(vid, nid, referenceServer)
            } else {
                id, err = getNeedleFileId(vid, nid, addr)
            }
            if err != nil {
                glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
            }
            fmt.Println(id, addr, diffMsg)
        }
    }
    if !same {
        os.Exit(1)
    }
}

const (
    stateDeleted uint8 = 1
    statePresent uint8 = 2
)

type needleState struct {
    state uint8
    size  uint32
}

func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) {
    var idxFile *bytes.Reader
    err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
        copyFileClient, err := vs.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
            VolumeId:           v,
            Ext:                ".idx",
            CompactionRevision: math.MaxUint32,
            StopOffset:         math.MaxInt64,
            Collection:         *volumeCollection,
        })
        if err != nil {
            return err
        }
        var buf bytes.Buffer
        for {
            resp, err := copyFileClient.Recv()
            if errors.Is(err, io.EOF) {
                break
            }
            if err != nil {
                return err
            }
            buf.Write(resp.FileContent)
        }
        idxFile = bytes.NewReader(buf.Bytes())
        return nil
    })
    if err != nil {
        return nil, 0, err
    }

    var maxOffset int64
    files := map[types.NeedleId]needleState{}
    err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
        if offset.IsZero() || size == types.TombstoneFileSize {
            files[key] = needleState{
                state: stateDeleted,
                size:  size,
            }
        } else {
            files[key] = needleState{
                state: statePresent,
                size:  size,
            }
        }
        if actual := offset.ToAcutalOffset(); actual > maxOffset {
            maxOffset = actual
        }
        return nil
    })
    if err != nil {
        return nil, 0, err
    }
    return files, maxOffset, nil
}

func getNeedleFileId(v uint32, nid types.NeedleId, addr string) (string, error) {
    var id string
    err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
        resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
            VolumeId: v,
            NeedleId: uint64(nid),
        })
        if err != nil {
            return err
        }
        id = needle.NewFileId(needle.VolumeId(v), resp.NeedleId, resp.Cookie).String()
        return nil
    })
    return id, err
}
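The heart of the tool is the per-needle comparison in main(): for each needle known to the reference server, every other server's copy is classified as missing, deleted, notDeleted, or wrongSize, and any mismatch makes the tool exit with status 1. The helper below is a hypothetical sketch (not part of this commit) that restates that decision table in isolation; it assumes the needleState type and the stateDeleted/statePresent constants defined in diff_volume_servers.go above.

// classify mirrors the comparison logic in main() above. It is an
// illustrative helper only, not part of this commit.
// ref is the reference server's record, other is another server's record,
// and found reports whether that server's .idx contained the needle at all.
func classify(ref, other needleState, found bool) (msg string, differs bool) {
    switch {
    case !found:
        if ref.state == stateDeleted {
            // deleted on the reference and absent elsewhere: not a difference
            return "", false
        }
        return "missing", true
    case other.state != ref.state:
        if ref.state == stateDeleted {
            return "notDeleted", true
        }
        return "deleted", true
    case other.size != ref.size:
        return "wrongSize", true
    default:
        return "", false
    }
}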
@@ -86,6 +86,8 @@ service VolumeServer {
    rpc Query (QueryRequest) returns (stream QueriedStripe) {
    }

    rpc VolumeNeedleStatus (VolumeNeedleStatusRequest) returns (VolumeNeedleStatusResponse) {
    }
}

//////////////////////////////////////////////////

@@ -463,3 +465,16 @@ message QueryRequest {
message QueriedStripe {
    bytes records = 1;
}

message VolumeNeedleStatusRequest {
    uint32 volume_id = 1;
    uint64 needle_id = 2;
}
message VolumeNeedleStatusResponse {
    uint64 needle_id = 1;
    uint32 cookie = 2;
    uint32 size = 3;
    uint64 last_modified = 4;
    uint32 crc = 5;
    string ttl = 6;
}
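The two messages above back the new VolumeNeedleStatus RPC. As a minimal client-side sketch, calling it looks like the snippet below, mirroring how getNeedleFileId in the diff tool uses it; lookupNeedle is a hypothetical helper, and the server address, volume id, and needle id are placeholder values supplied by the caller rather than anything defined by this change.

package main

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    "google.golang.org/grpc"
)

// lookupNeedle asks a single volume server for the status of one needle
// via the new VolumeNeedleStatus RPC and prints the returned fields.
func lookupNeedle(addr string, grpcDialOption grpc.DialOption, volumeId uint32, needleId uint64) error {
    return operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
        resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
            VolumeId: volumeId,
            NeedleId: needleId,
        })
        if err != nil {
            return err
        }
        fmt.Println("cookie", resp.Cookie, "size", resp.Size, "lastModified", resp.LastModified, "crc", resp.Crc, "ttl", resp.Ttl)
        return nil
    })
}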
File diff suppressed because it is too large
@@ -10,6 +10,7 @@ import (
    "github.com/chrislusf/seaweedfs/weed/stats"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/super_block"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
)

func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) {

@@ -166,3 +167,44 @@ func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_serv
    return resp, nil

}

func (vs *VolumeServer) VolumeNeedleStatus(ctx context.Context, req *volume_server_pb.VolumeNeedleStatusRequest) (*volume_server_pb.VolumeNeedleStatusResponse, error) {

    resp := &volume_server_pb.VolumeNeedleStatusResponse{}

    volumeId := needle.VolumeId(req.VolumeId)

    n := &needle.Needle{
        Id: types.NeedleId(req.NeedleId),
    }

    var count int
    var err error
    hasVolume := vs.store.HasVolume(volumeId)
    if !hasVolume {
        _, hasEcVolume := vs.store.FindEcVolume(volumeId)
        if !hasEcVolume {
            return nil, fmt.Errorf("volume not found %d", req.VolumeId)
        }
        count, err = vs.store.ReadEcShardNeedle(volumeId, n)
    } else {
        count, err = vs.store.ReadVolumeNeedle(volumeId, n)
    }
    if err != nil {
        return nil, err
    }
    if count < 0 {
        return nil, fmt.Errorf("needle not found %d", n.Id)
    }

    resp.NeedleId = uint64(n.Id)
    resp.Cookie = uint32(n.Cookie)
    resp.Size = n.Size
    resp.LastModified = n.LastModified
    resp.Crc = n.Checksum.Value()
    if n.HasTtl() {
        resp.Ttl = n.Ttl.String()
    }
    return resp, nil

}
@@ -11,11 +11,17 @@ import (

// walks through the index file, calls fn function with each key, offset, size
// stops with the error returned by the fn function
-func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
+func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
    var readerOffset int64
    bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead)
    count, e := r.ReadAt(bytes, readerOffset)
-   glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+   var name string
+   if f, ok := r.(*os.File); ok {
+       name = f.Name()
+   } else {
+       name = "memory"
+   }
+   glog.V(3).Infoln("file", name, "readerOffset", readerOffset, "count", count, "e", e)
    readerOffset += int64(count)
    var (
        key types.NeedleId

@@ -35,7 +41,7 @@ func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset,
            return nil
        }
        count, e = r.ReadAt(bytes, readerOffset)
-       glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+       glog.V(3).Infoln("file", name, "readerOffset", readerOffset, "count", count, "e", e)
        readerOffset += int64(count)
    }
    return e
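Relaxing WalkIndexFile from *os.File to io.ReaderAt is what lets the new diff tool walk a .idx file it has only downloaded into memory (the bytes streamed by CopyFile in getVolumeFiles above). The snippet below is a minimal sketch of that usage; countLiveNeedles is a hypothetical helper, not part of this commit, and idxBytes is assumed to hold raw index content.

package main

import (
    "bytes"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/storage/idx"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
)

// countLiveNeedles walks an in-memory .idx image through the relaxed
// io.ReaderAt signature and counts entries that are neither unwritten
// nor tombstoned.
func countLiveNeedles(idxBytes []byte) (int, error) {
    live := 0
    err := idx.WalkIndexFile(bytes.NewReader(idxBytes), func(key types.NeedleId, offset types.Offset, size uint32) error {
        if !offset.IsZero() && size != types.TombstoneFileSize {
            live++
        }
        return nil
    })
    return live, err
}

func main() {
    // idxBytes would normally come from a CopyFile stream; empty here as a placeholder.
    var idxBytes []byte
    n, err := countLiveNeedles(idxBytes)
    fmt.Println(n, err)
}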