package storage

import (
	"context"
	"fmt"
	"io"
	"os"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/idx"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
)

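// GetVolumeSyncStatus reports the state a replica needs in order to follow this
// volume: the current .dat tail offset, .idx file size, compaction revision, TTL,
// and replica placement, all read under a shared data-file lock.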
func (v *Volume) GetVolumeSyncStatus() *volume_server_pb.VolumeSyncStatusResponse {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()

	var syncStatus = &volume_server_pb.VolumeSyncStatusResponse{}
	if datSize, _, err := v.DataBackend.GetStat(); err == nil {
		syncStatus.TailOffset = uint64(datSize)
	}
	syncStatus.Collection = v.Collection
	syncStatus.IdxFileSize = v.nm.IndexFileSize()
	syncStatus.CompactRevision = uint32(v.SuperBlock.CompactionRevision)
	syncStatus.Ttl = v.SuperBlock.Ttl.String()
	syncStatus.Replication = v.SuperBlock.ReplicaPlacement.String()
	return syncStatus
}

// The slave volume syncs with a master volume in 2 steps:
// 1. The slave checks the master side to find the subscription checkpoint
//    to set up the replication.
// 2. The slave receives the updates from the master.

/*
Assume the slave volume needs to follow the master volume.

The master volume could be compacted, and could be many files ahead of
the slave volume.

Step 0: // implemented in command/backup.go, to avoid dat file size overflow.
0.1 If the slave compact version is less than the master's, do a local compaction, and set
    the local compact version to the same as the master's.
0.2 If the slave size is still bigger than the master's, discard the local copy and do a full copy.

Step 1:
The slave volume asks the master, passing its last modification time t.
The master does a binary search in the volume (using .idx as an array, and checking the appendAtNs in the .dat file)
to find the first entry with appendAtNs > t.

Step 2:
The master sends content bytes to the slave. The bytes are not chunked by needle.

Step 3:
The slave generates the needle map for the new bytes. (This may be optimized to incrementally
update the needle map when receiving new .dat bytes. But that seems unnecessary for now.)

*/

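// IncrementalBackup copies from the master volume server every .dat byte appended
// after this (slave) volume's last entry, appends the bytes locally, and then
// rebuilds the needle map for the newly copied range.
//
// A minimal sketch of how a caller might drive it (hypothetical names; the real
// entry point is command/backup.go):
//
//	grpcDialOption := grpc.WithInsecure()
//	masterVolumeServer := pb.ServerAddress("localhost:8080") // assumed address
//	if err := v.IncrementalBackup(masterVolumeServer, grpcDialOption); err != nil {
//		// handle or retry the incremental copy
//	}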
func (v *Volume) IncrementalBackup(volumeServer pb.ServerAddress, grpcDialOption grpc.DialOption) error {

	startFromOffset, _, _ := v.FileStat()
	appendAtNs, err := v.findLastAppendAtNs()
	if err != nil {
		return err
	}

	writeOffset := int64(startFromOffset)

	err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {

		stream, err := client.VolumeIncrementalCopy(context.Background(), &volume_server_pb.VolumeIncrementalCopyRequest{
			VolumeId: uint32(v.Id),
			SinceNs:  appendAtNs,
		})
		if err != nil {
			return err
		}

		for {
			resp, recvErr := stream.Recv()
			if recvErr != nil {
				if recvErr == io.EOF {
					break
				} else {
					return recvErr
				}
			}

			n, writeErr := v.DataBackend.WriteAt(resp.FileContent, writeOffset)
			if writeErr != nil {
				return writeErr
			}
			writeOffset += int64(n)
		}

		return nil

	})

	if err != nil {
		return err
	}

	// add to needle map
	return ScanVolumeFileFrom(v.Version(), v.DataBackend, int64(startFromOffset), &VolumeFileScanner4GenIdx{v: v})
}

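// findLastAppendAtNs returns the appendAtNs timestamp of the needle referenced by
// the last .idx entry, or 0 if there is no usable last entry.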
func (v *Volume) findLastAppendAtNs() (uint64, error) {
	offset, err := v.locateLastAppendEntry()
	if err != nil {
		return 0, err
	}
	if offset.IsZero() {
		return 0, nil
	}
	return v.readAppendAtNs(offset)
}

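// locateLastAppendEntry reads the last NeedleMapEntrySize-sized record of the .idx
// file and returns the .dat offset it points to (a zero Offset for an empty index).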
func (v *Volume) locateLastAppendEntry() (Offset, error) {
	indexFile, e := os.OpenFile(v.FileName(".idx"), os.O_RDONLY, 0644)
	if e != nil {
		return Offset{}, fmt.Errorf("cannot read %s: %v", v.FileName(".idx"), e)
	}
	defer indexFile.Close()

	fi, err := indexFile.Stat()
	if err != nil {
		return Offset{}, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err)
	}
	fileSize := fi.Size()
	if fileSize%NeedleMapEntrySize != 0 {
		return Offset{}, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize)
	}
	if fileSize == 0 {
		return Offset{}, nil
	}

	bytes := make([]byte, NeedleMapEntrySize)
	n, e := indexFile.ReadAt(bytes, fileSize-NeedleMapEntrySize)
	if n != NeedleMapEntrySize {
		return Offset{}, fmt.Errorf("file %s read error: %v", indexFile.Name(), e)
	}
	_, offset, _ := idx.IdxFileEntry(bytes)

	return offset, nil
}

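// readAppendAtNs reads the needle header and body at the given .dat offset and
// returns that needle's AppendAtNs timestamp.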
func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) {
	n, _, bodyLength, err := needle.ReadNeedleHeader(v.DataBackend, v.SuperBlock.Version, offset.ToActualOffset())
	if err != nil {
		return 0, fmt.Errorf("ReadNeedleHeader %s [%d,%d): %v", v.DataBackend.Name(), offset.ToActualOffset(), offset.ToActualOffset()+NeedleHeaderSize, err)
	}
	_, err = n.ReadNeedleBody(v.DataBackend, v.SuperBlock.Version, offset.ToActualOffset()+NeedleHeaderSize, bodyLength)
	if err != nil {
		return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToActualOffset(), bodyLength, err)
	}
	return n.AppendAtNs, nil
}

// BinarySearchByAppendAtNs runs on the master (server) side. It binary-searches the
// .idx entries, reading each candidate needle's appendAtNs from the .dat file, and
// returns the offset of the first entry appended strictly after sinceNs.
// isLast is true when every existing entry was appended at or before sinceNs.
func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast bool, err error) {

	fileSize := int64(v.IndexFileSize())
	if fileSize%NeedleMapEntrySize != 0 {
		err = fmt.Errorf("unexpected file %s.idx size: %d", v.IndexFileName(), fileSize)
		return
	}

	entryCount := fileSize / NeedleMapEntrySize
	l := int64(0)
	h := entryCount

	for l < h {

		m := (l + h) / 2

		if m == entryCount {
			return Offset{}, true, nil
		}

		// read the appendAtNs for entry m
		offset, err = v.readOffsetFromIndex(m)
		if err != nil {
			return
		}

		mNs, nsReadErr := v.readAppendAtNs(offset)
		if nsReadErr != nil {
			err = nsReadErr
			return
		}

		// move the boundary
		if mNs <= sinceNs {
			l = m + 1
		} else {
			h = m
		}

	}

	if l == entryCount {
		return Offset{}, true, nil
	}

	offset, err = v.readOffsetFromIndex(l)

	return offset, false, err
}

// readOffsetFromIndex reads the m-th NeedleMapEntrySize-sized index entry and
// returns the .dat offset it stores.
func (v *Volume) readOffsetFromIndex(m int64) (Offset, error) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()
	if v.nm == nil {
		return Offset{}, io.EOF
	}
	_, offset, _, err := v.nm.ReadIndexEntry(m)
	return offset, err
}

// VolumeFileScanner4GenIdx scans a volume's .dat file to regenerate its .idx entries.
type VolumeFileScanner4GenIdx struct {
	v *Volume
}

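// VisitSuperBlock is a no-op: the super block needs no changes when regenerating the index.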
func (scanner *VolumeFileScanner4GenIdx) VisitSuperBlock(superBlock super_block.SuperBlock) error {
	return nil
}

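// ReadNeedleBody returns false because the needle header alone carries the id and
// size needed to rebuild the index.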
func (scanner *VolumeFileScanner4GenIdx) ReadNeedleBody() bool {
	return false
}

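// VisitNeedle records each live needle in the needle map, and records a deletion
// for needles whose size marks them as removed.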
func (scanner *VolumeFileScanner4GenIdx) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
	if n.Size > 0 && n.Size.IsValid() {
		return scanner.v.nm.Put(n.Id, ToOffset(offset), n.Size)
	}
	return scanner.v.nm.Delete(n.Id, ToOffset(offset))
}