// seaweedfs/weed/server/volume_grpc_copy_incremental.go
package weed_server

import (
	"context"
	"fmt"
	"io"

	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)
2019-04-18 05:04:49 +00:00
func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrementalCopyRequest, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error {
2019-03-25 16:16:12 +00:00
2019-04-19 04:43:36 +00:00
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
2019-03-25 16:16:12 +00:00
if v == nil {
return fmt.Errorf("not found volume id %d", req.VolumeId)
}
2019-04-19 07:39:34 +00:00
stopOffset, _, _ := v.FileStat()
foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(req.SinceNs)
2019-03-25 16:16:12 +00:00
if err != nil {
return fmt.Errorf("fail to locate by appendAtNs %d: %s", req.SinceNs, err)
2019-03-25 16:16:12 +00:00
}
if isLastOne {
return nil
}
startOffset := foundOffset.ToActualOffset()
2019-03-25 16:16:12 +00:00
buf := make([]byte, 1024*1024*2)
return sendFileContent(v.DataBackend, buf, startOffset, int64(stopOffset), stream)
2019-03-25 16:16:12 +00:00
}
2019-03-26 06:18:40 +00:00
func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) {
2019-04-19 04:43:36 +00:00
v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
2019-03-26 06:18:40 +00:00
if v == nil {
return nil, fmt.Errorf("not found volume id %d", req.VolumeId)
}
resp := v.GetVolumeSyncStatus()
return resp, nil
}
func sendFileContent(datBackend backend.BackendStorageFile, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error {
2019-03-25 16:16:12 +00:00
var blockSizeLimit = int64(len(buf))
for i := int64(0); i < stopOffset-startOffset; i += blockSizeLimit {
n, readErr := datBackend.ReadAt(buf, startOffset+i)
2019-03-25 16:16:12 +00:00
if readErr == nil || readErr == io.EOF {
2019-04-18 05:04:49 +00:00
resp := &volume_server_pb.VolumeIncrementalCopyResponse{}
resp.FileContent = buf[:int64(n)]
2019-03-25 16:16:12 +00:00
sendErr := stream.Send(resp)
if sendErr != nil {
return sendErr
}
} else {
return readErr
}
}
return nil
}