package weed_server

import (
	"context"
	"fmt"
	"io"
	"math"
	"os"
	"path"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)

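// BufferSizeLimit caps the size of each chunk streamed in a CopyFile response.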
const BufferSizeLimit = 1024 * 1024 * 2

// VolumeCopy copies the .idx, .dat, and .vif files from the source volume server, then mounts the volume.
func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) {

	v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
	if v != nil {
		return nil, fmt.Errorf("volume %d already exists", req.VolumeId)
	}

	location := vs.store.FindFreeLocation()
	if location == nil {
		return nil, fmt.Errorf("no space left")
	}

	// the master will not start compaction for read-only volumes, so it is safe to just copy the files directly
	// copy the .dat and .idx files:
	//   read the .idx and .dat file sizes and timestamps
	//   send the .idx file
	//   send the .dat file
	//   confirm the sizes and timestamps
	var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse
	var volumeFileName, idxFileName, datFileName string
	err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
		var err error
		volFileInfoResp, err = client.ReadVolumeFileStatus(ctx,
			&volume_server_pb.ReadVolumeFileStatusRequest{
				VolumeId: req.VolumeId,
			})
		if err != nil {
			return fmt.Errorf("read volume file status failed, %v", err)
		}

		volumeFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))

		// println("source:", volFileInfoResp.String())

		// copy the .idx file
		if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil {
			return err
		}

		// copy the .dat file
		if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil {
			return err
		}

		// copy the .vif volume info file
		if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil {
			return err
		}

		return nil
	})

	idxFileName = volumeFileName + ".idx"
	datFileName = volumeFileName + ".dat"

	if err != nil && volumeFileName != "" {
		os.Remove(idxFileName)
		os.Remove(datFileName)
		os.Remove(volumeFileName + ".vif")
		return nil, err
	}

	if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16
		return nil, err
	}

	// mount the volume
	err = vs.store.MountVolume(needle.VolumeId(req.VolumeId))
	if err != nil {
		return nil, fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err)
	}

	return &volume_server_pb.VolumeCopyResponse{
		LastAppendAtNs: volFileInfoResp.DatFileTimestampSeconds * uint64(time.Second),
	}, err
}

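// doCopyFile streams one file of the volume (selected by ext) from the source volume server
// and writes it to baseFileName+ext locally, throttled by the volume server's compaction byte rate.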
func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32,
	compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool, ignoreSourceFileNotFound bool) error {

	copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId: vid,
		Ext: ext,
		CompactionRevision: compactRevision,
		StopOffset: stopOffset,
		Collection: collection,
		IsEcVolume: isEcVolume,
		IgnoreSourceFileNotFound: ignoreSourceFileNotFound,
	})
	if err != nil {
		return fmt.Errorf("failed to start copying volume %d %s file: %v", vid, ext, err)
	}

	err = writeToFile(copyFileClient, baseFileName+ext, util.NewWriteThrottler(vs.compactionBytePerSecond), isAppend)
	if err != nil {
		return fmt.Errorf("failed to copy %s file: %v", baseFileName+ext, err)
	}

	return nil
}

// checkCopyFiles only verifies that the copied .idx and .dat file sizes match the source.
// TODO: maybe also check the received count and deleted count of the volume.
func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) error {
	stat, err := os.Stat(idxFileName)
	if err != nil {
		return fmt.Errorf("stat idx file %s failed, %v", idxFileName, err)
	}
	if originFileInf.IdxFileSize != uint64(stat.Size()) {
		return fmt.Errorf("idx file %s size [%v] is not the same as the origin file size [%v]",
			idxFileName, stat.Size(), originFileInf.IdxFileSize)
	}

	stat, err = os.Stat(datFileName)
	if err != nil {
		return fmt.Errorf("get dat file info failed, %v", err)
	}
	if originFileInf.DatFileSize != uint64(stat.Size()) {
		return fmt.Errorf("the dat file size [%v] is not the same as the origin file size [%v]",
			stat.Size(), originFileInf.DatFileSize)
	}
	return nil
}

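// writeToFile drains the CopyFile stream into the named local file, pacing writes with the given WriteThrottler.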
func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool) error {
	glog.V(4).Infof("writing to %s", fileName)
	flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
	if isAppend {
		flags = os.O_WRONLY | os.O_CREATE
	}
	dst, err := os.OpenFile(fileName, flags, 0644)
	if err != nil {
		return err
	}
	defer dst.Close()

	for {
		resp, receiveErr := client.Recv()
		if receiveErr == io.EOF {
			break
		}
		if receiveErr != nil {
			return fmt.Errorf("receiving %s: %v", fileName, receiveErr)
		}
		if _, writeErr := dst.Write(resp.FileContent); writeErr != nil {
			return fmt.Errorf("writing %s: %v", fileName, writeErr)
		}
		wt.MaybeSlowdown(int64(len(resp.FileContent)))
	}
	return nil
}

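// ReadVolumeFileStatus reports the .dat and .idx file sizes, timestamps, file count,
// compaction revision, and collection of a locally stored volume.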
func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_server_pb.ReadVolumeFileStatusRequest) (*volume_server_pb.ReadVolumeFileStatusResponse, error) {
	resp := &volume_server_pb.ReadVolumeFileStatusResponse{}
	v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
	if v == nil {
		return nil, fmt.Errorf("not found volume id %d", req.VolumeId)
	}

	resp.VolumeId = req.VolumeId
	datSize, idxSize, modTime := v.FileStat()
	resp.DatFileSize = datSize
	resp.IdxFileSize = idxSize
	resp.DatFileTimestampSeconds = uint64(modTime.Unix())
	resp.IdxFileTimestampSeconds = uint64(modTime.Unix())
	resp.FileCount = v.FileCount()
	resp.CompactionRevision = uint32(v.CompactionRevision)
	resp.Collection = v.Collection
	return resp, nil
}

// CopyFile lets a client pull a volume-related file from this source server.
// If req.CompactionRevision != math.MaxUint32, it ensures the compaction revision is as expected.
// Copying stops at req.StopOffset; set it to math.MaxUint64 in order to read all data.
func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) error {

	var fileName string
	if !req.IsEcVolume {
		v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
		if v == nil {
			return fmt.Errorf("not found volume id %d", req.VolumeId)
		}

		if uint32(v.CompactionRevision) != req.CompactionRevision && req.CompactionRevision != math.MaxUint32 {
			return fmt.Errorf("volume %d is compacted", req.VolumeId)
		}
		fileName = v.FileName() + req.Ext
	} else {
		baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext
		for _, location := range vs.store.Locations {
			tName := path.Join(location.Directory, baseFileName)
			if util.FileExists(tName) {
				fileName = tName
			}
		}
		if fileName == "" {
			if req.IgnoreSourceFileNotFound {
				return nil
			}
			return fmt.Errorf("CopyFile not found ec volume id %d", req.VolumeId)
		}
	}

	bytesToRead := int64(req.StopOffset)

	file, err := os.Open(fileName)
	if err != nil {
		if req.IgnoreSourceFileNotFound && os.IsNotExist(err) {
			return nil
		}
		return err
	}
	defer file.Close()

	buffer := make([]byte, BufferSizeLimit)

	for bytesToRead > 0 {
		bytesread, err := file.Read(buffer)

		// println(fileName, "read", bytesread, "bytes, with target", bytesToRead)

		if err != nil {
			if err != io.EOF {
				return err
			}
			// println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error())
			break
		}

		if int64(bytesread) > bytesToRead {
			bytesread = int(bytesToRead)
		}
		err = stream.Send(&volume_server_pb.CopyFileResponse{
			FileContent: buffer[:bytesread],
		})
		if err != nil {
			// println("sending", bytesread, "bytes err", err.Error())
			return err
		}

		bytesToRead -= int64(bytesread)
	}

	return nil
}