package weed_server

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/chrislusf/seaweedfs/weed/util"
)

/*

Steps to apply erasure coding to .dat .idx files

0. ensure the volume is readonly
1. client calls VolumeEcShardsGenerate to generate the .ecx and .ec00 ~ .ec13 files
2. client asks the master for possible servers to hold the ec files, at least 4 servers
3. client calls VolumeEcShardsCopy on the above target servers to copy the ec files from the source server
4. target servers report the new ec files to the master
5. master stores vid -> [14]*DataNode
6. client checks with the master; if all 14 shards are ready, it deletes the original .dat and .idx files

*/
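
// A minimal client-side sketch of steps 1 and 3 above (hypothetical names: conn and
// targetConn are assumed established grpc.ClientConn values for the source and target
// volume servers; volumeId, collection, shardIds, and sourceAddress are placeholders):
//
//	sourceClient := volume_server_pb.NewVolumeServerClient(conn)
//	_, err := sourceClient.VolumeEcShardsGenerate(context.Background(),
//		&volume_server_pb.VolumeEcShardsGenerateRequest{
//			VolumeId:   volumeId,
//			Collection: collection,
//		})
//
//	// step 3, repeated on each target server chosen in step 2:
//	targetClient := volume_server_pb.NewVolumeServerClient(targetConn)
//	_, err = targetClient.VolumeEcShardsCopy(context.Background(),
//		&volume_server_pb.VolumeEcShardsCopyRequest{
//			VolumeId:       volumeId,
//			Collection:     collection,
//			ShardIds:       shardIds,
//			CopyEcxFile:    true,
//			SourceDataNode: sourceAddress,
//		})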

// VolumeEcShardsGenerate generates the .ecx, .ec00 ~ .ec13, and .vif files
func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) {

	v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
	if v == nil {
		return nil, fmt.Errorf("volume %d not found", req.VolumeId)
	}
	baseFileName := v.FileName()

	if v.Collection != req.Collection {
		return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
	}

	// write .ecx file
	if err := erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx"); err != nil {
		return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", baseFileName, err)
	}

	// write .ec00 ~ .ec13 files
	if err := erasure_coding.WriteEcFiles(baseFileName); err != nil {
		return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err)
	}

	// write .vif file
	if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil {
		return nil, fmt.Errorf("SaveVolumeInfo %s: %v", baseFileName, err)
	}

	return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil
}

// VolumeEcShardsRebuild generates any missing .ec00 ~ .ec13 files
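//
// A minimal sketch of invoking the rebuild over gRPC (hypothetical names: conn is
// an assumed established grpc.ClientConn to a server that still holds the .ecx file):
//
//	resp, err := volume_server_pb.NewVolumeServerClient(conn).VolumeEcShardsRebuild(
//		context.Background(),
//		&volume_server_pb.VolumeEcShardsRebuildRequest{
//			VolumeId:   volumeId,
//			Collection: collection,
//		})
//	// on success, resp.RebuiltShardIds lists the shards that were regenerated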
func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) {

	baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))

	var rebuiltShardIds []uint32

	for _, location := range vs.store.Locations {
		if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) {
			// write .ec00 ~ .ec13 files
			baseFileName = path.Join(location.Directory, baseFileName)
			if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil {
				return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err)
			} else {
				rebuiltShardIds = generatedShardIds
			}

			if err := erasure_coding.RebuildEcxFile(baseFileName); err != nil {
				return nil, fmt.Errorf("RebuildEcxFile %s: %v", baseFileName, err)
			}

			break
		}
	}

	return &volume_server_pb.VolumeEcShardsRebuildResponse{
		RebuiltShardIds: rebuiltShardIds,
	}, nil
}

// VolumeEcShardsCopy copies the .ecx file and some ec data slices, and optionally
// the .ecj and .vif files, from the source data node
func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) {

	location := vs.store.FindFreeLocation()
	if location == nil {
		return nil, fmt.Errorf("no space left")
	}

	baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId))

	err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {

		// copy ec data slices
		for _, shardId := range req.ShardIds {
			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
				return err
			}
		}

		if req.CopyEcxFile {

			// copy ecx file
			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil {
				return err
			}
			return nil
		}

		if req.CopyEcjFile {
			// copy ecj file
			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil {
				return err
			}
		}

		if req.CopyVifFile {
			// copy vif file
			if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil {
				return err
			}
		}

		return nil
	})
	if err != nil {
		return nil, fmt.Errorf("VolumeEcShardsCopy volume %d: %v", req.VolumeId, err)
	}

	return &volume_server_pb.VolumeEcShardsCopyResponse{}, nil
}

// VolumeEcShardsDelete deletes the local .ecx file and some ec data slices if they are no longer needed;
// the shards should not be mounted before calling this.
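//
// A minimal sketch of the expected call order (hypothetical names: client is an
// assumed volume_server_pb.VolumeServerClient for the server holding the shards):
//
//	_, _ = client.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
//		VolumeId: volumeId,
//		ShardIds: shardIds,
//	})
//	_, err := client.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
//		VolumeId:   volumeId,
//		Collection: collection,
//		ShardIds:   shardIds,
//	})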
func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) {

	baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId))

	glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds)

	found := false
	for _, location := range vs.store.Locations {
		if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) {
			found = true
			baseFilename = path.Join(location.Directory, baseFilename)
			for _, shardId := range req.ShardIds {
				os.Remove(baseFilename + erasure_coding.ToExt(int(shardId)))
			}
			break
		}
	}

	if !found {
		return nil, nil
	}

	// check whether to also delete the .ecx and .ecj files
	hasEcxFile := false
	hasIdxFile := false
	existingShardCount := 0

	bName := filepath.Base(baseFilename)
	for _, location := range vs.store.Locations {
		fileInfos, err := ioutil.ReadDir(location.Directory)
		if err != nil {
			continue
		}
		for _, fileInfo := range fileInfos {
			if fileInfo.Name() == bName+".ecx" || fileInfo.Name() == bName+".ecj" {
				hasEcxFile = true
				continue
			}
			if fileInfo.Name() == bName+".idx" {
				hasIdxFile = true
				continue
			}
			if strings.HasPrefix(fileInfo.Name(), bName+".ec") {
				existingShardCount++
			}
		}
	}

	if hasEcxFile && existingShardCount == 0 {
		if err := os.Remove(baseFilename + ".ecx"); err != nil {
			return nil, err
		}
		os.Remove(baseFilename + ".ecj")
	}
	if !hasIdxFile {
		// .vif is used for both ec volumes and normal volumes
		os.Remove(baseFilename + ".vif")
	}

	return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil
}
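
// VolumeEcShardsMount mounts the requested ec shards so this volume server can serve reads from them.
//
// A minimal sketch of mounting shards over gRPC (hypothetical names: client is an
// assumed volume_server_pb.VolumeServerClient for the target server):
//
//	_, err := client.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
//		VolumeId:   volumeId,
//		Collection: collection,
//		ShardIds:   shardIds,
//	})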
func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) {

	for _, shardId := range req.ShardIds {
		err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))

		if err != nil {
			glog.Errorf("ec shard mount %v: %v", req, err)
			return nil, fmt.Errorf("mount %d.%d: %v", req.VolumeId, shardId, err)
		}
		glog.V(2).Infof("ec shard mount %v", req)
	}

	return &volume_server_pb.VolumeEcShardsMountResponse{}, nil
}

// VolumeEcShardsUnmount unmounts the requested ec shards so they stop serving reads.
func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) {

	for _, shardId := range req.ShardIds {
		err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId))

		if err != nil {
			glog.Errorf("ec shard unmount %v: %v", req, err)
			return nil, fmt.Errorf("unmount %d.%d: %v", req.VolumeId, shardId, err)
		}
		glog.V(2).Infof("ec shard unmount %v", req)
	}

	return &volume_server_pb.VolumeEcShardsUnmountResponse{}, nil
}
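
// VolumeEcShardRead streams a byte range of one ec shard back to the client, in
// chunks of at most BufferSizeLimit bytes; if FileKey refers to a tombstoned
// needle, a single response with IsDeleted set is sent instead.
//
// A minimal sketch of consuming the stream on the client side (hypothetical
// names: client is an assumed volume_server_pb.VolumeServerClient):
//
//	stream, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
//		VolumeId: volumeId, ShardId: shardId, Offset: offset, Size: size, FileKey: fileKey,
//	})
//	for {
//		resp, recvErr := stream.Recv()
//		if recvErr == io.EOF {
//			break
//		}
//		if recvErr != nil {
//			return recvErr
//		}
//		if resp.IsDeleted {
//			break // the needle is tombstoned
//		}
//		data = append(data, resp.Data...)
//	}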
func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardReadRequest, stream volume_server_pb.VolumeServer_VolumeEcShardReadServer) error {

	ecVolume, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId))
	if !found {
		return fmt.Errorf("VolumeEcShardRead: ec volume %d not found", req.VolumeId)
	}
	ecShard, found := ecVolume.FindEcVolumeShard(erasure_coding.ShardId(req.ShardId))
	if !found {
		return fmt.Errorf("ec shard %d.%d not found", req.VolumeId, req.ShardId)
	}

	if req.FileKey != 0 {
		_, size, _ := ecVolume.FindNeedleFromEcx(types.Uint64ToNeedleId(req.FileKey))
		if size == types.TombstoneFileSize {
			return stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
				IsDeleted: true,
			})
		}
	}

	bufSize := req.Size
	if bufSize > BufferSizeLimit {
		bufSize = BufferSizeLimit
	}
	buffer := make([]byte, bufSize)

	startOffset, bytesToRead := req.Offset, req.Size

	for bytesToRead > 0 {
		// min of bytesToRead and bufSize
		bufferSize := bufSize
		if bufferSize > bytesToRead {
			bufferSize = bytesToRead
		}
		bytesread, err := ecShard.ReadAt(buffer[0:bufferSize], startOffset)

		// println("read", ecShard.FileName(), "startOffset", startOffset, bytesread, "bytes, with target", bufferSize)
		if bytesread > 0 {

			if int64(bytesread) > bytesToRead {
				bytesread = int(bytesToRead)
			}
			err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
				Data: buffer[:bytesread],
			})
			if err != nil {
				// println("sending", bytesread, "bytes err", err.Error())
				return err
			}

			startOffset += int64(bytesread)
			bytesToRead -= int64(bytesread)

		}

		if err != nil {
			if err != io.EOF {
				return err
			}
			return nil
		}

	}

	return nil

}
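
// VolumeEcBlobDelete marks one blob as deleted in the local ec volume's index, if this server hosts the volume.
//
// A minimal sketch of issuing the delete over gRPC (hypothetical names: client is
// an assumed volume_server_pb.VolumeServerClient):
//
//	_, err := client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
//		VolumeId: volumeId,
//		FileKey:  fileKey,
//		Version:  version,
//	})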
func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) {

	resp := &volume_server_pb.VolumeEcBlobDeleteResponse{}

	for _, location := range vs.store.Locations {
		if localEcVolume, found := location.FindEcVolume(needle.VolumeId(req.VolumeId)); found {

			_, size, _, err := localEcVolume.LocateEcShardNeedle(types.NeedleId(req.FileKey), needle.Version(req.Version))
			if err != nil {
				return nil, fmt.Errorf("locate in local ec volume: %v", err)
			}
			if size == types.TombstoneFileSize {
				return resp, nil
			}

			err = localEcVolume.DeleteNeedleFromEcx(types.NeedleId(req.FileKey))
			if err != nil {
				return nil, err
			}

			break
		}
	}

	return resp, nil
}

// VolumeEcShardsToVolume generates the .idx and .dat files from the .ecx, .ecj and .ec00 ~ .ec13 files
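//
// A minimal sketch of triggering the conversion (hypothetical names: client is an
// assumed volume_server_pb.VolumeServerClient for the server holding the shards):
//
//	_, err := client.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
//		VolumeId:   volumeId,
//		Collection: collection,
//	})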
func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) {

	v, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId))
	if !found {
		return nil, fmt.Errorf("ec volume %d not found", req.VolumeId)
	}
	baseFileName := v.FileName()

	if v.Collection != req.Collection {
		return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection)
	}

	// calculate .dat file size
	datFileSize, err := erasure_coding.FindDatFileSize(baseFileName)
	if err != nil {
		return nil, fmt.Errorf("FindDatFileSize %s: %v", baseFileName, err)
	}

	// write .dat file from .ec00 ~ .ec09 files
	if err := erasure_coding.WriteDatFile(baseFileName, datFileSize); err != nil {
		return nil, fmt.Errorf("WriteDatFile %s: %v", baseFileName, err)
	}

	// write .idx file from .ecx and .ecj files
	if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil {
		return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", baseFileName, err)
	}

	return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil
}