erasure coding: fix cases where there are no .ecj files

Chris Lu 2020-06-18 09:52:35 -07:00
parent ce79ec0bd4
commit ae1994cbc1
4 changed files with 16 additions and 5 deletions

weed/server/volume_grpc_erasure_coding.go

@@ -201,9 +201,7 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
 		if err := os.Remove(baseFilename + ".ecx"); err != nil {
 			return nil, err
 		}
-		if err := os.Remove(baseFilename + ".ecj"); err != nil {
-			return nil, err
-		}
+		os.Remove(baseFilename + ".ecj")
 	}
 	if !hasIdxFile {
 		// .vif is used for ec volumes and normal volumes

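Note: the committed fix simply discards the error from os.Remove, so failures other than a missing file (permissions, I/O) are now silently ignored as well. A stricter alternative, sketched below with a hypothetical removeIfPresent helper that is not part of this commit, would swallow only the missing-file case:

package main

import (
	"fmt"
	"os"
)

// removeIfPresent deletes name but treats "file does not exist" as success;
// per this commit's title, a .ecj deletion journal may legitimately be absent.
func removeIfPresent(name string) error {
	if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
		return err // a real failure: permission denied, I/O error, ...
	}
	return nil
}

func main() {
	if err := removeIfPresent("/tmp/1.ecj"); err != nil {
		fmt.Println("remove failed:", err)
	}
}

Either way the intent is the same: a volume that never recorded a deletion has no .ecj file, and deleting its shards should still succeed.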
weed/shell/command_ec_encode.go

@@ -123,6 +123,8 @@ func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId
 
 	for _, location := range locations {
 
+		fmt.Printf("markVolumeReadonly %d on %s ...\n", volumeId, location.Url)
+
 		err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 			_, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{
 				VolumeId: uint32(volumeId),
@@ -141,6 +143,8 @@ func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId
 
 func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error {
 
+	fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer)
+
 	err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 		_, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{
 			VolumeId: uint32(volumeId),
@@ -204,6 +208,8 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
 
 func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
 
+	fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
+
 	// parallelize
 	shardIdChan := make(chan []uint32, len(targetServers))
 	var wg sync.WaitGroup

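These three hunks only add progress logging to the ec.encode shell command. For context, the surrounding operation.WithVolumeServerClient calls follow a scoped-connection pattern; a minimal, self-contained sketch of that pattern (hypothetical withClient name, a bare *grpc.ClientConn standing in for seaweedfs's volume_server_pb client) looks roughly like this:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// withClient dials the server, hands the connection to fn, and always
// releases it afterwards, so callers never manage connection lifecycle.
func withClient(addr string, fn func(conn *grpc.ClientConn) error) error {
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("dial %s: %v", addr, err)
	}
	defer conn.Close()
	return fn(conn)
}

func main() {
	err := withClient("localhost:18080", func(conn *grpc.ClientConn) error {
		fmt.Println("connected, state:", conn.GetState())
		return nil
	})
	if err != nil {
		fmt.Println("error:", err)
	}
}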
weed/shell/command_volume_fix_replication.go

@@ -121,7 +121,10 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
 				VolumeId:       volumeInfo.Id,
 				SourceDataNode: sourceNode.dataNode.Id,
 			})
-			return fmt.Errorf("copying from %s => %s : %v", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)
+			if replicateErr != nil {
+				return fmt.Errorf("copying from %s => %s : %v", sourceNode.dataNode.Id, dst.dataNode.Id, replicateErr)
+			}
+			return nil
 		})
 
 		if err != nil {

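This hunk is the actual bug fix in this file: the old code returned the fmt.Errorf unconditionally, and fmt.Errorf always yields a non-nil error even when the %v argument is nil, so every successful copy was reported as a failure. A standalone sketch of the two shapes (hypothetical copyVolume, placeholder server names A/B):

package main

import "fmt"

// copyVolume mimics the callback above; fail selects whether the
// simulated replication step errors out.
func copyVolume(fail bool) error {
	var replicateErr error
	if fail {
		replicateErr = fmt.Errorf("connection refused")
	}

	// Old (buggy) shape: fmt.Errorf is always non-nil, even when the
	// wrapped value is nil, so this reported failure on success too:
	//   return fmt.Errorf("copying from A => B : %v", replicateErr)

	// Fixed shape: wrap only when there is an error to wrap.
	if replicateErr != nil {
		return fmt.Errorf("copying from A => B : %v", replicateErr)
	}
	return nil
}

func main() {
	fmt.Println(copyVolume(false)) // <nil>
	fmt.Println(copyVolume(true))  // copying from A => B : connection refused
}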
weed/storage/erasure_coding/ec_decoder.go

@@ -11,6 +11,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 // write .idx file from .ecx and .ecj files
@@ -118,9 +119,12 @@ func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId
 }
 
 func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error {
+	if !util.FileExists(baseFileName+".ecj") {
+		return nil
+	}
 	ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644)
 	if openErr != nil {
-		return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
+		return fmt.Errorf("cannot open ec index %s.ecj: %v", baseFileName, openErr)
 	}
 	defer ecjFile.Close()
 
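util.FileExists comes from weed/util; a minimal os.Stat-based equivalent (the real helper may classify stat errors differently) is roughly:

package main

import (
	"fmt"
	"os"
)

// fileExists reports whether name can be stat'ed; in this sketch any
// stat error, not only "not exist", counts as absent.
func fileExists(name string) bool {
	_, err := os.Stat(name)
	return err == nil
}

func main() {
	base := "/tmp/1"
	if !fileExists(base + ".ecj") {
		// Mirrors the guard above: no .ecj journal means no deletions
		// were recorded, so there is nothing to iterate.
		fmt.Println("no .ecj file, skipping")
		return
	}
	fmt.Println("would iterate", base+".ecj")
}

This hunk also fixes the error message in passing: the old text named the .ecx index while the open call was for the .ecj file.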