mirror of https://github.com/seaweedfs/seaweedfs.git

commit 3f9ecee40f
parent 302d9fbc6d

    working with reading remote intervals
@@ -8,8 +8,8 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/command"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/kardianos/osext"
 	"github.com/jacobsa/daemonize"
+	"github.com/kardianos/osext"
 )
 
 var (
@@ -30,9 +30,9 @@ type FilerStoreWrapper struct {
 	actualStore FilerStore
 }
 
-func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper{
+func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
 	return &FilerStoreWrapper{
-		actualStore:store,
+		actualStore: store,
 	}
 }
 
@@ -29,9 +29,9 @@ type Option struct {
 	DirListingLimit int
 	EntryCacheTtl   time.Duration
 
 	MountUid   uint32
 	MountGid   uint32
 	MountMode  os.FileMode
 	MountCtime time.Time
 	MountMtime time.Time
 }
 
@@ -15,4 +15,3 @@ func TestFileIdSize(t *testing.T) {
 	println(len(fileIdStr))
 	println(len(bytes))
 }
-
@@ -49,7 +49,7 @@ type Guard struct {
 }
 
 func NewGuard(whiteList []string, signingKey string, expiresAfterSec int) *Guard {
-	g := &Guard{whiteList: whiteList, SigningKey: SigningKey(signingKey), ExpiresAfterSec:expiresAfterSec}
+	g := &Guard{whiteList: whiteList, SigningKey: SigningKey(signingKey), ExpiresAfterSec: expiresAfterSec}
 	g.isActive = len(g.whiteList) != 0 || len(g.SigningKey) != 0
 	return g
 }
 
@@ -53,11 +53,11 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 
 	// println("source:", volFileInfoResp.String())
 
 	// copy ecx file
-	if err:=vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx"); err!=nil{
+	if err := vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx"); err != nil {
 		return err
 	}
 
-	if err:=vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat"); err!=nil{
+	if err := vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat"); err != nil {
 		return err
 	}
 
@@ -160,35 +160,42 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea
 		return fmt.Errorf("not found ec shard %d.%d", req.VolumeId, req.ShardId)
 	}
 
-	buffer := make([]byte, BufferSizeLimit)
+	bufSize := req.Size
+	if bufSize > BufferSizeLimit {
+		bufSize = BufferSizeLimit
+	}
+	buffer := make([]byte, bufSize)
 
 	startOffset, bytesToRead := req.Offset, req.Size
 
 	for bytesToRead > 0 {
 		bytesread, err := ecShard.ReadAt(buffer, startOffset)
 
 		// println(fileName, "read", bytesread, "bytes, with target", bytesToRead)
+		if bytesread > 0 {
+
+			if int64(bytesread) > bytesToRead {
+				bytesread = int(bytesToRead)
+			}
+			err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
+				Data: buffer[:bytesread],
+			})
+			if err != nil {
+				// println("sending", bytesread, "bytes err", err.Error())
+				return err
+			}
+
+			bytesToRead -= int64(bytesread)
+
+		}
 
 		if err != nil {
 			if err != io.EOF {
 				return err
 			}
-			// println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error())
-			break
+			return nil
 		}
 
-		if int64(bytesread) > bytesToRead {
-			bytesread = int(bytesToRead)
-		}
-		err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
-			Data: buffer[:bytesread],
-		})
-		if err != nil {
-			// println("sending", bytesread, "bytes err", err.Error())
-			return err
-		}
-
-		bytesToRead -= int64(bytesread)
-
 	}
 
 	return nil
 
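Note on this hunk: the restructured loop follows the io.ReaderAt contract, which allows a read to return data together with io.EOF, so the data is now streamed out before the error is inspected; the buffer is also capped at the smaller of the requested size and BufferSizeLimit instead of always allocating the full limit. A minimal sketch of the same pattern (streamRange, bufferSizeLimit, and the send callback are illustrative stand-ins, not the server's API):

package main

import (
	"io"
	"strings"
)

const bufferSizeLimit = 4 // tiny limit for the demo; the server uses a much larger constant

// streamRange copies size bytes starting at offset from r to send, using a
// buffer no larger than the requested size or the limit, whichever is smaller.
func streamRange(r io.ReaderAt, offset, size int64, send func([]byte) error) error {
	bufSize := size
	if bufSize > bufferSizeLimit {
		bufSize = bufferSizeLimit
	}
	buffer := make([]byte, bufSize)

	startOffset, bytesToRead := offset, size
	for bytesToRead > 0 {
		bytesread, err := r.ReadAt(buffer, startOffset)

		// Per the io.ReaderAt contract, data may arrive together with io.EOF:
		// deliver whatever was read before looking at the error.
		if bytesread > 0 {
			if int64(bytesread) > bytesToRead {
				bytesread = int(bytesToRead)
			}
			if sendErr := send(buffer[:bytesread]); sendErr != nil {
				return sendErr
			}
			startOffset += int64(bytesread)
			bytesToRead -= int64(bytesread)
		}

		if err != nil {
			if err != io.EOF {
				return err
			}
			return nil // EOF after delivering the tail is a clean finish
		}
	}
	return nil
}

func main() {
	// strings.Reader satisfies io.ReaderAt, so the sketch can be exercised in memory.
	_ = streamRange(strings.NewReader("hello world"), 0, 11, func(b []byte) error {
		print(string(b))
		return nil
	})
}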
@@ -8,6 +8,7 @@ import (
 	"sort"
 	"sync"
 
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -196,9 +197,10 @@ func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Di
 	targetServer *master_pb.DataNodeInfo, startFromShardId uint32, shardCount uint32,
 	volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (copiedShardIds []uint32, err error) {
 
+	var shardIdsToCopy []uint32
 	for shardId := startFromShardId; shardId < startFromShardId+shardCount; shardId++ {
 		fmt.Printf("allocate %d.%d %s => %s\n", volumeId, shardId, existingLocation.Url, targetServer.Id)
-		copiedShardIds = append(copiedShardIds, shardId)
+		shardIdsToCopy = append(shardIdsToCopy, shardId)
 	}
 
 	err = operation.WithVolumeServerClient(targetServer.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
@@ -208,7 +210,7 @@ func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Di
 		_, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
 			VolumeId:       uint32(volumeId),
 			Collection:     collection,
-			ShardIds:       copiedShardIds,
+			ShardIds:       shardIdsToCopy,
 			SourceDataNode: existingLocation.Url,
 		})
 		if copyErr != nil {
@@ -219,12 +221,17 @@ func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Di
 		_, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
 			VolumeId:   uint32(volumeId),
 			Collection: collection,
-			ShardIds:   copiedShardIds,
+			ShardIds:   shardIdsToCopy,
 		})
 		if mountErr != nil {
 			return mountErr
 		}
 
+		if targetServer.Id != existingLocation.Url {
+			copiedShardIds = shardIdsToCopy
+			glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation.Url, volumeId, copiedShardIds)
+		}
+
 		return nil
 	})
 
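Note on these three hunks: the function no longer appends to its copiedShardIds result before the copy has happened. The loop collects into a local shardIdsToCopy, and the returned copiedShardIds is populated only after copy and mount succeed, and only when the target is a different server than the source, since the caller uses the result to decide which shards to delete at the source. A sketch of that contract with hypothetical types (shardClient, moveShards, and fakeClient are stand-ins, not the real API):

package main

import "fmt"

type shardClient interface {
	Copy(ids []uint32, source string) error
	Mount(ids []uint32) error
}

// moveShards returns the ids that were durably copied elsewhere;
// only those may later be deleted at the source.
func moveShards(target shardClient, targetID, sourceID string, ids []uint32) (copied []uint32, err error) {
	if err = target.Copy(ids, sourceID); err != nil {
		return nil, err // nothing confirmed copied, so nothing may be deleted
	}
	if err = target.Mount(ids); err != nil {
		return nil, err
	}
	// A "move" onto the same server must not report the shards as copied,
	// or the caller would delete the only replica.
	if targetID != sourceID {
		copied = ids
	}
	return copied, nil
}

type fakeClient struct{}

func (fakeClient) Copy(ids []uint32, source string) error { return nil }
func (fakeClient) Mount(ids []uint32) error               { return nil }

func main() {
	copied, _ := moveShards(fakeClient{}, "serverB:8080", "serverA:8080", []uint32{0, 1, 2})
	fmt.Println(copied) // [0 1 2]: safe to delete at the source
}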
@@ -243,7 +250,7 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt
 	return operation.WithVolumeServerClient(sourceLocation.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 		_, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
 			VolumeId:        uint32(volumeId),
 			ShardIds:        toBeDeletedShardIds,
 			ShouldDeleteEcx: shouldDeleteEcx,
 		})
 		return deleteErr
@@ -11,7 +11,7 @@ func TestLoadingEcShards(t *testing.T) {
 		t.Errorf("load all ec shards: %v", err)
 	}
 
-	if len(dl.ecVolumes)!=1 {
+	if len(dl.ecVolumes) != 1 {
 		t.Errorf("loading err")
 	}
 }
@@ -111,7 +111,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V
 	return
 }
 
-func (ev *EcVolume) LocateEcShardNeedle(n *needle.Needle) (offset types.Offset, size uint32, intervals []Interval, err error) {
+func (ev *EcVolume) LocateEcShardNeedle(n *needle.Needle, version needle.Version) (offset types.Offset, size uint32, intervals []Interval, err error) {
 
 	// find the needle from ecx file
 	offset, size, err = ev.findNeedleFromEcx(n.Id)
@@ -122,7 +122,7 @@ func (ev *EcVolume) LocateEcShardNeedle(n *needle.Needle) (offset types.Offset,
 	shard := ev.Shards[0]
 
 	// calculate the locations in the ec shards
-	intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), size)
+	intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), uint32(needle.GetActualSize(size, version)))
 
 	return
 }
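Note on this hunk: the index records the needle's logical data size, but the on-disk record also carries a version-dependent header and checksum, which is why LocateData is now fed needle.GetActualSize(size, version) so the computed intervals cover the full record. An illustrative computation of that idea; the constants are stand-ins, not the real needle layout:

// Illustrative only: the real header/checksum sizes live in the needle package
// and depend on the needle version; these constants are hypothetical stand-ins.
package main

import "fmt"

const (
	needleHeaderSize   = 16 // hypothetical header bytes
	needleChecksumSize = 4  // hypothetical trailing checksum
	paddingAlignment   = 8  // records padded to an alignment boundary
)

// actualSize returns how many bytes a record occupies on disk when the
// index entry says its payload is dataSize bytes.
func actualSize(dataSize uint32) int64 {
	raw := int64(needleHeaderSize) + int64(dataSize) + needleChecksumSize
	// round up to the alignment boundary
	return (raw + paddingAlignment - 1) / paddingAlignment * paddingAlignment
}

func main() {
	// An indexed payload of 100 bytes maps to a larger on-disk span,
	// which is what interval location must cover.
	fmt.Println(actualSize(100)) // 120
}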
@@ -40,7 +40,7 @@ func (ecInfo *EcVolumeInfo) ShardIdCount() (count int) {
 	return ecInfo.ShardBits.ShardIdCount()
 }
 
-func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) (*EcVolumeInfo) {
+func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo {
 	ret := &EcVolumeInfo{
 		VolumeId:   ecInfo.VolumeId,
 		Collection: ecInfo.Collection,
|
@ -88,10 +88,10 @@ func (b ShardBits) ShardIdCount() (count int) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b ShardBits) Minus(other ShardBits) (ShardBits) {
|
func (b ShardBits) Minus(other ShardBits) ShardBits {
|
||||||
return b &^ other
|
return b &^ other
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b ShardBits) Plus(other ShardBits) (ShardBits) {
|
func (b ShardBits) Plus(other ShardBits) ShardBits {
|
||||||
return b | other
|
return b | other
|
||||||
}
|
}
|
||||||
|
|
|
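Note on this hunk: ShardBits is a bitmask with one bit per shard id. Minus uses Go's AND NOT operator (&^) to clear the other set's bits, and Plus is a plain union. A small self-contained example of those semantics (the uint32 underlying type here is an assumption for the demo):

package main

import "fmt"

type ShardBits uint32 // assumption: one bit per shard id

func (b ShardBits) Minus(other ShardBits) ShardBits { return b &^ other } // set difference
func (b ShardBits) Plus(other ShardBits) ShardBits  { return b | other }  // set union

func main() {
	a := ShardBits(0b1011) // shards 0, 1, 3
	c := ShardBits(0b0010) // shard 1
	fmt.Printf("%04b\n", a.Minus(c)) // 1001: shards 0 and 3 remain
	fmt.Printf("%04b\n", a.Plus(c))  // 1011: union
}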
@@ -51,4 +51,3 @@ func IdxFileEntry(bytes []byte) (key types.NeedleId, offset types.Offset, size u
 const (
 	RowsToRead = 1024
 )
-
@@ -248,16 +248,16 @@ func (cm *CompactMap) AscendingVisit(visit func(NeedleValue) error) error {
 	for _, cs := range cm.list {
 		cs.RLock()
 		var i, j int
-		for i, j = 0, 0; i < len(cs.overflow) && j < len(cs.values) && j<cs.counter; {
+		for i, j = 0, 0; i < len(cs.overflow) && j < len(cs.values) && j < cs.counter; {
 			if cs.overflow[i].Key < cs.values[j].Key {
 				if err := visit(toNeedleValue(cs.overflowExtra[i], cs.overflow[i], cs)); err != nil {
 					cs.RUnlock()
 					return err
 				}
 				i++
-			}else if cs.overflow[i].Key == cs.values[j].Key {
+			} else if cs.overflow[i].Key == cs.values[j].Key {
 				j++
-			}else{
+			} else {
 				if err := visit(toNeedleValue(cs.valuesExtra[j], cs.values[j], cs)); err != nil {
 					cs.RUnlock()
 					return err
@@ -265,13 +265,13 @@ func (cm *CompactMap) AscendingVisit(visit func(NeedleValue) error) error {
 				j++
 			}
 		}
-		for ;i < len(cs.overflow);i++{
+		for ; i < len(cs.overflow); i++ {
 			if err := visit(toNeedleValue(cs.overflowExtra[i], cs.overflow[i], cs)); err != nil {
 				cs.RUnlock()
 				return err
 			}
 		}
-		for ; j < len(cs.values)&& j<cs.counter;j++{
+		for ; j < len(cs.values) && j < cs.counter; j++ {
 			if err := visit(toNeedleValue(cs.valuesExtra[j], cs.values[j], cs)); err != nil {
 				cs.RUnlock()
 				return err
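Note on these two hunks: AscendingVisit is a two-pointer merge over two sorted sequences, where an overflow entry with the same key shadows the stale in-place entry, and the trailing loops drain whichever side remains. The same pattern over plain int slices (a simplified, hypothetical reduction of the real code):

package main

import "fmt"

// mergeVisit walks two ascending slices in one sorted pass; when a key is
// present in both, the overflow entry wins and the stale base entry is
// skipped, mirroring how AscendingVisit prefers cs.overflow over cs.values.
func mergeVisit(overflow, values []int, visit func(int) error) error {
	var i, j int
	for i < len(overflow) && j < len(values) {
		switch {
		case overflow[i] < values[j]:
			if err := visit(overflow[i]); err != nil {
				return err
			}
			i++
		case overflow[i] == values[j]:
			j++ // shadowed: the overflow copy will be visited instead
		default:
			if err := visit(values[j]); err != nil {
				return err
			}
			j++
		}
	}
	for ; i < len(overflow); i++ { // drain whichever side remains
		if err := visit(overflow[i]); err != nil {
			return err
		}
	}
	for ; j < len(values); j++ {
		if err := visit(values[j]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = mergeVisit([]int{2, 5}, []int{1, 2, 3}, func(v int) error {
		fmt.Println(v) // 1 2 3 5, with the duplicate 2 taken from overflow
		return nil
	})
}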
@@ -292,10 +292,10 @@ func toNeedleValue(snve SectionalNeedleValueExtra, snv SectionalNeedleValue, cs
 
 func (nv NeedleValue) toSectionalNeedleValue(cs *CompactSection) (SectionalNeedleValue, SectionalNeedleValueExtra) {
 	return SectionalNeedleValue{
 		SectionalNeedleId(nv.Key - cs.start),
 		nv.Offset.OffsetLower,
 		nv.Size,
 	}, SectionalNeedleValueExtra{
 		nv.Offset.OffsetHigher,
 	}
 }
 
@@ -96,19 +96,20 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n
 	for _, location := range s.Locations {
 		if localEcVolume, found := location.FindEcVolume(vid); found {
 
-			offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n)
+			// TODO need to read the version
+			version := needle.CurrentVersion
+
+			offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n, version)
 			if err != nil {
 				return 0, err
 			}
 
 			glog.V(4).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
 
-			// TODO need to read the version
-			version := needle.CurrentVersion
+			if len(intervals) > 1 {
+				glog.V(4).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
+			}
-			// TODO the interval size should be the actual size
+			bytes, err := s.readEcShardIntervals(ctx, vid, localEcVolume, intervals)
 
-			bytes, err := s.readEcShardIntervals(ctx, vid, localEcVolume, version, intervals)
 			if err != nil {
 				return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
 			}
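Note on this hunk: moving the version parameter into LocateEcShardNeedle means the intervals come back already sized in actual on-disk bytes, so readEcShardIntervals and readOneEcShardInterval below no longer need a version at all. The TODO remains, though: the version is still assumed to be needle.CurrentVersion rather than read from the volume. A sketch of the design choice, with stand-in types (interval, locate, and withHeaderAndChecksum are hypothetical):

package main

import "fmt"

type interval struct{ offset, size int64 } // size now in on-disk bytes

// locate converts an index entry (offset + logical size) into byte-accurate
// intervals up front, so readers below this layer never see the version.
func locate(offset, logicalSize int64, actualSize func(int64) int64) []interval {
	return []interval{{offset, actualSize(logicalSize)}}
}

func main() {
	withHeaderAndChecksum := func(n int64) int64 { return 16 + n + 4 } // stand-in
	ivs := locate(0, 100, withHeaderAndChecksum)
	buf := make([]byte, ivs[0].size) // a reader allocates exactly interval.size
	fmt.Println(len(buf))            // 120
}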
@@ -124,14 +125,14 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n
 	return 0, fmt.Errorf("ec shard %d not found", vid)
 }
 
-func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, ecVolume *erasure_coding.EcVolume, version needle.Version, intervals []erasure_coding.Interval) (data []byte, err error) {
+func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, err error) {
 
 	if err = s.cachedLookupEcShardLocations(ctx, ecVolume); err != nil {
 		return nil, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterAddress, err)
 	}
 
 	for i, interval := range intervals {
-		if d, e := s.readOneEcShardInterval(ctx, ecVolume, version, interval); e != nil {
+		if d, e := s.readOneEcShardInterval(ctx, ecVolume, interval); e != nil {
 			return nil, e
 		} else {
 			if i == 0 {
@@ -144,11 +145,10 @@ func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, e
 	return
 }
 
-func (s *Store) readOneEcShardInterval(ctx context.Context, ecVolume *erasure_coding.EcVolume, version needle.Version, interval erasure_coding.Interval) (data []byte, err error) {
+func (s *Store) readOneEcShardInterval(ctx context.Context, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, err error) {
 	shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
-	data = make([]byte, int(needle.GetActualSize(interval.Size, version)))
+	data = make([]byte, interval.Size)
 	if shard, found := ecVolume.FindEcVolumeShard(shardId); found {
-		glog.V(3).Infof("read local ec shard %d.%d", ecVolume.VolumeId, shardId)
 		if _, err = shard.ReadAt(data, actualOffset); err != nil {
 			glog.V(0).Infof("read local ec shard %d.%d: %v", ecVolume.VolumeId, shardId, err)
 			return
@@ -160,7 +160,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, ecVolume *erasure_co
 	if !found || len(sourceDataNodes) == 0 {
 		return nil, fmt.Errorf("failed to find ec shard %d.%d", ecVolume.VolumeId, shardId)
 	}
-	glog.V(3).Infof("read remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNodes[0])
+	glog.V(4).Infof("read remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNodes[0])
 	_, err = s.readOneRemoteEcShardInterval(ctx, sourceDataNodes[0], ecVolume.VolumeId, shardId, data, actualOffset)
 	if err != nil {
 		glog.V(1).Infof("failed to read from %s for ec shard %d.%d : %v", sourceDataNodes[0], ecVolume.VolumeId, shardId, err)
@@ -195,6 +195,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras
 				ecVolume.ShardLocations[shardId] = append(ecVolume.ShardLocations[shardId], loc.Url)
 			}
 		}
+		ecVolume.ShardLocationsRefreshTime = time.Now()
 		ecVolume.ShardLocationsLock.Unlock()
 
 		return nil
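Note on this hunk: stamping ShardLocationsRefreshTime after a successful master lookup gives cachedLookupEcShardLocations a freshness marker, presumably so later calls can skip the gRPC round trip while the cached locations are recent. A minimal sketch of that TTL-gated refresh (locationsTTL, ecVolume, and the fetch callback are assumptions, not the store's API):

package main

import (
	"fmt"
	"sync"
	"time"
)

const locationsTTL = 10 * time.Minute // stand-in; the real interval lives in the store code

type ecVolume struct {
	mu          sync.Mutex
	locations   map[uint32][]string // shard id -> server URLs
	refreshedAt time.Time
}

// lookupLocations refreshes the shard-location cache only when it has gone
// stale, mirroring how ShardLocationsRefreshTime gates calls to the master.
func (v *ecVolume) lookupLocations(fetch func() map[uint32][]string) map[uint32][]string {
	v.mu.Lock()
	defer v.mu.Unlock()
	if time.Since(v.refreshedAt) < locationsTTL {
		return v.locations // still fresh: no master round trip
	}
	v.locations = fetch()
	v.refreshedAt = time.Now() // record the refresh, as the diff does
	return v.locations
}

func main() {
	v := &ecVolume{}
	calls := 0
	fetch := func() map[uint32][]string {
		calls++
		return map[uint32][]string{0: {"server1:8080"}}
	}
	v.lookupLocations(fetch)
	v.lookupLocations(fetch) // served from cache
	fmt.Println(calls)       // 1
}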
@@ -123,7 +123,7 @@ func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo,
 	}
 }
 
-func (t *Topology) LookupEcShards(vid needle.VolumeId)(locations *EcShardLocations, found bool) {
+func (t *Topology) LookupEcShards(vid needle.VolumeId) (locations *EcShardLocations, found bool) {
 	t.ecShardMapLock.RLock()
 	defer t.ecShardMapLock.RUnlock()
 