working with reading remote intervals

Chris Lu 2019-05-28 21:29:07 -07:00
parent 302d9fbc6d
commit 3f9ecee40f
15 changed files with 76 additions and 63 deletions

View file

@@ -8,8 +8,8 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/command"
 	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/kardianos/osext"
 	"github.com/jacobsa/daemonize"
+	"github.com/kardianos/osext"
 )
 var (

View file

@@ -30,9 +30,9 @@ type FilerStoreWrapper struct {
 	actualStore FilerStore
 }
-func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper{
+func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
 	return &FilerStoreWrapper{
-		actualStore:store,
+		actualStore: store,
 	}
 }
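Aside: FilerStoreWrapper is a thin decorator over the FilerStore interface, which is why the constructor reformatted above only stores the wrapped value. A minimal standalone sketch of the pattern, with FilerStore reduced to one hypothetical method:

package main

import "fmt"

// Reduced stand-in for the real FilerStore interface.
type FilerStore interface {
	GetName() string
}

// FilerStoreWrapper decorates an actual store, so cross-cutting
// concerns (timing, metrics, retries) can be layered on top.
type FilerStoreWrapper struct {
	actualStore FilerStore
}

func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper {
	return &FilerStoreWrapper{
		actualStore: store,
	}
}

func (w *FilerStoreWrapper) GetName() string {
	// Delegate to the wrapped store; instrumentation would go here.
	return w.actualStore.GetName()
}

type memStore struct{}

func (memStore) GetName() string { return "memory" }

func main() {
	var s FilerStore = NewFilerStoreWrapper(memStore{})
	fmt.Println(s.GetName()) // memory
}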

View file

@@ -29,9 +29,9 @@ type Option struct {
 	DirListingLimit int
 	EntryCacheTtl   time.Duration
-	MountUid  uint32
-	MountGid  uint32
-	MountMode os.FileMode
+	MountUid   uint32
+	MountGid   uint32
+	MountMode  os.FileMode
 	MountCtime time.Time
 	MountMtime time.Time
 }

View file

@@ -15,4 +15,3 @@ func TestFileIdSize(t *testing.T) {
 	println(len(fileIdStr))
 	println(len(bytes))
 }
-
View file

@@ -49,7 +49,7 @@ type Guard struct {
 }
 func NewGuard(whiteList []string, signingKey string, expiresAfterSec int) *Guard {
-	g := &Guard{whiteList: whiteList, SigningKey: SigningKey(signingKey), ExpiresAfterSec:expiresAfterSec}
+	g := &Guard{whiteList: whiteList, SigningKey: SigningKey(signingKey), ExpiresAfterSec: expiresAfterSec}
 	g.isActive = len(g.whiteList) != 0 || len(g.SigningKey) != 0
 	return g
 }

View file

@@ -53,11 +53,11 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 	// println("source:", volFileInfoResp.String())
 	// copy ecx file
-	if err:=vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx"); err!=nil{
+	if err := vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx"); err != nil {
 		return err
 	}
-	if err:=vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat"); err!=nil{
+	if err := vs.doCopyFile(ctx, client, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat"); err != nil {
 		return err
 	}
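Aside: the two calls differ only in the file extension, so the early-return pattern above can be captured in one helper. A sketch, with a hypothetical copyFile callback standing in for the doCopyFile RPC:

package main

import "fmt"

// copyVolumeFiles copies each companion file of a volume (index first,
// then data), stopping at the first error. copyFile is a stand-in for
// the real per-file copy RPC.
func copyVolumeFiles(volumeFileName string, copyFile func(name, ext string) error) error {
	for _, ext := range []string{".idx", ".dat"} {
		if err := copyFile(volumeFileName, ext); err != nil {
			return fmt.Errorf("copy %s%s: %v", volumeFileName, ext, err)
		}
	}
	return nil
}

func main() {
	err := copyVolumeFiles("/data/collection_1", func(name, ext string) error {
		fmt.Println("copying", name+ext)
		return nil
	})
	fmt.Println("done, err =", err)
}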

View file

@@ -160,35 +160,42 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea
 		return fmt.Errorf("not found ec shard %d.%d", req.VolumeId, req.ShardId)
 	}
-	buffer := make([]byte, BufferSizeLimit)
+	bufSize := req.Size
+	if bufSize > BufferSizeLimit {
+		bufSize = BufferSizeLimit
+	}
+	buffer := make([]byte, bufSize)
 	startOffset, bytesToRead := req.Offset, req.Size
 	for bytesToRead > 0 {
 		bytesread, err := ecShard.ReadAt(buffer, startOffset)
 		// println(fileName, "read", bytesread, "bytes, with target", bytesToRead)
-		if bytesread > 0 {
-			if int64(bytesread) > bytesToRead {
-				bytesread = int(bytesToRead)
-			}
-			err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
-				Data: buffer[:bytesread],
-			})
-			if err != nil {
-				// println("sending", bytesread, "bytes err", err.Error())
-				return err
-			}
-			bytesToRead -= int64(bytesread)
-		}
 		if err != nil {
 			if err != io.EOF {
 				return err
 			}
			// println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error())
-			break
+			return nil
 		}
+		if int64(bytesread) > bytesToRead {
+			bytesread = int(bytesToRead)
+		}
+		err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{
+			Data: buffer[:bytesread],
+		})
+		if err != nil {
+			// println("sending", bytesread, "bytes err", err.Error())
+			return err
+		}
+		bytesToRead -= int64(bytesread)
 	}
 	return nil
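For reference, the rewritten loop allocates at most min(req.Size, BufferSizeLimit), checks the read error before touching the data, clamps the final chunk, and streams until the requested range is done. A self-contained sketch of the same shape against an io.ReaderAt; names and the tiny buffer limit are illustrative, and the sketch advances startOffset after each chunk so successive reads move forward:

package main

import (
	"fmt"
	"io"
	"strings"
)

const bufferSizeLimit = 4 // tiny, so the example streams several chunks

// streamRange mirrors the rewritten loop: allocate at most
// min(size, bufferSizeLimit), handle the read error before using the
// data, clamp the final chunk, and send until size bytes are done.
func streamRange(r io.ReaderAt, offset, size int64, send func([]byte) error) error {
	bufSize := size
	if bufSize > bufferSizeLimit {
		bufSize = bufferSizeLimit
	}
	buffer := make([]byte, bufSize)

	startOffset, bytesToRead := offset, size
	for bytesToRead > 0 {
		bytesread, err := r.ReadAt(buffer, startOffset)
		if err != nil {
			if err != io.EOF {
				return err
			}
			return nil // EOF: nothing more to stream
		}
		if int64(bytesread) > bytesToRead {
			bytesread = int(bytesToRead) // don't send past the requested range
		}
		if err := send(buffer[:bytesread]); err != nil {
			return err
		}
		startOffset += int64(bytesread)
		bytesToRead -= int64(bytesread)
	}
	return nil
}

func main() {
	r := strings.NewReader("hello erasure coded world")
	_ = streamRange(r, 6, 13, func(chunk []byte) error {
		fmt.Printf("chunk: %q\n", chunk)
		return nil
	})
}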

View file

@@ -8,6 +8,7 @@ import (
 	"sort"
 	"sync"
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
@@ -196,9 +197,10 @@ func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Di
 	targetServer *master_pb.DataNodeInfo, startFromShardId uint32, shardCount uint32,
 	volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (copiedShardIds []uint32, err error) {
+	var shardIdsToCopy []uint32
 	for shardId := startFromShardId; shardId < startFromShardId+shardCount; shardId++ {
 		fmt.Printf("allocate %d.%d %s => %s\n", volumeId, shardId, existingLocation.Url, targetServer.Id)
-		copiedShardIds = append(copiedShardIds, shardId)
+		shardIdsToCopy = append(shardIdsToCopy, shardId)
 	}
 	err = operation.WithVolumeServerClient(targetServer.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
@@ -208,7 +210,7 @@ func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Di
 		_, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
 			VolumeId:       uint32(volumeId),
 			Collection:     collection,
-			ShardIds:       copiedShardIds,
+			ShardIds:       shardIdsToCopy,
 			SourceDataNode: existingLocation.Url,
 		})
 		if copyErr != nil {
@@ -219,12 +221,17 @@ func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Di
 		_, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
 			VolumeId:   uint32(volumeId),
 			Collection: collection,
-			ShardIds:   copiedShardIds,
+			ShardIds:   shardIdsToCopy,
 		})
 		if mountErr != nil {
 			return mountErr
 		}
+
+		if targetServer.Id != existingLocation.Url {
+			copiedShardIds = shardIdsToCopy
+			glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation.Url, volumeId, copiedShardIds)
+		}
 		return nil
 	})
@@ -243,7 +250,7 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt
 	return operation.WithVolumeServerClient(sourceLocation.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 		_, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
 			VolumeId:        uint32(volumeId),
-			ShardIds:        toBeDeletedShardIds,
+			ShardIds:        toBeDeletedShardIds,
 			ShouldDeleteEcx: shouldDeleteEcx,
 		})
 		return deleteErr
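Aside: the rename above separates planning from reporting. shardIdsToCopy is what the copy and mount RPCs operate on; the named return copiedShardIds is only populated when the target actually differs from the source, i.e. when the source's copies become redundant and may later be deleted. A reduced sketch of that flow, where every name is a stand-in:

package main

import "fmt"

// copyShards plans the shard ids to copy, copies and mounts them on the
// target, and only reports them as "copied" (and thus eligible for
// deletion at the source) when target and source differ.
func copyShards(target, source string, start, count uint32,
	copyAndMount func(ids []uint32) error) (copiedShardIds []uint32, err error) {

	var shardIdsToCopy []uint32
	for id := start; id < start+count; id++ {
		shardIdsToCopy = append(shardIdsToCopy, id)
	}

	if err = copyAndMount(shardIdsToCopy); err != nil {
		return nil, err
	}

	if target != source {
		copiedShardIds = shardIdsToCopy // the source copies are now redundant
	}
	return copiedShardIds, nil
}

func main() {
	ids, _ := copyShards("serverB", "serverA", 0, 4, func(ids []uint32) error {
		fmt.Println("copy+mount", ids)
		return nil
	})
	fmt.Println("to delete at source:", ids)
}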

View file

@@ -11,7 +11,7 @@ func TestLoadingEcShards(t *testing.T) {
 		t.Errorf("load all ec shards: %v", err)
 	}
-	if len(dl.ecVolumes)!=1 {
+	if len(dl.ecVolumes) != 1 {
 		t.Errorf("loading err")
 	}
 }

View file

@@ -111,7 +111,7 @@ func (ev *EcVolume) ToVolumeEcShardInformationMessage() (messages []*master_pb.V
 	return
 }
-func (ev *EcVolume) LocateEcShardNeedle(n *needle.Needle) (offset types.Offset, size uint32, intervals []Interval, err error) {
+func (ev *EcVolume) LocateEcShardNeedle(n *needle.Needle, version needle.Version) (offset types.Offset, size uint32, intervals []Interval, err error) {
 	// find the needle from ecx file
 	offset, size, err = ev.findNeedleFromEcx(n.Id)
@@ -122,7 +122,7 @@ func (ev *EcVolume) LocateEcShardNeedle(n *needle.Needle) (offset types.Offset,
 	shard := ev.Shards[0]
 	// calculate the locations in the ec shards
-	intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), size)
+	intervals = LocateData(ErasureCodingLargeBlockSize, ErasureCodingSmallBlockSize, DataShardsCount*shard.ecdFileSize, offset.ToAcutalOffset(), uint32(needle.GetActualSize(size, version)))
 	return
 }
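The point of the second hunk: LocateData now receives the needle's full on-disk footprint rather than just its data size, so the returned intervals cover the header and checksum bytes too. A rough standalone illustration of the idea; the constants below are assumptions loosely modeled on the needle layout, not the real values in weed/storage/needle:

package main

import "fmt"

// Illustrative only: rough needle layout constants; the real values live
// in weed/storage/needle and vary by version.
const (
	needleHeaderSize   = 16 // cookie + id + size
	needleChecksumSize = 4
	needlePaddingSize  = 8 // records are aligned to 8 bytes
)

// actualSize returns the full on-disk footprint of a needle whose data
// portion is dataSize bytes. The diff above switches LocateData to this
// kind of value, so the intervals span the whole stored record instead
// of just the data bytes.
func actualSize(dataSize uint32) int64 {
	raw := int64(needleHeaderSize) + int64(dataSize) + needleChecksumSize
	padding := (needlePaddingSize - raw%needlePaddingSize) % needlePaddingSize
	return raw + padding
}

func main() {
	for _, n := range []uint32{1, 100, 1024} {
		fmt.Printf("data %4d bytes -> %4d bytes on disk\n", n, actualSize(n))
	}
}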

View file

@@ -40,7 +40,7 @@ func (ecInfo *EcVolumeInfo) ShardIdCount() (count int) {
 	return ecInfo.ShardBits.ShardIdCount()
 }
-func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) (*EcVolumeInfo) {
+func (ecInfo *EcVolumeInfo) Minus(other *EcVolumeInfo) *EcVolumeInfo {
 	ret := &EcVolumeInfo{
 		VolumeId:   ecInfo.VolumeId,
 		Collection: ecInfo.Collection,
@@ -88,10 +88,10 @@ func (b ShardBits) ShardIdCount() (count int) {
 	return
 }
-func (b ShardBits) Minus(other ShardBits) (ShardBits) {
+func (b ShardBits) Minus(other ShardBits) ShardBits {
 	return b &^ other
 }
-func (b ShardBits) Plus(other ShardBits) (ShardBits) {
+func (b ShardBits) Plus(other ShardBits) ShardBits {
 	return b | other
 }
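Minus and Plus above are plain bit operations on a shard-id bitmask: AND-NOT drops the shards present in the other set, OR adds them. A small demo; Has is an added helper for illustration and is not part of the diff:

package main

import "fmt"

// ShardBits packs shard ids into one word, as in the file above:
// Minus is AND-NOT, Plus is OR.
type ShardBits uint32

func (b ShardBits) Minus(other ShardBits) ShardBits { return b &^ other }
func (b ShardBits) Plus(other ShardBits) ShardBits  { return b | other }

func (b ShardBits) Has(id uint) bool { return b&(1<<id) != 0 }

func main() {
	var local, reported ShardBits
	local = local.Plus(1<<0 | 1<<3 | 1<<7)
	reported = reported.Plus(1 << 3)

	missing := local.Minus(reported) // shards held locally but not reported
	fmt.Printf("local=%08b reported=%08b missing=%08b\n", local, reported, missing)
	fmt.Println("shard 7 missing?", missing.Has(7)) // true
}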

View file

@@ -51,4 +51,3 @@ func IdxFileEntry(bytes []byte) (key types.NeedleId, offset types.Offset, size u
 const (
 	RowsToRead = 1024
 )
-
View file

@@ -248,16 +248,16 @@ func (cm *CompactMap) AscendingVisit(visit func(NeedleValue) error) error {
 	for _, cs := range cm.list {
 		cs.RLock()
 		var i, j int
-		for i, j = 0, 0; i < len(cs.overflow) && j < len(cs.values) && j<cs.counter; {
+		for i, j = 0, 0; i < len(cs.overflow) && j < len(cs.values) && j < cs.counter; {
 			if cs.overflow[i].Key < cs.values[j].Key {
 				if err := visit(toNeedleValue(cs.overflowExtra[i], cs.overflow[i], cs)); err != nil {
 					cs.RUnlock()
 					return err
 				}
 				i++
-			}else if cs.overflow[i].Key == cs.values[j].Key {
+			} else if cs.overflow[i].Key == cs.values[j].Key {
 				j++
-			}else{
+			} else {
 				if err := visit(toNeedleValue(cs.valuesExtra[j], cs.values[j], cs)); err != nil {
 					cs.RUnlock()
 					return err
@@ -265,13 +265,13 @@ func (cm *CompactMap) AscendingVisit(visit func(NeedleValue) error) error {
 				j++
 			}
 		}
-		for ;i < len(cs.overflow);i++{
+		for ; i < len(cs.overflow); i++ {
 			if err := visit(toNeedleValue(cs.overflowExtra[i], cs.overflow[i], cs)); err != nil {
 				cs.RUnlock()
 				return err
 			}
 		}
-		for ; j < len(cs.values)&& j<cs.counter;j++{
+		for ; j < len(cs.values) && j < cs.counter; j++ {
 			if err := visit(toNeedleValue(cs.valuesExtra[j], cs.values[j], cs)); err != nil {
 				cs.RUnlock()
 				return err
@@ -292,10 +292,10 @@ func toNeedleValue(snve SectionalNeedleValueExtra, snv SectionalNeedleValue, cs
 func (nv NeedleValue) toSectionalNeedleValue(cs *CompactSection) (SectionalNeedleValue, SectionalNeedleValueExtra) {
 	return SectionalNeedleValue{
-		SectionalNeedleId(nv.Key - cs.start),
-		nv.Offset.OffsetLower,
-		nv.Size,
-	}, SectionalNeedleValueExtra{
-		nv.Offset.OffsetHigher,
-	}
+		SectionalNeedleId(nv.Key - cs.start),
+		nv.Offset.OffsetLower,
+		nv.Size,
+	}, SectionalNeedleValueExtra{
+		nv.Offset.OffsetHigher,
+	}
 }
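AscendingVisit merges two key-sorted lists (overflow entries and in-place values) with a two-pointer walk, letting the overflow entry win on equal keys; the trailing loops drain whichever list is left. A reduced sketch over plain ints that mirrors the control flow; the j < cs.counter bound is dropped for brevity:

package main

import "fmt"

// ascendingVisit visits two sorted lists in ascending key order, with
// overflow entries shadowing equal keys in values.
func ascendingVisit(overflow, values []int, visit func(int) error) error {
	i, j := 0, 0
	for i < len(overflow) && j < len(values) {
		if overflow[i] < values[j] {
			if err := visit(overflow[i]); err != nil {
				return err
			}
			i++
		} else if overflow[i] == values[j] {
			j++ // equal key: the overflow entry wins, skip the stale value
		} else {
			if err := visit(values[j]); err != nil {
				return err
			}
			j++
		}
	}
	for ; i < len(overflow); i++ { // drain the remainder of either list
		if err := visit(overflow[i]); err != nil {
			return err
		}
	}
	for ; j < len(values); j++ {
		if err := visit(values[j]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = ascendingVisit([]int{2, 5}, []int{1, 2, 4}, func(k int) error {
		fmt.Println("visit", k)
		return nil
	})
	// Visits 1, 2, 4, 5; the key 2 comes from the overflow list.
}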

View file

@@ -96,19 +96,20 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n
 	for _, location := range s.Locations {
 		if localEcVolume, found := location.FindEcVolume(vid); found {
-			offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n)
+			// TODO need to read the version
+			version := needle.CurrentVersion
+			offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n, version)
 			if err != nil {
 				return 0, err
 			}
 			glog.V(4).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
-			// TODO need to read the version
-			version := needle.CurrentVersion
-			// TODO the interval size should be the actual size
-			bytes, err := s.readEcShardIntervals(ctx, vid, localEcVolume, version, intervals)
+			if len(intervals) > 1 {
+				glog.V(4).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals)
+			}
+			bytes, err := s.readEcShardIntervals(ctx, vid, localEcVolume, intervals)
 			if err != nil {
 				return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
 			}
@@ -124,14 +125,14 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n
 	return 0, fmt.Errorf("ec shard %d not found", vid)
 }
-func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, ecVolume *erasure_coding.EcVolume, version needle.Version, intervals []erasure_coding.Interval) (data []byte, err error) {
+func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, err error) {
 	if err = s.cachedLookupEcShardLocations(ctx, ecVolume); err != nil {
 		return nil, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterAddress, err)
 	}
 	for i, interval := range intervals {
-		if d, e := s.readOneEcShardInterval(ctx, ecVolume, version, interval); e != nil {
+		if d, e := s.readOneEcShardInterval(ctx, ecVolume, interval); e != nil {
 			return nil, e
 		} else {
 			if i == 0 {
@@ -144,11 +145,10 @@ func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, e
 	return
 }
-func (s *Store) readOneEcShardInterval(ctx context.Context, ecVolume *erasure_coding.EcVolume, version needle.Version, interval erasure_coding.Interval) (data []byte, err error) {
+func (s *Store) readOneEcShardInterval(ctx context.Context, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, err error) {
 	shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
-	data = make([]byte, int(needle.GetActualSize(interval.Size, version)))
+	data = make([]byte, interval.Size)
 	if shard, found := ecVolume.FindEcVolumeShard(shardId); found {
-		glog.V(3).Infof("read local ec shard %d.%d", ecVolume.VolumeId, shardId)
 		if _, err = shard.ReadAt(data, actualOffset); err != nil {
 			glog.V(0).Infof("read local ec shard %d.%d: %v", ecVolume.VolumeId, shardId, err)
 			return
@@ -160,7 +160,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, ecVolume *erasure_co
 		if !found || len(sourceDataNodes) == 0 {
 			return nil, fmt.Errorf("failed to find ec shard %d.%d", ecVolume.VolumeId, shardId)
 		}
-		glog.V(3).Infof("read remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNodes[0])
+		glog.V(4).Infof("read remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNodes[0])
 		_, err = s.readOneRemoteEcShardInterval(ctx, sourceDataNodes[0], ecVolume.VolumeId, shardId, data, actualOffset)
 		if err != nil {
 			glog.V(1).Infof("failed to read from %s for ec shard %d.%d : %v", sourceDataNodes[0], ecVolume.VolumeId, shardId, err)
@@ -195,6 +195,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras
 			ecVolume.ShardLocations[shardId] = append(ecVolume.ShardLocations[shardId], loc.Url)
 		}
 	}
+	ecVolume.ShardLocationsRefreshTime = time.Now()
 	ecVolume.ShardLocationsLock.Unlock()
 	return nil
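Putting the store_ec changes together: each interval resolves to a shard id and an in-shard offset, is read from a locally mounted shard when possible, and otherwise from a replica found in the cached shard-location map (now timestamped via ShardLocationsRefreshTime). A reduced sketch of that dispatch; every name is a stand-in, and the first-interval buffer reuse in the real code is simplified to a plain append:

package main

import "fmt"

// interval is a stand-in for erasure_coding.Interval after it has been
// resolved to a shard id and an in-shard offset.
type interval struct {
	shardId int
	offset  int64
	size    int
}

func readIntervals(
	intervals []interval,
	localShards map[int]bool,
	shardLocations map[int][]string,
	readLocal func(shardId int, off int64, buf []byte) error,
	readRemote func(addr string, shardId int, off int64, buf []byte) error,
) ([]byte, error) {
	var data []byte
	for _, iv := range intervals {
		buf := make([]byte, iv.size) // per the change above: the interval size, not the padded needle size
		if localShards[iv.shardId] {
			if err := readLocal(iv.shardId, iv.offset, buf); err != nil {
				return nil, err
			}
		} else if addrs := shardLocations[iv.shardId]; len(addrs) > 0 {
			if err := readRemote(addrs[0], iv.shardId, iv.offset, buf); err != nil {
				return nil, err
			}
		} else {
			return nil, fmt.Errorf("failed to find ec shard %d", iv.shardId)
		}
		data = append(data, buf...)
	}
	return data, nil
}

func main() {
	out, err := readIntervals(
		[]interval{{shardId: 0, offset: 0, size: 2}, {shardId: 3, offset: 4, size: 2}},
		map[int]bool{0: true},
		map[int][]string{3: {"10.0.0.7:8080"}},
		func(id int, off int64, buf []byte) error { copy(buf, "lo"); return nil },
		func(addr string, id int, off int64, buf []byte) error { copy(buf, "re"); return nil },
	)
	fmt.Printf("%s %v\n", out, err)
}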

View file

@@ -123,7 +123,7 @@ func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo,
 	}
 }
-func (t *Topology) LookupEcShards(vid needle.VolumeId)(locations *EcShardLocations, found bool) {
+func (t *Topology) LookupEcShards(vid needle.VolumeId) (locations *EcShardLocations, found bool) {
 	t.ecShardMapLock.RLock()
 	defer t.ecShardMapLock.RUnlock()
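For completeness, the signature fixed above follows the usual RWMutex read-path idiom: take the read lock, defer the unlock, then do a comma-ok map lookup. A minimal sketch with stand-in types, since the rest of the method body is not shown in this hunk:

package main

import (
	"fmt"
	"sync"
)

type EcShardLocations struct{ servers []string }

type Topology struct {
	ecShardMapLock sync.RWMutex
	ecShardMap     map[uint32]*EcShardLocations
}

func (t *Topology) LookupEcShards(vid uint32) (locations *EcShardLocations, found bool) {
	t.ecShardMapLock.RLock()
	defer t.ecShardMapLock.RUnlock() // released after the return values are set

	locations, found = t.ecShardMap[vid]
	return
}

func main() {
	t := &Topology{ecShardMap: map[uint32]*EcShardLocations{7: {servers: []string{"volA"}}}}
	if loc, ok := t.LookupEcShards(7); ok {
		fmt.Println(loc.servers)
	}
}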