volume server passes grpc option and master location to store

Chris Lu 2019-05-27 21:22:23 -07:00
parent ac1ba3b667
commit 03b9291e5d
6 changed files with 81 additions and 23 deletions
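In short, this change makes the storage layer a passive consumer of connection details: the volume server hands its gRPC dial option to storage.NewStore, and the heartbeat loop keeps store.MasterGrpcAddress pointed at the current master (clearing it when a heartbeat fails), so the store can later reach the master and other volume servers for EC shard lookups and reads. Below is a minimal, self-contained sketch of that pattern; the Store type, NewStore signature, and the master address are simplified stand-ins for illustration, not the SeaweedFS code itself.

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// Store is a simplified stand-in for storage.Store: it now carries the gRPC
// dial option and the current master's gRPC address, both supplied by the
// volume server instead of being discovered inside the storage layer.
type Store struct {
	MasterGrpcAddress string
	grpcDialOption    grpc.DialOption
}

// NewStore mirrors the reworked constructor: the dial option is passed in by the caller.
func NewStore(grpcDialOption grpc.DialOption) *Store {
	return &Store{grpcDialOption: grpcDialOption}
}

func main() {
	// The volume server constructs the store with its own client dial option.
	store := NewStore(grpc.WithInsecure())

	// The heartbeat loop records the current master's gRPC address before
	// heartbeating, and clears it again on failure so later EC shard lookups
	// never use a stale master location.
	store.MasterGrpcAddress = "localhost:19333" // placeholder address
	fmt.Println("store will query master at", store.MasterGrpcAddress)
	store.MasterGrpcAddress = "" // reset when the heartbeat fails
}

The storage layer never picks a master on its own: it only uses whatever address and dial option the volume server last handed it, which is exactly the wiring the hunks below introduce.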

View file

@@ -21,7 +21,7 @@ func (vs *VolumeServer) GetMaster() string {
}
func (vs *VolumeServer) heartbeat() {
glog.V(0).Infof("Volume server start with masters: %v", vs.MasterNodes)
glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes)
vs.store.SetDataCenter(vs.dataCenter)
vs.store.SetRack(vs.rack)
@@ -30,7 +30,7 @@ func (vs *VolumeServer) heartbeat() {
var err error
var newLeader string
for {
for _, master := range vs.MasterNodes {
for _, master := range vs.SeedMasterNodes {
if newLeader != "" {
master = newLeader
}
@@ -39,11 +39,13 @@ func (vs *VolumeServer) heartbeat() {
glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr)
continue
}
vs.store.MasterGrpcAddress = masterGrpcAddress
newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
glog.V(0).Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
newLeader = ""
vs.store.MasterGrpcAddress = ""
}
}
}

View file

@@ -11,14 +11,14 @@ import (
)
type VolumeServer struct {
MasterNodes []string
currentMaster string
pulseSeconds int
dataCenter string
rack string
store *storage.Store
guard *security.Guard
grpcDialOption grpc.DialOption
SeedMasterNodes []string
currentMaster string
pulseSeconds int
dataCenter string
rack string
store *storage.Store
guard *security.Guard
grpcDialOption grpc.DialOption
needleMapKind storage.NeedleMapType
FixJpgOrientation bool
@@ -54,8 +54,8 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"),
compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024,
}
vs.MasterNodes = masterNodes
vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
vs.SeedMasterNodes = masterNodes
vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec)

View file

@@ -2,6 +2,7 @@ package weed_server
import (
"bytes"
"context"
"io"
"mime"
"mime/multipart"
@@ -71,7 +71,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if hasVolume {
count, err = vs.store.ReadVolumeNeedle(volumeId, n)
} else if hasEcShard {
count, err = vs.store.ReadEcShardNeedle(volumeId, n)
count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n)
}
glog.V(4).Infoln("read bytes", count, "error", err)
if err != nil || count < 0 {

View file

@@ -29,7 +29,7 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
Counters *stats.ServerStats
}{
util.VERSION,
vs.MasterNodes,
vs.SeedMasterNodes,
vs.store.Status(),
ds,
infos,

View file

@@ -8,6 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"google.golang.org/grpc"
)
const (
@@ -18,6 +19,8 @@ const (
* A VolumeServer contains one Store
*/
type Store struct {
MasterGrpcAddress string
grpcDialOption grpc.DialOption
volumeSizeLimit uint64 //read from the master
Ip string
Port int
@@ -38,8 +41,8 @@ func (s *Store) String() (str string) {
return
}
func NewStore(port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, needleMapKind NeedleMapType) (s *Store) {
s = &Store{Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind}
func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, needleMapKind NeedleMapType) (s *Store) {
s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind}
s.Locations = make([]*DiskLocation, 0)
for i := 0; i < len(dirnames); i++ {
location := NewDiskLocation(dirnames[i], maxVolumeCounts[i])

View file

@@ -1,10 +1,14 @@
package storage
import (
"context"
"fmt"
"io"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
@@ -87,7 +91,7 @@ func (s *Store) HasEcShard(vid needle.VolumeId) (erasure_coding.EcVolumeShards,
return nil, false
}
func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, error) {
func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *needle.Needle) (int, error) {
for _, location := range s.Locations {
if localEcShards, found := location.HasEcShard(vid); found {
@@ -96,7 +100,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, err
}
bytes, err := s.ReadEcShardIntervals(vid, localEcShards, intervals)
bytes, err := s.readEcShardIntervals(ctx, vid, localEcShards, intervals)
if err != nil {
return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
}
@@ -114,9 +118,14 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
return 0, fmt.Errorf("ec shard %d not found", vid)
}
func (s *Store) ReadEcShardIntervals(vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, intervals []erasure_coding.Interval) (data []byte, err error) {
func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, intervals []erasure_coding.Interval) (data []byte, err error) {
shardLocations, err := s.cachedLookupEcShardLocations(ctx, vid)
if err != nil {
return nil, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterGrpcAddress, err)
}
for i, interval := range intervals {
if d, e := s.readOneEcShardInterval(vid, localEcShards, interval); e != nil {
if d, e := s.readOneEcShardInterval(ctx, vid, localEcShards, shardLocations, interval); e != nil {
return nil, e
} else {
if i == 0 {
@@ -129,7 +138,7 @@ func (s *Store) ReadEcShardIntervals(vid needle.VolumeId, localEcShards erasure_
return
}
func (s *Store) readOneEcShardInterval(vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, interval erasure_coding.Interval) (data []byte, err error) {
func (s *Store) readOneEcShardInterval(ctx context.Context, vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, shardLocations map[erasure_coding.ShardId]string, interval erasure_coding.Interval) (data []byte, err error) {
shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
data = make([]byte, interval.Size)
if shard, found := localEcShards.FindEcVolumeShard(shardId); found {
@@ -137,14 +146,57 @@ func (s *Store) readOneEcShardInterval(vid needle.VolumeId, localEcShards erasur
return
}
} else {
s.readOneRemoteEcShardInterval(vid, shardId, data, actualOffset)
sourceDataNode := shardLocations[shardId]
_, err = s.readOneRemoteEcShardInterval(ctx, sourceDataNode, vid, shardId, data, actualOffset)
if err != nil {
glog.V(1).Infof("failed to read from %s for ec shard %d.%d : %v", sourceDataNode, vid, shardId, err)
}
}
return
}
func (s *Store) readOneRemoteEcShardInterval(vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, err error) {
func (s *Store) cachedLookupEcShardLocations(ctx context.Context, vid needle.VolumeId) (shardLocations map[erasure_coding.ShardId]string, err error) {
// TODO: look up shard locations from the master over MasterGrpcAddress and cache them
return
}
func (s *Store) readOneRemoteEcShardInterval(ctx context.Context, sourceDataNode string, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, err error) {
err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
// copy data slice
shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
VolumeId: uint32(vid),
ShardId: uint32(shardId),
Offset: offset,
Size: int64(len(buf)),
})
if err != nil {
return fmt.Errorf("failed to start reading ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
}
for {
resp, receiveErr := shardReadClient.Recv()
if receiveErr == io.EOF {
break
}
if receiveErr != nil {
return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
}
copy(buf[n:n+len(resp.Data)], resp.Data)
n += len(resp.Data)
}
return nil
})
if err != nil {
return 0, fmt.Errorf("read ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
}
return
}
func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, sourceDataNode string, vid needle.VolumeId, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, err error) {
glog.V(1).Infof("recover ec shard %d.%d from other locations", vid, shardIdToRecover)
// TODO add recovering
return 0, fmt.Errorf("recover is not implemented yet")
}