mirror of https://github.com/seaweedfs/seaweedfs.git
volume server passes grpc option and master location to store
parent ac1ba3b667
commit 03b9291e5d
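In short: the volume server now hands its gRPC dial option to the storage layer at construction time, and keeps the store's MasterGrpcAddress in sync with whichever master it is currently heartbeating to. That lets the store itself make gRPC calls, looking up EC shard locations via the master and streaming shard data from peer volume servers. Below is a minimal, self-contained sketch of the construction-time injection, using illustrative names rather than the seaweedfs API (in the real code the option comes from security.LoadClientTLS(viper.Sub("grpc"), "volume"), as the diff shows):

package main

import (
	"log"

	"google.golang.org/grpc"
)

// Store mirrors the two fields this commit adds to storage.Store.
type Store struct {
	MasterGrpcAddress string          // refreshed by the volume server's heartbeat loop
	grpcDialOption    grpc.DialOption // shared credentials/TLS for all outgoing dials
}

// NewStore takes the dial option up front, like the new storage.NewStore.
func NewStore(grpcDialOption grpc.DialOption) *Store {
	return &Store{grpcDialOption: grpcDialOption}
}

// dialPeer shows why the option is stored: every later gRPC call to a master
// or peer volume server reuses the same credentials without re-reading config.
func (s *Store) dialPeer(addr string) (*grpc.ClientConn, error) {
	return grpc.Dial(addr, s.grpcDialOption)
}

func main() {
	s := NewStore(grpc.WithInsecure()) // stand-in for security.LoadClientTLS(...)
	s.MasterGrpcAddress = "localhost:19333"
	if _, err := s.dialPeer(s.MasterGrpcAddress); err != nil {
		log.Fatal(err)
	}
}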
@@ -21,7 +21,7 @@ func (vs *VolumeServer) GetMaster() string {
 }
 
 func (vs *VolumeServer) heartbeat() {
 
-	glog.V(0).Infof("Volume server start with masters: %v", vs.MasterNodes)
+	glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes)
 	vs.store.SetDataCenter(vs.dataCenter)
 	vs.store.SetRack(vs.rack)
 
@@ -30,7 +30,7 @@ func (vs *VolumeServer) heartbeat() {
 	var err error
 	var newLeader string
 	for {
-		for _, master := range vs.MasterNodes {
+		for _, master := range vs.SeedMasterNodes {
 			if newLeader != "" {
 				master = newLeader
 			}
@@ -39,11 +39,13 @@ func (vs *VolumeServer) heartbeat() {
 				glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr)
 				continue
 			}
+			vs.store.MasterGrpcAddress = masterGrpcAddress
 			newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
 			if err != nil {
 				glog.V(0).Infof("heartbeat error: %v", err)
 				time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
 				newLeader = ""
+				vs.store.MasterGrpcAddress = ""
 			}
 		}
 	}
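The heartbeat loop above is also where the store's view of the master is maintained: MasterGrpcAddress is assigned just before each doHeartbeat attempt and reset to "" when a heartbeat fails, so the store never keeps pointing at a dead or demoted master. A self-contained sketch of that seed-list/leader-redirect pattern, with doHeartbeat stubbed out (the real one blocks on a gRPC stream and returns the leader's address when redirected):

package main

import (
	"fmt"
	"time"
)

// doHeartbeat stands in for VolumeServer.doHeartbeat: it returns a non-empty
// newLeader when the contacted node redirects us, or an error on failure.
func doHeartbeat(master string) (newLeader string, err error) {
	switch master {
	case "m1:9333":
		return "m2:9333", nil // m1 is a follower: redirect to the leader
	case "m2:9333":
		return "", fmt.Errorf("simulated connection drop")
	}
	return "", nil
}

func main() {
	seedMasterNodes := []string{"m1:9333", "m2:9333"} // like vs.SeedMasterNodes
	var newLeader string
	for round := 0; round < 2; round++ { // the real loop is `for {`
		for _, master := range seedMasterNodes {
			if newLeader != "" {
				master = newLeader // stick with the known leader
			}
			leader, err := doHeartbeat(master)
			if err != nil {
				fmt.Println("heartbeat error:", err)
				time.Sleep(10 * time.Millisecond) // pulseSeconds in the real code
				newLeader = ""                    // ...and store.MasterGrpcAddress = ""
				continue
			}
			if leader != "" {
				newLeader = leader
			}
		}
	}
}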
@@ -11,14 +11,14 @@ import (
 )
 
 type VolumeServer struct {
-	MasterNodes     []string
+	SeedMasterNodes []string
 	currentMaster   string
 	pulseSeconds    int
 	dataCenter      string
 	rack            string
 	store           *storage.Store
 	guard           *security.Guard
 	grpcDialOption  grpc.DialOption
 
 	needleMapKind     storage.NeedleMapType
 	FixJpgOrientation bool
@@ -54,8 +54,8 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 		grpcDialOption:          security.LoadClientTLS(viper.Sub("grpc"), "volume"),
 		compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024,
 	}
-	vs.MasterNodes = masterNodes
-	vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
+	vs.SeedMasterNodes = masterNodes
+	vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
 
 	vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec)
 
@@ -2,6 +2,7 @@ package weed_server
 
 import (
 	"bytes"
+	"context"
 	"io"
 	"mime"
 	"mime/multipart"
@@ -71,7 +72,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	if hasVolume {
 		count, err = vs.store.ReadVolumeNeedle(volumeId, n)
 	} else if hasEcShard {
-		count, err = vs.store.ReadEcShardNeedle(volumeId, n)
+		count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n)
 	}
 	glog.V(4).Infoln("read bytes", count, "error", err)
 	if err != nil || count < 0 {
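This handler change is the entry point for the new ctx parameter: ReadEcShardNeedle now takes a context.Context so the remote shard reads further down the call chain can eventually observe cancellation. The commit passes context.Background(); a request-scoped variant would pass r.Context() instead. A minimal sketch of the difference, with readEcShardNeedle standing in for the store method:

package main

import (
	"context"
	"fmt"
	"net/http"
)

// readEcShardNeedle is a stand-in for store.ReadEcShardNeedle: it only checks
// for cancellation, which is what threading ctx through the call chain buys.
func readEcShardNeedle(ctx context.Context) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	default:
		return 42, nil
	}
}

func getOrHeadHandler(w http.ResponseWriter, r *http.Request) {
	// The commit uses context.Background(); r.Context() would tie the remote
	// EC reads to the lifetime of this HTTP request instead.
	count, err := readEcShardNeedle(context.Background())
	fmt.Fprintln(w, "read bytes", count, "error", err)
}

func main() {
	http.HandleFunc("/", getOrHeadHandler)
	http.ListenAndServe(":8080", nil)
}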
@@ -29,7 +29,7 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
 		Counters *stats.ServerStats
 	}{
 		util.VERSION,
-		vs.MasterNodes,
+		vs.SeedMasterNodes,
 		vs.store.Status(),
 		ds,
 		infos,
@@ -8,6 +8,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
+	"google.golang.org/grpc"
 )
 
 const (
@@ -18,6 +19,8 @@ const (
 * A VolumeServer contains one Store
 */
 type Store struct {
+	MasterGrpcAddress string
+	grpcDialOption    grpc.DialOption
 	volumeSizeLimit uint64 //read from the master
 	Ip              string
 	Port            int
@@ -38,8 +41,8 @@ func (s *Store) String() (str string) {
 	return
 }
 
-func NewStore(port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, needleMapKind NeedleMapType) (s *Store) {
-	s = &Store{Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind}
+func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, needleMapKind NeedleMapType) (s *Store) {
+	s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapType: needleMapKind}
 	s.Locations = make([]*DiskLocation, 0)
 	for i := 0; i < len(dirnames); i++ {
 		location := NewDiskLocation(dirnames[i], maxVolumeCounts[i])
@@ -1,10 +1,14 @@
 package storage
 
 import (
+	"context"
 	"fmt"
+	"io"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
@@ -87,7 +91,7 @@ func (s *Store) HasEcShard(vid needle.VolumeId) (erasure_coding.EcVolumeShards,
 	return nil, false
 }
 
-func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, error) {
+func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *needle.Needle) (int, error) {
 	for _, location := range s.Locations {
 		if localEcShards, found := location.HasEcShard(vid); found {
 
@@ -96,7 +100,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
 				return 0, err
 			}
 
-			bytes, err := s.ReadEcShardIntervals(vid, localEcShards, intervals)
+			bytes, err := s.readEcShardIntervals(ctx, vid, localEcShards, intervals)
 			if err != nil {
 				return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
 			}
@@ -114,9 +118,14 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
 	return 0, fmt.Errorf("ec shard %d not found", vid)
 }
 
-func (s *Store) ReadEcShardIntervals(vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, intervals []erasure_coding.Interval) (data []byte, err error) {
+func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, intervals []erasure_coding.Interval) (data []byte, err error) {
+	shardLocations, err := s.cachedLookupEcShardLocations(ctx, vid)
+	if err != nil {
+		return nil, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterGrpcAddress, err)
+	}
+
 	for i, interval := range intervals {
-		if d, e := s.readOneEcShardInterval(vid, localEcShards, interval); e != nil {
+		if d, e := s.readOneEcShardInterval(ctx, vid, localEcShards, shardLocations, interval); e != nil {
 			return nil, e
 		} else {
 			if i == 0 {
@@ -129,7 +138,7 @@ func (s *Store) ReadEcShardIntervals(vid needle.VolumeId, localEcShards erasure_
 	return
 }
 
-func (s *Store) readOneEcShardInterval(vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, interval erasure_coding.Interval) (data []byte, err error) {
+func (s *Store) readOneEcShardInterval(ctx context.Context, vid needle.VolumeId, localEcShards erasure_coding.EcVolumeShards, shardLocations map[erasure_coding.ShardId]string, interval erasure_coding.Interval) (data []byte, err error) {
 	shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize)
 	data = make([]byte, interval.Size)
 	if shard, found := localEcShards.FindEcVolumeShard(shardId); found {
@@ -137,14 +146,57 @@ func (s *Store) readOneEcShardInterval(vid needle.VolumeId, localEcShards erasur
 			return
 		}
 	} else {
-		s.readOneRemoteEcShardInterval(vid, shardId, data, actualOffset)
+		sourceDataNode := shardLocations[shardId]
+		_, err = s.readOneRemoteEcShardInterval(ctx, sourceDataNode, vid, shardId, data, actualOffset)
+		if err != nil {
+			glog.V(1).Infof("failed to read from %s for ec shard %d.%d : %v", sourceDataNode, vid, shardId, err)
+		}
 	}
 	return
 }
 
-func (s *Store) readOneRemoteEcShardInterval(vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, err error) {
+func (s *Store) cachedLookupEcShardLocations(ctx context.Context, vid needle.VolumeId) (shardLocations map[erasure_coding.ShardId]string, err error) {
+	return
+}
+
+func (s *Store) readOneRemoteEcShardInterval(ctx context.Context, sourceDataNode string, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, err error) {
+
+	err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
+
+		// copy data slice
+		shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
+			VolumeId: uint32(vid),
+			ShardId:  uint32(shardId),
+			Offset:   offset,
+			Size:     int64(len(buf)),
+		})
+		if err != nil {
+			return fmt.Errorf("failed to start reading ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+		}
+
+		for {
+			resp, receiveErr := shardReadClient.Recv()
+			if receiveErr == io.EOF {
+				break
+			}
+			if receiveErr != nil {
+				return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+			}
+			copy(buf[n:n+len(resp.Data)], resp.Data)
+			n += len(resp.Data)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return 0, fmt.Errorf("read ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+	}
+
 	return
 }
+
+func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, string, vid needle.VolumeId, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, err error) {
+	glog.V(1).Infof("recover ec shard %d.%d from other locations", vid, shardIdToRecover)
+	// TODO add recovering
+	return 0, fmt.Errorf("recover is not implemented yet")
+}
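The new remote read path is a gRPC server-streaming call: VolumeEcShardRead returns a stream, and the client accumulates chunks into buf until Recv reports io.EOF. Note that in this commit cachedLookupEcShardLocations is still a stub (it returns a nil map) and recoverOneRemoteEcShardInterval is a TODO, so reads that need a remote shard cannot succeed yet; the plumbing comes first. A self-contained sketch of the Recv loop, with a fake stream standing in for the generated volume_server_pb client:

package main

import (
	"fmt"
	"io"
)

// chunk mimics volume_server_pb.VolumeEcShardReadResponse: one piece of the
// requested shard interval.
type chunk struct{ Data []byte }

// fakeStream replays chunks and then io.EOF, like a gRPC server stream.
type fakeStream struct{ chunks []chunk }

func (s *fakeStream) Recv() (*chunk, error) {
	if len(s.chunks) == 0 {
		return nil, io.EOF
	}
	c := s.chunks[0]
	s.chunks = s.chunks[1:]
	return &c, nil
}

// readInterval is the same accumulation loop as readOneRemoteEcShardInterval:
// copy each received chunk into buf at the running offset n.
func readInterval(stream *fakeStream, buf []byte) (n int, err error) {
	for {
		resp, recvErr := stream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			return n, fmt.Errorf("receiving: %v", recvErr)
		}
		copy(buf[n:n+len(resp.Data)], resp.Data)
		n += len(resp.Data)
	}
	return n, nil
}

func main() {
	buf := make([]byte, 8)
	stream := &fakeStream{chunks: []chunk{{Data: []byte("ec s")}, {Data: []byte("hard")}}}
	n, err := readInterval(stream, buf)
	fmt.Printf("n=%d err=%v data=%q\n", n, err, buf[:n]) // n=8 err=<nil> data="ec shard"
}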