seaweedfs/weed/server/master_grpc_server_volume.go

package weed_server

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"sync"
	"time"

	"github.com/seaweedfs/raft"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/security"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/topology"
)

// ProcessGrowRequest runs a background goroutine that consumes volume grow
// requests from ms.vgCh, drops duplicates of requests already in flight, and
// grows volumes automatically while this master is the leader.
func (ms *MasterServer) ProcessGrowRequest() {
	go func() {
		filter := sync.Map{}
		for {
			req, ok := <-ms.vgCh
			if !ok {
				break
			}

			if !ms.Topo.IsLeader() {
				// discard buffered requests
				time.Sleep(time.Second * 1)
				continue
			}

			// filter out identical requests being processed
			found := false
			filter.Range(func(k, v interface{}) bool {
				if reflect.DeepEqual(k, req) {
					found = true
				}
				return !found
			})

			option := req.Option
			vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)

			// not atomic but it's okay
			if !found && vl.ShouldGrowVolumes(option) {
				filter.Store(req, nil)
				// we have lock called inside vg
				go func() {
					glog.V(1).Infoln("starting automatic volume grow")
					start := time.Now()
					newVidLocations, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count)
					glog.V(1).Infoln("finished automatic volume grow, cost", time.Since(start))
					if err == nil {
						for _, newVidLocation := range newVidLocations {
							ms.broadcastToClients(&master_pb.KeepConnectedResponse{VolumeLocation: newVidLocation})
						}
					}

					vl.DoneGrowRequest()

					if req.ErrCh != nil {
						req.ErrCh <- err
						close(req.ErrCh)
					}

					filter.Delete(req)
				}()
			} else {
				glog.V(4).Infoln("discard volume grow request")
			}
		}
	}()
}
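
// Example (illustrative sketch, not part of the upstream file): a caller with
// access to the MasterServer could enqueue a grow request and wait for the
// outcome through ErrCh, which the goroutine above writes to and then closes:
//
//	errCh := make(chan error, 1)
//	ms.vgCh <- &topology.VolumeGrowRequest{
//		Option: option,
//		Count:  1,
//		ErrCh:  errCh,
//	}
//	if err := <-errCh; err != nil {
//		glog.Errorf("volume grow failed: %v", err)
//	}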

// LookupVolume resolves volume ids or file ids to their current locations.
// Entries that contain a comma are treated as file ids and additionally get a
// JWT for the volume server.
func (ms *MasterServer) LookupVolume(ctx context.Context, req *master_pb.LookupVolumeRequest) (*master_pb.LookupVolumeResponse, error) {
	resp := &master_pb.LookupVolumeResponse{}

	volumeLocations := ms.lookupVolumeId(req.VolumeOrFileIds, req.Collection)

	for _, result := range volumeLocations {
		var locations []*master_pb.Location
		for _, loc := range result.Locations {
			locations = append(locations, &master_pb.Location{
				Url:        loc.Url,
				PublicUrl:  loc.PublicUrl,
				DataCenter: loc.DataCenter,
			})
		}

		var auth string
		if strings.Contains(result.VolumeOrFileId, ",") { // this is a file id
			auth = string(security.GenJwtForVolumeServer(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, result.VolumeOrFileId))
		}

		resp.VolumeIdLocations = append(resp.VolumeIdLocations, &master_pb.LookupVolumeResponse_VolumeIdLocation{
			VolumeOrFileId: result.VolumeOrFileId,
			Locations:      locations,
			Error:          result.Error,
			Auth:           auth,
		})
	}

	return resp, nil
}
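
// Example (sketch, assuming the generated master_pb gRPC client): a lookup can
// mix bare volume ids and full file ids; only the comma-separated file ids
// (e.g. "7,01637037d6", a hypothetical value) come back with an Auth token:
//
//	resp, err := client.LookupVolume(ctx, &master_pb.LookupVolumeRequest{
//		VolumeOrFileIds: []string{"3", "7,01637037d6"},
//	})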

// Assign allocates file ids on a writable volume that matches the requested
// replication, TTL, and disk type, triggering volume growth first if the
// layout is running out of writable volumes.
func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest) (*master_pb.AssignResponse, error) {

	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	if req.Count == 0 {
		req.Count = 1
	}

	if req.Replication == "" {
		req.Replication = ms.option.DefaultReplicaPlacement
	}
	replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication)
	if err != nil {
		return nil, err
	}
	ttl, err := needle.ReadTTL(req.Ttl)
	if err != nil {
		return nil, err
	}
	diskType := types.ToDiskType(req.DiskType)

	option := &topology.VolumeGrowOption{
		Collection:         req.Collection,
		ReplicaPlacement:   replicaPlacement,
		Ttl:                ttl,
		DiskType:           diskType,
		Preallocate:        ms.preallocateSize,
		DataCenter:         req.DataCenter,
		Rack:               req.Rack,
		DataNode:           req.DataNode,
		MemoryMapMaxSizeMb: req.MemoryMapMaxSizeMb,
	}

	vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)

	if !vl.HasGrowRequest() && vl.ShouldGrowVolumes(option) {
		if ms.Topo.AvailableSpaceFor(option) <= 0 {
			return nil, fmt.Errorf("no free volumes left for %s", option.String())
		}
		vl.AddGrowRequest()
		ms.vgCh <- &topology.VolumeGrowRequest{
			Option: option,
			Count:  int(req.WritableVolumeCount),
		}
	}

	var (
		lastErr    error
		maxTimeout = time.Second * 10
		startTime  = time.Now()
	)

	for time.Since(startTime) < maxTimeout {
		fid, count, dnList, err := ms.Topo.PickForWrite(req.Count, option)
		if err == nil {
			dn := dnList.Head()
			var replicas []*master_pb.Location
			for _, r := range dnList.Rest() {
				replicas = append(replicas, &master_pb.Location{
					Url:        r.Url(),
					PublicUrl:  r.PublicUrl,
					GrpcPort:   uint32(r.GrpcPort),
					DataCenter: r.GetDataCenterId(),
				})
			}
			return &master_pb.AssignResponse{
				Fid: fid,
				Location: &master_pb.Location{
					Url:        dn.Url(),
					PublicUrl:  dn.PublicUrl,
					GrpcPort:   uint32(dn.GrpcPort),
					DataCenter: dn.GetDataCenterId(),
				},
				Count:    count,
				Auth:     string(security.GenJwtForVolumeServer(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
				Replicas: replicas,
			}, nil
		}
		// glog.V(4).Infoln("waiting for volume growing...")
		lastErr = err
		time.Sleep(200 * time.Millisecond)
	}

	return nil, lastErr
}
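
// Example (sketch, assuming a *grpc.ClientConn to the master and the generated
// master_pb.SeaweedClient): a writer requests one file id, then uploads to the
// returned location:
//
//	client := master_pb.NewSeaweedClient(conn)
//	resp, err := client.Assign(ctx, &master_pb.AssignRequest{
//		Count:       1,
//		Replication: "001",
//	})
//	if err == nil {
//		// POST the file content to resp.Location.Url under resp.Fid, sending
//		// resp.Auth as the JWT if the volume server enforces it
//	}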

// Statistics reports aggregate usage for the volume layout that matches the
// requested collection, replication, TTL, and disk type.
func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.StatisticsRequest) (*master_pb.StatisticsResponse, error) {

	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	if req.Replication == "" {
		req.Replication = ms.option.DefaultReplicaPlacement
	}
	replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication)
	if err != nil {
		return nil, err
	}
	ttl, err := needle.ReadTTL(req.Ttl)
	if err != nil {
		return nil, err
	}

	volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl, types.ToDiskType(req.DiskType))
	stats := volumeLayout.Stats()

	totalSize := ms.Topo.GetDiskUsages().GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024

	resp := &master_pb.StatisticsResponse{
		TotalSize: uint64(totalSize),
		UsedSize:  stats.UsedSize,
		FileCount: stats.FileCount,
	}

	return resp, nil
}
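
// Arithmetic sketch for TotalSize above: with a cluster-wide max volume count
// of 100 and the default volume size limit of 30000 MB, TotalSize reports
// 100 * 30000 * 1024 * 1024 bytes, roughly 2.9 TiB of raw capacity.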

// VolumeList returns the full topology tree plus the configured volume size
// limit.
func (ms *MasterServer) VolumeList(ctx context.Context, req *master_pb.VolumeListRequest) (*master_pb.VolumeListResponse, error) {

	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VolumeListResponse{
		TopologyInfo:      ms.Topo.ToTopologyInfo(),
		VolumeSizeLimitMb: uint64(ms.option.VolumeSizeLimitMB),
	}

	return resp, nil
}

// LookupEcVolume resolves the shard locations of an erasure-coded volume.
func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.LookupEcVolumeRequest) (*master_pb.LookupEcVolumeResponse, error) {

	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.LookupEcVolumeResponse{}

	ecLocations, found := ms.Topo.LookupEcShards(needle.VolumeId(req.VolumeId))
	if !found {
		return resp, fmt.Errorf("ec volume %d not found", req.VolumeId)
	}

	resp.VolumeId = req.VolumeId
	for shardId, shardLocations := range ecLocations.Locations {
		var locations []*master_pb.Location
		for _, dn := range shardLocations {
			locations = append(locations, &master_pb.Location{
				Url:        string(dn.Id()),
				PublicUrl:  dn.PublicUrl,
				DataCenter: dn.GetDataCenterId(),
			})
		}
		resp.ShardIdLocations = append(resp.ShardIdLocations, &master_pb.LookupEcVolumeResponse_EcShardIdLocation{
			ShardId:   uint32(shardId),
			Locations: locations,
		})
	}

	return resp, nil
}
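
// Example (sketch): an EC-aware reader could walk the response to see which
// server holds each shard:
//
//	resp, err := client.LookupEcVolume(ctx, &master_pb.LookupEcVolumeRequest{VolumeId: 9})
//	if err == nil {
//		for _, sl := range resp.ShardIdLocations {
//			for _, loc := range sl.Locations {
//				glog.V(2).Infof("ec shard %d is on %s", sl.ShardId, loc.Url)
//			}
//		}
//	}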

// VacuumVolume triggers a compaction pass that reclaims deleted space on the
// matching volumes.
func (ms *MasterServer) VacuumVolume(ctx context.Context, req *master_pb.VacuumVolumeRequest) (*master_pb.VacuumVolumeResponse, error) {

	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VacuumVolumeResponse{}

	ms.Topo.Vacuum(ms.grpcDialOption, float64(req.GarbageThreshold), req.VolumeId, req.Collection, ms.preallocateSize)

	return resp, nil
}

// DisableVacuum stops the master from scheduling vacuum passes.
func (ms *MasterServer) DisableVacuum(ctx context.Context, req *master_pb.DisableVacuumRequest) (*master_pb.DisableVacuumResponse, error) {
	ms.Topo.DisableVacuum()
	resp := &master_pb.DisableVacuumResponse{}
	return resp, nil
}

// EnableVacuum re-enables vacuum scheduling.
func (ms *MasterServer) EnableVacuum(ctx context.Context, req *master_pb.EnableVacuumRequest) (*master_pb.EnableVacuumResponse, error) {
	ms.Topo.EnableVacuum()
	resp := &master_pb.EnableVacuumResponse{}
	return resp, nil
}

// VolumeMarkReadonly marks a volume replica on one specific data node as
// read-only, or writable again when req.IsReadonly is false.
func (ms *MasterServer) VolumeMarkReadonly(ctx context.Context, req *master_pb.VolumeMarkReadonlyRequest) (*master_pb.VolumeMarkReadonlyResponse, error) {
	if !ms.Topo.IsLeader() {
		return nil, raft.NotLeaderError
	}

	resp := &master_pb.VolumeMarkReadonlyResponse{}

	replicaPlacement, _ := super_block.NewReplicaPlacementFromByte(byte(req.ReplicaPlacement))
	vl := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, needle.LoadTTLFromUint32(req.Ttl), types.ToDiskType(req.DiskType))
	dataNodes := ms.Topo.Lookup(req.Collection, needle.VolumeId(req.VolumeId))
	for _, dn := range dataNodes {
		if dn.Ip == req.Ip && dn.Port == int(req.Port) {
			if req.IsReadonly {
				vl.SetVolumeUnavailable(dn, needle.VolumeId(req.VolumeId))
			} else {
				vl.SetVolumeAvailable(dn, needle.VolumeId(req.VolumeId), false)
			}
		}
	}

	return resp, nil
}