2014-04-13 08:29:52 +00:00
|
|
|
package topology
|
2012-08-27 20:52:02 +00:00
|
|
|
|
|
|
|
import (
|
2021-02-19 03:10:20 +00:00
|
|
|
"encoding/json"
|
2014-03-13 19:13:39 +00:00
|
|
|
"fmt"
|
2013-02-27 06:54:22 +00:00
|
|
|
"math/rand"
|
2013-01-17 08:56:56 +00:00
|
|
|
"sync"
|
2014-10-26 18:34:55 +00:00
|
|
|
|
2019-04-19 04:43:36 +00:00
|
|
|
"google.golang.org/grpc"
|
|
|
|
|
2019-09-12 13:18:21 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage"
|
2021-05-06 10:46:14 +00:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
2012-08-27 20:52:02 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
/*
|
|
|
|
This package is created to resolve these replica placement issues:
|
|
|
|
1. growth factor for each replica level, e.g., add 10 volumes for 1 copy, 20 volumes for 2 copies, 30 volumes for 3 copies
|
|
|
|
2. in time of tight storage, how to reduce replica level
|
|
|
|
3. optimizing for hot data on faster disk, cold data on cheaper storage
|
|
|
|
4. volume allocation for each bucket
|
|
|
|
*/
|
|
|
|
|
2021-05-06 10:46:14 +00:00
|
|
|
// VolumeGrowRequest describes one pending request to grow the topology:
// create Count new volumes that satisfy Option, and report the outcome
// of the attempt on ErrCh.
type VolumeGrowRequest struct {
	// Option carries the placement constraints for the new volumes.
	Option *VolumeGrowOption
	// Count is the number of volumes requested.
	Count int
	// ErrCh is the channel used to report the growth attempt's error,
	// if any; presumably nil is sent on success — confirm with the
	// consumer of these requests.
	ErrCh chan error
}
|
|
|
|
|
2014-04-13 08:29:52 +00:00
|
|
|
// VolumeGrowOption collects the constraints used when picking locations
// for new volumes; its JSON form is what String() reports.
type VolumeGrowOption struct {
	// Collection the new volumes belong to; empty for the default collection.
	Collection string `json:"collection,omitempty"`
	// ReplicaPlacement defines how many copies go to different data
	// centers, racks, and nodes (DiffDataCenterCount / DiffRackCount /
	// SameRackCount).
	ReplicaPlacement *super_block.ReplicaPlacement `json:"replication,omitempty"`
	// Ttl is the time-to-live applied to the new volumes.
	Ttl *needle.TTL `json:"ttl,omitempty"`
	// DiskType selects which kind of disk the volumes should be placed on.
	DiskType types.DiskType `json:"disk,omitempty"`
	// Preallocate is the preallocated size for each volume — assumed to
	// be bytes; TODO confirm against the volume allocation code.
	Preallocate int64 `json:"preallocate,omitempty"`
	// DataCenter, when non-empty, pins the main replica to this data center.
	DataCenter string `json:"dataCenter,omitempty"`
	// Rack, when non-empty, pins the main replica to this rack.
	Rack string `json:"rack,omitempty"`
	// DataNode, when non-empty, pins the main replica to this data node.
	DataNode string `json:"dataNode,omitempty"`
	// MemoryMapMaxSizeMb limits memory-mapped access for the volumes (MB).
	MemoryMapMaxSizeMb uint32 `json:"memoryMapMaxSizeMb,omitempty"`
}
|
|
|
|
|
2012-08-27 20:52:02 +00:00
|
|
|
// VolumeGrowth performs volume creation against a Topology.
// The zero value is ready to use.
type VolumeGrowth struct {
	// accessLock serializes GrowByCountAndType so that only one growth
	// operation runs at a time.
	accessLock sync.Mutex
}
|
|
|
|
|
2015-01-08 07:54:50 +00:00
|
|
|
func (o *VolumeGrowOption) String() string {
|
2021-02-19 03:10:20 +00:00
|
|
|
blob, _ := json.Marshal(o)
|
|
|
|
return string(blob)
|
2015-01-08 07:54:50 +00:00
|
|
|
}
|
|
|
|
|
2021-05-06 10:46:14 +00:00
|
|
|
func (o *VolumeGrowOption) Threshold() float64 {
|
|
|
|
v := util.GetViper()
|
|
|
|
return v.GetFloat64("master.volume_growth.threshold")
|
|
|
|
}
|
|
|
|
|
2012-09-17 00:31:15 +00:00
|
|
|
func NewDefaultVolumeGrowth() *VolumeGrowth {
|
2014-03-03 06:16:54 +00:00
|
|
|
return &VolumeGrowth{}
|
2012-09-17 00:31:15 +00:00
|
|
|
}
|
|
|
|
|
2014-03-03 06:16:54 +00:00
|
|
|
// one replication type may need rp.GetCopyCount() actual volumes
|
|
|
|
// given copyCount, how many logical volumes to create
|
|
|
|
func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
|
2020-02-04 02:15:16 +00:00
|
|
|
v := util.GetViper()
|
2014-03-03 06:16:54 +00:00
|
|
|
switch copyCount {
|
|
|
|
case 1:
|
2020-02-04 02:15:16 +00:00
|
|
|
count = v.GetInt("master.volume_growth.copy_1")
|
2014-03-03 06:16:54 +00:00
|
|
|
case 2:
|
2020-02-04 02:15:16 +00:00
|
|
|
count = v.GetInt("master.volume_growth.copy_2")
|
2014-03-03 06:16:54 +00:00
|
|
|
case 3:
|
2020-02-04 02:15:16 +00:00
|
|
|
count = v.GetInt("master.volume_growth.copy_3")
|
2013-07-24 17:31:51 +00:00
|
|
|
default:
|
2020-02-04 02:15:16 +00:00
|
|
|
count = v.GetInt("master.volume_growth.copy_other")
|
2012-09-17 00:31:15 +00:00
|
|
|
}
|
2014-03-03 06:16:54 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-11-10 12:11:03 +00:00
|
|
|
func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology, targetCount int) (count int, err error) {
|
|
|
|
if targetCount == 0 {
|
|
|
|
targetCount = vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount())
|
|
|
|
}
|
|
|
|
count, err = vg.GrowByCountAndType(grpcDialOption, targetCount, option, topo)
|
2014-04-13 08:29:52 +00:00
|
|
|
if count > 0 && count%option.ReplicaPlacement.GetCopyCount() == 0 {
|
2013-07-24 17:31:51 +00:00
|
|
|
return count, nil
|
|
|
|
}
|
|
|
|
return count, err
|
2012-09-17 00:31:15 +00:00
|
|
|
}
|
2019-02-18 20:11:52 +00:00
|
|
|
func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount int, option *VolumeGrowOption, topo *Topology) (counter int, err error) {
|
2013-01-17 08:56:56 +00:00
|
|
|
vg.accessLock.Lock()
|
|
|
|
defer vg.accessLock.Unlock()
|
2012-11-13 20:13:40 +00:00
|
|
|
|
2014-03-03 06:16:54 +00:00
|
|
|
for i := 0; i < targetCount; i++ {
|
2019-02-18 20:11:52 +00:00
|
|
|
if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
|
2014-03-03 06:16:54 +00:00
|
|
|
counter += c
|
|
|
|
} else {
|
2020-03-23 01:32:49 +00:00
|
|
|
glog.V(0).Infof("create %d volume, created %d: %v", targetCount, counter, e)
|
2014-03-03 06:16:54 +00:00
|
|
|
return counter, e
|
2012-09-30 09:20:33 +00:00
|
|
|
}
|
2014-03-03 06:16:54 +00:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-02-18 20:11:52 +00:00
|
|
|
func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topology, option *VolumeGrowOption) (int, error) {
|
2014-04-13 08:29:52 +00:00
|
|
|
servers, e := vg.findEmptySlotsForOneVolume(topo, option)
|
2014-03-03 06:16:54 +00:00
|
|
|
if e != nil {
|
|
|
|
return 0, e
|
|
|
|
}
|
2019-02-25 02:47:41 +00:00
|
|
|
vid, raftErr := topo.NextVolumeId()
|
|
|
|
if raftErr != nil {
|
|
|
|
return 0, raftErr
|
|
|
|
}
|
2019-02-18 20:11:52 +00:00
|
|
|
err := vg.grow(grpcDialOption, topo, vid, option, servers...)
|
2014-03-03 06:16:54 +00:00
|
|
|
return len(servers), err
|
|
|
|
}
|
|
|
|
|
2014-04-13 10:06:58 +00:00
|
|
|
// 1. find the main data node
// 1.1 collect all data nodes that have at least 1 free slot
// 1.2 collect all racks that have rp.SameRackCount+1 such data nodes
// 1.3 collect all data centers that have DiffRackCount+rp.SameRackCount+1
// 2. find rest data nodes
|
2014-04-13 08:29:52 +00:00
|
|
|
// findEmptySlotsForOneVolume selects the data nodes that will each host one
// replica of a new volume, honoring the replica placement and any preferred
// data center / rack / data node in the option. On success it returns
// rp.DiffDataCenterCount + rp.DiffRackCount + rp.SameRackCount + 1 servers:
// one main server, SameRackCount peers in the same rack, one per other rack,
// and one per other data center.
func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *VolumeGrowOption) (servers []*DataNode, err error) {
	//find main datacenter and other data centers
	rp := option.ReplicaPlacement
	mainDataCenter, otherDataCenters, dc_err := topo.PickNodesByWeight(rp.DiffDataCenterCount+1, option, func(node Node) error {
		// Candidate data centers must match the preferred one when set.
		if option.DataCenter != "" && node.IsDataCenter() && node.Id() != NodeId(option.DataCenter) {
			return fmt.Errorf("Not matching preferred data center:%s", option.DataCenter)
		}
		// Quick reject: not enough racks to spread the replicas over.
		if len(node.Children()) < rp.DiffRackCount+1 {
			return fmt.Errorf("Only has %d racks, not enough for %d.", len(node.Children()), rp.DiffRackCount+1)
		}
		// Quick reject: not enough total free slots for all replicas
		// that land inside this data center.
		if node.AvailableSpaceFor(option) < int64(rp.DiffRackCount+rp.SameRackCount+1) {
			return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), rp.DiffRackCount+rp.SameRackCount+1)
		}
		// Count racks that could act as the main rack, i.e. racks with
		// at least SameRackCount+1 data nodes that each have a free slot.
		possibleRacksCount := 0
		for _, rack := range node.Children() {
			possibleDataNodesCount := 0
			for _, n := range rack.Children() {
				if n.AvailableSpaceFor(option) >= 1 {
					possibleDataNodesCount++
				}
			}
			if possibleDataNodesCount >= rp.SameRackCount+1 {
				possibleRacksCount++
			}
		}
		if possibleRacksCount < rp.DiffRackCount+1 {
			return fmt.Errorf("Only has %d racks with more than %d free data nodes, not enough for %d.", possibleRacksCount, rp.SameRackCount+1, rp.DiffRackCount+1)
		}
		return nil
	})
	if dc_err != nil {
		return nil, dc_err
	}

	//find main rack and other racks
	mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).PickNodesByWeight(rp.DiffRackCount+1, option, func(node Node) error {
		// Candidate racks must match the preferred one when set.
		if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) {
			return fmt.Errorf("Not matching preferred rack:%s", option.Rack)
		}
		// The main rack must hold SameRackCount+1 replicas in total.
		if node.AvailableSpaceFor(option) < int64(rp.SameRackCount+1) {
			return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), rp.SameRackCount+1)
		}
		if len(node.Children()) < rp.SameRackCount+1 {
			// a bit faster way to test free racks
			return fmt.Errorf("Only has %d data nodes, not enough for %d.", len(node.Children()), rp.SameRackCount+1)
		}
		// Require SameRackCount+1 distinct data nodes with a free slot,
		// since each replica must land on a different node.
		possibleDataNodesCount := 0
		for _, n := range node.Children() {
			if n.AvailableSpaceFor(option) >= 1 {
				possibleDataNodesCount++
			}
		}
		if possibleDataNodesCount < rp.SameRackCount+1 {
			return fmt.Errorf("Only has %d data nodes with a slot, not enough for %d.", possibleDataNodesCount, rp.SameRackCount+1)
		}
		return nil
	})
	if rackErr != nil {
		return nil, rackErr
	}

	//find main server and other servers
	mainServer, otherServers, serverErr := mainRack.(*Rack).PickNodesByWeight(rp.SameRackCount+1, option, func(node Node) error {
		// Candidate data nodes must match the preferred one when set.
		if option.DataNode != "" && node.IsDataNode() && node.Id() != NodeId(option.DataNode) {
			return fmt.Errorf("Not matching preferred data node:%s", option.DataNode)
		}
		if node.AvailableSpaceFor(option) < 1 {
			return fmt.Errorf("Free:%d < Expected:%d", node.AvailableSpaceFor(option), 1)
		}
		return nil
	})
	if serverErr != nil {
		return nil, serverErr
	}

	// The main server plus its same-rack peers are used directly.
	servers = append(servers, mainServer.(*DataNode))
	for _, server := range otherServers {
		servers = append(servers, server.(*DataNode))
	}
	// For each other rack / data center, reserve one randomly weighted
	// free slot. The pick callbacks above guaranteed AvailableSpaceFor
	// is >= 1 for these nodes, so rand.Int63n gets a positive bound.
	for _, rack := range otherRacks {
		r := rand.Int63n(rack.AvailableSpaceFor(option))
		if server, e := rack.ReserveOneVolume(r, option); e == nil {
			servers = append(servers, server)
		} else {
			// Return the partially collected servers with the error.
			return servers, e
		}
	}
	for _, datacenter := range otherDataCenters {
		r := rand.Int63n(datacenter.AvailableSpaceFor(option))
		if server, e := datacenter.ReserveOneVolume(r, option); e == nil {
			servers = append(servers, server)
		} else {
			return servers, e
		}
	}
	return
}
|
2014-03-03 06:16:54 +00:00
|
|
|
|
2019-04-19 04:43:36 +00:00
|
|
|
func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid needle.VolumeId, option *VolumeGrowOption, servers ...*DataNode) error {
|
2012-09-03 08:50:04 +00:00
|
|
|
for _, server := range servers {
|
2019-02-18 20:11:52 +00:00
|
|
|
if err := AllocateVolume(server, grpcDialOption, vid, option); err == nil {
|
2014-09-20 19:38:59 +00:00
|
|
|
vi := storage.VolumeInfo{
|
|
|
|
Id: vid,
|
|
|
|
Size: 0,
|
|
|
|
Collection: option.Collection,
|
|
|
|
ReplicaPlacement: option.ReplicaPlacement,
|
|
|
|
Ttl: option.Ttl,
|
2019-04-19 04:43:36 +00:00
|
|
|
Version: needle.CurrentVersion,
|
2021-08-10 20:04:33 +00:00
|
|
|
DiskType: option.DiskType.String(),
|
2014-09-20 19:38:59 +00:00
|
|
|
}
|
2012-09-17 00:31:15 +00:00
|
|
|
server.AddOrUpdateVolume(vi)
|
2014-03-19 11:48:13 +00:00
|
|
|
topo.RegisterVolumeLayout(vi, server)
|
2015-01-19 01:03:38 +00:00
|
|
|
glog.V(0).Infoln("Created Volume", vid, "on", server.NodeImpl.String())
|
2012-09-17 00:31:15 +00:00
|
|
|
} else {
|
2015-02-26 05:29:12 +00:00
|
|
|
glog.V(0).Infoln("Failed to assign volume", vid, "to", servers, "error", err)
|
2015-01-14 01:04:41 +00:00
|
|
|
return fmt.Errorf("Failed to assign %d: %v", vid, err)
|
2012-09-17 00:31:15 +00:00
|
|
|
}
|
2012-09-03 08:50:04 +00:00
|
|
|
}
|
2012-09-17 06:18:47 +00:00
|
|
|
return nil
|
2012-09-03 08:50:04 +00:00
|
|
|
}
|