// seaweedfs/weed/storage/store.go

package storage

import (
	"fmt"
	"io"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/volume_info"
	"github.com/seaweedfs/seaweedfs/weed/util"

	"google.golang.org/grpc"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/stats"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	. "github.com/seaweedfs/seaweedfs/weed/storage/types"
)
const (
	MAX_TTL_VOLUME_REMOVAL_DELAY = 10 // 10 minutes
)
type ReadOption struct {
	// request
	ReadDeleted     bool
	AttemptMetaOnly bool
	MustMetaOnly    bool
	// response
	IsMetaOnly     bool // read status
	VolumeRevision uint16
	IsOutOfRange   bool // whether read over MaxPossibleVolumeSize
	// If HasSlowRead is set to true:
	//  * read requests and write requests compete for the lock.
	//  * large file read P99 latency on busy sites will go up, due to the need to get the lock multiple times.
	//  * write requests will see lower latency.
	// If HasSlowRead is set to false:
	//  * read requests should complete asap, not blocking other requests.
	//  * write requests may see high latency when downloading large files.
	HasSlowRead bool
	// Increasing ReadBufferSize reduces the number of lock acquisitions and shortens read P99 latency,
	// at the cost of slightly higher memory usage. Normally used together with HasSlowRead.
	ReadBufferSize int
}
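
// Illustrative usage (a sketch, not part of the original file): a caller that
// favors write latency over read latency might enable HasSlowRead together
// with a larger read buffer; the 1 MiB size below is an assumed value.
//
//	opt := &ReadOption{
//		HasSlowRead:    true,
//		ReadBufferSize: 1024 * 1024,
//	}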
/*
 * A VolumeServer contains one Store
 */
type Store struct {
	MasterAddress       pb.ServerAddress
	grpcDialOption      grpc.DialOption
	volumeSizeLimit     uint64 // read from the master
	Ip                  string
	Port                int
	GrpcPort            int
	PublicUrl           string
	Locations           []*DiskLocation
	dataCenter          string // optional information, overwriting master setting if exists
	rack                string // optional information, overwriting master setting if exists
	connected           bool
	NeedleMapKind       NeedleMapKind
	NewVolumesChan      chan master_pb.VolumeShortInformationMessage
	DeletedVolumesChan  chan master_pb.VolumeShortInformationMessage
	NewEcShardsChan     chan master_pb.VolumeEcShardInformationMessage
	DeletedEcShardsChan chan master_pb.VolumeEcShardInformationMessage
	isStopping          bool
}
func (s *Store) String() (str string) {
	str = fmt.Sprintf("Ip:%s, Port:%d, GrpcPort:%d PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d", s.Ip, s.Port, s.GrpcPort, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.GetVolumeSizeLimit())
	return
}
func NewStore(grpcDialOption grpc.DialOption, ip string, port int, grpcPort int, publicUrl string, dirnames []string, maxVolumeCounts []int32,
	minFreeSpaces []util.MinFreeSpace, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType) (s *Store) {
	s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, GrpcPort: grpcPort, PublicUrl: publicUrl, NeedleMapKind: needleMapKind}
	s.Locations = make([]*DiskLocation, 0)

	var wg sync.WaitGroup
	for i := 0; i < len(dirnames); i++ {
		location := NewDiskLocation(dirnames[i], int32(maxVolumeCounts[i]), minFreeSpaces[i], idxFolder, diskTypes[i])
		s.Locations = append(s.Locations, location)
		stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i]))

		wg.Add(1)
		go func() {
			defer wg.Done()
			location.loadExistingVolumes(needleMapKind)
		}()
	}
	wg.Wait()

	s.NewVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)
	s.DeletedVolumesChan = make(chan master_pb.VolumeShortInformationMessage, 3)

	s.NewEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)
	s.DeletedEcShardsChan = make(chan master_pb.VolumeEcShardInformationMessage, 3)

	return
}
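
// Illustrative construction (a sketch, not part of the original file; the
// directory, ports, volume count, and the zero-value MinFreeSpace below are
// assumed placeholders):
//
//	dialOpt := grpc.WithTransportCredentials(insecure.NewCredentials()) // needs google.golang.org/grpc/credentials/insecure
//	store := NewStore(dialOpt, "127.0.0.1", 8080, 18080, "127.0.0.1:8080",
//		[]string{"/data"}, []int32{7}, []util.MinFreeSpace{{}},
//		"", NeedleMapInMemory, []DiskType{HardDriveType})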
func (s *Store) AddVolume(volumeId needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement string, ttlString string, preallocate int64, MemoryMapMaxSizeMb uint32, diskType DiskType) error {
	rt, e := super_block.NewReplicaPlacementFromString(replicaPlacement)
	if e != nil {
		return e
	}
	ttl, e := needle.ReadTTL(ttlString)
	if e != nil {
		return e
	}
	e = s.addVolume(volumeId, collection, needleMapKind, rt, ttl, preallocate, MemoryMapMaxSizeMb, diskType)
	return e
}
func (s *Store) DeleteCollection(collection string) (e error) {
	for _, location := range s.Locations {
		e = location.DeleteCollectionFromDiskLocation(collection)
		if e != nil {
			return
		}
		stats.DeleteCollectionMetrics(collection)
		// let the heartbeat send the list of volumes, instead of sending the deleted volume ids to DeletedVolumesChan
	}
	return
}
func (s *Store) findVolume(vid needle.VolumeId) *Volume {
	for _, location := range s.Locations {
		if v, found := location.FindVolume(vid); found {
			return v
		}
	}
	return nil
}
func (s *Store) FindFreeLocation(diskType DiskType) (ret *DiskLocation) {
	max := int32(0)
	for _, location := range s.Locations {
		if diskType != location.DiskType {
			continue
		}
		if location.isDiskSpaceLow {
			continue
		}
		currentFreeCount := location.MaxVolumeCount - int32(location.VolumesLen())
		currentFreeCount *= erasure_coding.DataShardsCount
		currentFreeCount -= int32(location.EcVolumesLen())
		currentFreeCount /= erasure_coding.DataShardsCount
		if currentFreeCount > max {
			max = currentFreeCount
			ret = location
		}
	}
	return ret
}
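
// Worked example (illustrative, with assumed numbers): a location with
// MaxVolumeCount=100, 40 normal volumes, and 25 EC shards, assuming
// erasure_coding.DataShardsCount is 10, yields
// ((100-40)*10 - 25) / 10 = 57 free slots after integer division, so EC
// shards consume fractional volume slots when locations are compared.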
func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, diskType DiskType) error {
	if s.findVolume(vid) != nil {
		return fmt.Errorf("Volume Id %d already exists!", vid)
	}
	if location := s.FindFreeLocation(diskType); location != nil {
		glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v",
			location.Directory, vid, collection, replicaPlacement, ttl)
		if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb); err == nil {
			location.SetVolume(vid, volume)
			glog.V(0).Infof("add volume %d", vid)
			s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
				Id:               uint32(vid),
				Collection:       collection,
				ReplicaPlacement: uint32(replicaPlacement.Byte()),
				Version:          uint32(volume.Version()),
				Ttl:              ttl.ToUint32(),
				DiskType:         string(diskType),
			}
			return nil
		} else {
			return err
		}
	}
	return fmt.Errorf("No more free space left")
}
func (s *Store) VolumeInfos() (allStats []*VolumeInfo) {
	for _, location := range s.Locations {
		stats := collectStatsForOneLocation(location)
		allStats = append(allStats, stats...)
	}
	sortVolumeInfos(allStats)
	return allStats
}
func collectStatsForOneLocation(location *DiskLocation) (stats []*VolumeInfo) {
	location.volumesLock.RLock()
	defer location.volumesLock.RUnlock()

	for k, v := range location.volumes {
		s := collectStatForOneVolume(k, v)
		stats = append(stats, s)
	}
	return stats
}
func collectStatForOneVolume(vid needle.VolumeId, v *Volume) (s *VolumeInfo) {
	s = &VolumeInfo{
		Id:               vid,
		Collection:       v.Collection,
		ReplicaPlacement: v.ReplicaPlacement,
		Version:          v.Version(),
		ReadOnly:         v.IsReadOnly(),
		Ttl:              v.Ttl,
		CompactRevision:  uint32(v.CompactionRevision),
		DiskType:         v.DiskType().String(),
	}
	s.RemoteStorageName, s.RemoteStorageKey = v.RemoteStorageNameKey()

	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()

	if v.nm == nil {
		return
	}

	s.FileCount = v.nm.FileCount()
	s.DeleteCount = v.nm.DeletedCount()
	s.DeletedByteCount = v.nm.DeletedSize()
	s.Size = v.nm.ContentSize()
	return
}
func (s *Store) SetDataCenter(dataCenter string) {
	s.dataCenter = dataCenter
}
func (s *Store) SetRack(rack string) {
	s.rack = rack
}
func (s *Store) GetDataCenter() string {
	return s.dataCenter
}
func (s *Store) GetRack() string {
	return s.rack
}
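
// CollectHeartbeat assembles a master_pb.Heartbeat from all disk locations:
// it gathers per-volume information messages, tracks the largest file key
// seen, updates per-collection size and read-only metrics gauges, collects
// the directory UUIDs, and along the way deletes volumes that have been
// expired long enough or that reported IO errors.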
func (s *Store) CollectHeartbeat() *master_pb.Heartbeat {
	var volumeMessages []*master_pb.VolumeInformationMessage
	maxVolumeCounts := make(map[string]uint32)
	var maxFileKey NeedleId
	collectionVolumeSize := make(map[string]int64)
	collectionVolumeReadOnlyCount := make(map[string]map[string]uint8)
	for _, location := range s.Locations {
		var deleteVids []needle.VolumeId
		maxVolumeCounts[string(location.DiskType)] += uint32(location.MaxVolumeCount)
		location.volumesLock.RLock()
		for _, v := range location.volumes {
			curMaxFileKey, volumeMessage := v.ToVolumeInformationMessage()
			if volumeMessage == nil {
				continue
			}
			if maxFileKey < curMaxFileKey {
				maxFileKey = curMaxFileKey
			}
			shouldDeleteVolume := false
			if !v.expired(volumeMessage.Size, s.GetVolumeSizeLimit()) {
				volumeMessages = append(volumeMessages, volumeMessage)
			} else {
				if v.expiredLongEnough(MAX_TTL_VOLUME_REMOVAL_DELAY) {
					deleteVids = append(deleteVids, v.Id)
					shouldDeleteVolume = true
				} else {
					glog.V(0).Infof("volume %d is expired", v.Id)
				}
				if v.lastIoError != nil {
					deleteVids = append(deleteVids, v.Id)
					shouldDeleteVolume = true
					glog.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError)
				}
			}

			if _, exist := collectionVolumeSize[v.Collection]; !exist {
				collectionVolumeSize[v.Collection] = 0
			}
			if !shouldDeleteVolume {
				collectionVolumeSize[v.Collection] += int64(volumeMessage.Size)
			} else {
				collectionVolumeSize[v.Collection] -= int64(volumeMessage.Size)
				if collectionVolumeSize[v.Collection] <= 0 {
					delete(collectionVolumeSize, v.Collection)
				}
			}

			if _, exist := collectionVolumeReadOnlyCount[v.Collection]; !exist {
				collectionVolumeReadOnlyCount[v.Collection] = map[string]uint8{
					stats.IsReadOnly:       0,
					stats.NoWriteOrDelete:  0,
					stats.NoWriteCanDelete: 0,
					stats.IsDiskSpaceLow:   0,
				}
			}
			if !shouldDeleteVolume && v.IsReadOnly() {
				collectionVolumeReadOnlyCount[v.Collection][stats.IsReadOnly] += 1
				if v.noWriteOrDelete {
					collectionVolumeReadOnlyCount[v.Collection][stats.NoWriteOrDelete] += 1
				}
				if v.noWriteCanDelete {
					collectionVolumeReadOnlyCount[v.Collection][stats.NoWriteCanDelete] += 1
				}
				if v.location.isDiskSpaceLow {
					collectionVolumeReadOnlyCount[v.Collection][stats.IsDiskSpaceLow] += 1
				}
			}
		}
		location.volumesLock.RUnlock()

		if len(deleteVids) > 0 {
			// delete expired volumes.
			location.volumesLock.Lock()
			for _, vid := range deleteVids {
				found, err := location.deleteVolumeById(vid)
				if err == nil {
					if found {
						glog.V(0).Infof("volume %d is deleted", vid)
					}
				} else {
					glog.Warningf("delete volume %d: %v", vid, err)
				}
			}
			location.volumesLock.Unlock()
		}
	}

	var uuidList []string
	for _, loc := range s.Locations {
		uuidList = append(uuidList, loc.DirectoryUuid)
	}

	for col, size := range collectionVolumeSize {
		stats.VolumeServerDiskSizeGauge.WithLabelValues(col, "normal").Set(float64(size))
	}

	for col, types := range collectionVolumeReadOnlyCount {
		for t, count := range types {
			stats.VolumeServerReadOnlyVolumeGauge.WithLabelValues(col, t).Set(float64(count))
		}
	}

	return &master_pb.Heartbeat{
		Ip:              s.Ip,
		Port:            uint32(s.Port),
		GrpcPort:        uint32(s.GrpcPort),
		PublicUrl:       s.PublicUrl,
		MaxVolumeCounts: maxVolumeCounts,
		MaxFileKey:      NeedleIdToUint64(maxFileKey),
		DataCenter:      s.dataCenter,
		Rack:            s.rack,
		Volumes:         volumeMessages,
		HasNoVolumes:    len(volumeMessages) == 0,
		LocationUuids:   uuidList,
	}
}
func (s *Store) SetStopping() {
	s.isStopping = true
	for _, location := range s.Locations {
		location.SetStopping()
	}
}

func (s *Store) LoadNewVolumes() {
	for _, location := range s.Locations {
		location.loadExistingVolumes(s.NeedleMapKind)
	}
}
func (s *Store) Close() {
	for _, location := range s.Locations {
		location.Close()
	}
}
func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, checkCookie bool, fsync bool) (isUnchanged bool, err error) {
	if v := s.findVolume(i); v != nil {
		if v.IsReadOnly() {
			err = fmt.Errorf("volume %d is read only", i)
			return
		}
		_, _, isUnchanged, err = v.writeNeedle2(n, checkCookie, fsync && s.isStopping)
		return
	}
	glog.V(0).Infoln("volume", i, "not found!")
	err = fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
	return
}
func (s *Store) DeleteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (Size, error) {
	if v := s.findVolume(i); v != nil {
		if v.noWriteOrDelete {
			return 0, fmt.Errorf("volume %d is read only", i)
		}
		return v.deleteNeedle2(n)
	}
	return 0, fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port)
}
func (s *Store) ReadVolumeNeedle(i needle.VolumeId, n *needle.Needle, readOption *ReadOption, onReadSizeFn func(size Size)) (int, error) {
	if v := s.findVolume(i); v != nil {
		return v.readNeedle(n, readOption, onReadSizeFn)
	}
	return 0, fmt.Errorf("volume %d not found", i)
}
func (s *Store) ReadVolumeNeedleMetaAt(i needle.VolumeId, n *needle.Needle, offset int64, size int32) error {
	if v := s.findVolume(i); v != nil {
		return v.readNeedleMetaAt(n, offset, size)
	}
	return fmt.Errorf("volume %d not found", i)
}

func (s *Store) ReadVolumeNeedleDataInto(i needle.VolumeId, n *needle.Needle, readOption *ReadOption, writer io.Writer, offset int64, size int64) error {
	if v := s.findVolume(i); v != nil {
		return v.readNeedleDataInto(n, readOption, writer, offset, size)
	}
	return fmt.Errorf("volume %d not found", i)
}
func (s *Store) GetVolume(i needle.VolumeId) *Volume {
	return s.findVolume(i)
}

func (s *Store) HasVolume(i needle.VolumeId) bool {
	v := s.findVolume(i)
	return v != nil
}
func (s *Store) MarkVolumeReadonly(i needle.VolumeId) error {
	v := s.findVolume(i)
	if v == nil {
		return fmt.Errorf("volume %d not found", i)
	}
	v.noWriteLock.Lock()
	v.noWriteOrDelete = true
	v.noWriteLock.Unlock()
	return nil
}

func (s *Store) MarkVolumeWritable(i needle.VolumeId) error {
	v := s.findVolume(i)
	if v == nil {
		return fmt.Errorf("volume %d not found", i)
	}
	v.noWriteLock.Lock()
	v.noWriteOrDelete = false
	v.noWriteLock.Unlock()
	return nil
}
func (s *Store) MountVolume(i needle.VolumeId) error {
	for _, location := range s.Locations {
		if found := location.LoadVolume(i, s.NeedleMapKind); found == true {
			glog.V(0).Infof("mount volume %d", i)
			v := s.findVolume(i)
			s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{
				Id:               uint32(v.Id),
				Collection:       v.Collection,
				ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
				Version:          uint32(v.Version()),
				Ttl:              v.Ttl.ToUint32(),
				DiskType:         string(v.location.DiskType),
			}
			return nil
		}
	}

	return fmt.Errorf("volume %d not found on disk", i)
}
func (s *Store) UnmountVolume(i needle.VolumeId) error {
	v := s.findVolume(i)
	if v == nil {
		return nil
	}
	message := master_pb.VolumeShortInformationMessage{
		Id:               uint32(v.Id),
		Collection:       v.Collection,
		ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
		Version:          uint32(v.Version()),
		Ttl:              v.Ttl.ToUint32(),
		DiskType:         string(v.location.DiskType),
	}

	for _, location := range s.Locations {
		err := location.UnloadVolume(i)
		if err == nil {
			glog.V(0).Infof("UnmountVolume %d", i)
			stats.DeleteCollectionMetrics(v.Collection)
			s.DeletedVolumesChan <- message
			return nil
		} else if err == ErrVolumeNotFound {
			continue
		}
	}

	return fmt.Errorf("volume %d not found on disk", i)
}
func (s *Store) DeleteVolume(i needle.VolumeId) error {
	v := s.findVolume(i)
	if v == nil {
		return fmt.Errorf("delete volume %d not found on disk", i)
	}
	message := master_pb.VolumeShortInformationMessage{
		Id:               uint32(v.Id),
		Collection:       v.Collection,
		ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
		Version:          uint32(v.Version()),
		Ttl:              v.Ttl.ToUint32(),
		DiskType:         string(v.location.DiskType),
	}
	for _, location := range s.Locations {
		err := location.DeleteVolume(i)
		if err == nil {
			glog.V(0).Infof("DeleteVolume %d", i)
			s.DeletedVolumesChan <- message
			return nil
		} else if err == ErrVolumeNotFound {
			continue
		} else {
			glog.Errorf("DeleteVolume %d: %v", i, err)
		}
	}

	return fmt.Errorf("volume %d not found on disk", i)
}
func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error {

	for _, location := range s.Locations {
		fileInfo, found := location.LocateVolume(i)
		if !found {
			continue
		}
		// load, modify, save
		baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name()))
		vifFile := filepath.Join(location.Directory, baseFileName+".vif")
		volumeInfo, _, _, err := volume_info.MaybeLoadVolumeInfo(vifFile)
		if err != nil {
			return fmt.Errorf("volume %d fail to load vif: %v", i, err)
		}
		volumeInfo.Replication = replication
		err = volume_info.SaveVolumeInfo(vifFile, volumeInfo)
		if err != nil {
			return fmt.Errorf("volume %d fail to save vif: %v", i, err)
		}
		return nil
	}

	return fmt.Errorf("volume %d not found on disk", i)
}
func (s *Store) SetVolumeSizeLimit(x uint64) {
	atomic.StoreUint64(&s.volumeSizeLimit, x)
}

func (s *Store) GetVolumeSizeLimit() uint64 {
	return atomic.LoadUint64(&s.volumeSizeLimit)
}
func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
	volumeSizeLimit := s.GetVolumeSizeLimit()
	if volumeSizeLimit == 0 {
		return
	}
	for _, diskLocation := range s.Locations {
		if diskLocation.OriginalMaxVolumeCount == 0 {
			currentMaxVolumeCount := atomic.LoadInt32(&diskLocation.MaxVolumeCount)
			diskStatus := stats.NewDiskStatus(diskLocation.Directory)
			unusedSpace := diskLocation.UnUsedSpace(volumeSizeLimit)
			unclaimedSpaces := int64(diskStatus.Free) - int64(unusedSpace)
			volCount := diskLocation.VolumesLen()
			maxVolumeCount := int32(volCount)
			if unclaimedSpaces > int64(volumeSizeLimit) {
				maxVolumeCount += int32(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
			}
			atomic.StoreInt32(&diskLocation.MaxVolumeCount, maxVolumeCount)
			glog.V(4).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
				diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
			hasChanges = hasChanges || currentMaxVolumeCount != atomic.LoadInt32(&diskLocation.MaxVolumeCount)
		}
	}
	return
}
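
// Worked example (illustrative, with assumed numbers): with a volumeSizeLimit
// of 30 GB, 500 GB free on the disk, and 80 GB of headroom still reserved for
// existing volumes to grow into (UnUsedSpace), unclaimedSpaces = 500 - 80 =
// 420 GB. With 10 volumes already on the location, maxVolumeCount becomes
// 10 + 420/30 - 1 = 23.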