package storage

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/stats"
	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/chrislusf/seaweedfs/weed/util"
)
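
// DiskLocation represents one data directory on a disk: the volumes and
// erasure-coding shards stored there, plus its capacity and free-space settings.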
type DiskLocation struct {
	Directory              string
	IdxDirectory           string
	DiskType               types.DiskType
	MaxVolumeCount         int
	OriginalMaxVolumeCount int
	MinFreeSpace           util.MinFreeSpace
	volumes                map[needle.VolumeId]*Volume
	volumesLock            sync.RWMutex

	// erasure coding
	ecVolumes     map[needle.VolumeId]*erasure_coding.EcVolume
	ecVolumesLock sync.RWMutex

	isDiskSpaceLow bool
}
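
// NewDiskLocation creates a DiskLocation for dir, resolving the index directory
// (which defaults to dir), initializing the volume maps, and starting the
// background disk-space checker.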
func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
	dir = util.ResolvePath(dir)
	if idxDir == "" {
		idxDir = dir
	} else {
		idxDir = util.ResolvePath(idxDir)
	}
	location := &DiskLocation{
		Directory:              dir,
		IdxDirectory:           idxDir,
		DiskType:               diskType,
		MaxVolumeCount:         maxVolumeCount,
		OriginalMaxVolumeCount: maxVolumeCount,
		MinFreeSpace:           minFreeSpace,
	}
	location.volumes = make(map[needle.VolumeId]*Volume)
	location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
	go location.CheckDiskSpace()
	return location
}
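
// volumeIdFromFileName parses the volume id and collection from a volume file
// name such as "collection_123.idx"; it returns an error for non-volume files.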
func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) {
	if isValidVolume(filename) {
		base := filename[:len(filename)-4]
		collection, volumeId, err := parseCollectionVolumeId(base)
		return volumeId, collection, err
	}

	return 0, "", fmt.Errorf("file is not a volume: %s", filename)
}
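
// parseCollectionVolumeId splits a base name such as "collection_123" at its
// last underscore into the collection name and the volume id; a base name
// without an underscore has an empty collection.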
func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
	i := strings.LastIndex(base, "_")
	if i > 0 {
		collection, base = base[0:i], base[i+1:]
	}
	vol, err := needle.NewVolumeId(base)
	return collection, vol, err
}
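
// isValidVolume reports whether basename ends in ".idx" or ".vif", the
// extensions that identify a volume's files.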
func isValidVolume(basename string) bool {
	return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
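
// getValidVolumeName strips the extension from a volume file name, returning ""
// if the file does not belong to a volume.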
func getValidVolumeName(basename string) string {
	if isValidVolume(basename) {
		return basename[:len(basename)-4]
	}
	return ""
}
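
// loadExistingVolume loads one volume from its files on disk. It skips
// directories, non-volume files, incomplete volumes (those with a leftover
// .note file, whose files are removed), and volumes that are already loaded.
// It returns true if the volume is (or already was) loaded.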
func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapKind) bool {
	basename := fileInfo.Name()
	if fileInfo.IsDir() {
		return false
	}
	volumeName := getValidVolumeName(basename)
	if volumeName == "" {
		return false
	}

	// check for incomplete volume
	noteFile := l.Directory + "/" + volumeName + ".note"
	if util.FileExists(noteFile) {
		note, _ := ioutil.ReadFile(noteFile)
		glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
		removeVolumeFiles(l.Directory + "/" + volumeName)
		removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
		return false
	}

	// parse out collection, volume id
	vid, collection, err := volumeIdFromFileName(basename)
	if err != nil {
		glog.Warningf("get volume id failed, %s, err: %s", volumeName, err)
		return false
	}

	// avoid loading one volume more than once
	l.volumesLock.RLock()
	_, found := l.volumes[vid]
	l.volumesLock.RUnlock()
	if found {
		glog.V(1).Infof("loaded volume, %v", vid)
		return true
	}

	// load the volume
	v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0)
	if e != nil {
		glog.V(0).Infof("new volume %s error %s", volumeName, e)
		return false
	}

	l.SetVolume(vid, v)

	size, _, _ := v.FileStat()
	glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
		l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
	return true
}
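
// concurrentLoadingVolumes lists the data directory once, queues each distinct
// volume name, and loads the volumes with the given number of worker goroutines.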
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int) {
	taskQueue := make(chan os.FileInfo, 10*concurrency)
	go func() {
		foundVolumeNames := make(map[string]bool)
		if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil {
			for _, fi := range fileInfos {
				volumeName := getValidVolumeName(fi.Name())
				if volumeName == "" {
					continue
				}
				if _, found := foundVolumeNames[volumeName]; !found {
					foundVolumeNames[volumeName] = true
					taskQueue <- fi
				}
			}
		}
		close(taskQueue)
	}()

	var wg sync.WaitGroup
	for workerNum := 0; workerNum < concurrency; workerNum++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for fi := range taskQueue {
				_ = l.loadExistingVolume(fi, needleMapKind)
			}
		}()
	}
	wg.Wait()
}
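
// loadExistingVolumes loads all regular volumes and erasure-coding shards found
// in this disk location.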
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind) {
	l.concurrentLoadingVolumes(needleMapKind, 10)
	glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)

	l.loadAllEcShards()
	glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
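
// DeleteCollectionFromDiskLocation unmounts all volumes and erasure-coding
// volumes of the given collection, destroys them concurrently, and returns the
// collected destroy errors, if any, as a single error.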
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
	l.volumesLock.Lock()
	delVolsMap := l.unmountVolumeByCollection(collection)
	l.volumesLock.Unlock()

	l.ecVolumesLock.Lock()
	delEcVolsMap := l.unmountEcVolumeByCollection(collection)
	l.ecVolumesLock.Unlock()

	errChain := make(chan error, 2)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		for _, v := range delVolsMap {
			if err := v.Destroy(); err != nil {
				errChain <- err
			}
		}
		wg.Done()
	}()

	go func() {
		for _, v := range delEcVolsMap {
			v.Destroy()
		}
		wg.Done()
	}()

	go func() {
		wg.Wait()
		close(errChain)
	}()

	errBuilder := strings.Builder{}
	for err := range errChain {
		errBuilder.WriteString(err.Error())
		errBuilder.WriteString("; ")
	}
	if errBuilder.Len() > 0 {
		e = fmt.Errorf("%s", errBuilder.String())
	}

	return
}
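
// deleteVolumeById destroys a volume's files and removes it from the volumes
// map; callers must hold volumesLock.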
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (found bool, e error) {
	v, ok := l.volumes[vid]
	if !ok {
		return
	}
	e = v.Destroy()
	if e != nil {
		return
	}
	found = true
	delete(l.volumes, vid)
	return
}
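
// LoadVolume locates the files for the given volume id in this disk location
// and loads the volume if found.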
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
	if fileInfo, found := l.LocateVolume(vid); found {
		return l.loadExistingVolume(fileInfo, needleMapKind)
	}
	return false
}
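
// DeleteVolume destroys the given volume's files and removes it from this disk
// location; it returns an error if the volume is not loaded here.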
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId) error {
	l.volumesLock.Lock()
	defer l.volumesLock.Unlock()

	_, ok := l.volumes[vid]
	if !ok {
		return fmt.Errorf("volume not found, VolumeId: %d", vid)
	}
	_, err := l.deleteVolumeById(vid)
	return err
}
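
// UnloadVolume closes the given volume and removes it from this disk location
// without deleting its files.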
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
	l.volumesLock.Lock()
	defer l.volumesLock.Unlock()

	v, ok := l.volumes[vid]
	if !ok {
		return fmt.Errorf("volume not loaded, VolumeId: %d", vid)
	}
	v.Close()
	delete(l.volumes, vid)
	return nil
}
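
// unmountVolumeByCollection removes all volumes of the given collection that
// are not currently compacting from the volumes map and returns them; callers
// must hold volumesLock.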
func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
	deltaVols := make(map[needle.VolumeId]*Volume)
	for k, v := range l.volumes {
		if v.Collection == collectionName && !v.isCompacting {
			deltaVols[k] = v
		}
	}

	for k := range deltaVols {
		delete(l.volumes, k)
	}
	return deltaVols
}
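
// SetVolume registers a volume under the given id and points the volume back at
// this disk location.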
func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
	l.volumesLock.Lock()
	defer l.volumesLock.Unlock()

	l.volumes[vid] = volume
	volume.location = l
}
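
// FindVolume returns the loaded volume for the given id, if any.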
func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
	l.volumesLock.RLock()
	defer l.volumesLock.RUnlock()

	v, ok := l.volumes[vid]
	return v, ok
}
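
// VolumesLen returns the number of volumes currently loaded in this disk location.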
func (l *DiskLocation) VolumesLen() int {
	l.volumesLock.RLock()
	defer l.volumesLock.RUnlock()

	return len(l.volumes)
}
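
// Close closes all loaded volumes and erasure-coding volumes in this disk location.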
func (l *DiskLocation) Close() {
	l.volumesLock.Lock()
	for _, v := range l.volumes {
		v.Close()
	}
	l.volumesLock.Unlock()

	l.ecVolumesLock.Lock()
	for _, ecVolume := range l.ecVolumes {
		ecVolume.Close()
	}
	l.ecVolumesLock.Unlock()

	return
}
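
// LocateVolume scans the data directory for a file that belongs to the given
// volume id.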
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) {
	if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil {
		for _, fileInfo := range fileInfos {
			volId, _, err := volumeIdFromFileName(fileInfo.Name())
			if vid == volId && err == nil {
				return fileInfo, true
			}
		}
	}

	return nil, false
}
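
// UnUsedSpace sums, over all writable volumes, the difference between
// volumeSizeLimit and each volume's current data plus index size.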
func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
	l.volumesLock.RLock()
	defer l.volumesLock.RUnlock()

	for _, vol := range l.volumes {
		if vol.IsReadOnly() {
			continue
		}
		datSize, idxSize, _ := vol.FileStat()
		// guard against unsigned underflow when a volume has already grown past the size limit
		if datSize+idxSize > volumeSizeLimit {
			continue
		}
		unUsedSpace += volumeSizeLimit - (datSize + idxSize)
	}

	return
}
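
// CheckDiskSpace runs forever, once a minute exporting this directory's disk
// usage as metrics and updating isDiskSpaceLow against the configured
// MinFreeSpace threshold.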
func (l *DiskLocation) CheckDiskSpace() {
	for {
		if dir, e := filepath.Abs(l.Directory); e == nil {
			s := stats.NewDiskStatus(dir)
			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))

			isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
			if isLow != l.isDiskSpaceLow {
				l.isDiskSpaceLow = !l.isDiskSpaceLow
			}

			logLevel := glog.Level(4)
			if l.isDiskSpaceLow {
				logLevel = glog.Level(0)
			}

			glog.V(logLevel).Infof("dir %s %s", dir, desc)
		}
		time.Sleep(time.Minute)
	}
}