2022-01-21 10:15:27 +00:00
|
|
|
package shell
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"flag"
|
|
|
|
"fmt"
|
2022-07-29 07:17:28 +00:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/filer"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
|
2022-01-21 10:15:27 +00:00
|
|
|
"io"
|
|
|
|
"math"
|
|
|
|
)
|
|
|
|
|
|
|
|
func init() {
|
2022-01-21 10:34:42 +00:00
|
|
|
Commands = append(Commands, &commandS3BucketQuotaEnforce{})
|
2022-01-21 10:15:27 +00:00
|
|
|
}
|
|
|
|
|
2022-01-21 10:34:42 +00:00
|
|
|
// commandS3BucketQuotaEnforce checks every bucket's collection size against
// its configured quota and toggles the bucket's read-only flag in the filer
// configuration. The command itself is stateless; all inputs come from Do.
type commandS3BucketQuotaEnforce struct {
}
|
|
|
|
|
2022-01-21 10:34:42 +00:00
|
|
|
func (c *commandS3BucketQuotaEnforce) Name() string {
|
|
|
|
return "s3.bucket.quota.enforce"
|
2022-01-21 10:15:27 +00:00
|
|
|
}
|
|
|
|
|
2022-01-21 10:34:42 +00:00
|
|
|
// Help returns the usage text shown by the shell's help facility,
// including an example invocation with the -apply flag.
func (c *commandS3BucketQuotaEnforce) Help() string {
	return `check quota for all buckets, make the bucket read only if over the limit

	Example:
		s3.bucket.quota.enforce -apply
`
}
|
|
|
|
|
2022-01-21 10:34:42 +00:00
|
|
|
func (c *commandS3BucketQuotaEnforce) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
|
2022-01-21 10:15:27 +00:00
|
|
|
|
|
|
|
bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
|
2022-01-21 10:34:42 +00:00
|
|
|
applyQuotaLimit := bucketCommand.Bool("apply", false, "actually change the buckets readonly attribute")
|
2022-01-21 10:15:27 +00:00
|
|
|
if err = bucketCommand.Parse(args); err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
2022-05-31 21:48:46 +00:00
|
|
|
infoAboutSimulationMode(writer, *applyQuotaLimit, "-apply")
|
2022-01-21 10:15:27 +00:00
|
|
|
|
|
|
|
// collect collection information
|
2022-02-08 08:53:55 +00:00
|
|
|
topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
|
2022-01-21 10:15:27 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
collectionInfos := make(map[string]*CollectionInfo)
|
|
|
|
collectCollectionInfo(topologyInfo, collectionInfos)
|
|
|
|
|
|
|
|
// read buckets path
|
|
|
|
var filerBucketsPath string
|
|
|
|
filerBucketsPath, err = readFilerBucketsPath(commandEnv)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("read buckets: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// read existing filer configuration
|
|
|
|
fc, err := filer.ReadFilerConf(commandEnv.option.FilerAddress, commandEnv.option.GrpcDialOption, commandEnv.MasterClient)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// process each bucket
|
|
|
|
hasConfChanges := false
|
|
|
|
err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error {
|
|
|
|
if !entry.IsDirectory {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
collection := entry.Name
|
2022-03-05 02:47:44 +00:00
|
|
|
var collectionSize float64
|
2022-01-21 10:15:27 +00:00
|
|
|
if collectionInfo, found := collectionInfos[collection]; found {
|
|
|
|
collectionSize = collectionInfo.Size
|
|
|
|
}
|
|
|
|
if c.processEachBucket(fc, filerBucketsPath, entry, writer, collectionSize) {
|
|
|
|
hasConfChanges = true
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}, "", false, math.MaxUint32)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// apply the configuration changes
|
2022-01-21 10:34:42 +00:00
|
|
|
if hasConfChanges && *applyQuotaLimit {
|
2022-01-21 10:15:27 +00:00
|
|
|
|
|
|
|
var buf2 bytes.Buffer
|
|
|
|
fc.ToText(&buf2)
|
|
|
|
|
|
|
|
if err = commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
|
|
|
|
return filer.SaveInsideFiler(client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, buf2.Bytes())
|
|
|
|
}); err != nil && err != filer_pb.ErrNotFound {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2022-03-05 02:47:44 +00:00
|
|
|
func (c *commandS3BucketQuotaEnforce) processEachBucket(fc *filer.FilerConf, filerBucketsPath string, entry *filer_pb.Entry, writer io.Writer, collectionSize float64) (hasConfChanges bool) {
|
2022-01-21 10:15:27 +00:00
|
|
|
|
|
|
|
locPrefix := filerBucketsPath + "/" + entry.Name + "/"
|
|
|
|
locConf := fc.MatchStorageRule(locPrefix)
|
2022-01-21 10:34:42 +00:00
|
|
|
locConf.LocationPrefix = locPrefix
|
2022-01-21 10:15:27 +00:00
|
|
|
|
|
|
|
if entry.Quota > 0 {
|
|
|
|
if locConf.ReadOnly {
|
2022-03-05 02:47:44 +00:00
|
|
|
if collectionSize < float64(entry.Quota) {
|
2022-01-21 10:15:27 +00:00
|
|
|
locConf.ReadOnly = false
|
|
|
|
hasConfChanges = true
|
|
|
|
}
|
|
|
|
} else {
|
2022-03-05 02:47:44 +00:00
|
|
|
if collectionSize > float64(entry.Quota) {
|
2022-01-21 10:15:27 +00:00
|
|
|
locConf.ReadOnly = true
|
|
|
|
hasConfChanges = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if locConf.ReadOnly {
|
|
|
|
locConf.ReadOnly = false
|
|
|
|
hasConfChanges = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if hasConfChanges {
|
2022-03-07 01:09:55 +00:00
|
|
|
fmt.Fprintf(writer, " %s\tsize:%.0f", entry.Name, collectionSize)
|
2022-03-05 02:47:44 +00:00
|
|
|
fmt.Fprintf(writer, "\tquota:%d\tusage:%.2f%%", entry.Quota, collectionSize*100/float64(entry.Quota))
|
2022-01-21 10:15:27 +00:00
|
|
|
fmt.Fprintln(writer)
|
|
|
|
if locConf.ReadOnly {
|
|
|
|
fmt.Fprintf(writer, " changing bucket %s to read only!\n", entry.Name)
|
|
|
|
} else {
|
|
|
|
fmt.Fprintf(writer, " changing bucket %s to writable.\n", entry.Name)
|
|
|
|
}
|
|
|
|
fc.AddLocationConf(locConf)
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|