add command line option to control garbage threshold

Chris Lu 2012-11-23 17:31:54 -08:00
parent 94dbe60233
commit 3ef7a34f49
3 changed files with 39 additions and 38 deletions

File 1 of 3

@@ -37,6 +37,7 @@ var (
     defaultRepType   = cmdMaster.Flag.String("defaultReplicationType", "000", "Default replication type if not specified.")
     mReadTimeout     = cmdMaster.Flag.Int("readTimeout", 1, "connection read timeout in seconds")
     mMaxCpu          = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
+    garbageThreshold = cmdMaster.Flag.String("garbageThreshold", "0.3", "threshold to vacuum and reclaim spaces")
 )

 var topo *topology.Topology
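The default of "0.3" matches the ratio that was previously hard-coded into the vacuum-check request (see the change to vacuumVolume_Check below), so existing deployments behave the same unless the flag is set; a larger value tolerates more garbage before a vacuum is triggered. A hypothetical invocation, assuming the usual "weed master" entry point that the cmdMaster name suggests, would look like: weed master -garbageThreshold=0.4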
@@ -123,24 +124,24 @@ func dirStatusHandler(w http.ResponseWriter, r *http.Request) {
 }

 func volumeVacuumHandler(w http.ResponseWriter, r *http.Request) {
     count := 0
     rt, err := storage.NewReplicationTypeFromString(r.FormValue("replication"))
     if err == nil {
         if count, err = strconv.Atoi(r.FormValue("count")); err == nil {
             if topo.FreeSpace() < count*rt.GetCopyCount() {
                 err = errors.New("Only " + strconv.Itoa(topo.FreeSpace()) + " volumes left! Not enough for " + strconv.Itoa(count*rt.GetCopyCount()))
             } else {
                 count, err = vg.GrowByCountAndType(count, rt, topo)
             }
         }
     }
     if err != nil {
         w.WriteHeader(http.StatusNotAcceptable)
         writeJson(w, r, map[string]string{"error": err.Error()})
     } else {
         w.WriteHeader(http.StatusNotAcceptable)
         writeJson(w, r, map[string]interface{}{"count": count})
     }
 }

 func volumeGrowHandler(w http.ResponseWriter, r *http.Request) {
@@ -153,24 +154,24 @@ func volumeGrowHandler(w http.ResponseWriter, r *http.Request) {
             } else {
                 count, err = vg.GrowByCountAndType(count, rt, topo)
             }
-        }else{
+        } else {
             err = errors.New("parameter count is not found")
         }
     }
     if err != nil {
         w.WriteHeader(http.StatusNotAcceptable)
-        writeJson(w, r, map[string]string{"error": "parameter replication "+err.Error()})
+        writeJson(w, r, map[string]string{"error": "parameter replication " + err.Error()})
     } else {
         w.WriteHeader(http.StatusNotAcceptable)
         writeJson(w, r, map[string]interface{}{"count": count})
     }
 }

 func volumeStatusHandler(w http.ResponseWriter, r *http.Request) {
     m := make(map[string]interface{})
     m["Version"] = VERSION
     m["Volumes"] = topo.ToVolumeMap()
     writeJson(w, r, m)
 }

 func runMaster(cmd *Command, args []string) bool {
@@ -186,9 +187,9 @@ func runMaster(cmd *Command, args []string) bool {
     http.HandleFunc("/dir/join", dirJoinHandler)
     http.HandleFunc("/dir/status", dirStatusHandler)
     http.HandleFunc("/vol/grow", volumeGrowHandler)
     http.HandleFunc("/vol/status", volumeStatusHandler)

-    topo.StartRefreshWritableVolumes()
+    topo.StartRefreshWritableVolumes(*garbageThreshold)

     log.Println("Start Weed Master", VERSION, "at port", strconv.Itoa(*mport))
     srv := &http.Server{

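A side note on the dereference in topo.StartRefreshWritableVolumes(*garbageThreshold): Flag.String returns a *string, so the value is read through the pointer only after flag parsing has run. A minimal standalone sketch of that pattern using the standard flag package (the project's cmdMaster wrapper is not reproduced here):

package main

import (
    "flag"
    "fmt"
)

// flag.String returns a pointer; the flag machinery fills it in during Parse.
var garbageThreshold = flag.String("garbageThreshold", "0.3", "threshold to vacuum and reclaim spaces")

func main() {
    flag.Parse() // must run before dereferencing the pointer
    fmt.Println("vacuum threshold:", *garbageThreshold)
}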
File 2 of 3

@@ -10,12 +10,12 @@ import (
     "time"
 )

-func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
+func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold string) bool {
     ch := make(chan bool, locationlist.Length())
     for index, dn := range locationlist.list {
         go func(index int, url string, vid storage.VolumeId) {
             //fmt.Println(index, "Check vacuuming", vid, "on", dn.Url())
-            if e, ret := vacuumVolume_Check(url, vid); e != nil {
+            if e, ret := vacuumVolume_Check(url, vid, garbageThreshold); e != nil {
                 //fmt.Println(index, "Error when checking vacuuming", vid, "on", url, e)
                 ch <- false
             } else {
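The hunk above cuts off before the check results are collected. For orientation, here is a self-contained sketch of the same fan-out shape with a collection loop; batchCheck, the URL strings, and the all-locations-must-agree policy are illustrative assumptions, not code from this commit:

package main

import "fmt"

func batchCheck(urls []string, garbageThreshold string) bool {
    ch := make(chan bool, len(urls)) // buffered so no sender blocks
    for _, u := range urls {
        go func(url string) {
            // a real check would POST garbageThreshold to the volume server here
            ch <- len(url) > 0 && garbageThreshold != ""
        }(u)
    }
    isCheckSuccess := true
    for range urls { // collect exactly one result per location
        if !<-ch {
            isCheckSuccess = false
        }
    }
    return isCheckSuccess
}

func main() {
    fmt.Println(batchCheck([]string{"localhost:8080", "localhost:8081"}, "0.3"))
}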
@@ -78,11 +78,11 @@ func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
     }
     return isCommitSuccess
 }

-func (t *Topology) Vacuum() int {
+func (t *Topology) Vacuum(garbageThreshold string) int {
     for _, vl := range t.replicaType2VolumeLayout {
         if vl != nil {
             for vid, locationlist := range vl.vid2location {
-                if batchVacuumVolumeCheck(vl, vid, locationlist) {
+                if batchVacuumVolumeCheck(vl, vid, locationlist, garbageThreshold) {
                     if batchVacuumVolumeCompact(vl, vid, locationlist) {
                         batchVacuumVolumeCommit(vl, vid, locationlist)
                     }
@@ -98,10 +98,10 @@ type VacuumVolumeResult struct {
     Error string
 }

-func vacuumVolume_Check(urlLocation string, vid storage.VolumeId) (error, bool) {
+func vacuumVolume_Check(urlLocation string, vid storage.VolumeId, garbageThreshold string) (error, bool) {
     values := make(url.Values)
     values.Add("volume", vid.String())
-    values.Add("garbageThreshold", "0.3")
+    values.Add("garbageThreshold", garbageThreshold)
     jsonBlob, err := util.Post("http://"+urlLocation+"/admin/vacuum_volume_check", values)
     if err != nil {
         return err, false

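The threshold stays a string from the command line all the way into the form value above; it is presumably parsed on the volume-server side that answers /admin/vacuum_volume_check. A hedged sketch of how such a check might interpret it, assuming the threshold means the maximum tolerated fraction of deleted space; garbageRatio, shouldVacuum, and the byte counts are illustrative names and numbers, not from this codebase:

package main

import (
    "fmt"
    "strconv"
)

// garbageRatio reports the fraction of a volume occupied by deleted entries.
func garbageRatio(deletedBytes, totalBytes uint64) float64 {
    if totalBytes == 0 {
        return 0
    }
    return float64(deletedBytes) / float64(totalBytes)
}

// shouldVacuum parses the threshold string (the flag is a string end to end)
// and compares it against the current garbage ratio.
func shouldVacuum(threshold string, deletedBytes, totalBytes uint64) (bool, error) {
    t, err := strconv.ParseFloat(threshold, 64)
    if err != nil {
        return false, err
    }
    return garbageRatio(deletedBytes, totalBytes) > t, nil
}

func main() {
    ok, _ := shouldVacuum("0.3", 40<<20, 100<<20) // 40 MB deleted out of 100 MB
    fmt.Println(ok)                               // true: 0.4 > 0.3
}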
File 3 of 3

@@ -7,7 +7,7 @@ import (
     "time"
 )

-func (t *Topology) StartRefreshWritableVolumes() {
+func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
     go func() {
         for {
             freshThreshHold := time.Now().Unix() - 3*t.pulse //3 times of sleep interval
@@ -15,12 +15,12 @@ func (t *Topology) StartRefreshWritableVolumes() {
             time.Sleep(time.Duration(float32(t.pulse*1e3)*(1+rand.Float32())) * time.Millisecond)
         }
     }()
-    go func() {
+    go func(garbageThreshold string) {
         c := time.Tick(15 * time.Minute)
         for _ = range c {
-            t.Vacuum()
+            t.Vacuum(garbageThreshold)
         }
-    }()
+    }(garbageThreshold)
     go func() {
         for {
             select {
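A design note on the last hunk: the vacuum goroutine now receives garbageThreshold as an explicit argument instead of capturing it from the enclosing scope, which keeps the goroutine's inputs visible at the go statement. A minimal standalone sketch of the same ticker pattern; startPeriodicVacuum is an illustrative name, and the tick is shortened so the demo actually prints (the real code ticks every 15 minutes):

package main

import (
    "fmt"
    "time"
)

func startPeriodicVacuum(garbageThreshold string) {
    go func(threshold string) { // pass by value: the goroutine owns its copy
        c := time.Tick(200 * time.Millisecond) // the real code uses 15 * time.Minute
        for range c {
            fmt.Println("would vacuum with threshold", threshold)
        }
    }(garbageThreshold)
}

func main() {
    startPeriodicVacuum("0.3")
    time.Sleep(time.Second) // let the demo tick a few times
}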