Mirror of https://github.com/seaweedfs/seaweedfs.git
clean up log/fmt usage. Move to log for important data changes and warnings.
parent ae3245f1dc
commit ac15868694
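
The change is mechanical across every file below: operational messages previously emitted with the builtin println or with fmt.Println/fmt.Printf are routed through the standard log package instead. A minimal sketch of what that buys — the volume id 7, the address "server1:8080", and the prefix "weed " are illustrative values, not taken from the commit:

package main

import (
	"log"
	"os"
)

func main() {
	// fmt.Println writes the bare message to stdout with no timestamp, and
	// the builtin println goes to stderr with no formatting guarantees.
	// log.Println prefixes each line with date and time and writes to a
	// configurable destination (stderr by default):
	log.Println("Created Volume", 7, "on", "server1:8080")
	// e.g. "2012/09/20 14:05:02 Created Volume 7 on server1:8080"

	// The sink, prefix, and flags are adjustable process-wide, which suits
	// the commit's goal of logging "important data changes and warnings":
	log.SetOutput(os.Stderr)
	log.SetPrefix("weed ")
	log.SetFlags(log.LstdFlags)
	log.Println("Volume", 7, "becomes writable")
}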
@@ -6,6 +6,7 @@ import (
   "code.google.com/p/weed-fs/go/topology"
   "errors"
   "fmt"
+  "log"
   "math/rand"
   "sync"
 )

@@ -106,7 +107,6 @@ func (vg *VolumeGrowth) GrowByCountAndType(count int, repType storage.Replicatio
   nl := topology.NewNodeList(topo.Children(), nil)
   picked, ret := nl.RandomlyPickN(2, 1, dataCenter)
   vid := topo.NextVolumeId()
-  println("growing on picked servers", picked)
   if ret {
     var servers []*topology.DataNode
     for _, n := range picked {

@@ -116,7 +116,6 @@ func (vg *VolumeGrowth) GrowByCountAndType(count int, repType storage.Replicatio
         }
       }
     }
-    println("growing on servers", servers)
     if len(servers) == 2 {
       if err = vg.grow(topo, vid, repType, servers...); err == nil {
         counter++

@@ -193,9 +192,9 @@ func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, repT
       vi := storage.VolumeInfo{Id: vid, Size: 0, RepType: repType, Version: storage.CurrentVersion}
       server.AddOrUpdateVolume(vi)
       topo.RegisterVolumeLayout(&vi, server)
-      fmt.Println("Created Volume", vid, "on", server)
+      log.Println("Created Volume", vid, "on", server)
     } else {
-      fmt.Println("Failed to assign", vid, "to", servers, "error", err)
+      log.Println("Failed to assign", vid, "to", servers, "error", err)
       return errors.New("Failed to assign " + vid.String())
     }
   }

@@ -4,7 +4,7 @@ import (
   "code.google.com/p/weed-fs/go/util"
   "encoding/hex"
   "errors"
-  "fmt"
+  "log"
   "io/ioutil"
   "mime"
   "net/http"

@@ -43,13 +43,13 @@ func NewNeedle(r *http.Request) (n *Needle, e error) {
   n = new(Needle)
   form, fe := r.MultipartReader()
   if fe != nil {
-    fmt.Println("MultipartReader [ERROR]", fe)
+    log.Println("MultipartReader [ERROR]", fe)
     e = fe
     return
   }
   part, fe := form.NextPart()
   if fe != nil {
-    fmt.Println("Reading Multi part [ERROR]", fe)
+    log.Println("Reading Multi part [ERROR]", fe)
     e = fe
     return
   }

@@ -114,7 +114,7 @@ func (n *Needle) ParsePath(fid string) {
   length := len(fid)
   if length <= 8 {
     if length > 0 {
-      println("Invalid fid", fid, "length", length)
+      log.Println("Invalid fid", fid, "length", length)
     }
     return
   }

@@ -136,7 +136,7 @@ func ParseKeyHash(key_hash_string string) (uint64, uint32) {
   key_hash_bytes, khe := hex.DecodeString(key_hash_string)
   key_hash_len := len(key_hash_bytes)
   if khe != nil || key_hash_len <= 4 {
-    println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
+    log.Println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
     return 0, 0
   }
   key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])

@@ -5,6 +5,7 @@ import (
   "errors"
   "fmt"
   "io"
+  "log"
   "os"
 )

@@ -26,7 +27,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
   defer func(s io.Seeker, off int64) {
     if err != nil {
       if _, e = s.Seek(off, 0); e != nil {
-        fmt.Printf("Failed to seek %s back to %d with error: %s\n", w, off, e)
+        log.Printf("Failed to seek %s back to %d with error: %s\n", w, off, e)
       }
     }
   }(s, end)

@@ -108,7 +108,7 @@ func (v *Volume) Size() int64 {
   if e == nil {
     return stat.Size()
   }
-  fmt.Printf("Failed to read file size %s %s\n", v.dataFile.Name(), e.Error())
+  log.Printf("Failed to read file size %s %s\n", v.dataFile.Name(), e.Error())
   return -1
 }
 func (v *Volume) Close() {

@@ -120,7 +120,7 @@ func (v *Volume) Close() {
 func (v *Volume) maybeWriteSuperBlock() error {
   stat, e := v.dataFile.Stat()
   if e != nil {
-    fmt.Printf("failed to stat datafile %s: %s", v.dataFile, e)
+    log.Printf("failed to stat datafile %s: %s", v.dataFile, e)
     return e
   }
   if stat.Size() == 0 {

@@ -2,7 +2,7 @@ package topology

 import (
   "code.google.com/p/weed-fs/go/storage"
-  "fmt"
+  "log"
 )

 type NodeId string

@@ -155,7 +155,7 @@ func (n *NodeImpl) LinkChildNode(node Node) {
     n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
     n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
     node.SetParent(n)
-    fmt.Println(n, "adds child", node.Id())
+    log.Println(n, "adds child", node.Id())
   }
 }

@@ -167,7 +167,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
     n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
     n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
     n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
-    fmt.Println(n, "removes", node, "volumeCount =", n.activeVolumeCount)
+    log.Println(n, "removes", node, "volumeCount =", n.activeVolumeCount)
   }
 }

@@ -2,7 +2,7 @@ package topology

 import (
   "code.google.com/p/weed-fs/go/storage"
-  "fmt"
+  "log"
   "math/rand"
 )

@@ -58,7 +58,7 @@ func (nl *NodeList) RandomlyPickN(count int, minSpace int, firstNodeName string)
     list[r], list[i-1] = list[i-1], list[r]
   }
   if firstNodeName != "" {
-    list[0] = *preferredNode
+    list[0] = *preferredNode
   }
   return list[:count], true
 }

@@ -70,7 +70,7 @@ func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId
       randomVolumeIndex -= freeSpace
     } else {
       if node.IsDataNode() && node.FreeSpace() > 0 {
-        fmt.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
+        log.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
         return true, node.(*DataNode)
       }
       children := node.Children()

@@ -4,7 +4,6 @@ import (
   "code.google.com/p/weed-fs/go/sequence"
   "code.google.com/p/weed-fs/go/storage"
   "errors"
-  "fmt"
   "io/ioutil"
   "log"
   "math/rand"

@@ -74,7 +73,7 @@ func (t *Topology) Lookup(vid storage.VolumeId) []*DataNode {

 func (t *Topology) RandomlyReserveOneVolume(dataCenter string) (bool, *DataNode, *storage.VolumeId) {
   if t.FreeSpace() <= 0 {
-    fmt.Println("Topology does not have free space left!")
+    log.Println("Topology does not have free space left!")
     return false, nil, nil
   }
   vid := t.NextVolumeId()

@@ -103,7 +102,7 @@ func (t *Topology) PickForWrite(repType storage.ReplicationType, count int, data
 func (t *Topology) GetVolumeLayout(repType storage.ReplicationType) *VolumeLayout {
   replicationTypeIndex := repType.GetReplicationLevelIndex()
   if t.replicaType2VolumeLayout[replicationTypeIndex] == nil {
-    fmt.Println("adding replication type", repType)
+    log.Println("adding replication type", repType)
     t.replicaType2VolumeLayout[replicationTypeIndex] = NewVolumeLayout(repType, t.volumeSizeLimit, t.pulse)
   }
   return t.replicaType2VolumeLayout[replicationTypeIndex]

@@ -5,7 +5,7 @@ import (
   "code.google.com/p/weed-fs/go/util"
   "encoding/json"
   "errors"
-  "fmt"
+  "log"
   "net/url"
   "time"
 )

@@ -14,12 +14,12 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist
   ch := make(chan bool, locationlist.Length())
   for index, dn := range locationlist.list {
     go func(index int, url string, vid storage.VolumeId) {
-      //fmt.Println(index, "Check vacuuming", vid, "on", dn.Url())
+      //log.Println(index, "Check vacuuming", vid, "on", dn.Url())
       if e, ret := vacuumVolume_Check(url, vid, garbageThreshold); e != nil {
-        //fmt.Println(index, "Error when checking vacuuming", vid, "on", url, e)
+        //log.Println(index, "Error when checking vacuuming", vid, "on", url, e)
         ch <- false
       } else {
-        //fmt.Println(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
+        //log.Println(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
         ch <- ret
       }
     }(index, dn.Url(), vid)

@@ -41,12 +41,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
   ch := make(chan bool, locationlist.Length())
   for index, dn := range locationlist.list {
     go func(index int, url string, vid storage.VolumeId) {
-      fmt.Println(index, "Start vacuuming", vid, "on", url)
+      log.Println(index, "Start vacuuming", vid, "on", url)
       if e := vacuumVolume_Compact(url, vid); e != nil {
-        fmt.Println(index, "Error when vacuuming", vid, "on", url, e)
+        log.Println(index, "Error when vacuuming", vid, "on", url, e)
         ch <- false
       } else {
-        fmt.Println(index, "Complete vacuuming", vid, "on", url)
+        log.Println(index, "Complete vacuuming", vid, "on", url)
         ch <- true
       }
     }(index, dn.Url(), vid)

@@ -65,12 +65,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
 func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
   isCommitSuccess := true
   for _, dn := range locationlist.list {
-    fmt.Println("Start Commiting vacuum", vid, "on", dn.Url())
+    log.Println("Start Commiting vacuum", vid, "on", dn.Url())
     if e := vacuumVolume_Commit(dn.Url(), vid); e != nil {
-      fmt.Println("Error when committing vacuum", vid, "on", dn.Url(), e)
+      log.Println("Error when committing vacuum", vid, "on", dn.Url(), e)
       isCommitSuccess = false
     } else {
-      fmt.Println("Complete Commiting vacuum", vid, "on", dn.Url())
+      log.Println("Complete Commiting vacuum", vid, "on", dn.Url())
     }
   }
   if isCommitSuccess {

@@ -104,7 +104,7 @@ func vacuumVolume_Check(urlLocation string, vid storage.VolumeId, garbageThresho
   values.Add("garbageThreshold", garbageThreshold)
   jsonBlob, err := util.Post("http://"+urlLocation+"/admin/vacuum_volume_check", values)
   if err != nil {
-    fmt.Println("parameters:", values)
+    log.Println("parameters:", values)
     return err, false
   }
   var ret VacuumVolumeResult

@@ -2,7 +2,7 @@ package topology

 import (
   "code.google.com/p/weed-fs/go/storage"
-  "fmt"
+  "log"
   "math/rand"
   "time"
 )

@@ -28,10 +28,10 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
         t.SetVolumeCapacityFull(v)
       case dn := <-t.chanRecoveredDataNodes:
         t.RegisterRecoveredDataNode(dn)
-        fmt.Println("DataNode", dn, "is back alive!")
+        log.Println("DataNode", dn, "is back alive!")
       case dn := <-t.chanDeadDataNodes:
         t.UnRegisterDataNode(dn)
-        fmt.Println("DataNode", dn, "is dead!")
+        log.Println("DataNode", dn, "is dead!")
       }
     }
   }()

@@ -48,7 +48,7 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 }
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
   for _, v := range dn.volumes {
-    fmt.Println("Removing Volume", v.Id, "from the dead volume server", dn)
+    log.Println("Removing Volume", v.Id, "from the dead volume server", dn)
     vl := t.GetVolumeLayout(v.RepType)
     vl.SetVolumeUnavailable(dn, v.Id)
   }

@@ -3,7 +3,7 @@ package topology
 import (
   "code.google.com/p/weed-fs/go/storage"
   "errors"
-  "fmt"
+  "log"
   "math/rand"
 )

@@ -54,7 +54,7 @@ func (vl *VolumeLayout) Lookup(vid storage.VolumeId) []*DataNode {
 func (vl *VolumeLayout) PickForWrite(count int, dataCenter string) (*storage.VolumeId, int, *VolumeLocationList, error) {
   len_writers := len(vl.writables)
   if len_writers <= 0 {
-    fmt.Println("No more writable volumes!")
+    log.Println("No more writable volumes!")
     return nil, 0, nil, errors.New("No more writable volumes!")
   }
   if dataCenter == "" {

@@ -102,7 +102,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(dataCenter string) int {
 func (vl *VolumeLayout) removeFromWritable(vid storage.VolumeId) bool {
   for i, v := range vl.writables {
     if v == vid {
-      fmt.Println("Volume", vid, "becomes unwritable")
+      log.Println("Volume", vid, "becomes unwritable")
       vl.writables = append(vl.writables[:i], vl.writables[i+1:]...)
       return true
     }

@@ -115,7 +115,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
       return false
     }
   }
-  fmt.Println("Volume", vid, "becomes writable")
+  log.Println("Volume", vid, "becomes writable")
   vl.writables = append(vl.writables, vid)
   return true
 }

@@ -123,7 +123,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
 func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId) bool {
   if vl.vid2location[vid].Remove(dn) {
     if vl.vid2location[vid].Length() < vl.repType.GetCopyCount() {
-      fmt.Println("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
+      log.Println("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
       return vl.removeFromWritable(vid)
     }
   }