switching to temporarily use glog library
This commit is contained in:
parent b27947b355
commit ed154053c8
17 go/glog/convenient_api.go Normal file
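The bulk of the diff below is a mechanical substitution: call sites that used the standard library log package switch to the vendored, leveled glog package (log.Println becomes glog.V(0).Infoln, log.Printf becomes glog.V(0).Infof, log.Fatalf becomes glog.Fatalf), and the "log" import is replaced by "code.google.com/p/weed-fs/go/glog". A minimal sketch of the resulting call-site style follows; the helper name and messages are illustrative, not taken from the diff:

package example

import (
	"code.google.com/p/weed-fs/go/glog" // replaces the standard "log" import
)

// openStore is illustrative only; it shows the two call styles this commit
// standardizes on: leveled info logging and fatal logging.
func openStore(dir string) {
	if dir == "" {
		glog.Fatalf("no store directory given") // was log.Fatalf(...)
	}
	glog.V(0).Infoln("opening store in", dir) // was log.Println(...)
}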
@@ -0,0 +1,17 @@
+package glog
+
+import ()
+
+/*
+Copying the original glog because it is missing several convenient methods.
+1. change log file size limit to 180MB
+2. use ToStderrAndLog() in the weed.go
+3. remove nano time in log format
+*/
+
+func ToStderr() {
+logging.toStderr = true
+}
+func ToStderrAndLog() {
+logging.alsoToStderr = true
+}
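A brief, hedged sketch of how these two helpers are meant to be used together with the leveled calls that appear throughout the rest of the diff; the main function below is illustrative (weed.go itself only gains the single glog.ToStderrAndLog() call, shown near the end of this commit):

package main

import (
	"flag"

	"code.google.com/p/weed-fs/go/glog"
)

func main() {
	flag.Parse()          // glog registers its -v and -log_dir flags with the flag package
	glog.ToStderrAndLog() // mirror every log line to stderr and still write log files
	// glog.ToStderr()    // alternative: log to stderr only

	glog.V(0).Infoln("volume server starting") // level 0: always emitted
	glog.V(4).Infoln("verbose trace detail")   // emitted only when run with -v=4 or higher
	glog.Flush()                               // flush buffered log lines before exit
}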
@@ -557,8 +557,8 @@ func (l *loggingT) header(s severity) *buffer {
 buf.twoDigits(9, minute)
 buf.tmp[11] = ':'
 buf.twoDigits(12, second)
-buf.tmp[14] = '.'
-buf.nDigits(6, 15, now.Nanosecond()/1000)
+//buf.tmp[14] = '.'
+//buf.nDigits(6, 15, now.Nanosecond()/1000)
 buf.tmp[21] = ' '
 buf.nDigits(5, 22, pid) // TODO: should be TID
 buf.tmp[27] = ' '
@@ -798,7 +798,7 @@ func (sb *syncBuffer) rotateFile(now time.Time) error {
 fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
 fmt.Fprintf(&buf, "Running on machine: %s\n", host)
 fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
-fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss threadid file:line] msg\n")
 n, err := sb.file.Write(buf.Bytes())
 sb.nbytes += uint64(n)
 return err
@@ -31,7 +31,7 @@ import (
 )

 // MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800
+var MaxSize uint64 = 1024 * 1024 * 180

 // logDirs lists the candidate directories for new log files.
 var logDirs []string
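MaxSize is the per-file cap that makes glog roll over to a fresh log file, so this hunk is what implements the "180MB" point from the comment in convenient_api.go. A rough sketch of the kind of check such a limit drives; this is an illustration, not the vendored glog source:

// maxSize mirrors the new limit above: 1024 * 1024 * 180 bytes per log file.
var maxSize uint64 = 1024 * 1024 * 180

// needsRotation reports whether appending p to a log file already holding
// nbytes would cross the limit, i.e. whether a new file should be started.
func needsRotation(nbytes uint64, p []byte) bool {
	return nbytes+uint64(len(p)) >= maxSize
}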
@@ -1,14 +1,14 @@
 package operation

 import (
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "net/http"
 )

 func Delete(url string) error {
 req, err := http.NewRequest("DELETE", url, nil)
 if err != nil {
-log.Println("failing to delete", url)
+glog.V(0).Infoln("failing to delete", url)
 return err
 }
 _, err = http.DefaultClient.Do(req)
@@ -7,7 +7,7 @@ import (
 "fmt"
 "io"
 "io/ioutil"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "mime"
 "mime/multipart"
 "net/http"
@@ -37,21 +37,21 @@ func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool,
 }
 file_writer, err := body_writer.CreatePart(h)
 if err != nil {
-log.Println("error creating form file", err)
+glog.V(0).Infoln("error creating form file", err)
 return nil, err
 }
 if _, err = io.Copy(file_writer, reader); err != nil {
-log.Println("error copying data", err)
+glog.V(0).Infoln("error copying data", err)
 return nil, err
 }
 content_type := body_writer.FormDataContentType()
 if err = body_writer.Close(); err != nil {
-log.Println("error closing body", err)
+glog.V(0).Infoln("error closing body", err)
 return nil, err
 }
 resp, err := http.Post(uploadUrl, content_type, body_buf)
 if err != nil {
-log.Println("failing to upload to", uploadUrl)
+glog.V(0).Infoln("failing to upload to", uploadUrl)
 return nil, err
 }
 defer resp.Body.Close()
@@ -62,7 +62,7 @@ func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool,
 var ret UploadResult
 err = json.Unmarshal(resp_body, &ret)
 if err != nil {
-log.Println("failing to read upload resonse", uploadUrl, resp_body)
+glog.V(0).Infoln("failing to read upload resonse", uploadUrl, resp_body)
 return nil, err
 }
 if ret.Error != "" {
@@ -4,7 +4,7 @@ import (
 "bytes"
 "code.google.com/p/weed-fs/go/operation"
 "code.google.com/p/weed-fs/go/storage"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "net/http"
 "strconv"
 )
@@ -50,7 +50,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId storage.Volum
 func ReplicatedDelete(masterNode string, store *storage.Store, volumeId storage.VolumeId, n *storage.Needle, r *http.Request) (ret uint32) {
 ret, err := store.Delete(volumeId, n)
 if err != nil {
-log.Println("delete error:", err)
+glog.V(0).Infoln("delete error:", err)
 return
 }

@@ -89,7 +89,7 @@ func distributedOperation(masterNode string, store *storage.Store, volumeId stor
 }
 return ret
 } else {
-log.Println("Failed to lookup for", volumeId, lookupErr.Error())
+glog.V(0).Infoln("Failed to lookup for", volumeId, lookupErr.Error())
 }
 return false
 }
@@ -6,7 +6,7 @@ import (
 "code.google.com/p/weed-fs/go/topology"
 "errors"
 "fmt"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 "sync"
 )
@@ -204,9 +204,9 @@ func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, repT
 vi := storage.VolumeInfo{Id: vid, Size: 0, RepType: repType, Version: storage.CurrentVersion}
 server.AddOrUpdateVolume(vi)
 topo.RegisterVolumeLayout(&vi, server)
-log.Println("Created Volume", vid, "on", server)
+glog.V(0).Infoln("Created Volume", vid, "on", server)
 } else {
-log.Println("Failed to assign", vid, "to", servers, "error", err)
+glog.V(0).Infoln("Failed to assign", vid, "to", servers, "error", err)
 return errors.New("Failed to assign " + vid.String())
 }
 }
@@ -2,7 +2,7 @@ package sequence

 import (
 "encoding/gob"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "os"
 "path"
 "sync"
@@ -32,16 +32,16 @@ func NewSequencer(dirname string, filename string) (m *SequencerImpl) {
 seqFile, se := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_RDONLY, 0644)
 if se != nil {
 m.FileIdSequence = FileIdSaveInterval
-log.Println("Setting file id sequence", m.FileIdSequence)
+glog.V(0).Infoln("Setting file id sequence", m.FileIdSequence)
 } else {
 decoder := gob.NewDecoder(seqFile)
 defer seqFile.Close()
 if se = decoder.Decode(&m.FileIdSequence); se != nil {
-log.Printf("error decoding FileIdSequence: %s", se)
+glog.V(0).Infof("error decoding FileIdSequence: %s", se)
 m.FileIdSequence = FileIdSaveInterval
-log.Println("Setting file id sequence", m.FileIdSequence)
+glog.V(0).Infoln("Setting file id sequence", m.FileIdSequence)
 } else {
-log.Println("Loading file id sequence", m.FileIdSequence, "=>", m.FileIdSequence+FileIdSaveInterval)
+glog.V(0).Infoln("Loading file id sequence", m.FileIdSequence, "=>", m.FileIdSequence+FileIdSaveInterval)
 m.FileIdSequence += FileIdSaveInterval
 }
 //in case the server stops between intervals
@@ -65,14 +65,14 @@ func (m *SequencerImpl) NextFileId(count int) (uint64, int) {
 return m.FileIdSequence - m.fileIdCounter - uint64(count), count
 }
 func (m *SequencerImpl) saveSequence() {
-log.Println("Saving file id sequence", m.FileIdSequence, "to", path.Join(m.dir, m.fileName+".seq"))
+glog.V(0).Infoln("Saving file id sequence", m.FileIdSequence, "to", path.Join(m.dir, m.fileName+".seq"))
 seqFile, e := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_CREATE|os.O_WRONLY, 0644)
 if e != nil {
-log.Fatalf("Sequence File Save [ERROR] %s\n", e)
+glog.Fatalf("Sequence File Save [ERROR] %s", e)
 }
 defer seqFile.Close()
 encoder := gob.NewEncoder(seqFile)
 if e = encoder.Encode(m.FileIdSequence); e != nil {
-log.Fatalf("Sequence File Save [ERROR] %s\n", e)
+glog.Fatalf("Sequence File Save [ERROR] %s", e)
 }
 }
@@ -1,7 +1,7 @@
 package storage

 import (
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 "os"
 "runtime"
@@ -39,7 +39,7 @@ func TestCdbMap1Mem(t *testing.T) {
 t.Fatalf("error opening cdb: %s", err)
 }
 b := getMemStats()
-log.Printf("opening cdb consumed %d bytes", b-a)
+glog.V(0).Infof("opening cdb consumed %d bytes", b-a)
 defer nm.Close()

 a = getMemStats()
@@ -47,7 +47,7 @@ func TestCdbMap1Mem(t *testing.T) {
 t.Fatalf("error visiting %s: %s", nm, err)
 }
 b = getMemStats()
-log.Printf("visit cdb %d consumed %d bytes", i, b-a)
+glog.V(0).Infof("visit cdb %d consumed %d bytes", i, b-a)
 nm.Close()

 indexFile, err := os.Open(testIndexFilename)
@@ -61,7 +61,7 @@ func TestCdbMap1Mem(t *testing.T) {
 }
 defer nm.Close()
 b = getMemStats()
-log.Printf("opening idx consumed %d bytes", b-a)
+glog.V(0).Infof("opening idx consumed %d bytes", b-a)

 i = 0
 a = getMemStats()
@@ -69,7 +69,7 @@ func TestCdbMap1Mem(t *testing.T) {
 t.Fatalf("error visiting %s: %s", nm, err)
 }
 b = getMemStats()
-log.Printf("visit idx %d consumed %d bytes", i, b-a)
+glog.V(0).Infof("visit idx %d consumed %d bytes", i, b-a)
 }

 func BenchmarkCdbMap9List(t *testing.B) {
@@ -88,7 +88,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
 }
 defer idx.Close()
 b := getMemStats()
-log.Printf("LoadNeedleMap consumed %d bytes", b-a)
+glog.V(0).Infof("LoadNeedleMap consumed %d bytes", b-a)

 cdbFn := testIndexFilename + ".cdb"
 a = getMemStats()
@@ -99,10 +99,10 @@ func BenchmarkCdbMap9List(t *testing.B) {
 }
 defer m.Close()
 b = getMemStats()
-log.Printf("OpenCdbMap consumed %d bytes", b-a)
+glog.V(0).Infof("OpenCdbMap consumed %d bytes", b-a)

 i := 0
-log.Printf("checking whether the cdb contains every key")
+glog.V(0).Infoln("checking whether the cdb contains every key")
 t.StartTimer()
 err = idx.Visit(func(nv NeedleValue) error {
 if i > t.N || rand.Intn(10) < 9 {
@@ -110,7 +110,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
 }
 i++
 if i%1000 == 0 {
-log.Printf("%d. %s", i, nv)
+glog.V(0).Infof("%d. %s", i, nv)
 }
 if nv2, ok := m.Get(uint64(nv.Key)); !ok || nv2 == nil {
 t.Errorf("%s in index, not in cdb", nv.Key)
@@ -130,7 +130,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
 }

 i = 0
-log.Printf("checking wheter the cdb contains no stray keys")
+glog.V(0).Infoln("checking wheter the cdb contains no stray keys")
 t.StartTimer()
 err = m.Visit(func(nv NeedleValue) error {
 if i > t.N || rand.Intn(10) < 9 {
@@ -147,7 +147,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
 }
 i++
 if i%1000 == 0 {
-log.Printf("%d. %s", i, nv)
+glog.V(0).Infof("%d. %s", i, nv)
 }
 t.SetBytes(int64(nv.Size))
 return nil
@@ -38,13 +38,13 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
 }
 if i := cs.binarySearchValues(key); i >= 0 {
 ret = cs.values[i].Size
-//println("key", key, "old size", ret)
+//glog.V(4).Infoln("key", key, "old size", ret)
 cs.values[i].Offset, cs.values[i].Size = offset, size
 } else {
 needOverflow := cs.counter >= batch
 needOverflow = needOverflow || cs.counter > 0 && cs.values[cs.counter-1].Key > key
 if needOverflow {
-//println("start", cs.start, "counter", cs.counter, "key", key)
+//glog.V(4).Infoln("start", cs.start, "counter", cs.counter, "key", key)
 if oldValue, found := cs.overflow[key]; found {
 ret = oldValue.Size
 }
@@ -52,7 +52,7 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
 } else {
 p := &cs.values[cs.counter]
 p.Key, p.Offset, p.Size = key, offset, size
-//println("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
+//glog.V(4).Infoln("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
 cs.counter++
 }
 }
@@ -88,16 +88,16 @@ func (cs *CompactSection) binarySearchValues(key Key) int {
 if h >= 0 && cs.values[h].Key < key {
 return -2
 }
-//println("looking for key", key)
+//glog.V(4).Infoln("looking for key", key)
 for l <= h {
 m := (l + h) / 2
-//println("mid", m, "key", cs.values[m].Key, cs.values[m].Offset, cs.values[m].Size)
+//glog.V(4).Infoln("mid", m, "key", cs.values[m].Key, cs.values[m].Offset, cs.values[m].Size)
 if cs.values[m].Key < key {
 l = m + 1
 } else if key < cs.values[m].Key {
 h = m - 1
 } else {
-//println("found", m)
+//glog.V(4).Infoln("found", m)
 return m
 }
 }
@@ -116,7 +116,7 @@ func NewCompactMap() CompactMap {
 func (cm *CompactMap) Set(key Key, offset uint32, size uint32) uint32 {
 x := cm.binarySearchCompactSection(key)
 if x < 0 {
-//println(x, "creating", len(cm.list), "section1, starting", key)
+//glog.V(4).Infoln(x, "creating", len(cm.list), "section1, starting", key)
 cm.list = append(cm.list, NewCompactSection(key))
 x = len(cm.list) - 1
 }
@@ -2,7 +2,7 @@ package storage

 import (
 "code.google.com/p/weed-fs/go/util"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "os"
 "testing"
 )
@@ -23,7 +23,7 @@ func LoadNewNeedleMap(file *os.File) CompactMap {
 count, e := file.Read(bytes)
 if count > 0 {
 fstat, _ := file.Stat()
-log.Println("Loading index file", fstat.Name(), "size", fstat.Size())
+glog.V(0).Infoln("Loading index file", fstat.Name(), "size", fstat.Size())
 }
 for count > 0 && e == nil {
 for i := 0; i < count; i += 16 {
@@ -20,7 +20,7 @@ func TestXYZ(t *testing.T) {

 // for i := uint32(0); i < 100; i++ {
 // if v := m.Get(Key(i)); v != nil {
-// println(i, "=", v.Key, v.Offset, v.Size)
+// glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size)
 // }
 // }

@@ -4,6 +4,7 @@ import (
 "bytes"
 "compress/flate"
 "compress/gzip"
+"code.google.com/p/weed-fs/go/glog"
 "io/ioutil"
 "strings"
 )
@@ -36,11 +37,11 @@ func GzipData(input []byte) ([]byte, error) {
 buf := new(bytes.Buffer)
 w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
 if _, err := w.Write(input); err != nil {
-println("error compressing data:", err)
+glog.V(4).Infoln("error compressing data:", err)
 return nil, err
 }
 if err := w.Close(); err != nil {
-println("error closing compressed data:", err)
+glog.V(4).Infoln("error closing compressed data:", err)
 return nil, err
 }
 return buf.Bytes(), nil
@@ -51,7 +52,7 @@ func UnGzipData(input []byte) ([]byte, error) {
 defer r.Close()
 output, err := ioutil.ReadAll(r)
 if err != nil {
-println("error uncompressing data:", err)
+glog.V(4).Infoln("error uncompressing data:", err)
 }
 return output, err
 }
@@ -4,6 +4,7 @@ import (
 "code.google.com/p/weed-fs/go/util"
 "encoding/hex"
 "strings"
+"code.google.com/p/weed-fs/go/glog"
 )

 type FileId struct {
@@ -18,7 +19,7 @@ func NewFileId(VolumeId VolumeId, Key uint64, Hashcode uint32) *FileId {
 func ParseFileId(fid string) *FileId {
 a := strings.Split(fid, ",")
 if len(a) != 2 {
-println("Invalid fid", fid, ", split length", len(a))
+glog.V(4).Infoln("Invalid fid", fid, ", split length", len(a))
 return nil
 }
 vid_string, key_hash_string := a[0], a[1]
@@ -5,7 +5,7 @@ import (
 "encoding/hex"
 "errors"
 "io/ioutil"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "mime"
 "net/http"
 "path"
@@ -41,13 +41,13 @@ type Needle struct {
 func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string, isGzipped bool, modifiedTime uint64, e error) {
 form, fe := r.MultipartReader()
 if fe != nil {
-log.Println("MultipartReader [ERROR]", fe)
+glog.V(0).Infoln("MultipartReader [ERROR]", fe)
 e = fe
 return
 }
 part, fe := form.NextPart()
 if fe != nil {
-log.Println("Reading Multi part [ERROR]", fe)
+glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
 e = fe
 return
 }
@@ -60,7 +60,7 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
 }
 data, e = ioutil.ReadAll(part)
 if e != nil {
-log.Println("Reading Content [ERROR]", e)
+glog.V(0).Infoln("Reading Content [ERROR]", e)
 return
 }
 dotIndex := strings.LastIndex(fileName, ".")
@@ -131,7 +131,7 @@ func (n *Needle) ParsePath(fid string) {
 length := len(fid)
 if length <= 8 {
 if length > 0 {
-log.Println("Invalid fid", fid, "length", length)
+glog.V(0).Infoln("Invalid fid", fid, "length", length)
 }
 return
 }
@@ -153,7 +153,7 @@ func ParseKeyHash(key_hash_string string) (uint64, uint32) {
 key_hash_bytes, khe := hex.DecodeString(key_hash_string)
 key_hash_len := len(key_hash_bytes)
 if khe != nil || key_hash_len <= 4 {
-log.Println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
+glog.V(0).Infoln("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
 return 0, 0
 }
 key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])
@@ -57,14 +57,14 @@ func LoadNeedleMap(file *os.File) (*NeedleMap, error) {
 nm.FileByteCounter = nm.FileByteCounter + uint64(size)
 if offset > 0 {
 oldSize := nm.m.Set(Key(key), offset, size)
-//log.Println("reading key", key, "offset", offset, "size", size, "oldSize", oldSize)
+//glog.V(0).Infoln("reading key", key, "offset", offset, "size", size, "oldSize", oldSize)
 if oldSize > 0 {
 nm.DeletionCounter++
 nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
 }
 } else {
 oldSize := nm.m.Delete(Key(key))
-//log.Println("removing key", key, "offset", offset, "size", size, "oldSize", oldSize)
+//glog.V(0).Infoln("removing key", key, "offset", offset, "size", size, "oldSize", oldSize)
 nm.DeletionCounter++
 nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
 }
@@ -5,7 +5,7 @@ import (
 "errors"
 "fmt"
 "io"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "os"
 )

@@ -27,12 +27,12 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
 defer func(s io.Seeker, off int64) {
 if err != nil {
 if _, e = s.Seek(off, 0); e != nil {
-log.Printf("Failed to seek %s back to %d with error: %s\n", w, off, e)
+glog.V(0).Infof("Failed to seek %s back to %d with error: %s", w, off, e.Error())
 }
 }
 }(s, end)
 } else {
-err = fmt.Errorf("Cnnot Read Current Volume Position: %s", e)
+err = fmt.Errorf("Cnnot Read Current Volume Position: %s", e.Error())
 return
 }
 }
@@ -5,7 +5,7 @@ import (
 "encoding/json"
 "fmt"
 "io/ioutil"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "net/url"
 "strconv"
 "strings"
@@ -95,7 +95,7 @@ func (s *Store) addVolume(vid VolumeId, replicationType ReplicationType) error {
 return fmt.Errorf("Volume Id %s already exists!", vid)
 }
 if location := s.findFreeLocation(); location != nil {
-log.Println("In dir", location.directory, "adds volume =", vid, ", replicationType =", replicationType)
+glog.V(0).Infoln("In dir", location.directory, "adds volume =", vid, ", replicationType =", replicationType)
 if volume, err := NewVolume(location.directory, vid, replicationType); err == nil {
 location.volumes[vid] = volume
 return nil
@@ -163,14 +163,14 @@ func (l *DiskLocation) loadExistingVolumes() {
 if l.volumes[vid] == nil {
 if v, e := NewVolume(l.directory, vid, CopyNil); e == nil {
 l.volumes[vid] = v
-log.Println("In dir", l.directory, "read volume =", vid, "replicationType =", v.ReplicaType, "version =", v.Version(), "size =", v.Size())
+glog.V(0).Infoln("In dir", l.directory, "read volume =", vid, "replicationType =", v.ReplicaType, "version =", v.Version(), "size =", v.Size())
 }
 }
 }
 }
 }
 }
-log.Println("Store started on dir:", l.directory, "with", len(l.volumes), "volumes", "max", l.maxVolumeCount)
+glog.V(0).Infoln("Store started on dir:", l.directory, "with", len(l.volumes), "volumes", "max", l.maxVolumeCount)
 }
 func (s *Store) Status() []*VolumeInfo {
 var stats []*VolumeInfo
@@ -259,15 +259,15 @@ func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) {
 err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.volumeSizeLimit, v.ContentSize())
 }
 if err != nil && s.volumeSizeLimit < v.ContentSize()+uint64(size) && s.volumeSizeLimit >= v.ContentSize() {
-log.Println("volume", i, "size is", v.ContentSize(), "close to", s.volumeSizeLimit)
+glog.V(0).Infoln("volume", i, "size is", v.ContentSize(), "close to", s.volumeSizeLimit)
 if err = s.Join(); err != nil {
-log.Printf("error with Join: %s", err)
+glog.V(0).Infoln("error with Join:", err)
 }
 }
 }
 return
 }
-log.Println("volume", i, "not found!")
+glog.V(0).Infoln("volume", i, "not found!")
 err = fmt.Errorf("Volume %s not found!", i)
 return
 }
@@ -2,10 +2,10 @@ package storage

 import (
 "bytes"
+"code.google.com/p/weed-fs/go/glog"
 "errors"
 "fmt"
 "io"
-"log"
 "os"
 "path"
 "sync"
@@ -45,7 +45,7 @@ func NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v
 e = v.load(true)
 return
 }
-func LoadVolumeOnly(dirname string, id VolumeId) (v *Volume, e error) {
+func loadVolumeWithoutIndex(dirname string, id VolumeId) (v *Volume, e error) {
 v = &Volume{dir: dirname, Id: id}
 v.SuperBlock = SuperBlock{ReplicaType: CopyNil}
 e = v.load(false)
@@ -57,12 +57,12 @@ func (v *Volume) load(alsoLoadIndex bool) error {
 v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
 if e != nil {
 if !os.IsPermission(e) {
-return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e)
+return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e.Error())
 }
 if v.dataFile, e = os.Open(fileName + ".dat"); e != nil {
-return fmt.Errorf("cannot open Volume Data %s.dat: %s", fileName, e)
+return fmt.Errorf("cannot open Volume Data %s.dat: %s", fileName, e.Error())
 }
-log.Printf("opening " + fileName + ".dat in READONLY mode")
+glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
 v.readOnly = true
 }
 if v.ReplicaType == CopyNil {
@@ -73,28 +73,40 @@ func (v *Volume) load(alsoLoadIndex bool) error {
 if e == nil && alsoLoadIndex {
 var indexFile *os.File
 if v.readOnly {
+glog.V(4).Infoln("opening file", fileName+".idx")
 if indexFile, e = os.Open(fileName + ".idx"); e != nil && !os.IsNotExist(e) {
-return fmt.Errorf("cannot open index file %s.idx: %s", fileName, e)
+return fmt.Errorf("cannot open index file %s.idx: %s", fileName, e.Error())
 }
 if indexFile != nil {
-log.Printf("converting %s.idx to %s.cdb", fileName, fileName)
-if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
-log.Printf("error converting %s.idx to %s.cdb: %s", fileName, fileName)
-} else {
-indexFile.Close()
-os.Remove(indexFile.Name())
-indexFile = nil
+glog.V(4).Infoln("check file", fileName+".cdb")
+if _, err := os.Stat(fileName + ".cdb"); os.IsNotExist(err) {
+glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
+if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
+glog.V(0).Infof("error converting %s.idx to %s.cdb: %s", fileName, e.Error())
+} else {
+indexFile.Close()
+indexFile = nil
+}
 }
 }
 }
-v.nm, e = OpenCdbMap(fileName + ".cdb")
-return e
-} else {
-indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644)
-if e != nil {
-return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e)
+glog.V(4).Infoln("open file", fileName+".cdb")
+if v.nm, e = OpenCdbMap(fileName + ".cdb"); e != nil {
+if os.IsNotExist(e) {
+glog.V(0).Infof("Failed to read cdb file :%s, fall back to normal readonly mode.", fileName)
+} else {
+glog.V(0).Infof("%s.cdb open errro:%s", fileName, e.Error())
+return e
+}
+}
 }
+glog.V(4).Infoln("open to write file", fileName+".idx")
+indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644)
+if e != nil {
+return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e.Error())
 }
+glog.V(4).Infoln("loading file", fileName+".idx")
 v.nm, e = LoadNeedleMap(indexFile)
+glog.V(4).Infoln("loading error:", e)
 }
 return e
 }
@@ -108,7 +120,7 @@ func (v *Volume) Size() int64 {
 if e == nil {
 return stat.Size()
 }
-log.Printf("Failed to read file size %s %s\n", v.dataFile.Name(), e.Error())
+glog.V(0).Infof("Failed to read file size %s %s", v.dataFile.Name(), e.Error())
 return -1
 }
 func (v *Volume) Close() {
@@ -120,7 +132,7 @@ func (v *Volume) Close() {
 func (v *Volume) maybeWriteSuperBlock() error {
 stat, e := v.dataFile.Stat()
 if e != nil {
-log.Printf("failed to stat datafile %s: %s", v.dataFile, e)
+glog.V(0).Infof("failed to stat datafile %s: %s", v.dataFile, e.Error())
 return e
 }
 if stat.Size() == 0 {
@@ -221,10 +233,10 @@ func (v *Volume) delete(n *Needle) (uint32, error) {
 //fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
 if ok {
 size := nv.Size
-if err:= v.nm.Delete(n.Id); err != nil {
+if err := v.nm.Delete(n.Id); err != nil {
 return size, err
 }
-if _, err:= v.dataFile.Seek(0, 2); err != nil {
+if _, err := v.dataFile.Seek(0, 2); err != nil {
 return size, err
 }
 n.Data = make([]byte, 0)
@@ -286,7 +298,7 @@ func (v *Volume) freeze() error {
 defer v.accessLock.Unlock()
 bn, _ := nakeFilename(v.dataFile.Name())
 cdbFn := bn + ".cdb"
-log.Printf("converting %s to %s", nm.indexFile.Name(), cdbFn)
+glog.V(0).Infof("converting %s to %s", nm.indexFile.Name(), cdbFn)
 err := DumpNeedleMapToCdb(cdbFn, nm)
 if err != nil {
 return err
@@ -304,7 +316,7 @@ func ScanVolumeFile(dirname string, id VolumeId,
 visitSuperBlock func(SuperBlock) error,
 visitNeedle func(n *Needle, offset uint32) error) (err error) {
 var v *Volume
-if v, err = LoadVolumeOnly(dirname, id); err != nil {
+if v, err = loadVolumeWithoutIndex(dirname, id); err != nil {
 return
 }
 if err = visitSuperBlock(v.SuperBlock); err != nil {
@@ -361,7 +373,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
 return err
 }, func(n *Needle, offset uint32) error {
 nv, ok := v.nm.Get(n.Id)
-//log.Println("file size is", n.Size, "rest", rest)
+//glog.V(0).Infoln("file size is", n.Size, "rest", rest)
 if ok && nv.Offset*NeedlePaddingSize == offset {
 if nv.Size > 0 {
 if _, err = nm.Put(n.Id, new_offset/NeedlePaddingSize, n.Size); err != nil {
@@ -371,7 +383,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
 return fmt.Errorf("cannot append needle: %s", err)
 }
 new_offset += n.DiskSize()
-//log.Println("saving key", n.Id, "volume offset", old_offset, "=>", new_offset, "data_size", n.Size, "rest", rest)
+//glog.V(0).Infoln("saving key", n.Id, "volume offset", old_offset, "=>", new_offset, "data_size", n.Size, "rest", rest)
 }
 }
 return nil
@@ -2,7 +2,7 @@ package topology

 import (
 "code.google.com/p/weed-fs/go/storage"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 )

 type NodeId string
@@ -155,7 +155,7 @@ func (n *NodeImpl) LinkChildNode(node Node) {
 n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
 n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
 node.SetParent(n)
-log.Println(n, "adds child", node.Id())
+glog.V(0).Infoln(n, "adds child", node.Id())
 }
 }

@@ -167,7 +167,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
 n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
 n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
 n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
-log.Println(n, "removes", node, "volumeCount =", n.activeVolumeCount)
+glog.V(0).Infoln(n, "removes", node, "volumeCount =", n.activeVolumeCount)
 }
 }

@@ -2,7 +2,7 @@ package topology

 import (
 "code.google.com/p/weed-fs/go/storage"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 )

@@ -70,7 +70,7 @@ func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId
 randomVolumeIndex -= freeSpace
 } else {
 if node.IsDataNode() && node.FreeSpace() > 0 {
-log.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
+glog.V(0).Infoln("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
 return true, node.(*DataNode)
 }
 children := node.Children()
@@ -5,7 +5,7 @@ import (
 "code.google.com/p/weed-fs/go/storage"
 "errors"
 "io/ioutil"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 )

@@ -55,7 +55,7 @@ func (t *Topology) loadConfiguration(configurationFile string) error {
 t.configuration, e = NewConfiguration(b)
 return e
 } else {
-log.Println("Using default configurations.")
+glog.V(0).Infoln("Using default configurations.")
 }
 return nil
 }
@@ -73,7 +73,7 @@ func (t *Topology) Lookup(vid storage.VolumeId) []*DataNode {

 func (t *Topology) RandomlyReserveOneVolume(dataCenter string) (bool, *DataNode, *storage.VolumeId) {
 if t.FreeSpace() <= 0 {
-log.Println("Topology does not have free space left!")
+glog.V(0).Infoln("Topology does not have free space left!")
 return false, nil, nil
 }
 vid := t.NextVolumeId()
@@ -102,7 +102,7 @@ func (t *Topology) PickForWrite(repType storage.ReplicationType, count int, data
 func (t *Topology) GetVolumeLayout(repType storage.ReplicationType) *VolumeLayout {
 replicationTypeIndex := repType.GetReplicationLevelIndex()
 if t.replicaType2VolumeLayout[replicationTypeIndex] == nil {
-log.Println("adding replication type", repType)
+glog.V(0).Infoln("adding replication type", repType)
 t.replicaType2VolumeLayout[replicationTypeIndex] = NewVolumeLayout(repType, t.volumeSizeLimit, t.pulse)
 }
 return t.replicaType2VolumeLayout[replicationTypeIndex]
@@ -5,7 +5,7 @@ import (
 "code.google.com/p/weed-fs/go/util"
 "encoding/json"
 "errors"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "net/url"
 "time"
 )
@@ -14,12 +14,12 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist
 ch := make(chan bool, locationlist.Length())
 for index, dn := range locationlist.list {
 go func(index int, url string, vid storage.VolumeId) {
-//log.Println(index, "Check vacuuming", vid, "on", dn.Url())
+//glog.V(0).Infoln(index, "Check vacuuming", vid, "on", dn.Url())
 if e, ret := vacuumVolume_Check(url, vid, garbageThreshold); e != nil {
-//log.Println(index, "Error when checking vacuuming", vid, "on", url, e)
+//glog.V(0).Infoln(index, "Error when checking vacuuming", vid, "on", url, e)
 ch <- false
 } else {
-//log.Println(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
+//glog.V(0).Infoln(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
 ch <- ret
 }
 }(index, dn.Url(), vid)
@@ -41,12 +41,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
 ch := make(chan bool, locationlist.Length())
 for index, dn := range locationlist.list {
 go func(index int, url string, vid storage.VolumeId) {
-log.Println(index, "Start vacuuming", vid, "on", url)
+glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
 if e := vacuumVolume_Compact(url, vid); e != nil {
-log.Println(index, "Error when vacuuming", vid, "on", url, e)
+glog.V(0).Infoln(index, "Error when vacuuming", vid, "on", url, e)
 ch <- false
 } else {
-log.Println(index, "Complete vacuuming", vid, "on", url)
+glog.V(0).Infoln(index, "Complete vacuuming", vid, "on", url)
 ch <- true
 }
 }(index, dn.Url(), vid)
@@ -65,12 +65,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
 func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
 isCommitSuccess := true
 for _, dn := range locationlist.list {
-log.Println("Start Commiting vacuum", vid, "on", dn.Url())
+glog.V(0).Infoln("Start Commiting vacuum", vid, "on", dn.Url())
 if e := vacuumVolume_Commit(dn.Url(), vid); e != nil {
-log.Println("Error when committing vacuum", vid, "on", dn.Url(), e)
+glog.V(0).Infoln("Error when committing vacuum", vid, "on", dn.Url(), e)
 isCommitSuccess = false
 } else {
-log.Println("Complete Commiting vacuum", vid, "on", dn.Url())
+glog.V(0).Infoln("Complete Commiting vacuum", vid, "on", dn.Url())
 }
 }
 if isCommitSuccess {
@@ -104,7 +104,7 @@ func vacuumVolume_Check(urlLocation string, vid storage.VolumeId, garbageThresho
 values.Add("garbageThreshold", garbageThreshold)
 jsonBlob, err := util.Post("http://"+urlLocation+"/admin/vacuum_volume_check", values)
 if err != nil {
-log.Println("parameters:", values)
+glog.V(0).Infoln("parameters:", values)
 return err, false
 }
 var ret VacuumVolumeResult
@@ -2,7 +2,7 @@ package topology

 import (
 "code.google.com/p/weed-fs/go/storage"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 "time"
 )
@@ -28,10 +28,10 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
 t.SetVolumeCapacityFull(v)
 case dn := <-t.chanRecoveredDataNodes:
 t.RegisterRecoveredDataNode(dn)
-log.Println("DataNode", dn, "is back alive!")
+glog.V(0).Infoln("DataNode", dn, "is back alive!")
 case dn := <-t.chanDeadDataNodes:
 t.UnRegisterDataNode(dn)
-log.Println("DataNode", dn, "is dead!")
+glog.V(0).Infoln("DataNode", dn, "is dead!")
 }
 }
 }()
@@ -48,7 +48,7 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 }
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
 for _, v := range dn.volumes {
-log.Println("Removing Volume", v.Id, "from the dead volume server", dn)
+glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn)
 vl := t.GetVolumeLayout(v.RepType)
 vl.SetVolumeUnavailable(dn, v.Id)
 }
@@ -3,7 +3,7 @@ package topology
 import (
 "code.google.com/p/weed-fs/go/storage"
 "errors"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 "sync"
 )
@@ -59,7 +59,7 @@ func (vl *VolumeLayout) Lookup(vid storage.VolumeId) []*DataNode {
 func (vl *VolumeLayout) PickForWrite(count int, dataCenter string) (*storage.VolumeId, int, *VolumeLocationList, error) {
 len_writers := len(vl.writables)
 if len_writers <= 0 {
-log.Println("No more writable volumes!")
+glog.V(0).Infoln("No more writable volumes!")
 return nil, 0, nil, errors.New("No more writable volumes!")
 }
 if dataCenter == "" {
@@ -107,7 +107,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(dataCenter string) int {
 func (vl *VolumeLayout) removeFromWritable(vid storage.VolumeId) bool {
 for i, v := range vl.writables {
 if v == vid {
-log.Println("Volume", vid, "becomes unwritable")
+glog.V(0).Infoln("Volume", vid, "becomes unwritable")
 vl.writables = append(vl.writables[:i], vl.writables[i+1:]...)
 return true
 }
@@ -120,7 +120,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
 return false
 }
 }
-log.Println("Volume", vid, "becomes writable")
+glog.V(0).Infoln("Volume", vid, "becomes writable")
 vl.writables = append(vl.writables, vid)
 return true
 }
@@ -131,7 +131,7 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId)

 if vl.vid2location[vid].Remove(dn) {
 if vl.vid2location[vid].Length() < vl.repType.GetCopyCount() {
-log.Println("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
+glog.V(0).Infoln("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
 return vl.removeFromWritable(vid)
 }
 }
@@ -153,7 +153,7 @@ func (vl *VolumeLayout) SetVolumeCapacityFull(vid storage.VolumeId) bool {
 vl.accessLock.Lock()
 defer vl.accessLock.Unlock()

-// log.Println("Volume", vid, "reaches full capacity.")
+// glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
 return vl.removeFromWritable(vid)
 }

@@ -11,7 +11,7 @@ package util
 import (
 "bytes"
 "encoding/json"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "os"
 )

@@ -32,7 +32,7 @@ func LoadConfig(filename string) *Config {
 result.filename = filename
 err := result.parse()
 if err != nil {
-log.Fatalf("error loading config file %s: %s", filename, err)
+glog.Fatalf("error loading config file %s: %s", filename, err)
 }
 return result
 }
@@ -42,7 +42,7 @@ func LoadConfigString(s string) *Config {
 result := newConfig()
 err := json.Unmarshal([]byte(s), &result.data)
 if err != nil {
-log.Fatalf("error parsing config string %s: %s", s, err)
+glog.Fatalf("error parsing config string %s: %s", s, err)
 }
 return result
 }
@@ -2,7 +2,7 @@ package util

 import (
 "io/ioutil"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "net/http"
 "net/url"
 )
@@ -10,13 +10,13 @@ import (
 func Post(url string, values url.Values) ([]byte, error) {
 r, err := http.PostForm(url, values)
 if err != nil {
-log.Println("post to", url, err)
+glog.V(0).Infoln("post to", url, err)
 return nil, err
 }
 defer r.Body.Close()
 b, err := ioutil.ReadAll(r.Body)
 if err != nil {
-log.Println("read post result from", url, err)
+glog.V(0).Infoln("read post result from", url, err)
 return nil, err
 }
 return b, nil
@@ -5,7 +5,7 @@ import (
 "bytes"
 "code.google.com/p/weed-fs/go/storage"
 "fmt"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "os"
 "path"
 "strconv"
@@ -67,7 +67,7 @@ func runExport(cmd *Command, args []string) bool {
 fh = os.Stdout
 } else {
 if fh, err = os.Create(*dest); err != nil {
-log.Fatalf("cannot open output tar %s: %s", *dest, err)
+glog.Fatalf("cannot open output tar %s: %s", *dest, err)
 }
 }
 defer fh.Close()
@@ -84,13 +84,13 @@ func runExport(cmd *Command, args []string) bool {
 vid := storage.VolumeId(*exportVolumeId)
 indexFile, err := os.OpenFile(path.Join(*exportVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 if err != nil {
-log.Fatalf("Create Volume Index [ERROR] %s\n", err)
+glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
 }
 defer indexFile.Close()

 nm, err := storage.LoadNeedleMap(indexFile)
 if err != nil {
-log.Fatalf("cannot load needle map from %s: %s", indexFile, err)
+glog.Fatalf("cannot load needle map from %s: %s", indexFile, err)
 }

 var version storage.Version
@@ -113,7 +113,7 @@ func runExport(cmd *Command, args []string) bool {
 return nil
 })
 if err != nil {
-log.Fatalf("Export Volume File [ERROR] %s\n", err)
+glog.Fatalf("Export Volume File [ERROR] %s\n", err)
 }
 return true
 }
@@ -2,7 +2,7 @@ package main

 import (
 "code.google.com/p/weed-fs/go/storage"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "os"
 "path"
 "strconv"
@@ -35,7 +35,7 @@ func runFix(cmd *Command, args []string) bool {
 fileName := strconv.Itoa(*fixVolumeId)
 indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_WRONLY|os.O_CREATE, 0644)
 if err != nil {
-log.Fatalf("Create Volume Index [ERROR] %s\n", err)
+glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
 }
 defer indexFile.Close()

@@ -57,7 +57,7 @@ func runFix(cmd *Command, args []string) bool {
 return nil
 })
 if err != nil {
-log.Fatalf("Export Volume File [ERROR] %s\n", err)
+glog.Fatalf("Export Volume File [ERROR] %s\n", err)
 }

 return true
@@ -8,7 +8,7 @@ import (
 "code.google.com/p/weed-fs/go/topology"
 "encoding/json"
 "errors"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "net/http"
 "runtime"
 "strconv"
@@ -205,10 +205,10 @@ func runMaster(cmd *Command, args []string) bool {
 var e error
 if topo, e = topology.NewTopology("topo", *confFile, *metaFolder, "weed",
 uint64(*volumeSizeLimitMB)*1024*1024, *mpulse); e != nil {
-log.Fatalf("cannot create topology:%s", e)
+glog.Fatalf("cannot create topology:%s", e)
 }
 vg = replication.NewDefaultVolumeGrowth()
-log.Println("Volume Size Limit is", *volumeSizeLimitMB, "MB")
+glog.V(0).Infoln("Volume Size Limit is", *volumeSizeLimitMB, "MB")
 http.HandleFunc("/dir/assign", dirAssignHandler)
 http.HandleFunc("/dir/lookup", dirLookupHandler)
 http.HandleFunc("/dir/join", dirJoinHandler)
@@ -222,7 +222,7 @@ func runMaster(cmd *Command, args []string) bool {

 topo.StartRefreshWritableVolumes(*garbageThreshold)

-log.Println("Start Weed Master", VERSION, "at port", strconv.Itoa(*mport))
+glog.V(0).Infoln("Start Weed Master", VERSION, "at port", strconv.Itoa(*mport))
 srv := &http.Server{
 Addr: ":" + strconv.Itoa(*mport),
 Handler: http.DefaultServeMux,
@@ -230,7 +230,7 @@ func runMaster(cmd *Command, args []string) bool {
 }
 e = srv.ListenAndServe()
 if e != nil {
-log.Fatalf("Fail to start:%s", e)
+glog.Fatalf("Fail to start:%s", e)
 }
 return true
 }
@@ -3,7 +3,7 @@ package main
 import (
 "bufio"
 "fmt"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "os"
 )

@@ -28,10 +28,10 @@ func runShell(command *Command, args []string) bool {
 prompt := func() {
 var err error
 if _, err = o.WriteString("> "); err != nil {
-log.Printf("error writing to stdout: %s", err)
+glog.V(0).Infoln("error writing to stdout:", err)
 }
 if err = o.Flush(); err != nil {
-log.Printf("error flushing stdout: %s", err)
+glog.V(0).Infoln("error flushing stdout:", err)
 }
 }
 readLine := func() string {
@@ -45,7 +45,7 @@ func runShell(command *Command, args []string) bool {
 execCmd := func(cmd string) int {
 if cmd != "" {
 if _, err := o.WriteString(cmd); err != nil {
-log.Printf("error writing to stdout: %s", err)
+glog.V(0).Infoln("error writing to stdout:", err)
 }
 }
 return 0
@@ -4,7 +4,7 @@ import (
 "code.google.com/p/weed-fs/go/operation"
 "code.google.com/p/weed-fs/go/replication"
 "code.google.com/p/weed-fs/go/storage"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 "mime"
 "net/http"
@@ -145,7 +145,7 @@ func GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool)
 return
 }
 if n.Cookie != cookie {
-log.Println("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
+glog.V(0).Infoln("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
 w.WriteHeader(http.StatusNotFound)
 return
 }
@@ -246,7 +246,7 @@ func DeleteHandler(w http.ResponseWriter, r *http.Request) {
 }

 if n.Cookie != cookie {
-log.Println("delete with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
+glog.V(0).Infoln("delete with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
 return
 }

@@ -283,7 +283,7 @@ func parseURLPath(path string) (vid, fid, filename, ext string) {
 commaIndex := strings.LastIndex(path[sepIndex:], ",")
 if commaIndex <= 0 {
 if "favicon.ico" != path[sepIndex+1:] {
-log.Println("unknown file id", path[sepIndex+1:])
+glog.V(0).Infoln("unknown file id", path[sepIndex+1:])
 }
 return
 }
@@ -311,23 +311,23 @@ func runVolume(cmd *Command, args []string) bool {
 if max, e := strconv.Atoi(maxString); e == nil {
 maxCounts = append(maxCounts, max)
 } else {
-log.Fatalf("The max specified in -max not a valid number %s", max)
+glog.Fatalf("The max specified in -max not a valid number %s", max)
 }
 }
 if len(folders) != len(maxCounts) {
-log.Fatalf("%d directories by -dir, but only %d max is set by -max", len(folders), len(maxCounts))
+glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(folders), len(maxCounts))
 }
 for _, folder := range folders {
 fileInfo, err := os.Stat(folder)
 if err != nil {
-log.Fatalf("No Existing Folder:%s", folder)
+glog.Fatalf("No Existing Folder:%s", folder)
 }
 if !fileInfo.IsDir() {
-log.Fatalf("Volume Folder should not be a file:%s", folder)
+glog.Fatalf("Volume Folder should not be a file:%s", folder)
 }
 perm := fileInfo.Mode().Perm()
-log.Println("Volume Folder", folder)
-log.Println("Permission:", perm)
+glog.V(0).Infoln("Volume Folder", folder)
+glog.V(0).Infoln("Permission:", perm)
 }

 if *publicUrl == "" {
@@ -355,7 +355,7 @@ func runVolume(cmd *Command, args []string) bool {
 if err == nil {
 if !connected {
 connected = true
-log.Println("Reconnected with master")
+glog.V(0).Infoln("Reconnected with master")
 }
 } else {
 if connected {
@@ -365,9 +365,9 @@ func runVolume(cmd *Command, args []string) bool {
 time.Sleep(time.Duration(float32(*vpulse*1e3)*(1+rand.Float32())) * time.Millisecond)
 }
 }()
-log.Println("store joined at", *masterNode)
+glog.V(0).Infoln("store joined at", *masterNode)

-log.Println("Start Weed volume server", VERSION, "at http://"+*ip+":"+strconv.Itoa(*vport))
+glog.V(0).Infoln("Start Weed volume server", VERSION, "at http://"+*ip+":"+strconv.Itoa(*vport))
 srv := &http.Server{
 Addr: ":" + strconv.Itoa(*vport),
 Handler: http.DefaultServeMux,
@@ -375,7 +375,7 @@ func runVolume(cmd *Command, args []string) bool {
 }
 e := srv.ListenAndServe()
 if e != nil {
-log.Fatalf("Fail to start:%s", e.Error())
+glog.Fatalf("Fail to start:%s", e.Error())
 }
 return true
 }
@@ -7,5 +7,5 @@ import (
 )

 func TestXYZ(t *testing.T) {
-println("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
+glog.V(4).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
 }
@@ -5,7 +5,7 @@ import (
 "flag"
 "fmt"
 "io"
-"log"
+"code.google.com/p/weed-fs/go/glog"
 "math/rand"
 "net/http"
 "os"
@@ -42,6 +42,7 @@ func setExitStatus(n int) {
 }

 func main() {
+glog.ToStderrAndLog()
 rand.Seed(time.Now().UnixNano())
 flag.Usage = usage
 flag.Parse()
@@ -207,7 +208,7 @@ func writeJson(w http.ResponseWriter, r *http.Request, obj interface{}) (err err
 // wrapper for writeJson - just logs errors
 func writeJsonQuiet(w http.ResponseWriter, r *http.Request, obj interface{}) {
 if err := writeJson(w, r, obj); err != nil {
-log.Printf("error writing JSON %s: %s", obj, err)
+glog.V(0).Infof("error writing JSON %s: %s", obj, err.Error())
 }
 }
 func writeJsonError(w http.ResponseWriter, r *http.Request, err error) {
@@ -218,6 +219,6 @@ func writeJsonError(w http.ResponseWriter, r *http.Request, err error) {

 func debug(params ...interface{}) {
 if *IsDebug {
-log.Println(params)
+glog.V(0).Infoln(params)
 }
 }