mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

switching to temporarily use glog library

This commit is contained in:
parent b27947b355
commit ed154053c8

go/glog/convenient_api.go (17 lines, new file)
go/glog/convenient_api.go (new file):

@@ -0,0 +1,17 @@
+package glog
+
+import ()
+
+/*
+Copying the original glog because it is missing several convenient methods.
+1. change log file size limit to 180MB
+2. use ToStderrAndLog() in the weed.go
+3. remove nano time in log format
+*/
+
+func ToStderr() {
+    logging.toStderr = true
+}
+
+func ToStderrAndLog() {
+    logging.alsoToStderr = true
+}
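Note: the two helpers above only flip flags on the package-level logging struct inside this copied glog; actual output still goes through the usual glog calls. A minimal sketch of how a caller such as weed.go might use them — the main function below is illustrative and not part of this commit:

package main

import (
    "flag"

    "code.google.com/p/weed-fs/go/glog"
)

func main() {
    flag.Parse() // glog reads its -v, -log_dir, etc. flags

    // Item 2 of the package comment: keep writing log files, but also echo to stderr.
    glog.ToStderrAndLog()
    defer glog.Flush()

    glog.V(0).Infoln("weed starting up") // V(0) is always emitted
    if glog.V(4) {
        glog.Infoln("extra debug detail, only visible with -v=4 or higher")
    }
}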
@@ -557,8 +557,8 @@ func (l *loggingT) header(s severity) *buffer {
     buf.twoDigits(9, minute)
     buf.tmp[11] = ':'
     buf.twoDigits(12, second)
-    buf.tmp[14] = '.'
-    buf.nDigits(6, 15, now.Nanosecond()/1000)
+    //buf.tmp[14] = '.'
+    //buf.nDigits(6, 15, now.Nanosecond()/1000)
     buf.tmp[21] = ' '
     buf.nDigits(5, 22, pid) // TODO: should be TID
     buf.tmp[27] = ' '
@@ -798,7 +798,7 @@ func (sb *syncBuffer) rotateFile(now time.Time) error {
     fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
     fmt.Fprintf(&buf, "Running on machine: %s\n", host)
     fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
-    fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+    fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss threadid file:line] msg\n")
     n, err := sb.file.Write(buf.Bytes())
     sb.nbytes += uint64(n)
     return err
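Note: the two hunks above work together — header() stops writing the '.' and the six microsecond digits, and rotateFile() updates the documented line format to match (item 3 of the new package comment). A rough illustration of the prefix before and after, printed with the standard library rather than glog's buffer code; the sample values and exact spacing are illustrative:

package main

import (
    "fmt"
    "time"
)

func main() {
    now := time.Date(2013, 6, 3, 15, 4, 5, 123456000, time.UTC)
    pid := 2398 // illustrative

    // old: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
    fmt.Printf("I%02d%02d %02d:%02d:%02d.%06d %5d volume.go:57] msg\n",
        int(now.Month()), now.Day(), now.Hour(), now.Minute(), now.Second(),
        now.Nanosecond()/1000, pid)

    // new: [IWEF]mmdd hh:mm:ss threadid file:line] msg
    fmt.Printf("I%02d%02d %02d:%02d:%02d %5d volume.go:57] msg\n",
        int(now.Month()), now.Day(), now.Hour(), now.Minute(), now.Second(), pid)
}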
@@ -31,7 +31,7 @@ import (
 )
 
 // MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800
+var MaxSize uint64 = 1024 * 1024 * 180
 
 // logDirs lists the candidate directories for new log files.
 var logDirs []string
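Note: MaxSize is the rotation threshold in the copied glog — once a log file reaches it, a new file is started — so this single constant is the "log file size limit" named in the new package comment (item 1). A quick check of the arithmetic:

package main

import "fmt"

func main() {
    oldLimit := uint64(1024 * 1024 * 1800) // previous limit: 1800 MiB
    newLimit := uint64(1024 * 1024 * 180)  // this commit: 180 MiB
    fmt.Printf("old MaxSize: %d bytes (%d MiB)\n", oldLimit, oldLimit/(1<<20))
    fmt.Printf("new MaxSize: %d bytes (%d MiB)\n", newLimit, newLimit/(1<<20))
}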
@@ -1,14 +1,14 @@
 package operation
 
 import (
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "net/http"
 )
 
 func Delete(url string) error {
     req, err := http.NewRequest("DELETE", url, nil)
     if err != nil {
-        log.Println("failing to delete", url)
+        glog.V(0).Infoln("failing to delete", url)
         return err
     }
     _, err = http.DefaultClient.Do(req)
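Note: this hunk shows the substitution repeated throughout the rest of the commit — standard-library log calls become leveled glog calls, with V(0) for messages that should always appear and V(4) for debug chatter that used to be bare println calls or commented out. The mapping as an illustrative, uncalled helper (not part of the commit):

package main

import "code.google.com/p/weed-fs/go/glog"

// logMigrationExamples is illustrative only; it is never called.
func logMigrationExamples(url string, i, n int, err error) {
    // log.Println(...)         ->  glog.V(0).Infoln(...)   always visible
    glog.V(0).Infoln("failing to delete", url)
    // log.Printf("fmt", ...)   ->  glog.V(0).Infof("fmt", ...)
    glog.V(0).Infof("visit cdb %d consumed %d bytes", i, n)
    // println(...) debug noise ->  glog.V(4).Infoln(...)   gated by -v=4
    glog.V(4).Infoln("looking for key", i)
    // log.Fatalf(...)          ->  glog.Fatalf(...)        logs, flushes and exits
    glog.Fatalf("error loading config file %s: %s", url, err)
}

func main() {}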
@@ -7,7 +7,7 @@ import (
     "fmt"
     "io"
     "io/ioutil"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "mime"
     "mime/multipart"
     "net/http"
@@ -37,21 +37,21 @@ func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool,
     }
     file_writer, err := body_writer.CreatePart(h)
     if err != nil {
-        log.Println("error creating form file", err)
+        glog.V(0).Infoln("error creating form file", err)
         return nil, err
     }
     if _, err = io.Copy(file_writer, reader); err != nil {
-        log.Println("error copying data", err)
+        glog.V(0).Infoln("error copying data", err)
         return nil, err
     }
     content_type := body_writer.FormDataContentType()
     if err = body_writer.Close(); err != nil {
-        log.Println("error closing body", err)
+        glog.V(0).Infoln("error closing body", err)
         return nil, err
     }
     resp, err := http.Post(uploadUrl, content_type, body_buf)
     if err != nil {
-        log.Println("failing to upload to", uploadUrl)
+        glog.V(0).Infoln("failing to upload to", uploadUrl)
         return nil, err
     }
     defer resp.Body.Close()
@@ -62,7 +62,7 @@ func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool,
     var ret UploadResult
     err = json.Unmarshal(resp_body, &ret)
     if err != nil {
-        log.Println("failing to read upload resonse", uploadUrl, resp_body)
+        glog.V(0).Infoln("failing to read upload resonse", uploadUrl, resp_body)
         return nil, err
     }
     if ret.Error != "" {
@@ -4,7 +4,7 @@ import (
     "bytes"
     "code.google.com/p/weed-fs/go/operation"
     "code.google.com/p/weed-fs/go/storage"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "net/http"
     "strconv"
 )
@@ -50,7 +50,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId storage.Volum
 func ReplicatedDelete(masterNode string, store *storage.Store, volumeId storage.VolumeId, n *storage.Needle, r *http.Request) (ret uint32) {
     ret, err := store.Delete(volumeId, n)
     if err != nil {
-        log.Println("delete error:", err)
+        glog.V(0).Infoln("delete error:", err)
         return
     }
 
@@ -89,7 +89,7 @@ func distributedOperation(masterNode string, store *storage.Store, volumeId stor
         }
         return ret
     } else {
-        log.Println("Failed to lookup for", volumeId, lookupErr.Error())
+        glog.V(0).Infoln("Failed to lookup for", volumeId, lookupErr.Error())
     }
     return false
 }
@@ -6,7 +6,7 @@ import (
     "code.google.com/p/weed-fs/go/topology"
     "errors"
     "fmt"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "math/rand"
     "sync"
 )
@@ -204,9 +204,9 @@ func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, repT
             vi := storage.VolumeInfo{Id: vid, Size: 0, RepType: repType, Version: storage.CurrentVersion}
             server.AddOrUpdateVolume(vi)
             topo.RegisterVolumeLayout(&vi, server)
-            log.Println("Created Volume", vid, "on", server)
+            glog.V(0).Infoln("Created Volume", vid, "on", server)
         } else {
-            log.Println("Failed to assign", vid, "to", servers, "error", err)
+            glog.V(0).Infoln("Failed to assign", vid, "to", servers, "error", err)
             return errors.New("Failed to assign " + vid.String())
         }
     }
@@ -2,7 +2,7 @@ package sequence
 
 import (
     "encoding/gob"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "os"
     "path"
     "sync"
@@ -32,16 +32,16 @@ func NewSequencer(dirname string, filename string) (m *SequencerImpl) {
     seqFile, se := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_RDONLY, 0644)
     if se != nil {
         m.FileIdSequence = FileIdSaveInterval
-        log.Println("Setting file id sequence", m.FileIdSequence)
+        glog.V(0).Infoln("Setting file id sequence", m.FileIdSequence)
     } else {
         decoder := gob.NewDecoder(seqFile)
         defer seqFile.Close()
         if se = decoder.Decode(&m.FileIdSequence); se != nil {
-            log.Printf("error decoding FileIdSequence: %s", se)
+            glog.V(0).Infof("error decoding FileIdSequence: %s", se)
             m.FileIdSequence = FileIdSaveInterval
-            log.Println("Setting file id sequence", m.FileIdSequence)
+            glog.V(0).Infoln("Setting file id sequence", m.FileIdSequence)
         } else {
-            log.Println("Loading file id sequence", m.FileIdSequence, "=>", m.FileIdSequence+FileIdSaveInterval)
+            glog.V(0).Infoln("Loading file id sequence", m.FileIdSequence, "=>", m.FileIdSequence+FileIdSaveInterval)
             m.FileIdSequence += FileIdSaveInterval
         }
         //in case the server stops between intervals
@@ -65,14 +65,14 @@ func (m *SequencerImpl) NextFileId(count int) (uint64, int) {
     return m.FileIdSequence - m.fileIdCounter - uint64(count), count
 }
 func (m *SequencerImpl) saveSequence() {
-    log.Println("Saving file id sequence", m.FileIdSequence, "to", path.Join(m.dir, m.fileName+".seq"))
+    glog.V(0).Infoln("Saving file id sequence", m.FileIdSequence, "to", path.Join(m.dir, m.fileName+".seq"))
     seqFile, e := os.OpenFile(path.Join(m.dir, m.fileName+".seq"), os.O_CREATE|os.O_WRONLY, 0644)
     if e != nil {
-        log.Fatalf("Sequence File Save [ERROR] %s\n", e)
+        glog.Fatalf("Sequence File Save [ERROR] %s", e)
    }
     defer seqFile.Close()
     encoder := gob.NewEncoder(seqFile)
     if e = encoder.Encode(m.FileIdSequence); e != nil {
-        log.Fatalf("Sequence File Save [ERROR] %s\n", e)
+        glog.Fatalf("Sequence File Save [ERROR] %s", e)
     }
 }
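Note: the sequencer persists FileIdSequence with encoding/gob and, on startup, jumps one FileIdSaveInterval past whatever was last saved so ids handed out between saves are never reused. A self-contained sketch of that save/load cycle; the file name and interval value are illustrative:

package main

import (
    "encoding/gob"
    "fmt"
    "os"
)

const saveInterval = 10000 // stand-in for FileIdSaveInterval

func save(path string, seq uint64) error {
    f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        return err
    }
    defer f.Close()
    return gob.NewEncoder(f).Encode(seq)
}

func load(path string) uint64 {
    f, err := os.Open(path)
    if err != nil {
        return saveInterval // no sequence file yet: start one interval in
    }
    defer f.Close()
    var seq uint64
    if err := gob.NewDecoder(f).Decode(&seq); err != nil {
        return saveInterval // undecodable file: same fallback as the real code
    }
    // Skip ahead one interval in case the server stopped between saves.
    return seq + saveInterval
}

func main() {
    path := "example.seq"
    fmt.Println("loaded:", load(path))
    if err := save(path, 12345); err != nil {
        fmt.Println("save failed:", err)
    }
    fmt.Println("reloaded:", load(path))
}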
@@ -1,7 +1,7 @@
 package storage
 
 import (
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "math/rand"
     "os"
     "runtime"
@@ -39,7 +39,7 @@ func TestCdbMap1Mem(t *testing.T) {
         t.Fatalf("error opening cdb: %s", err)
     }
     b := getMemStats()
-    log.Printf("opening cdb consumed %d bytes", b-a)
+    glog.V(0).Infof("opening cdb consumed %d bytes", b-a)
     defer nm.Close()
 
     a = getMemStats()
@@ -47,7 +47,7 @@ func TestCdbMap1Mem(t *testing.T) {
         t.Fatalf("error visiting %s: %s", nm, err)
     }
     b = getMemStats()
-    log.Printf("visit cdb %d consumed %d bytes", i, b-a)
+    glog.V(0).Infof("visit cdb %d consumed %d bytes", i, b-a)
     nm.Close()
 
     indexFile, err := os.Open(testIndexFilename)
@@ -61,7 +61,7 @@ func TestCdbMap1Mem(t *testing.T) {
     }
     defer nm.Close()
     b = getMemStats()
-    log.Printf("opening idx consumed %d bytes", b-a)
+    glog.V(0).Infof("opening idx consumed %d bytes", b-a)
 
     i = 0
     a = getMemStats()
@@ -69,7 +69,7 @@ func TestCdbMap1Mem(t *testing.T) {
         t.Fatalf("error visiting %s: %s", nm, err)
     }
     b = getMemStats()
-    log.Printf("visit idx %d consumed %d bytes", i, b-a)
+    glog.V(0).Infof("visit idx %d consumed %d bytes", i, b-a)
 }
 
 func BenchmarkCdbMap9List(t *testing.B) {
@@ -88,7 +88,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
     }
     defer idx.Close()
     b := getMemStats()
-    log.Printf("LoadNeedleMap consumed %d bytes", b-a)
+    glog.V(0).Infof("LoadNeedleMap consumed %d bytes", b-a)
 
     cdbFn := testIndexFilename + ".cdb"
     a = getMemStats()
@@ -99,10 +99,10 @@ func BenchmarkCdbMap9List(t *testing.B) {
     }
     defer m.Close()
     b = getMemStats()
-    log.Printf("OpenCdbMap consumed %d bytes", b-a)
+    glog.V(0).Infof("OpenCdbMap consumed %d bytes", b-a)
 
     i := 0
-    log.Printf("checking whether the cdb contains every key")
+    glog.V(0).Infoln("checking whether the cdb contains every key")
     t.StartTimer()
     err = idx.Visit(func(nv NeedleValue) error {
         if i > t.N || rand.Intn(10) < 9 {
@@ -110,7 +110,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
         }
         i++
         if i%1000 == 0 {
-            log.Printf("%d. %s", i, nv)
+            glog.V(0).Infof("%d. %s", i, nv)
         }
         if nv2, ok := m.Get(uint64(nv.Key)); !ok || nv2 == nil {
             t.Errorf("%s in index, not in cdb", nv.Key)
@@ -130,7 +130,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
     }
 
     i = 0
-    log.Printf("checking wheter the cdb contains no stray keys")
+    glog.V(0).Infoln("checking wheter the cdb contains no stray keys")
     t.StartTimer()
     err = m.Visit(func(nv NeedleValue) error {
         if i > t.N || rand.Intn(10) < 9 {
@@ -147,7 +147,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
         }
         i++
         if i%1000 == 0 {
-            log.Printf("%d. %s", i, nv)
+            glog.V(0).Infof("%d. %s", i, nv)
         }
         t.SetBytes(int64(nv.Size))
         return nil
@@ -38,13 +38,13 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
     }
     if i := cs.binarySearchValues(key); i >= 0 {
         ret = cs.values[i].Size
-        //println("key", key, "old size", ret)
+        //glog.V(4).Infoln("key", key, "old size", ret)
         cs.values[i].Offset, cs.values[i].Size = offset, size
     } else {
         needOverflow := cs.counter >= batch
         needOverflow = needOverflow || cs.counter > 0 && cs.values[cs.counter-1].Key > key
         if needOverflow {
-            //println("start", cs.start, "counter", cs.counter, "key", key)
+            //glog.V(4).Infoln("start", cs.start, "counter", cs.counter, "key", key)
             if oldValue, found := cs.overflow[key]; found {
                 ret = oldValue.Size
             }
@@ -52,7 +52,7 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
         } else {
             p := &cs.values[cs.counter]
             p.Key, p.Offset, p.Size = key, offset, size
-            //println("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
+            //glog.V(4).Infoln("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
             cs.counter++
         }
     }
@@ -88,16 +88,16 @@ func (cs *CompactSection) binarySearchValues(key Key) int {
     if h >= 0 && cs.values[h].Key < key {
         return -2
     }
-    //println("looking for key", key)
+    //glog.V(4).Infoln("looking for key", key)
     for l <= h {
         m := (l + h) / 2
-        //println("mid", m, "key", cs.values[m].Key, cs.values[m].Offset, cs.values[m].Size)
+        //glog.V(4).Infoln("mid", m, "key", cs.values[m].Key, cs.values[m].Offset, cs.values[m].Size)
         if cs.values[m].Key < key {
             l = m + 1
         } else if key < cs.values[m].Key {
             h = m - 1
         } else {
-            //println("found", m)
+            //glog.V(4).Infoln("found", m)
             return m
         }
     }
@@ -116,7 +116,7 @@ func NewCompactMap() CompactMap {
 func (cm *CompactMap) Set(key Key, offset uint32, size uint32) uint32 {
     x := cm.binarySearchCompactSection(key)
     if x < 0 {
-        //println(x, "creating", len(cm.list), "section1, starting", key)
+        //glog.V(4).Infoln(x, "creating", len(cm.list), "section1, starting", key)
         cm.list = append(cm.list, NewCompactSection(key))
         x = len(cm.list) - 1
     }
@@ -2,7 +2,7 @@ package storage
 
 import (
     "code.google.com/p/weed-fs/go/util"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "os"
     "testing"
 )
@@ -23,7 +23,7 @@ func LoadNewNeedleMap(file *os.File) CompactMap {
     count, e := file.Read(bytes)
     if count > 0 {
         fstat, _ := file.Stat()
-        log.Println("Loading index file", fstat.Name(), "size", fstat.Size())
+        glog.V(0).Infoln("Loading index file", fstat.Name(), "size", fstat.Size())
     }
     for count > 0 && e == nil {
         for i := 0; i < count; i += 16 {

@@ -20,7 +20,7 @@ func TestXYZ(t *testing.T) {
 
     // for i := uint32(0); i < 100; i++ {
     //     if v := m.Get(Key(i)); v != nil {
-    //         println(i, "=", v.Key, v.Offset, v.Size)
+    //         glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size)
     //     }
     // }
 
@@ -4,6 +4,7 @@ import (
     "bytes"
     "compress/flate"
     "compress/gzip"
+    "code.google.com/p/weed-fs/go/glog"
     "io/ioutil"
     "strings"
 )
@@ -36,11 +37,11 @@ func GzipData(input []byte) ([]byte, error) {
     buf := new(bytes.Buffer)
     w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
     if _, err := w.Write(input); err != nil {
-        println("error compressing data:", err)
+        glog.V(4).Infoln("error compressing data:", err)
         return nil, err
     }
     if err := w.Close(); err != nil {
-        println("error closing compressed data:", err)
+        glog.V(4).Infoln("error closing compressed data:", err)
         return nil, err
     }
     return buf.Bytes(), nil
@@ -51,7 +52,7 @@ func UnGzipData(input []byte) ([]byte, error) {
     defer r.Close()
     output, err := ioutil.ReadAll(r)
     if err != nil {
-        println("error uncompressing data:", err)
+        glog.V(4).Infoln("error uncompressing data:", err)
     }
     return output, err
 }
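Note: GzipData and UnGzipData wrap the standard compress/gzip package; the only change in these hunks is routing their error messages through glog at verbosity 4. A round-trip sketch of the same compress/decompress pattern, self-contained and runnable:

package main

import (
    "bytes"
    "compress/flate"
    "compress/gzip"
    "fmt"
    "io/ioutil"
)

func main() {
    input := []byte("hello, weed-fs")

    // compress, as GzipData does
    buf := new(bytes.Buffer)
    w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
    if _, err := w.Write(input); err != nil {
        fmt.Println("error compressing data:", err)
        return
    }
    if err := w.Close(); err != nil {
        fmt.Println("error closing compressed data:", err)
        return
    }

    // decompress, as UnGzipData does
    r, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
    if err != nil {
        fmt.Println("error opening compressed data:", err)
        return
    }
    defer r.Close()
    output, err := ioutil.ReadAll(r)
    if err != nil {
        fmt.Println("error uncompressing data:", err)
    }
    fmt.Printf("%d bytes -> %d compressed -> %q\n", len(input), buf.Len(), output)
}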
@@ -4,6 +4,7 @@ import (
     "code.google.com/p/weed-fs/go/util"
     "encoding/hex"
     "strings"
+    "code.google.com/p/weed-fs/go/glog"
 )
 
 type FileId struct {
@@ -18,7 +19,7 @@ func NewFileId(VolumeId VolumeId, Key uint64, Hashcode uint32) *FileId {
 func ParseFileId(fid string) *FileId {
     a := strings.Split(fid, ",")
     if len(a) != 2 {
-        println("Invalid fid", fid, ", split length", len(a))
+        glog.V(4).Infoln("Invalid fid", fid, ", split length", len(a))
         return nil
     }
     vid_string, key_hash_string := a[0], a[1]
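Note: ParseFileId only splits the comma-separated file id into its volume-id part and its key+hashcode part; the hex decoding happens later in ParseKeyHash. A tiny sketch of that split — the concrete fid value here is illustrative:

package main

import (
    "fmt"
    "strings"
)

func main() {
    fid := "3,01637037d6" // illustrative: "<volumeId>,<key+hashcode in hex>"
    a := strings.Split(fid, ",")
    if len(a) != 2 {
        fmt.Println("Invalid fid", fid, ", split length", len(a))
        return
    }
    vidString, keyHashString := a[0], a[1]
    fmt.Println("volume id part:", vidString)
    fmt.Println("key+hashcode hex part:", keyHashString)
}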
@@ -5,7 +5,7 @@ import (
     "encoding/hex"
     "errors"
     "io/ioutil"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "mime"
     "net/http"
     "path"
@@ -41,13 +41,13 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
     form, fe := r.MultipartReader()
     if fe != nil {
-        log.Println("MultipartReader [ERROR]", fe)
+        glog.V(0).Infoln("MultipartReader [ERROR]", fe)
         e = fe
         return
     }
     part, fe := form.NextPart()
     if fe != nil {
-        log.Println("Reading Multi part [ERROR]", fe)
+        glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
         e = fe
         return
     }
@@ -60,7 +60,7 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
     }
     data, e = ioutil.ReadAll(part)
     if e != nil {
-        log.Println("Reading Content [ERROR]", e)
+        glog.V(0).Infoln("Reading Content [ERROR]", e)
         return
     }
     dotIndex := strings.LastIndex(fileName, ".")
@@ -131,7 +131,7 @@ func (n *Needle) ParsePath(fid string) {
     length := len(fid)
     if length <= 8 {
         if length > 0 {
-            log.Println("Invalid fid", fid, "length", length)
+            glog.V(0).Infoln("Invalid fid", fid, "length", length)
         }
         return
     }
@@ -153,7 +153,7 @@ func ParseKeyHash(key_hash_string string) (uint64, uint32) {
     key_hash_bytes, khe := hex.DecodeString(key_hash_string)
     key_hash_len := len(key_hash_bytes)
     if khe != nil || key_hash_len <= 4 {
-        log.Println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
+        glog.V(0).Infoln("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
         return 0, 0
     }
     key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])
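Note: ParseKeyHash hex-decodes the second half of the fid and treats everything but the last 4 bytes as the needle key, with the final 4 bytes as the hashcode. A standalone sketch of the same split — util.BytesToUint64/BytesToUint32 do the decoding in the repo; big-endian decoding is an assumption here:

package main

import (
    "encoding/binary"
    "encoding/hex"
    "fmt"
)

func main() {
    keyHashString := "01637037d6" // illustrative: 5 bytes of hex
    keyHashBytes, err := hex.DecodeString(keyHashString)
    keyHashLen := len(keyHashBytes)
    if err != nil || keyHashLen <= 4 {
        fmt.Println("Invalid key_hash", keyHashString, "length:", keyHashLen, "error", err)
        return
    }
    // All but the last 4 bytes form the key; the last 4 bytes are the hashcode.
    keyBytes := keyHashBytes[0 : keyHashLen-4]
    hashBytes := keyHashBytes[keyHashLen-4:]

    key := uint64(0)
    for _, b := range keyBytes {
        key = key<<8 + uint64(b) // stand-in for util.BytesToUint64
    }
    hash := binary.BigEndian.Uint32(hashBytes) // stand-in for util.BytesToUint32
    fmt.Printf("key=%d hash=%d\n", key, hash)
}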
@@ -57,14 +57,14 @@ func LoadNeedleMap(file *os.File) (*NeedleMap, error) {
         nm.FileByteCounter = nm.FileByteCounter + uint64(size)
         if offset > 0 {
             oldSize := nm.m.Set(Key(key), offset, size)
-            //log.Println("reading key", key, "offset", offset, "size", size, "oldSize", oldSize)
+            //glog.V(0).Infoln("reading key", key, "offset", offset, "size", size, "oldSize", oldSize)
             if oldSize > 0 {
                 nm.DeletionCounter++
                 nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
             }
         } else {
             oldSize := nm.m.Delete(Key(key))
-            //log.Println("removing key", key, "offset", offset, "size", size, "oldSize", oldSize)
+            //glog.V(0).Infoln("removing key", key, "offset", offset, "size", size, "oldSize", oldSize)
             nm.DeletionCounter++
             nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
         }
@@ -5,7 +5,7 @@ import (
     "errors"
     "fmt"
     "io"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "os"
 )
 
@@ -27,12 +27,12 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
             defer func(s io.Seeker, off int64) {
                 if err != nil {
                     if _, e = s.Seek(off, 0); e != nil {
-                        log.Printf("Failed to seek %s back to %d with error: %s\n", w, off, e)
+                        glog.V(0).Infof("Failed to seek %s back to %d with error: %s", w, off, e.Error())
                     }
                 }
             }(s, end)
         } else {
-            err = fmt.Errorf("Cnnot Read Current Volume Position: %s", e)
+            err = fmt.Errorf("Cnnot Read Current Volume Position: %s", e.Error())
             return
         }
     }
@@ -5,7 +5,7 @@ import (
     "encoding/json"
     "fmt"
     "io/ioutil"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "net/url"
     "strconv"
     "strings"
@@ -95,7 +95,7 @@ func (s *Store) addVolume(vid VolumeId, replicationType ReplicationType) error {
         return fmt.Errorf("Volume Id %s already exists!", vid)
     }
     if location := s.findFreeLocation(); location != nil {
-        log.Println("In dir", location.directory, "adds volume =", vid, ", replicationType =", replicationType)
+        glog.V(0).Infoln("In dir", location.directory, "adds volume =", vid, ", replicationType =", replicationType)
         if volume, err := NewVolume(location.directory, vid, replicationType); err == nil {
             location.volumes[vid] = volume
             return nil
@@ -163,14 +163,14 @@ func (l *DiskLocation) loadExistingVolumes() {
                     if l.volumes[vid] == nil {
                         if v, e := NewVolume(l.directory, vid, CopyNil); e == nil {
                             l.volumes[vid] = v
-                            log.Println("In dir", l.directory, "read volume =", vid, "replicationType =", v.ReplicaType, "version =", v.Version(), "size =", v.Size())
+                            glog.V(0).Infoln("In dir", l.directory, "read volume =", vid, "replicationType =", v.ReplicaType, "version =", v.Version(), "size =", v.Size())
                         }
                     }
                 }
             }
         }
     }
-    log.Println("Store started on dir:", l.directory, "with", len(l.volumes), "volumes", "max", l.maxVolumeCount)
+    glog.V(0).Infoln("Store started on dir:", l.directory, "with", len(l.volumes), "volumes", "max", l.maxVolumeCount)
 }
 func (s *Store) Status() []*VolumeInfo {
     var stats []*VolumeInfo
@@ -259,15 +259,15 @@ func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) {
                 err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.volumeSizeLimit, v.ContentSize())
             }
             if err != nil && s.volumeSizeLimit < v.ContentSize()+uint64(size) && s.volumeSizeLimit >= v.ContentSize() {
-                log.Println("volume", i, "size is", v.ContentSize(), "close to", s.volumeSizeLimit)
+                glog.V(0).Infoln("volume", i, "size is", v.ContentSize(), "close to", s.volumeSizeLimit)
                 if err = s.Join(); err != nil {
-                    log.Printf("error with Join: %s", err)
+                    glog.V(0).Infoln("error with Join:", err)
                 }
             }
         }
         return
     }
-    log.Println("volume", i, "not found!")
+    glog.V(0).Infoln("volume", i, "not found!")
     err = fmt.Errorf("Volume %s not found!", i)
     return
 }
@@ -2,10 +2,10 @@ package storage
 
 import (
     "bytes"
+    "code.google.com/p/weed-fs/go/glog"
     "errors"
     "fmt"
     "io"
-    "log"
     "os"
     "path"
     "sync"
@@ -45,7 +45,7 @@ func NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v
     e = v.load(true)
     return
 }
-func LoadVolumeOnly(dirname string, id VolumeId) (v *Volume, e error) {
+func loadVolumeWithoutIndex(dirname string, id VolumeId) (v *Volume, e error) {
     v = &Volume{dir: dirname, Id: id}
     v.SuperBlock = SuperBlock{ReplicaType: CopyNil}
     e = v.load(false)
@@ -57,12 +57,12 @@ func (v *Volume) load(alsoLoadIndex bool) error {
     v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
     if e != nil {
         if !os.IsPermission(e) {
-            return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e)
+            return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e.Error())
         }
         if v.dataFile, e = os.Open(fileName + ".dat"); e != nil {
-            return fmt.Errorf("cannot open Volume Data %s.dat: %s", fileName, e)
+            return fmt.Errorf("cannot open Volume Data %s.dat: %s", fileName, e.Error())
         }
-        log.Printf("opening " + fileName + ".dat in READONLY mode")
+        glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
         v.readOnly = true
     }
     if v.ReplicaType == CopyNil {
@@ -73,28 +73,40 @@ func (v *Volume) load(alsoLoadIndex bool) error {
     if e == nil && alsoLoadIndex {
         var indexFile *os.File
         if v.readOnly {
+            glog.V(4).Infoln("opening file", fileName+".idx")
             if indexFile, e = os.Open(fileName + ".idx"); e != nil && !os.IsNotExist(e) {
-                return fmt.Errorf("cannot open index file %s.idx: %s", fileName, e)
+                return fmt.Errorf("cannot open index file %s.idx: %s", fileName, e.Error())
             }
             if indexFile != nil {
-                log.Printf("converting %s.idx to %s.cdb", fileName, fileName)
-                if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
-                    log.Printf("error converting %s.idx to %s.cdb: %s", fileName, fileName)
-                } else {
-                    indexFile.Close()
-                    os.Remove(indexFile.Name())
-                    indexFile = nil
+                glog.V(4).Infoln("check file", fileName+".cdb")
+                if _, err := os.Stat(fileName + ".cdb"); os.IsNotExist(err) {
+                    glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
+                    if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
+                        glog.V(0).Infof("error converting %s.idx to %s.cdb: %s", fileName, e.Error())
+                    } else {
+                        indexFile.Close()
+                        indexFile = nil
+                    }
                 }
             }
-            v.nm, e = OpenCdbMap(fileName + ".cdb")
-            return e
-        } else {
-            indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644)
-            if e != nil {
-                return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e)
+            glog.V(4).Infoln("open file", fileName+".cdb")
+            if v.nm, e = OpenCdbMap(fileName + ".cdb"); e != nil {
+                if os.IsNotExist(e) {
+                    glog.V(0).Infof("Failed to read cdb file :%s, fall back to normal readonly mode.", fileName)
+                } else {
+                    glog.V(0).Infof("%s.cdb open errro:%s", fileName, e.Error())
+                    return e
+                }
             }
         }
+        glog.V(4).Infoln("open to write file", fileName+".idx")
+        indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644)
+        if e != nil {
+            return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e.Error())
+        }
+        glog.V(4).Infoln("loading file", fileName+".idx")
         v.nm, e = LoadNeedleMap(indexFile)
+        glog.V(4).Infoln("loading error:", e)
     }
     return e
 }
@@ -108,7 +120,7 @@ func (v *Volume) Size() int64 {
     if e == nil {
         return stat.Size()
     }
-    log.Printf("Failed to read file size %s %s\n", v.dataFile.Name(), e.Error())
+    glog.V(0).Infof("Failed to read file size %s %s", v.dataFile.Name(), e.Error())
     return -1
 }
 func (v *Volume) Close() {
@@ -120,7 +132,7 @@ func (v *Volume) Close() {
 func (v *Volume) maybeWriteSuperBlock() error {
     stat, e := v.dataFile.Stat()
     if e != nil {
-        log.Printf("failed to stat datafile %s: %s", v.dataFile, e)
+        glog.V(0).Infof("failed to stat datafile %s: %s", v.dataFile, e.Error())
         return e
     }
     if stat.Size() == 0 {
@@ -221,10 +233,10 @@ func (v *Volume) delete(n *Needle) (uint32, error) {
     //fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
     if ok {
         size := nv.Size
-        if err:= v.nm.Delete(n.Id); err != nil {
+        if err := v.nm.Delete(n.Id); err != nil {
             return size, err
         }
-        if _, err:= v.dataFile.Seek(0, 2); err != nil {
+        if _, err := v.dataFile.Seek(0, 2); err != nil {
             return size, err
         }
         n.Data = make([]byte, 0)
@@ -286,7 +298,7 @@ func (v *Volume) freeze() error {
     defer v.accessLock.Unlock()
     bn, _ := nakeFilename(v.dataFile.Name())
     cdbFn := bn + ".cdb"
-    log.Printf("converting %s to %s", nm.indexFile.Name(), cdbFn)
+    glog.V(0).Infof("converting %s to %s", nm.indexFile.Name(), cdbFn)
     err := DumpNeedleMapToCdb(cdbFn, nm)
     if err != nil {
         return err
@@ -304,7 +316,7 @@ func ScanVolumeFile(dirname string, id VolumeId,
     visitSuperBlock func(SuperBlock) error,
     visitNeedle func(n *Needle, offset uint32) error) (err error) {
     var v *Volume
-    if v, err = LoadVolumeOnly(dirname, id); err != nil {
+    if v, err = loadVolumeWithoutIndex(dirname, id); err != nil {
         return
     }
     if err = visitSuperBlock(v.SuperBlock); err != nil {
@@ -361,7 +373,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
         return err
     }, func(n *Needle, offset uint32) error {
         nv, ok := v.nm.Get(n.Id)
-        //log.Println("file size is", n.Size, "rest", rest)
+        //glog.V(0).Infoln("file size is", n.Size, "rest", rest)
         if ok && nv.Offset*NeedlePaddingSize == offset {
             if nv.Size > 0 {
                 if _, err = nm.Put(n.Id, new_offset/NeedlePaddingSize, n.Size); err != nil {
@@ -371,7 +383,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
                     return fmt.Errorf("cannot append needle: %s", err)
                 }
                 new_offset += n.DiskSize()
-                //log.Println("saving key", n.Id, "volume offset", old_offset, "=>", new_offset, "data_size", n.Size, "rest", rest)
+                //glog.V(0).Infoln("saving key", n.Id, "volume offset", old_offset, "=>", new_offset, "data_size", n.Size, "rest", rest)
             }
         }
         return nil
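Note: alongside the glog switch, several hunks in this file change fmt.Errorf("...: %s", e) to fmt.Errorf("...: %s", e.Error()). For a non-nil error the two are equivalent, since the %s verb already falls back to the Error method; only a nil error behaves differently. A quick check:

package main

import (
    "errors"
    "fmt"
)

func main() {
    e := errors.New("permission denied")
    fmt.Println(fmt.Errorf("cannot create Volume Data 1.dat: %s", e))         // pass the error value
    fmt.Println(fmt.Errorf("cannot create Volume Data 1.dat: %s", e.Error())) // this commit's form
    // Both print: cannot create Volume Data 1.dat: permission denied
    // With a nil error the first form prints %!s(<nil>), while calling e.Error() would panic.
}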
@@ -2,7 +2,7 @@ package topology
 
 import (
     "code.google.com/p/weed-fs/go/storage"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
 )
 
 type NodeId string
@@ -155,7 +155,7 @@ func (n *NodeImpl) LinkChildNode(node Node) {
         n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
         n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
         node.SetParent(n)
-        log.Println(n, "adds child", node.Id())
+        glog.V(0).Infoln(n, "adds child", node.Id())
     }
 }
 
@@ -167,7 +167,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
         n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
         n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
         n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
-        log.Println(n, "removes", node, "volumeCount =", n.activeVolumeCount)
+        glog.V(0).Infoln(n, "removes", node, "volumeCount =", n.activeVolumeCount)
     }
 }
 
@@ -2,7 +2,7 @@ package topology
 
 import (
     "code.google.com/p/weed-fs/go/storage"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "math/rand"
 )
 
@@ -70,7 +70,7 @@ func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId
             randomVolumeIndex -= freeSpace
         } else {
             if node.IsDataNode() && node.FreeSpace() > 0 {
-                log.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
+                glog.V(0).Infoln("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
                 return true, node.(*DataNode)
             }
             children := node.Children()
@@ -5,7 +5,7 @@ import (
     "code.google.com/p/weed-fs/go/storage"
     "errors"
     "io/ioutil"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "math/rand"
 )
 
@@ -55,7 +55,7 @@ func (t *Topology) loadConfiguration(configurationFile string) error {
         t.configuration, e = NewConfiguration(b)
         return e
     } else {
-        log.Println("Using default configurations.")
+        glog.V(0).Infoln("Using default configurations.")
     }
     return nil
 }
@@ -73,7 +73,7 @@ func (t *Topology) Lookup(vid storage.VolumeId) []*DataNode {
 
 func (t *Topology) RandomlyReserveOneVolume(dataCenter string) (bool, *DataNode, *storage.VolumeId) {
     if t.FreeSpace() <= 0 {
-        log.Println("Topology does not have free space left!")
+        glog.V(0).Infoln("Topology does not have free space left!")
         return false, nil, nil
     }
     vid := t.NextVolumeId()
@@ -102,7 +102,7 @@ func (t *Topology) PickForWrite(repType storage.ReplicationType, count int, data
 func (t *Topology) GetVolumeLayout(repType storage.ReplicationType) *VolumeLayout {
     replicationTypeIndex := repType.GetReplicationLevelIndex()
     if t.replicaType2VolumeLayout[replicationTypeIndex] == nil {
-        log.Println("adding replication type", repType)
+        glog.V(0).Infoln("adding replication type", repType)
         t.replicaType2VolumeLayout[replicationTypeIndex] = NewVolumeLayout(repType, t.volumeSizeLimit, t.pulse)
     }
     return t.replicaType2VolumeLayout[replicationTypeIndex]
@@ -5,7 +5,7 @@ import (
     "code.google.com/p/weed-fs/go/util"
     "encoding/json"
     "errors"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "net/url"
     "time"
 )
@@ -14,12 +14,12 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist
     ch := make(chan bool, locationlist.Length())
     for index, dn := range locationlist.list {
         go func(index int, url string, vid storage.VolumeId) {
-            //log.Println(index, "Check vacuuming", vid, "on", dn.Url())
+            //glog.V(0).Infoln(index, "Check vacuuming", vid, "on", dn.Url())
             if e, ret := vacuumVolume_Check(url, vid, garbageThreshold); e != nil {
-                //log.Println(index, "Error when checking vacuuming", vid, "on", url, e)
+                //glog.V(0).Infoln(index, "Error when checking vacuuming", vid, "on", url, e)
                 ch <- false
             } else {
-                //log.Println(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
+                //glog.V(0).Infoln(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
                 ch <- ret
             }
         }(index, dn.Url(), vid)
@@ -41,12 +41,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
     ch := make(chan bool, locationlist.Length())
     for index, dn := range locationlist.list {
         go func(index int, url string, vid storage.VolumeId) {
-            log.Println(index, "Start vacuuming", vid, "on", url)
+            glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
            if e := vacuumVolume_Compact(url, vid); e != nil {
-                log.Println(index, "Error when vacuuming", vid, "on", url, e)
+                glog.V(0).Infoln(index, "Error when vacuuming", vid, "on", url, e)
                 ch <- false
             } else {
-                log.Println(index, "Complete vacuuming", vid, "on", url)
+                glog.V(0).Infoln(index, "Complete vacuuming", vid, "on", url)
                 ch <- true
             }
         }(index, dn.Url(), vid)
@@ -65,12 +65,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
 func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
     isCommitSuccess := true
     for _, dn := range locationlist.list {
-        log.Println("Start Commiting vacuum", vid, "on", dn.Url())
+        glog.V(0).Infoln("Start Commiting vacuum", vid, "on", dn.Url())
         if e := vacuumVolume_Commit(dn.Url(), vid); e != nil {
-            log.Println("Error when committing vacuum", vid, "on", dn.Url(), e)
+            glog.V(0).Infoln("Error when committing vacuum", vid, "on", dn.Url(), e)
             isCommitSuccess = false
         } else {
-            log.Println("Complete Commiting vacuum", vid, "on", dn.Url())
+            glog.V(0).Infoln("Complete Commiting vacuum", vid, "on", dn.Url())
         }
     }
     if isCommitSuccess {
@@ -104,7 +104,7 @@ func vacuumVolume_Check(urlLocation string, vid storage.VolumeId, garbageThresho
     values.Add("garbageThreshold", garbageThreshold)
     jsonBlob, err := util.Post("http://"+urlLocation+"/admin/vacuum_volume_check", values)
     if err != nil {
-        log.Println("parameters:", values)
+        glog.V(0).Infoln("parameters:", values)
         return err, false
     }
     var ret VacuumVolumeResult
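Note: the three batch functions share one shape — a goroutine per replica location with results gathered over a buffered channel — for the check, compact and commit phases of a vacuum. A sketch of that fan-out/fan-in shape; the hunks above only show the fan-out, so the collection loop, the all-must-agree rule, and the stubbed HTTP call are assumptions here:

package main

import "fmt"

// batchCheck mirrors the fan-out in batchVacuumVolumeCheck: one goroutine per
// location, answers collected on a channel sized to the location count.
func batchCheck(locations []string, check func(url string) (bool, error)) bool {
    ch := make(chan bool, len(locations))
    for _, url := range locations {
        go func(url string) {
            ret, e := check(url) // stands in for the HTTP call vacuumVolume_Check makes
            ch <- e == nil && ret
        }(url)
    }
    need := true
    for range locations {
        need = need && <-ch // assumed: every replica must report enough garbage
    }
    return need
}

func main() {
    locations := []string{"127.0.0.1:8080", "127.0.0.1:8081"} // illustrative
    fmt.Println("vacuum needed:", batchCheck(locations, func(string) (bool, error) { return true, nil }))
}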
@@ -2,7 +2,7 @@ package topology
 
 import (
     "code.google.com/p/weed-fs/go/storage"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "math/rand"
     "time"
 )
@@ -28,10 +28,10 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
                 t.SetVolumeCapacityFull(v)
             case dn := <-t.chanRecoveredDataNodes:
                 t.RegisterRecoveredDataNode(dn)
-                log.Println("DataNode", dn, "is back alive!")
+                glog.V(0).Infoln("DataNode", dn, "is back alive!")
             case dn := <-t.chanDeadDataNodes:
                 t.UnRegisterDataNode(dn)
-                log.Println("DataNode", dn, "is dead!")
+                glog.V(0).Infoln("DataNode", dn, "is dead!")
             }
         }
     }()
@@ -48,7 +48,7 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 }
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
     for _, v := range dn.volumes {
-        log.Println("Removing Volume", v.Id, "from the dead volume server", dn)
+        glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn)
         vl := t.GetVolumeLayout(v.RepType)
         vl.SetVolumeUnavailable(dn, v.Id)
     }
@@ -3,7 +3,7 @@ package topology
 import (
     "code.google.com/p/weed-fs/go/storage"
     "errors"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "math/rand"
     "sync"
 )
@@ -59,7 +59,7 @@ func (vl *VolumeLayout) Lookup(vid storage.VolumeId) []*DataNode {
 func (vl *VolumeLayout) PickForWrite(count int, dataCenter string) (*storage.VolumeId, int, *VolumeLocationList, error) {
     len_writers := len(vl.writables)
     if len_writers <= 0 {
-        log.Println("No more writable volumes!")
+        glog.V(0).Infoln("No more writable volumes!")
         return nil, 0, nil, errors.New("No more writable volumes!")
     }
     if dataCenter == "" {
@@ -107,7 +107,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(dataCenter string) int {
 func (vl *VolumeLayout) removeFromWritable(vid storage.VolumeId) bool {
     for i, v := range vl.writables {
         if v == vid {
-            log.Println("Volume", vid, "becomes unwritable")
+            glog.V(0).Infoln("Volume", vid, "becomes unwritable")
             vl.writables = append(vl.writables[:i], vl.writables[i+1:]...)
             return true
         }
@@ -120,7 +120,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
             return false
         }
     }
-    log.Println("Volume", vid, "becomes writable")
+    glog.V(0).Infoln("Volume", vid, "becomes writable")
     vl.writables = append(vl.writables, vid)
     return true
 }
@@ -131,7 +131,7 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId)
 
     if vl.vid2location[vid].Remove(dn) {
         if vl.vid2location[vid].Length() < vl.repType.GetCopyCount() {
-            log.Println("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
+            glog.V(0).Infoln("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
             return vl.removeFromWritable(vid)
         }
     }
@@ -153,7 +153,7 @@ func (vl *VolumeLayout) SetVolumeCapacityFull(vid storage.VolumeId) bool {
     vl.accessLock.Lock()
     defer vl.accessLock.Unlock()
 
-    // log.Println("Volume", vid, "reaches full capacity.")
+    // glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
     return vl.removeFromWritable(vid)
 }
 
@@ -11,7 +11,7 @@ package util
 import (
     "bytes"
     "encoding/json"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "os"
 )
 
@@ -32,7 +32,7 @@ func LoadConfig(filename string) *Config {
     result.filename = filename
     err := result.parse()
     if err != nil {
-        log.Fatalf("error loading config file %s: %s", filename, err)
+        glog.Fatalf("error loading config file %s: %s", filename, err)
     }
     return result
 }
@@ -42,7 +42,7 @@ func LoadConfigString(s string) *Config {
     result := newConfig()
     err := json.Unmarshal([]byte(s), &result.data)
     if err != nil {
-        log.Fatalf("error parsing config string %s: %s", s, err)
+        glog.Fatalf("error parsing config string %s: %s", s, err)
     }
     return result
 }
@@ -2,7 +2,7 @@ package util
 
 import (
     "io/ioutil"
-    "log"
+    "code.google.com/p/weed-fs/go/glog"
     "net/http"
     "net/url"
 )
@@ -10,13 +10,13 @@ import (
 func Post(url string, values url.Values) ([]byte, error) {
     r, err := http.PostForm(url, values)
     if err != nil {
-        log.Println("post to", url, err)
+        glog.V(0).Infoln("post to", url, err)
         return nil, err
     }
     defer r.Body.Close()
     b, err := ioutil.ReadAll(r.Body)
     if err != nil {
-        log.Println("read post result from", url, err)
+        glog.V(0).Infoln("read post result from", url, err)
         return nil, err
     }
     return b, nil
@@ -5,7 +5,7 @@ import (
 	"bytes"
 	"code.google.com/p/weed-fs/go/storage"
 	"fmt"
-	"log"
+	"code.google.com/p/weed-fs/go/glog"
 	"os"
 	"path"
 	"strconv"
@@ -67,7 +67,7 @@ func runExport(cmd *Command, args []string) bool {
 		fh = os.Stdout
 	} else {
 		if fh, err = os.Create(*dest); err != nil {
-			log.Fatalf("cannot open output tar %s: %s", *dest, err)
+			glog.Fatalf("cannot open output tar %s: %s", *dest, err)
 		}
 	}
 	defer fh.Close()
@@ -84,13 +84,13 @@ func runExport(cmd *Command, args []string) bool {
 	vid := storage.VolumeId(*exportVolumeId)
 	indexFile, err := os.OpenFile(path.Join(*exportVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 	if err != nil {
-		log.Fatalf("Create Volume Index [ERROR] %s\n", err)
+		glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
 	}
 	defer indexFile.Close()
 
 	nm, err := storage.LoadNeedleMap(indexFile)
 	if err != nil {
-		log.Fatalf("cannot load needle map from %s: %s", indexFile, err)
+		glog.Fatalf("cannot load needle map from %s: %s", indexFile, err)
 	}
 
 	var version storage.Version
@@ -113,7 +113,7 @@ func runExport(cmd *Command, args []string) bool {
 		return nil
 	})
 	if err != nil {
-		log.Fatalf("Export Volume File [ERROR] %s\n", err)
+		glog.Fatalf("Export Volume File [ERROR] %s\n", err)
 	}
 	return true
 }
 
@@ -2,7 +2,7 @@ package main
 
 import (
 	"code.google.com/p/weed-fs/go/storage"
-	"log"
+	"code.google.com/p/weed-fs/go/glog"
 	"os"
 	"path"
 	"strconv"
@@ -35,7 +35,7 @@ func runFix(cmd *Command, args []string) bool {
 	fileName := strconv.Itoa(*fixVolumeId)
 	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_WRONLY|os.O_CREATE, 0644)
 	if err != nil {
-		log.Fatalf("Create Volume Index [ERROR] %s\n", err)
+		glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
 	}
 	defer indexFile.Close()
 
@@ -57,7 +57,7 @@ func runFix(cmd *Command, args []string) bool {
 		return nil
 	})
 	if err != nil {
-		log.Fatalf("Export Volume File [ERROR] %s\n", err)
+		glog.Fatalf("Export Volume File [ERROR] %s\n", err)
 	}
 
 	return true
 
@@ -8,7 +8,7 @@ import (
 	"code.google.com/p/weed-fs/go/topology"
 	"encoding/json"
 	"errors"
-	"log"
+	"code.google.com/p/weed-fs/go/glog"
 	"net/http"
 	"runtime"
 	"strconv"
@@ -205,10 +205,10 @@ func runMaster(cmd *Command, args []string) bool {
 	var e error
 	if topo, e = topology.NewTopology("topo", *confFile, *metaFolder, "weed",
 		uint64(*volumeSizeLimitMB)*1024*1024, *mpulse); e != nil {
-		log.Fatalf("cannot create topology:%s", e)
+		glog.Fatalf("cannot create topology:%s", e)
 	}
 	vg = replication.NewDefaultVolumeGrowth()
-	log.Println("Volume Size Limit is", *volumeSizeLimitMB, "MB")
+	glog.V(0).Infoln("Volume Size Limit is", *volumeSizeLimitMB, "MB")
 	http.HandleFunc("/dir/assign", dirAssignHandler)
 	http.HandleFunc("/dir/lookup", dirLookupHandler)
 	http.HandleFunc("/dir/join", dirJoinHandler)
@@ -222,7 +222,7 @@ func runMaster(cmd *Command, args []string) bool {
 
 	topo.StartRefreshWritableVolumes(*garbageThreshold)
 
-	log.Println("Start Weed Master", VERSION, "at port", strconv.Itoa(*mport))
+	glog.V(0).Infoln("Start Weed Master", VERSION, "at port", strconv.Itoa(*mport))
 	srv := &http.Server{
 		Addr:    ":" + strconv.Itoa(*mport),
 		Handler: http.DefaultServeMux,
@@ -230,7 +230,7 @@ func runMaster(cmd *Command, args []string) bool {
 	}
 	e = srv.ListenAndServe()
 	if e != nil {
-		log.Fatalf("Fail to start:%s", e)
+		glog.Fatalf("Fail to start:%s", e)
 	}
 	return true
 }
 
@@ -3,7 +3,7 @@ package main
 import (
 	"bufio"
 	"fmt"
-	"log"
+	"code.google.com/p/weed-fs/go/glog"
 	"os"
 )
 
@@ -28,10 +28,10 @@ func runShell(command *Command, args []string) bool {
 	prompt := func() {
 		var err error
 		if _, err = o.WriteString("> "); err != nil {
-			log.Printf("error writing to stdout: %s", err)
+			glog.V(0).Infoln("error writing to stdout:", err)
 		}
 		if err = o.Flush(); err != nil {
-			log.Printf("error flushing stdout: %s", err)
+			glog.V(0).Infoln("error flushing stdout:", err)
 		}
 	}
 	readLine := func() string {
@@ -45,7 +45,7 @@ func runShell(command *Command, args []string) bool {
 	execCmd := func(cmd string) int {
 		if cmd != "" {
 			if _, err := o.WriteString(cmd); err != nil {
-				log.Printf("error writing to stdout: %s", err)
+				glog.V(0).Infoln("error writing to stdout:", err)
 			}
 		}
 		return 0
 
@@ -4,7 +4,7 @@ import (
 	"code.google.com/p/weed-fs/go/operation"
 	"code.google.com/p/weed-fs/go/replication"
 	"code.google.com/p/weed-fs/go/storage"
-	"log"
+	"code.google.com/p/weed-fs/go/glog"
 	"math/rand"
 	"mime"
 	"net/http"
@@ -145,7 +145,7 @@ func GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool)
 		return
 	}
 	if n.Cookie != cookie {
-		log.Println("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
+		glog.V(0).Infoln("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
 		w.WriteHeader(http.StatusNotFound)
 		return
 	}
@@ -246,7 +246,7 @@ func DeleteHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if n.Cookie != cookie {
-		log.Println("delete with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
+		glog.V(0).Infoln("delete with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
 		return
 	}
 
@@ -283,7 +283,7 @@ func parseURLPath(path string) (vid, fid, filename, ext string) {
 	commaIndex := strings.LastIndex(path[sepIndex:], ",")
 	if commaIndex <= 0 {
 		if "favicon.ico" != path[sepIndex+1:] {
-			log.Println("unknown file id", path[sepIndex+1:])
+			glog.V(0).Infoln("unknown file id", path[sepIndex+1:])
 		}
 		return
 	}
@@ -311,23 +311,23 @@ func runVolume(cmd *Command, args []string) bool {
 		if max, e := strconv.Atoi(maxString); e == nil {
 			maxCounts = append(maxCounts, max)
 		} else {
-			log.Fatalf("The max specified in -max not a valid number %s", max)
+			glog.Fatalf("The max specified in -max not a valid number %s", max)
 		}
 	}
 	if len(folders) != len(maxCounts) {
-		log.Fatalf("%d directories by -dir, but only %d max is set by -max", len(folders), len(maxCounts))
+		glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(folders), len(maxCounts))
 	}
 	for _, folder := range folders {
 		fileInfo, err := os.Stat(folder)
 		if err != nil {
-			log.Fatalf("No Existing Folder:%s", folder)
+			glog.Fatalf("No Existing Folder:%s", folder)
 		}
 		if !fileInfo.IsDir() {
-			log.Fatalf("Volume Folder should not be a file:%s", folder)
+			glog.Fatalf("Volume Folder should not be a file:%s", folder)
 		}
 		perm := fileInfo.Mode().Perm()
-		log.Println("Volume Folder", folder)
-		log.Println("Permission:", perm)
+		glog.V(0).Infoln("Volume Folder", folder)
+		glog.V(0).Infoln("Permission:", perm)
 	}
 
 	if *publicUrl == "" {
@@ -355,7 +355,7 @@ func runVolume(cmd *Command, args []string) bool {
 			if err == nil {
 				if !connected {
 					connected = true
-					log.Println("Reconnected with master")
+					glog.V(0).Infoln("Reconnected with master")
 				}
 			} else {
 				if connected {
@@ -365,9 +365,9 @@ func runVolume(cmd *Command, args []string) bool {
 			time.Sleep(time.Duration(float32(*vpulse*1e3)*(1+rand.Float32())) * time.Millisecond)
 		}
 	}()
-	log.Println("store joined at", *masterNode)
+	glog.V(0).Infoln("store joined at", *masterNode)
 
-	log.Println("Start Weed volume server", VERSION, "at http://"+*ip+":"+strconv.Itoa(*vport))
+	glog.V(0).Infoln("Start Weed volume server", VERSION, "at http://"+*ip+":"+strconv.Itoa(*vport))
 	srv := &http.Server{
 		Addr:    ":" + strconv.Itoa(*vport),
 		Handler: http.DefaultServeMux,
@@ -375,7 +375,7 @@ func runVolume(cmd *Command, args []string) bool {
 	}
 	e := srv.ListenAndServe()
 	if e != nil {
-		log.Fatalf("Fail to start:%s", e.Error())
+		glog.Fatalf("Fail to start:%s", e.Error())
 	}
 	return true
 }
 
@@ -7,5 +7,5 @@ import (
 )
 
 func TestXYZ(t *testing.T) {
-	println("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
+	glog.V(4).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
 }
 
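The test's println becomes glog.V(4).Infoln, so that line is only emitted when verbosity is raised. A minimal sketch of the gating, assuming the vendored fork keeps upstream glog's -v flag handling; the messages are invented for illustration.

package main

import (
	"flag"

	"code.google.com/p/weed-fs/go/glog"
)

func main() {
	flag.Parse() // the -v flag that gates V(n) calls is read here
	glog.V(0).Infoln("logged at the default verbosity")
	glog.V(4).Infoln("logged only when run with -v=4 or higher")
}
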
@@ -5,7 +5,7 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"log"
+	"code.google.com/p/weed-fs/go/glog"
 	"math/rand"
 	"net/http"
 	"os"
@@ -42,6 +42,7 @@ func setExitStatus(n int) {
 }
 
 func main() {
+	glog.ToStderrAndLog()
 	rand.Seed(time.Now().UnixNano())
 	flag.Usage = usage
 	flag.Parse()
@@ -207,7 +208,7 @@ func writeJson(w http.ResponseWriter, r *http.Request, obj interface{}) (err err
 // wrapper for writeJson - just logs errors
 func writeJsonQuiet(w http.ResponseWriter, r *http.Request, obj interface{}) {
 	if err := writeJson(w, r, obj); err != nil {
-		log.Printf("error writing JSON %s: %s", obj, err)
+		glog.V(0).Infof("error writing JSON %s: %s", obj, err.Error())
 	}
 }
 func writeJsonError(w http.ResponseWriter, r *http.Request, err error) {
@@ -218,6 +219,6 @@ func writeJsonError(w http.ResponseWriter, r *http.Request, err error) {
 
 func debug(params ...interface{}) {
 	if *IsDebug {
-		log.Println(params)
+		glog.V(0).Infoln(params)
 	}
 }
 
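The one functional addition beyond the call-site renames is the glog.ToStderrAndLog() call at the top of main, which mirrors log output to stderr in addition to the log files. A minimal sketch of that startup wiring, assuming ToStderrAndLog is the convenience helper this commit introduces and that the fork still registers its flags with the standard flag package; the startup message is invented.

package main

import (
	"flag"

	"code.google.com/p/weed-fs/go/glog"
)

func main() {
	glog.ToStderrAndLog() // copy every log line to stderr as well as the log files
	flag.Parse()          // picks up glog's own flags (such as -v and -log_dir)
	glog.V(0).Infoln("weed starting up")
}
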