Mirror of https://github.com/seaweedfs/seaweedfs.git

commit bb01324482 (parent 8edf12f026)

    compatible with Go1

    git-svn-id: https://weed-fs.googlecode.com/svn/trunk@46 282b0af5-e82d-9cf1-ede4-77906d7719d0
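The hunks below migrate the tree from the pre-Go1 standard library to Go 1: os.Error / os.NewError become error / errors.New, strconv.Atoui64 / Uitoa64 become strconv.ParseUint / FormatUint, import paths such as "http", "json", "url", "rand" and "gob" move to "net/http", "encoding/json", "net/url", "math/rand" and "encoding/gob", the two-value map assignment m[k] = v, false becomes delete(m, k), os.FileInfo fields become methods (stat.Size()), and os.EOF becomes io.EOF; the gb-generated build files are dropped. The sketch below is written only for this summary and is not code from the commit; lookupVolume and forgetVolume are hypothetical names used to show the replacement APIs side by side with comments naming what each one replaces.

    // Illustrative sketch only (not from the commit): the Go 1 APIs that the
    // hunks below substitute for their pre-Go1 counterparts.
    package main

    import (
        "errors"  // replaces os.NewError / os.Error
        "fmt"
        "strconv" // Atoui64 / Uitoa64 become ParseUint / FormatUint
    )

    // lookupVolume is a hypothetical helper used only for illustration.
    func lookupVolume(vidString string, volumes map[uint64]string) (string, error) {
        vid, err := strconv.ParseUint(vidString, 10, 64) // was strconv.Atoui64(vidString)
        if err != nil {
            return "", errors.New("volume id " + vidString + " is not a valid unsigned integer")
        }
        if server, ok := volumes[vid]; ok {
            return server, nil
        }
        return "", errors.New("volume id " + strconv.FormatUint(vid, 10) + " not found")
    }

    // forgetVolume shows the Go 1 built-in delete; pre-Go1 code removed a key
    // with the two-value assignment volumes[vid] = "", false.
    func forgetVolume(volumes map[uint64]string, vid uint64) {
        delete(volumes, vid)
    }

    func main() {
        volumes := map[uint64]string{42: "localhost:8080"}
        fmt.Println(lookupVolume("42", volumes))
        forgetVolume(volumes, 42)
        fmt.Println(lookupVolume("42", volumes))
    }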
gb-generated build script (deleted):
@@ -1,19 +0,0 @@
-# Build script generated by gb: http://go-gb.googlecode.com
-# gb provides configuration-free building and distributing
-
-echo "Build script generated by gb: http://go-gb.googlecode.com"
-if [ "$1" = "goinstall" ]; then
-echo Running goinstall \
-
-else
-echo Building \
-&& echo "(in src/pkg/util)" gomake $1 && cd src/pkg/util && gomake $1 && cd - > /dev/null \
-&& echo "(in src/pkg/storage)" gomake $1 && cd src/pkg/storage && gomake $1 && cd - > /dev/null \
-&& echo "(in src/cmd/weedvolume/fix_volume_index)" gomake $1 && cd src/cmd/weedvolume/fix_volume_index && gomake $1 && cd - > /dev/null \
-&& echo "(in src/cmd/weedvolume)" gomake $1 && cd src/cmd/weedvolume && gomake $1 && cd - > /dev/null \
-&& echo "(in src/pkg/directory)" gomake $1 && cd src/pkg/directory && gomake $1 && cd - > /dev/null \
-&& echo "(in src/cmd/weedmaster)" gomake $1 && cd src/cmd/weedmaster && gomake $1 && cd - > /dev/null \
-
-fi
-
-# The makefiles above are invoked in topological dependence order
src/cmd/weedmaster/Makefile (gb-generated, deleted):
@@ -1,39 +0,0 @@
-# Makefile generated by gb: http://go-gb.googlecode.com
-# gb provides configuration-free building and distributing
-
-include $(GOROOT)/src/Make.inc
-
-TARG=weedmaster
-GOFILES=\
-    weedmaster.go\
-
-# gb: this is the local install
-GBROOT=../../..
-
-# gb: compile/link against local install
-GCIMPORTS+= -I $(GBROOT)/_obj
-LDIMPORTS+= -L $(GBROOT)/_obj
-
-# gb: compile/link against GOPATH entries
-GOPATHSEP=:
-ifeq ($(GOHOSTOS),windows)
-GOPATHSEP=;
-endif
-GCIMPORTS+=-I $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -I , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-LDIMPORTS+=-L $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -L , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-
-# gb: default target is in GBROOT this way
-command:
-
-include $(GOROOT)/src/Make.cmd
-
-# gb: copy to local install
-$(GBROOT)/bin/$(TARG): $(TARG)
-    mkdir -p $(dir $@); cp -f $< $@
-command: $(GBROOT)/bin/$(TARG)
-
-# gb: local dependencies
-$(TARG): $(GBROOT)/_obj/storage.a
-
-$(TARG): $(GBROOT)/_obj/directory.a
-
src/cmd/weedmaster/weedmaster.go (deleted):
@@ -1,90 +0,0 @@
-package main
-
-import (
-    "storage"
-    "directory"
-    "flag"
-    "fmt"
-    "http"
-    "json"
-    "log"
-    "strconv"
-    "strings"
-)
-
-var (
-    port = flag.Int("port", 9333, "http listen port")
-    metaFolder = flag.String("mdir", "/tmp", "data directory to store mappings")
-    capacity = flag.Int("capacity", 100, "maximum number of volumes to hold")
-    mapper *directory.Mapper
-    IsDebug = flag.Bool("debug", false, "verbose debug information")
-    volumeSizeLimitMB = flag.Uint("volumeSizeLimitMB", 32*1024, "Default Volume Size in MegaBytes")
-)
-
-func dirLookupHandler(w http.ResponseWriter, r *http.Request) {
-    vid := r.FormValue("volumeId")
-    commaSep := strings.Index(vid, ",")
-    if commaSep > 0 {
-        vid = vid[0:commaSep]
-    }
-    volumeId, _ := strconv.Atoui64(vid)
-    machine, e := mapper.Get(uint32(volumeId))
-    if e == nil {
-        writeJson(w, r, machine.Server)
-    } else {
-        log.Println("Invalid volume id", volumeId)
-        writeJson(w, r, map[string]string{"error": "volume id " + strconv.Uitoa64(volumeId) + " not found"})
-    }
-}
-func dirAssignHandler(w http.ResponseWriter, r *http.Request) {
-    fid, machine, err := mapper.PickForWrite()
-    if err == nil {
-        writeJson(w, r, map[string]string{"fid": fid, "url": machine.Url})
-    } else {
-        log.Println(err)
-        writeJson(w, r, map[string]string{"error": err.String()})
-    }
-}
-func dirJoinHandler(w http.ResponseWriter, r *http.Request) {
-    s := r.RemoteAddr[0:strings.Index(r.RemoteAddr, ":")+1] + r.FormValue("port")
-    publicUrl := r.FormValue("publicUrl")
-    volumes := new([]storage.VolumeInfo)
-    json.Unmarshal([]byte(r.FormValue("volumes")), volumes)
-    if *IsDebug {
-        log.Println(s, "volumes", r.FormValue("volumes"))
-    }
-    mapper.Add(*directory.NewMachine(s, publicUrl, *volumes))
-}
-func dirStatusHandler(w http.ResponseWriter, r *http.Request) {
-    writeJson(w, r, mapper)
-}
-func writeJson(w http.ResponseWriter, r *http.Request, obj interface{}) {
-    w.Header().Set("Content-Type", "application/javascript")
-    bytes, _ := json.Marshal(obj)
-    callback := r.FormValue("callback")
-    if callback == "" {
-        w.Write(bytes)
-    } else {
-        w.Write([]uint8(callback))
-        w.Write([]uint8("("))
-        fmt.Fprint(w, string(bytes))
-        w.Write([]uint8(")"))
-    }
-}
-
-func main() {
-    flag.Parse()
-    log.Println("Volume Size Limit is", *volumeSizeLimitMB, "MB")
-    mapper = directory.NewMapper(*metaFolder, "directory", uint64(*volumeSizeLimitMB)*1024*1024)
-    http.HandleFunc("/dir/assign", dirAssignHandler)
-    http.HandleFunc("/dir/lookup", dirLookupHandler)
-    http.HandleFunc("/dir/join", dirJoinHandler)
-    http.HandleFunc("/dir/status", dirStatusHandler)
-
-    log.Println("Start directory service at http://127.0.0.1:" + strconv.Itoa(*port))
-    e := http.ListenAndServe(":"+strconv.Itoa(*port), nil)
-    if e != nil {
-        log.Fatalf("Fail to start:", e.String())
-    }
-
-}
src/cmd/weedvolume/Makefile (gb-generated, deleted):
@@ -1,37 +0,0 @@
-# Makefile generated by gb: http://go-gb.googlecode.com
-# gb provides configuration-free building and distributing
-
-include $(GOROOT)/src/Make.inc
-
-TARG=weedvolume
-GOFILES=\
-    weedvolume.go\
-
-# gb: this is the local install
-GBROOT=../../..
-
-# gb: compile/link against local install
-GCIMPORTS+= -I $(GBROOT)/_obj
-LDIMPORTS+= -L $(GBROOT)/_obj
-
-# gb: compile/link against GOPATH entries
-GOPATHSEP=:
-ifeq ($(GOHOSTOS),windows)
-GOPATHSEP=;
-endif
-GCIMPORTS+=-I $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -I , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-LDIMPORTS+=-L $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -L , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-
-# gb: default target is in GBROOT this way
-command:
-
-include $(GOROOT)/src/Make.cmd
-
-# gb: copy to local install
-$(GBROOT)/bin/$(TARG): $(TARG)
-    mkdir -p $(dir $@); cp -f $< $@
-command: $(GBROOT)/bin/$(TARG)
-
-# gb: local dependencies
-$(TARG): $(GBROOT)/_obj/storage.a
-
src/cmd/weedvolume/fix_volume_index/Makefile (gb-generated, deleted):
@@ -1,37 +0,0 @@
-# Makefile generated by gb: http://go-gb.googlecode.com
-# gb provides configuration-free building and distributing
-
-include $(GOROOT)/src/Make.inc
-
-TARG=fix_volume_index
-GOFILES=\
-    fix_volume_index.go\
-
-# gb: this is the local install
-GBROOT=../../../..
-
-# gb: compile/link against local install
-GCIMPORTS+= -I $(GBROOT)/_obj
-LDIMPORTS+= -L $(GBROOT)/_obj
-
-# gb: compile/link against GOPATH entries
-GOPATHSEP=:
-ifeq ($(GOHOSTOS),windows)
-GOPATHSEP=;
-endif
-GCIMPORTS+=-I $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -I , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-LDIMPORTS+=-L $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -L , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-
-# gb: default target is in GBROOT this way
-command:
-
-include $(GOROOT)/src/Make.cmd
-
-# gb: copy to local install
-$(GBROOT)/bin/$(TARG): $(TARG)
-    mkdir -p $(dir $@); cp -f $< $@
-command: $(GBROOT)/bin/$(TARG)
-
-# gb: local dependencies
-$(TARG): $(GBROOT)/_obj/storage.a
-
src/cmd/weedvolume/fix_volume_index/fix_volume_index.go (deleted):
@@ -1,59 +0,0 @@
-package main
-
-import (
-    "storage"
-    "flag"
-    "log"
-    "os"
-    "path"
-    "strconv"
-)
-
-var (
-    dir = flag.String("dir", "/tmp", "data directory to store files")
-    volumeId = flag.Int("volumeId", -1, "a non-negative volume id. The volume should already exist in the dir. The volume index file should not exist.")
-    IsDebug = flag.Bool("debug", false, "enable debug mode")
-
-    store *storage.Store
-)
-
-func main() {
-    flag.Parse()
-
-    if *volumeId == -1 {
-        flag.Usage()
-        return
-    }
-
-    fileName := strconv.Itoa(*volumeId)
-    dataFile, e := os.OpenFile(path.Join(*dir, fileName+".dat"), os.O_RDONLY, 0644)
-    if e != nil {
-        log.Fatalf("Read Volume [ERROR] %s\n", e)
-    }
-    defer dataFile.Close()
-    indexFile, ie := os.OpenFile(path.Join(*dir, fileName+".idx"), os.O_WRONLY|os.O_CREATE, 0644)
-    if ie != nil {
-        log.Fatalf("Create Volume Index [ERROR] %s\n", ie)
-    }
-    defer indexFile.Close()
-
-    //skip the volume super block
-    dataFile.Seek(storage.SuperBlockSize, 0)
-
-    n, length := storage.ReadNeedle(dataFile)
-    nm := storage.NewNeedleMap(indexFile)
-    offset := uint32(storage.SuperBlockSize)
-    for n != nil {
-        if *IsDebug {
-            log.Println("key", n.Key, "volume offset", offset, "data_size", n.Size, "length", length)
-        }
-        if n.Size > 0 {
-            count, pe := nm.Put(n.Key, offset/8, n.Size)
-            if *IsDebug {
-                log.Println("saved", count, "with error", pe)
-            }
-        }
-        offset += length
-        n, length = storage.ReadNeedle(dataFile)
-    }
-}
src/cmd/weedvolume/weedvolume.go (deleted):
@@ -1,168 +0,0 @@
-package main
-
-import (
-    "storage"
-    "flag"
-    "fmt"
-    "http"
-    "json"
-    "log"
-    "mime"
-    "rand"
-    "strconv"
-    "strings"
-    "time"
-)
-
-var (
-    port = flag.Int("port", 8080, "http listen port")
-    chunkFolder = flag.String("dir", "/tmp", "data directory to store files")
-    volumes = flag.String("volumes", "0,1-3,4", "comma-separated list of volume ids or range of ids")
-    publicUrl = flag.String("publicUrl", "localhost:8080", "public url to serve data read")
-    metaServer = flag.String("mserver", "localhost:9333", "master directory server to store mappings")
-    IsDebug = flag.Bool("debug", false, "enable debug mode")
-    pulse = flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
-
-    store *storage.Store
-)
-
-func statusHandler(w http.ResponseWriter, r *http.Request) {
-    writeJson(w, r, store.Status())
-}
-func addVolumeHandler(w http.ResponseWriter, r *http.Request) {
-    store.AddVolume(r.FormValue("volume"))
-    writeJson(w, r, store.Status())
-}
-func storeHandler(w http.ResponseWriter, r *http.Request) {
-    switch r.Method {
-    case "GET":
-        GetHandler(w, r)
-    case "DELETE":
-        DeleteHandler(w, r)
-    case "POST":
-        PostHandler(w, r)
-    }
-}
-func GetHandler(w http.ResponseWriter, r *http.Request) {
-    n := new(storage.Needle)
-    vid, fid, ext := parseURLPath(r.URL.Path)
-    volumeId, _ := strconv.Atoui64(vid)
-    n.ParsePath(fid)
-
-    if *IsDebug {
-        log.Println("volume", volumeId, "reading", n)
-    }
-    cookie := n.Cookie
-    count, e := store.Read(volumeId, n)
-    if *IsDebug {
-        log.Println("read bytes", count, "error", e)
-    }
-    if n.Cookie != cookie {
-        log.Println("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
-        return
-    }
-    if ext != "" {
-        w.Header().Set("Content-Type", mime.TypeByExtension(ext))
-    }
-    w.Write(n.Data)
-}
-func PostHandler(w http.ResponseWriter, r *http.Request) {
-    vid, _, _ := parseURLPath(r.URL.Path)
-    volumeId, e := strconv.Atoui64(vid)
-    if e != nil {
-        writeJson(w, r, e)
-    } else {
-        ret := store.Write(volumeId, storage.NewNeedle(r))
-        m := make(map[string]uint32)
-        m["size"] = ret
-        writeJson(w, r, m)
-    }
-}
-func DeleteHandler(w http.ResponseWriter, r *http.Request) {
-    n := new(storage.Needle)
-    vid, fid, _ := parseURLPath(r.URL.Path)
-    volumeId, _ := strconv.Atoui64(vid)
-    n.ParsePath(fid)
-
-    if *IsDebug {
-        log.Println("deleting", n)
-    }
-
-    cookie := n.Cookie
-    count, ok := store.Read(volumeId, n)
-
-    if ok != nil {
-        m := make(map[string]uint32)
-        m["size"] = 0
-        writeJson(w, r, m)
-        return
-    }
-
-    if n.Cookie != cookie {
-        log.Println("delete with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
-        return
-    }
-
-    n.Size = 0
-    store.Delete(volumeId, n)
-    m := make(map[string]uint32)
-    m["size"] = uint32(count)
-    writeJson(w, r, m)
-}
-func writeJson(w http.ResponseWriter, r *http.Request, obj interface{}) {
-    w.Header().Set("Content-Type", "application/javascript")
-    bytes, _ := json.Marshal(obj)
-    callback := r.FormValue("callback")
-    if callback == "" {
-        w.Write(bytes)
-    } else {
-        w.Write([]uint8(callback))
-        w.Write([]uint8("("))
-        fmt.Fprint(w, string(bytes))
-        w.Write([]uint8(")"))
-    }
-    //log.Println("JSON Response", string(bytes))
-}
-func parseURLPath(path string) (vid, fid, ext string) {
-    sepIndex := strings.LastIndex(path, "/")
-    commaIndex := strings.LastIndex(path[sepIndex:], ",")
-    if commaIndex <= 0 {
-        log.Println("unknown file id", path[sepIndex+1:])
-        return
-    }
-    dotIndex := strings.LastIndex(path[sepIndex:], ".")
-    vid = path[sepIndex+1 : commaIndex]
-    fid = path[commaIndex+1:]
-    ext = ""
-    if dotIndex > 0 {
-        fid = path[commaIndex+1 : dotIndex]
-        ext = path[dotIndex+1:]
-    }
-    return
-}
-
-func main() {
-    flag.Parse()
-    //TODO: now default to 1G, this value should come from server?
-    store = storage.NewStore(*port, *publicUrl, *chunkFolder, *volumes)
-    defer store.Close()
-    http.HandleFunc("/", storeHandler)
-    http.HandleFunc("/status", statusHandler)
-    http.HandleFunc("/add_volume", addVolumeHandler)
-
-    go func() {
-        for {
-            store.Join(*metaServer)
-            ns := int64(*pulse) * 1e9
-            time.Sleep(ns + rand.Int63()%ns)
-        }
-    }()
-    log.Println("store joined at", *metaServer)
-
-    log.Println("Start storage service at http://127.0.0.1:"+strconv.Itoa(*port), "public url", *publicUrl)
-    e := http.ListenAndServe(":"+strconv.Itoa(*port), nil)
-    if e != nil {
-        log.Fatalf("Fail to start:", e.String())
-    }
-
-}
src/pkg/directory/Makefile (gb-generated, deleted):
@@ -1,36 +0,0 @@
-# Makefile generated by gb: http://go-gb.googlecode.com
-# gb provides configuration-free building and distributing
-
-include $(GOROOT)/src/Make.inc
-
-TARG=directory
-GOFILES=\
-    file_id.go\
-    volume_mapping.go\
-
-# gb: this is the local install
-GBROOT=../../..
-
-# gb: compile/link against local install
-GCIMPORTS+= -I $(GBROOT)/_obj
-LDIMPORTS+= -L $(GBROOT)/_obj
-
-# gb: compile/link against GOPATH entries
-GOPATHSEP=:
-ifeq ($(GOHOSTOS),windows)
-GOPATHSEP=;
-endif
-GCIMPORTS+=-I $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -I , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-LDIMPORTS+=-L $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -L , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-
-# gb: copy to local install
-$(GBROOT)/_obj/$(TARG).a: _obj/$(TARG).a
-    mkdir -p $(dir $@); cp -f $< $@
-
-package: $(GBROOT)/_obj/$(TARG).a
-
-include $(GOROOT)/src/Make.pkg
-
-# gb: local dependencies
-_obj/$(TARG).a: $(GBROOT)/_obj/storage.a
-_obj/$(TARG).a: $(GBROOT)/_obj/util.a
src/pkg/directory/file_id.go:
@@ -3,10 +3,10 @@ package directory
 import (
     "encoding/hex"
     "log"
-    "storage"
+    "pkg/storage"
     "strconv"
     "strings"
-    "util"
+    "pkg/util"
 )
 
 type FileId struct {
@@ -25,7 +25,7 @@ func ParseFileId(fid string) *FileId {
         return nil
     }
     vid_string, key_hash_string := a[0], a[1]
-    vid, _ := strconv.Atoui64(vid_string)
+    vid, _ := strconv.ParseUint(vid_string, 10, 64)
     key, hash := storage.ParseKeyHash(key_hash_string)
     return &FileId{VolumeId: uint32(vid), Key: key, Hashcode: hash}
 }
@@ -36,5 +36,5 @@ func (n *FileId) String() string {
     nonzero_index := 0
     for ; bytes[nonzero_index] == 0; nonzero_index++ {
     }
-    return strconv.Uitoa64(uint64(n.VolumeId)) + "," + hex.EncodeToString(bytes[nonzero_index:])
+    return strconv.FormatUint(uint64(n.VolumeId), 10) + "," + hex.EncodeToString(bytes[nonzero_index:])
 }
src/pkg/directory/volume_mapping.go:
@@ -1,12 +1,13 @@
 package directory
 
 import (
-    "gob"
+    "encoding/gob"
+    "errors"
+    "log"
+    "math/rand"
     "os"
    "path"
-    "rand"
-    "log"
-    "storage"
+    "pkg/storage"
     "strconv"
     "sync"
 )
@@ -64,11 +65,11 @@ func NewMapper(dirname string, filename string, volumeSizeLimit uint64) (m *Mapper) {
     }
     return
 }
-func (m *Mapper) PickForWrite() (string, MachineInfo, os.Error) {
+func (m *Mapper) PickForWrite() (string, MachineInfo, error) {
     len_writers := len(m.Writers)
     if len_writers <= 0 {
         log.Println("No more writable volumes!")
-        return "", m.Machines[rand.Intn(len(m.Machines))].Server, os.NewError("No more writable volumes!")
+        return "", m.Machines[rand.Intn(len(m.Machines))].Server, errors.New("No more writable volumes!")
     }
     vid := m.Writers[rand.Intn(len_writers)]
     machine_id := m.vid2machineId[vid]
@@ -76,7 +77,7 @@ func (m *Mapper) PickForWrite() (string, MachineInfo, os.Error) {
         machine := m.Machines[machine_id-1]
         return NewFileId(vid, m.NextFileId(), rand.Uint32()).String(), machine.Server, nil
     }
-    return "", m.Machines[rand.Intn(len(m.Machines))].Server, os.NewError("Strangely vid " + strconv.Uitoa64(uint64(vid)) + " is on no machine!")
+    return "", m.Machines[rand.Intn(len(m.Machines))].Server, errors.New("Strangely vid " + strconv.FormatUint(uint64(vid), 10) + " is on no machine!")
 }
 func (m *Mapper) NextFileId() uint64 {
     if m.fileIdCounter <= 0 {
@@ -87,10 +88,10 @@ func (m *Mapper) NextFileId() uint64 {
     m.fileIdCounter--
     return m.FileIdSequence - m.fileIdCounter
 }
-func (m *Mapper) Get(vid uint32) (*Machine, os.Error) {
+func (m *Mapper) Get(vid uint32) (*Machine, error) {
     machineId := m.vid2machineId[vid]
     if machineId <= 0 {
-        return nil, os.NewError("invalid volume id " + strconv.Uitob64(uint64(vid), 10))
+        return nil, errors.New("invalid volume id " + strconv.FormatUint(uint64(vid), 10))
     }
     return m.Machines[machineId-1], nil
 }
src/pkg/storage/Makefile (gb-generated, deleted):
@@ -1,37 +0,0 @@
-# Makefile generated by gb: http://go-gb.googlecode.com
-# gb provides configuration-free building and distributing
-
-include $(GOROOT)/src/Make.inc
-
-TARG=storage
-GOFILES=\
-    needle.go\
-    needle_map.go\
-    store.go\
-    volume.go\
-
-# gb: this is the local install
-GBROOT=../../..
-
-# gb: compile/link against local install
-GCIMPORTS+= -I $(GBROOT)/_obj
-LDIMPORTS+= -L $(GBROOT)/_obj
-
-# gb: compile/link against GOPATH entries
-GOPATHSEP=:
-ifeq ($(GOHOSTOS),windows)
-GOPATHSEP=;
-endif
-GCIMPORTS+=-I $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -I , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-LDIMPORTS+=-L $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -L , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-
-# gb: copy to local install
-$(GBROOT)/_obj/$(TARG).a: _obj/$(TARG).a
-    mkdir -p $(dir $@); cp -f $< $@
-
-package: $(GBROOT)/_obj/$(TARG).a
-
-include $(GOROOT)/src/Make.pkg
-
-# gb: local dependencies
-_obj/$(TARG).a: $(GBROOT)/_obj/util.a
src/pkg/storage/needle.go:
@@ -4,11 +4,11 @@ import (
     "encoding/hex"
     "io"
     "io/ioutil"
-    "http"
     "log"
+    "net/http"
     "os"
     "strings"
-    . "util"
+    "pkg/util"
 )
 
 type Needle struct {
@@ -21,6 +21,7 @@ type Needle struct {
 }
 
 func NewNeedle(r *http.Request) (n *Needle) {
+
     n = new(Needle)
     form, fe := r.MultipartReader()
     if fe != nil {
@@ -49,27 +50,27 @@ func (n *Needle) ParsePath(fid string) {
     }
     n.Key, n.Cookie = ParseKeyHash(fid)
 }
-func (n *Needle) Append(w io.Writer) (uint32){
+func (n *Needle) Append(w io.Writer) uint32 {
     header := make([]byte, 16)
-    Uint32toBytes(header[0:4], n.Cookie)
-    Uint64toBytes(header[4:12], n.Key)
+    util.Uint32toBytes(header[0:4], n.Cookie)
+    util.Uint64toBytes(header[4:12], n.Key)
     n.Size = uint32(len(n.Data))
-    Uint32toBytes(header[12:16], n.Size)
+    util.Uint32toBytes(header[12:16], n.Size)
     w.Write(header)
     w.Write(n.Data)
     rest := 8 - ((n.Size + 16 + 4) % 8)
-    Uint32toBytes(header[0:4], uint32(n.Checksum))
+    util.Uint32toBytes(header[0:4], uint32(n.Checksum))
     w.Write(header[0 : rest+4])
     return n.Size
 }
-func (n *Needle) Read(r io.Reader, size uint32)(int, os.Error) {
+func (n *Needle) Read(r io.Reader, size uint32) (int, error) {
     bytes := make([]byte, size+16+4)
     ret, e := r.Read(bytes)
-    n.Cookie = BytesToUint32(bytes[0:4])
-    n.Key = BytesToUint64(bytes[4:12])
-    n.Size = BytesToUint32(bytes[12:16])
+    n.Cookie = util.BytesToUint32(bytes[0:4])
+    n.Key = util.BytesToUint64(bytes[4:12])
+    n.Size = util.BytesToUint32(bytes[12:16])
     n.Data = bytes[16 : 16+size]
-    n.Checksum = int32(BytesToUint32(bytes[16+size : 16+size+4]))
+    n.Checksum = int32(util.BytesToUint32(bytes[16+size : 16+size+4]))
     return ret, e
 }
 func ReadNeedle(r *os.File) (*Needle, uint32) {
@@ -79,9 +80,9 @@ func ReadNeedle(r *os.File) (*Needle, uint32) {
     if count <= 0 || e != nil {
         return nil, 0
     }
-    n.Cookie = BytesToUint32(bytes[0:4])
-    n.Key = BytesToUint64(bytes[4:12])
-    n.Size = BytesToUint32(bytes[12:16])
+    n.Cookie = util.BytesToUint32(bytes[0:4])
+    n.Key = util.BytesToUint64(bytes[4:12])
+    n.Size = util.BytesToUint32(bytes[12:16])
     rest := 8 - ((n.Size + 16 + 4) % 8)
     r.Seek(int64(n.Size+4+rest), 1)
     return n, 16 + n.Size + 4 + rest
@@ -93,7 +94,7 @@ func ParseKeyHash(key_hash_string string)(uint64,uint32) {
         log.Println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
         return 0, 0
     }
-    key := BytesToUint64(key_hash_bytes[0 : key_hash_len-4])
-    hash := BytesToUint32(key_hash_bytes[key_hash_len-4 : key_hash_len])
+    key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])
+    hash := util.BytesToUint32(key_hash_bytes[key_hash_len-4 : key_hash_len])
     return key, hash
 }
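The Append and ReadNeedle hunks above share one piece of record arithmetic: a 16-byte header (4-byte cookie, 8-byte key, 4-byte size), the data, a 4-byte checksum, and padding that brings the total to an 8-byte boundary; that alignment is why fix_volume_index.go stores offset/8 in the index and volume.go seeks to offset*8 when reading. The following self-contained sketch was written for this summary and is not part of the commit; paddedNeedleLength is a hypothetical name, but the expression inside it is copied from the hunks.

    // Sketch only (not from the commit): the needle record arithmetic used in
    // needle.go and fix_volume_index.go.
    package main

    import "fmt"

    // A stored needle is a 16-byte header (cookie 4 + key 8 + size 4), the data,
    // a 4-byte checksum, then padding; "rest" is the same expression Append and
    // ReadNeedle use, so the total always lands on an 8-byte boundary.
    func paddedNeedleLength(dataSize uint32) uint32 {
        rest := 8 - ((dataSize + 16 + 4) % 8)
        return 16 + dataSize + 4 + rest // the length ReadNeedle reports
    }

    func main() {
        for _, size := range []uint32{1, 5, 100} {
            total := paddedNeedleLength(size)
            fmt.Println(size, "data bytes ->", total, "bytes on disk; 8-byte aligned:", total%8 == 0)
        }
    }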
src/pkg/storage/needle_map.go:
@@ -3,7 +3,7 @@ package storage
 import (
     "log"
     "os"
-    . "util"
+    "pkg/util"
 )
 
 type NeedleValue struct {
@@ -36,24 +36,30 @@ func LoadNeedleMap(file *os.File) *NeedleMap {
     count, e := nm.indexFile.Read(bytes)
     if count > 0 {
         fstat, _ := file.Stat()
-        log.Println("Loading index file", fstat.Name, "size", fstat.Size)
+        log.Println("Loading index file", fstat.Name(), "size", fstat.Size())
     }
     for count > 0 && e == nil {
         for i := 0; i < count; i += 16 {
-            key := BytesToUint64(bytes[i : i+8])
-            offset := BytesToUint32(bytes[i+8 : i+12])
-            size := BytesToUint32(bytes[i+12 : i+16])
-            nm.m[key] = &NeedleValue{Offset: offset, Size: size}, offset > 0
+            key := util.BytesToUint64(bytes[i : i+8])
+            offset := util.BytesToUint32(bytes[i+8 : i+12])
+            size := util.BytesToUint32(bytes[i+12 : i+16])
+            if offset > 0 {
+                nm.m[key] = &NeedleValue{Offset: offset, Size: size}
+            } else {
+                delete(nm.m, key)
+            }
         }
+
         count, e = nm.indexFile.Read(bytes)
     }
     return nm
 }
-func (nm *NeedleMap) Put(key uint64, offset uint32, size uint32) (int, os.Error) {
+func (nm *NeedleMap) Put(key uint64, offset uint32, size uint32) (int, error) {
     nm.m[key] = &NeedleValue{Offset: offset, Size: size}
-    Uint64toBytes(nm.bytes[0:8], key)
-    Uint32toBytes(nm.bytes[8:12], offset)
-    Uint32toBytes(nm.bytes[12:16], size)
+    util.Uint64toBytes(nm.bytes[0:8], key)
+    util.Uint32toBytes(nm.bytes[8:12], offset)
+    util.Uint32toBytes(nm.bytes[12:16], size)
     return nm.indexFile.Write(nm.bytes)
 }
 func (nm *NeedleMap) Get(key uint64) (element *NeedleValue, ok bool) {
@@ -61,10 +67,10 @@ func (nm *NeedleMap) Get(key uint64) (element *NeedleValue, ok bool) {
     return
 }
 func (nm *NeedleMap) Delete(key uint64) {
-    nm.m[key] = nil, false
-    Uint64toBytes(nm.bytes[0:8], key)
-    Uint32toBytes(nm.bytes[8:12], 0)
-    Uint32toBytes(nm.bytes[12:16], 0)
+    delete(nm.m, key)
+    util.Uint64toBytes(nm.bytes[0:8], key)
+    util.Uint32toBytes(nm.bytes[8:12], 0)
+    util.Uint32toBytes(nm.bytes[12:16], 0)
     nm.indexFile.Write(nm.bytes)
 }
 func (nm *NeedleMap) Close() {
src/pkg/storage/store.go:
@@ -1,13 +1,13 @@
 package storage
 
 import (
+    "encoding/json"
+    "errors"
     "log"
-    "json"
-    "os"
-    "strings"
+    "net/url"
     "strconv"
-    "url"
-    "util"
+    "strings"
+    "pkg/util"
 )
 
 type Store struct {
@@ -30,24 +30,24 @@ func NewStore(port int, publicUrl, dirname string, volumeListString string) (s *Store) {
     log.Println("Store started on dir:", dirname, "with", len(s.volumes), "volumes")
     return
 }
-func (s *Store) AddVolume(volumeListString string) os.Error{
+func (s *Store) AddVolume(volumeListString string) error {
     for _, range_string := range strings.Split(volumeListString, ",") {
         if strings.Index(range_string, "-") < 0 {
             id_string := range_string
-            id, err := strconv.Atoui64(id_string)
+            id, err := strconv.ParseUint(id_string, 10, 64)
             if err != nil {
-                return os.NewError("Volume Id " + id_string+ " is not a valid unsigned integer!")
+                return errors.New("Volume Id " + id_string + " is not a valid unsigned integer!")
             }
             s.addVolume(id)
         } else {
             pair := strings.Split(range_string, "-")
-            start, start_err := strconv.Atoui64(pair[0])
+            start, start_err := strconv.ParseUint(pair[0], 10, 64)
             if start_err != nil {
-                return os.NewError("Volume Start Id" + pair[0] + " is not a valid unsigned integer!")
+                return errors.New("Volume Start Id" + pair[0] + " is not a valid unsigned integer!")
             }
-            end, end_err := strconv.Atoui64(pair[1])
+            end, end_err := strconv.ParseUint(pair[1], 10, 64)
             if end_err != nil {
-                return os.NewError("Volume End Id" + pair[1] + " is not a valid unsigned integer!")
+                return errors.New("Volume End Id" + pair[1] + " is not a valid unsigned integer!")
             }
             for id := start; id <= end; id++ {
                 s.addVolume(id)
@@ -56,9 +56,9 @@ func (s *Store) AddVolume(volumeListString string) os.Error{
     }
     return nil
 }
-func (s *Store) addVolume(vid uint64) os.Error{
+func (s *Store) addVolume(vid uint64) error {
     if s.volumes[vid] != nil {
-        return os.NewError("Volume Id "+strconv.Uitoa64(vid)+" already exists!")
+        return errors.New("Volume Id " + strconv.FormatUint(vid, 10) + " already exists!")
     }
     s.volumes[vid] = NewVolume(s.dir, uint32(vid))
     return nil
@@ -97,6 +97,6 @@ func (s *Store) Write(i uint64, n *Needle) uint32 {
 func (s *Store) Delete(i uint64, n *Needle) uint32 {
     return s.volumes[i].delete(n)
 }
-func (s *Store) Read(i uint64, n *Needle) (int, os.Error) {
+func (s *Store) Read(i uint64, n *Needle) (int, error) {
     return s.volumes[i].read(n)
 }
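The volume list format that AddVolume above parses (and that the weedvolume -volumes flag documents as "comma-separated list of volume ids or range of ids", e.g. "0,1-3,4") can be exercised end to end with a small standalone sketch. parseVolumeList is a hypothetical helper written for this summary, not code from the commit; it mirrors the single-id and dash-range branches of AddVolume using the same Go 1 strconv calls.

    // Sketch only: expanding a volume list such as "0,1-3,4" into ids.
    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func parseVolumeList(volumeListString string) ([]uint64, error) {
        var ids []uint64
        for _, rangeString := range strings.Split(volumeListString, ",") {
            if !strings.Contains(rangeString, "-") {
                // single id, e.g. "0" or "4"
                id, err := strconv.ParseUint(rangeString, 10, 64)
                if err != nil {
                    return nil, fmt.Errorf("volume id %q is not a valid unsigned integer", rangeString)
                }
                ids = append(ids, id)
                continue
            }
            // dash range, e.g. "1-3"
            pair := strings.Split(rangeString, "-")
            start, err := strconv.ParseUint(pair[0], 10, 64)
            if err != nil {
                return nil, fmt.Errorf("volume start id %q is not a valid unsigned integer", pair[0])
            }
            end, err := strconv.ParseUint(pair[1], 10, 64)
            if err != nil {
                return nil, fmt.Errorf("volume end id %q is not a valid unsigned integer", pair[1])
            }
            for id := start; id <= end; id++ {
                ids = append(ids, id)
            }
        }
        return ids, nil
    }

    func main() {
        ids, _ := parseVolumeList("0,1-3,4")
        fmt.Println(ids) // [0 1 2 3 4]
    }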
src/pkg/storage/volume.go:
@@ -1,11 +1,12 @@
 package storage
 
 import (
+    "io"
+    "log"
     "os"
     "path"
     "strconv"
     "sync"
-    "log"
 )
 
 const (
@@ -22,9 +23,9 @@ type Volume struct {
 }
 
 func NewVolume(dirname string, id uint32) (v *Volume) {
-    var e os.Error
+    var e error
     v = &Volume{dir: dirname, Id: id}
-    fileName := strconv.Uitoa64(uint64(v.Id))
+    fileName := strconv.FormatUint(uint64(v.Id), 10)
     v.dataFile, e = os.OpenFile(path.Join(v.dir, fileName+".dat"), os.O_RDWR|os.O_CREATE, 0644)
     if e != nil {
         log.Fatalf("New Volume [ERROR] %s\n", e)
@@ -41,7 +42,7 @@ func NewVolume(dirname string, id uint32) (v *Volume) {
 func (v *Volume) Size() int64 {
     stat, e := v.dataFile.Stat()
     if e == nil {
-        return stat.Size
+        return stat.Size()
     }
     return -1
 }
@@ -51,7 +52,7 @@ func (v *Volume) Close() {
 }
 func (v *Volume) maybeWriteSuperBlock() {
     stat, _ := v.dataFile.Stat()
-    if stat.Size == 0 {
+    if stat.Size() == 0 {
         header := make([]byte, SuperBlockSize)
         header[0] = 1
         v.dataFile.Write(header)
@@ -82,7 +83,7 @@ func (v *Volume) delete(n *Needle) uint32 {
     }
     return 0
 }
-func (v *Volume) read(n *Needle) (int, os.Error) {
+func (v *Volume) read(n *Needle) (int, error) {
     v.accessLock.Lock()
     defer v.accessLock.Unlock()
     nv, ok := v.nm.Get(n.Key)
@@ -90,5 +91,5 @@ func (v *Volume) read(n *Needle) (int, os.Error) {
         v.dataFile.Seek(int64(nv.Offset)*8, 0)
         return n.Read(v.dataFile, nv.Size)
     }
-    return -1, os.EOF
+    return -1, io.EOF
 }
src/pkg/util/Makefile (gb-generated, deleted):
@@ -1,32 +0,0 @@
-# Makefile generated by gb: http://go-gb.googlecode.com
-# gb provides configuration-free building and distributing
-
-include $(GOROOT)/src/Make.inc
-
-TARG=util
-GOFILES=\
-    bytes.go\
-    post.go\
-
-# gb: this is the local install
-GBROOT=../../..
-
-# gb: compile/link against local install
-GCIMPORTS+= -I $(GBROOT)/_obj
-LDIMPORTS+= -L $(GBROOT)/_obj
-
-# gb: compile/link against GOPATH entries
-GOPATHSEP=:
-ifeq ($(GOHOSTOS),windows)
-GOPATHSEP=;
-endif
-GCIMPORTS+=-I $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -I , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-LDIMPORTS+=-L $(subst $(GOPATHSEP),/pkg/$(GOOS)_$(GOARCH) -L , $(GOPATH))/pkg/$(GOOS)_$(GOARCH)
-
-# gb: copy to local install
-$(GBROOT)/_obj/$(TARG).a: _obj/$(TARG).a
-    mkdir -p $(dir $@); cp -f $< $@
-
-package: $(GBROOT)/_obj/$(TARG).a
-
-include $(GOROOT)/src/Make.pkg
src/pkg/util/post.go:
@@ -1,10 +1,10 @@
 package util
 
 import (
-    "http"
     "io/ioutil"
-    "url"
     "log"
+    "net/http"
+    "net/url"
 )
 
 func Post(url string, values url.Values) []byte {