simplify the file id format

git-svn-id: https://weed-fs.googlecode.com/svn/trunk@19 282b0af5-e82d-9cf1-ede4-77906d7719d0
This commit is contained in:
chris.lu@gmail.com 2011-12-20 09:00:01 +00:00
parent a657eec299
commit 9c6a9bf518
8 changed files with 110 additions and 51 deletions

View file

@ -8,7 +8,6 @@ import (
"http"
"json"
"log"
"rand"
"strconv"
"strings"
)
@ -23,7 +22,7 @@ var (
// dirReadHandler answers a volume lookup: given a numeric "volumeId"
// query parameter it replies, as JSON, with the server info of the
// machine currently holding that volume.
func dirReadHandler(w http.ResponseWriter, r *http.Request) {
	volumeId, _ := strconv.Atoui64(r.FormValue("volumeId"))
	// volume ids are tracked as uint32 in the mapper; narrow the parsed value
	machine := mapper.Get(uint32(volumeId))
	writeJson(w, r, machine.Server)
}
func dirWriteHandler(w http.ResponseWriter, r *http.Request) {
@ -31,9 +30,7 @@ func dirWriteHandler(w http.ResponseWriter, r *http.Request) {
writeJson(w, r, machine)
}
// dirPickHandler picks a writable location and replies with a freshly
// generated file id ("fid") plus the url of the machine to upload to.
// The mapper now encodes volume id, file key and random cookie into the
// single fid string, so no id assembly happens here anymore.
func dirPickHandler(w http.ResponseWriter, r *http.Request) {
	fid, machine := mapper.PickForWrite()
	writeJson(w, r, map[string]string{"fid": fid, "url": machine.Url})
}
func dirJoinHandler(w http.ResponseWriter, r *http.Request) {

View file

@ -0,0 +1,44 @@
package directory
import (
"encoding/hex"
"storage"
"strconv"
"strings"
)
// FileId identifies one stored file: the volume that holds it, the file
// key within that volume, and a cookie value (generated via rand.Uint32
// by the mapper) intended to make fids hard to guess.
type FileId struct {
VolumeId uint32 // id of the volume the file lives on
Key uint64 // file key within the volume
Hashcode uint32 // random cookie; mitigates brute-force lookups
}
// NewFileId assembles a FileId from its three components.
func NewFileId(VolumeId uint32, Key uint64, Hashcode uint32) *FileId {
	fid := new(FileId)
	fid.VolumeId, fid.Key, fid.Hashcode = VolumeId, Key, Hashcode
	return fid
}
// ParseFileId parses a fid of the form "<volumeId>,<hex(key bytes + 4 hash bytes)>"
// as produced by FileId.String. It returns nil for a malformed fid:
// wrong number of comma-separated parts, invalid hex, or a key/hash part
// too short to hold the 4-byte hashcode plus at least one key byte.
func ParseFileId(path string) *FileId {
a := strings.Split(path, ",")
if len(a) != 2 {
return nil
}
vid_string, key_hash_string := a[0], a[1]
key_hash_bytes, khe := hex.DecodeString(key_hash_string)
key_hash_len := len(key_hash_bytes)
// require > 4 bytes: last 4 are the hashcode, the rest is the key
if khe != nil || key_hash_len <= 4 {
return nil
}
// parse errors are ignored; vid defaults to 0 — NOTE(review): confirm
// callers treat volume 0 as invalid
vid, _ := strconv.Atoui64(vid_string)
key := storage.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])
hash := storage.BytesToUint32(key_hash_bytes[key_hash_len-4 : key_hash_len])
return &FileId{VolumeId: uint32(vid), Key: key, Hashcode: hash}
}
// String encodes the FileId as "<volumeId>,<hex>", where hex is the
// 8-byte big-endian key followed by the 4-byte hashcode, with leading
// zero bytes of the key stripped to keep fids short.
func (n *FileId) String() string {
	bytes := make([]byte, 12)
	storage.Uint64toBytes(bytes[0:8], n.Key)
	storage.Uint32toBytes(bytes[8:12], n.Hashcode)
	// Strip leading zero bytes of the key only (indexes 0..6), keeping at
	// least one key byte. The unbounded original scan ran off the end of
	// the slice (panic) when Key and Hashcode were both zero, and for
	// Key == 0 stripped into the hash bytes, producing a fid that
	// ParseFileId rejects (key/hash part <= 4 bytes).
	nonzero_index := 0
	for ; nonzero_index < 7 && bytes[nonzero_index] == 0; nonzero_index++ {
	}
	return strconv.Uitoa64(uint64(n.VolumeId)) + "," + hex.EncodeToString(bytes[nonzero_index:])
}

View file

@ -0,0 +1,16 @@
package directory
import (
	"log"
	"testing"
)
func TestSerialDeserialization(t *testing.T) {
f1 := &FileId{VolumeId: 345, Key:8698, Hashcode: 23849095}
log.Println("vid", f1.VolumeId, "key", f1.Key, "hash", f1.Hashcode)
f2 := ParseFileId(t.String())
log.Println("vvid", f2.VolumeId, "vkey", f2.Key, "vhash", f2.Hashcode)
t.
}

View file

@ -32,10 +32,10 @@ type Mapper struct {
lock sync.Mutex
Machines []*Machine
vid2machineId map[uint64]int
vid2machineId map[uint32]int
Writers []int // transient array of Writers volume id
GlobalVolumeSequence uint64
GlobalVolumeSequence uint32
FileIdSequence uint64
fileIdCounter uint64
@ -49,7 +49,7 @@ func NewMapper(dirname string, filename string, capacity int) (m *Mapper) {
m = &Mapper{dir: dirname, fileName: filename, capacity: capacity}
log.Println("Loading volume id to maching mapping:", path.Join(m.dir, m.fileName+".map"))
dataFile, e := os.OpenFile(path.Join(m.dir, m.fileName+".map"), os.O_RDONLY, 0644)
m.vid2machineId = make(map[uint64]int)
m.vid2machineId = make(map[uint32]int)
m.Writers = *new([]int)
if e != nil {
log.Println("Mapping File Read", e)
@ -86,10 +86,10 @@ func NewMapper(dirname string, filename string, capacity int) (m *Mapper) {
}
return
}
func (m *Mapper) PickForWrite() (vid uint64, server MachineInfo) {
func (m *Mapper) PickForWrite() (string, MachineInfo) {
machine := m.Machines[m.Writers[rand.Intn(len(m.Writers))]]
vid = machine.Volumes[rand.Intn(len(machine.Volumes))].Id
return vid, machine.Server
vid := machine.Volumes[rand.Intn(len(machine.Volumes))].Id
return NewFileId(vid,m.NextFileId(),rand.Uint32()).String(), machine.Server
}
func (m *Mapper) NextFileId() uint64 {
if m.fileIdCounter <= 0 {
@ -99,10 +99,10 @@ func (m *Mapper) NextFileId() uint64 {
m.fileIdCounter--
return m.FileIdSequence - m.fileIdCounter
}
func (m *Mapper) Get(vid uint64) *Machine {
func (m *Mapper) Get(vid uint32) *Machine {
return m.Machines[m.vid2machineId[vid]]
}
func (m *Mapper) Add(machine Machine) []uint64 {
func (m *Mapper) Add(machine Machine) []uint32 {
log.Println("Adding existing", machine.Server.Url, len(machine.Volumes), "volumes to dir", len(m.Machines))
log.Println("Adding new ", machine.Server.Url, machine.Capacity-len(machine.Volumes), "volumes to dir", len(m.Machines))
//check existing machine, linearly
@ -121,7 +121,7 @@ func (m *Mapper) Add(machine Machine) []uint64 {
}
//generate new volumes
vids := new([]uint64)
vids := new([]uint32)
for vid, i := m.GlobalVolumeSequence, len(machine.Volumes); i < machine.Capacity; i, vid = i+1, vid+1 {
newVolume := storage.VolumeInfo{Id: vid, Size: 0}
machine.Volumes = append(machine.Volumes, newVolume)

View file

@ -5,17 +5,16 @@ import (
"io/ioutil"
"http"
"log"
"strconv"
"strings"
)
type Needle struct {
Cookie uint32 "random number to mitigate brute force lookups"
Key uint64 "file id"
Size uint32 "Data size"
Data []byte "The actual file data"
Checksum int32 "CRC32 to check integrity"
Padding []byte "Aligned to 8 bytes"
Cookie uint32 "random number to mitigate brute force lookups"
Key uint64 "file id"
Size uint32 "Data size"
Data []byte "The actual file data"
Checksum int32 "CRC32 to check integrity"
Padding []byte "Aligned to 8 bytes"
}
func NewNeedle(r *http.Request) (n *Needle) {
@ -33,30 +32,31 @@ func NewNeedle(r *http.Request) (n *Needle) {
return
}
// ParsePath decodes a 16-byte request path into the needle's cookie and
// key: bytes 4-12 hold the key, bytes 12-16 the cookie (matching the
// header layout written by Append). Paths of any other length are
// silently ignored, leaving the needle unchanged.
// NOTE(review): the path's raw bytes are used directly, not hex-decoded —
// confirm this agrees with the fid format produced by the directory.
func (n *Needle) ParsePath(path string) {
	if len(path) != 16 {
		return
	}
	bytes := []byte(path)
	n.Cookie = BytesToUint32(bytes[12:16])
	n.Key = BytesToUint64(bytes[4:12])
}
// Append writes the needle to w: a 16-byte header (cookie, key, size),
// the data, then the 4-byte checksum plus padding so the whole record is
// 8-byte aligned.
// NOTE(review): when (Size+20)%8 == 0, rest is 8 rather than 0, so 12
// trailing bytes are written instead of 4 — confirm readers expect
// padding to never be zero-length.
func (n *Needle) Append(w io.Writer) {
	header := make([]byte, 16)
	Uint32toBytes(header[0:4], n.Cookie)
	Uint64toBytes(header[4:12], n.Key)
	n.Size = uint32(len(n.Data))
	Uint32toBytes(header[12:16], n.Size)
	w.Write(header)
	w.Write(n.Data)
	rest := 8 - ((n.Size + 16 + 4) % 8)
	// header is reused as scratch space for the checksum bytes
	Uint32toBytes(header[0:4], uint32(n.Checksum))
	w.Write(header[0 : rest+4])
}
// Read fills the needle from r, expecting the layout written by Append:
// 16-byte header, `size` data bytes, and a 4-byte checksum.
// NOTE(review): r.Read may return fewer bytes than requested and the
// error is discarded — consider io.ReadFull at the call sites.
func (n *Needle) Read(r io.Reader, size uint32) {
	bytes := make([]byte, size+16+4)
	r.Read(bytes)
	n.Cookie = BytesToUint32(bytes[0:4])
	n.Key = BytesToUint64(bytes[4:12])
	n.Size = BytesToUint32(bytes[12:16])
	// Data aliases the read buffer; no copy is made
	n.Data = bytes[16 : 16+size]
	n.Checksum = int32(BytesToUint32(bytes[16+size : 16+size+4]))
}

View file

@ -17,7 +17,7 @@ type Store struct {
PublicServer string
}
// VolumeInfo is the per-volume stat a store reports to the master when
// joining (see Store.Join).
type VolumeInfo struct {
	Id   uint32 // volume id (narrowed from uint64 in this revision)
	Size int64  // current data-file size in bytes
}
@ -35,7 +35,7 @@ func NewStore(port int, publicServer, dirname string, chunkSize, capacity int) (
if err != nil {
continue
}
s.volumes[id] = NewVolume(s.dir, id)
s.volumes[id] = NewVolume(s.dir, uint32(id))
}
log.Println("Store started on dir:", dirname, "with", len(s.volumes), "existing volumes")
log.Println("Expected capacity=", s.capacity, "volumes")
@ -46,7 +46,7 @@ func (s *Store) Join(mserver string) {
stats := new([]*VolumeInfo)
for k, v := range s.volumes {
s := new(VolumeInfo)
s.Id, s.Size = k, v.Size()
s.Id, s.Size = uint32(k), v.Size()
*stats = append(*stats, s)
}
bytes, _ := json.Marshal(stats)
@ -63,7 +63,7 @@ func (s *Store) Join(mserver string) {
e := json.Unmarshal(retString, newVids)
if e == nil {
for _, vid := range *newVids {
s.volumes[uint64(vid)] = NewVolume(s.dir, uint64(vid))
s.volumes[uint64(vid)] = NewVolume(s.dir, uint32(vid))
log.Println("Adding volume", vid)
}
}

View file

@ -7,30 +7,32 @@ import (
"log"
)
// BytesToUint64 interprets b as a big-endian unsigned integer of up to
// 8 bytes. Variable-length input lets callers strip leading zero bytes
// (see directory.ParseFileId). An empty slice yields 0; without that
// guard, length-1 underflows the uint loop bound and reads b[0] out of
// range.
func BytesToUint64(b []byte) (v uint64) {
	length := uint(len(b))
	if length == 0 {
		return
	}
	for i := uint(0); i < length-1; i++ {
		v += uint64(b[i])
		v <<= 8
	}
	v += uint64(b[length-1])
	return
}
// BytesToUint32 interprets b as a big-endian unsigned integer of up to
// 4 bytes. An empty slice yields 0; without that guard, length-1
// underflows the uint loop bound and reads b[0] out of range.
func BytesToUint32(b []byte) (v uint32) {
	length := uint(len(b))
	if length == 0 {
		return
	}
	for i := uint(0); i < length-1; i++ {
		v += uint32(b[i])
		v <<= 8
	}
	v += uint32(b[length-1])
	return
}
// Uint64toBytes writes v into b[0:8] in big-endian order (most
// significant byte first). b must be at least 8 bytes long.
func Uint64toBytes(b []byte, v uint64) {
	for i := uint(0); i < 8; i++ {
		b[7-i] = byte(v >> (i * 8))
	}
}
// Uint32toBytes writes v into b[0:4] in big-endian order (most
// significant byte first). b must be at least 4 bytes long.
func Uint32toBytes(b []byte, v uint32) {
	for i := uint(0); i < 4; i++ {
		b[3-i] = byte(v >> (i * 8))
	}
}

View file

@ -8,7 +8,7 @@ import (
)
type Volume struct {
Id uint64
Id uint32
dir string
dataFile, indexFile *os.File
nm *NeedleMap
@ -16,10 +16,10 @@ type Volume struct {
accessChannel chan int
}
func NewVolume(dirname string, id uint64) (v *Volume) {
func NewVolume(dirname string, id uint32) (v *Volume) {
var e os.Error
v = &Volume{dir:dirname,Id:id, nm:NewNeedleMap()}
fileName := strconv.Uitoa64(v.Id)
fileName := strconv.Uitoa64(uint64(v.Id))
v.dataFile, e = os.OpenFile(path.Join(v.dir,fileName+".dat"), os.O_RDWR|os.O_CREATE, 0644)
if e != nil {
log.Fatalf("New Volume [ERROR] %s\n", e)