Merge pull request #284 from thinxer/binary

replace util/bytes.go with binary.BigEndian (again)
Chris Lu 2016-04-09 01:16:13 -07:00
commit 3523ad5239
12 changed files with 123 additions and 117 deletions
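The change is mechanical: every call into the hand-rolled big-endian helpers in go/util/bytes.go is replaced by the equivalent encoding/binary call from the standard library, and the helper file is deleted. A minimal sketch of the equivalence (not part of the diff, illustrative value only):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, 0x4ed4c8116e41) // was util.Uint64toBytes(b, v)
	fmt.Printf("% x\n", b)                        // 00 00 4e d4 c8 11 6e 41
	fmt.Println(binary.BigEndian.Uint64(b))       // was util.BytesToUint64(b)
}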

View file

@ -1,6 +1,7 @@
package operation
import (
"encoding/binary"
"encoding/json"
"fmt"
"net/url"
@ -42,9 +43,9 @@ func GetVolumeIdxEntries(server string, vid string, eachEntryFn func(key uint64,
values.Add("volume", vid)
line := make([]byte, 16)
err := util.GetBufferStream("http://"+server+"/admin/sync/index", values, line, func(bytes []byte) {
key := util.BytesToUint64(bytes[:8])
offset := util.BytesToUint32(bytes[8:12])
size := util.BytesToUint32(bytes[12:16])
key := binary.BigEndian.Uint64(bytes[:8])
offset := binary.BigEndian.Uint32(bytes[8:12])
size := binary.BigEndian.Uint32(bytes[12:16])
eachEntryFn(key, offset, size)
})
if err != nil {

View file

@ -1,12 +1,12 @@
package storage
import (
"encoding/binary"
"log"
"os"
"testing"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/util"
)
func TestMemoryUsage(t *testing.T) {
@ -29,9 +29,9 @@ func LoadNewNeedleMap(file *os.File) CompactMap {
}
for count > 0 && e == nil {
for i := 0; i < count; i += 16 {
key := util.BytesToUint64(bytes[i : i+8])
offset := util.BytesToUint32(bytes[i+8 : i+12])
size := util.BytesToUint32(bytes[i+12 : i+16])
key := binary.BigEndian.Uint64(bytes[i : i+8])
offset := binary.BigEndian.Uint32(bytes[i+8 : i+12])
size := binary.BigEndian.Uint32(bytes[i+12 : i+16])
if offset > 0 {
m.Set(Key(key), offset, size)
} else {

View file

@ -1,10 +1,10 @@
package storage
import (
"encoding/binary"
"fmt"
"github.com/klauspost/crc32"
"github.com/chrislusf/seaweedfs/go/util"
"github.com/klauspost/crc32"
)
var table = crc32.MakeTable(crc32.Castagnoli)
@ -25,6 +25,6 @@ func (c CRC) Value() uint32 {
func (n *Needle) Etag() string {
bits := make([]byte, 4)
util.Uint32toBytes(bits, uint32(n.Checksum))
binary.BigEndian.PutUint32(bits, uint32(n.Checksum))
return fmt.Sprintf("\"%x\"", bits)
}
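The Etag change swaps util.Uint32toBytes for binary.BigEndian.PutUint32 on the 4-byte CRC. A standalone sketch of the resulting etag format, using the standard hash/crc32 in place of the drop-in github.com/klauspost/crc32 package imported by the file (illustrative data):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	data := []byte("hello")
	sum := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	bits := make([]byte, 4)
	binary.BigEndian.PutUint32(bits, sum) // was util.Uint32toBytes(bits, sum)
	etag := fmt.Sprintf("\"%x\"", bits)   // quoted lower-case hex, as in Etag()
	fmt.Println(etag)
}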

View file

@ -1,12 +1,12 @@
package storage
import (
"encoding/binary"
"encoding/hex"
"errors"
"strings"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/util"
)
type FileId struct {
@ -34,8 +34,8 @@ func ParseFileId(fid string) (*FileId, error) {
}
func (n *FileId) String() string {
bytes := make([]byte, 12)
util.Uint64toBytes(bytes[0:8], n.Key)
util.Uint32toBytes(bytes[8:12], n.Hashcode)
binary.BigEndian.PutUint64(bytes[0:8], n.Key)
binary.BigEndian.PutUint32(bytes[8:12], n.Hashcode)
nonzero_index := 0
for ; bytes[nonzero_index] == 0; nonzero_index++ {
}
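FileId.String now writes the key and hashcode with binary.BigEndian before skipping leading zero bytes; the rest of the method is not shown in this hunk, but it presumably hex-encodes what remains, which is how a short fid suffix such as 4ed4c8116e41 comes out of key 0x4ed4 and cookie 0xc8116e41. A sketch of that assumed tail, for illustration only:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func fidSuffix(key uint64, hashcode uint32) string {
	b := make([]byte, 12)
	binary.BigEndian.PutUint64(b[0:8], key)
	binary.BigEndian.PutUint32(b[8:12], hashcode)
	i := 0
	for i < len(b)-1 && b[i] == 0 { // skip leading zero bytes, as above
		i++
	}
	return hex.EncodeToString(b[i:])
}

func main() {
	fmt.Println(fidSuffix(0x4ed4, 0xc8116e41)) // 4ed4c8116e41
}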

View file

@ -1,7 +1,6 @@
package storage
import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
@ -15,7 +14,6 @@ import (
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/images"
"github.com/chrislusf/seaweedfs/go/operation"
"github.com/chrislusf/seaweedfs/go/util"
)
const (
@ -213,16 +211,25 @@ func (n *Needle) ParsePath(fid string) (err error) {
}
func ParseKeyHash(key_hash_string string) (uint64, uint32, error) {
if len(key_hash_string)%2 == 1 {
key_hash_string = "0" + key_hash_string
}
key_hash_bytes, khe := hex.DecodeString(key_hash_string)
key_hash_len := len(key_hash_bytes)
if khe != nil || key_hash_len <= 4 {
glog.V(0).Infoln("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
key, hash, ok := parseKeyHash(key_hash_string)
if !ok {
return 0, 0, errors.New("Invalid key and hash:" + key_hash_string)
}
key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])
hash := util.BytesToUint32(key_hash_bytes[key_hash_len-4 : key_hash_len])
return key, hash, nil
}
func parseKeyHash(keyhash string) (uint64, uint32, bool) {
if len(keyhash) <= 8 || len(keyhash) > 24 {
return 0, 0, false
}
split := len(keyhash) - 8
key, err := strconv.ParseUint(keyhash[:split], 16, 64)
if err != nil {
return 0, 0, false
}
hash, err := strconv.ParseUint(keyhash[split:], 16, 32)
if err != nil {
return 0, 0, false
}
return key, uint32(hash), true
}
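ParseKeyHash no longer zero-pads odd-length input and hex-decodes it into a byte slice for the variable-length util decoders. The new parseKeyHash slices the string so that the last 8 hex characters are the 32-bit hash (cookie) and the remainder is the key, parses both with strconv.ParseUint in base 16, and bounds the length to between 9 and 24 characters (at least one key digit, at most 16). Skipping the hex.DecodeString allocation is presumably what BenchmarkParseKeyHash with ReportAllocs, added below, is meant to confirm. A small illustration with the odd-length case from the new tests:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// "ed400116e41" is the odd-length case from the tests below: the last
	// 8 characters are the cookie, the remaining 3 are the key.
	keyhash := "ed400116e41"
	split := len(keyhash) - 8
	key, _ := strconv.ParseUint(keyhash[:split], 16, 64)
	cookie, _ := strconv.ParseUint(keyhash[split:], 16, 32)
	fmt.Printf("%x %x\n", key, uint32(cookie)) // ed4 116e41
}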

View file

@ -1,12 +1,11 @@
package storage
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"sync"
"github.com/chrislusf/seaweedfs/go/util"
)
type NeedleMapType int
@ -53,16 +52,16 @@ func (nm baseNeedleMapper) IndexFileName() string {
}
func idxFileEntry(bytes []byte) (key uint64, offset uint32, size uint32) {
key = util.BytesToUint64(bytes[:8])
offset = util.BytesToUint32(bytes[8:12])
size = util.BytesToUint32(bytes[12:16])
key = binary.BigEndian.Uint64(bytes[:8])
offset = binary.BigEndian.Uint32(bytes[8:12])
size = binary.BigEndian.Uint32(bytes[12:16])
return
}
func (nm baseNeedleMapper) appendToIndexFile(key uint64, offset uint32, size uint32) error {
bytes := make([]byte, 16)
util.Uint64toBytes(bytes[0:8], key)
util.Uint32toBytes(bytes[8:12], offset)
util.Uint32toBytes(bytes[12:16], size)
binary.BigEndian.PutUint64(bytes[0:8], key)
binary.BigEndian.PutUint32(bytes[8:12], offset)
binary.BigEndian.PutUint32(bytes[12:16], size)
nm.indexFileAccessLock.Lock()
defer nm.indexFileAccessLock.Unlock()
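idxFileEntry and appendToIndexFile fix the on-disk index entry at 16 bytes: an 8-byte key, a 4-byte offset, and a 4-byte size, all big-endian. It is the same layout streamed by GetVolumeIdxEntries in the operation hunk above and loaded in the compact map test. A minimal round-trip sketch with illustrative values:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	entry := make([]byte, 16)
	binary.BigEndian.PutUint64(entry[0:8], 0x4ed4) // key
	binary.BigEndian.PutUint32(entry[8:12], 8)     // offset
	binary.BigEndian.PutUint32(entry[12:16], 2048) // size

	key := binary.BigEndian.Uint64(entry[:8])
	offset := binary.BigEndian.Uint32(entry[8:12])
	size := binary.BigEndian.Uint32(entry[12:16])
	fmt.Println(key, offset, size) // 20180 8 2048
}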

View file

@ -1,13 +1,12 @@
package storage
import (
"encoding/binary"
"fmt"
"os"
"github.com/boltdb/bolt"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/util"
)
type BoltDbNeedleMap struct {
@ -75,7 +74,7 @@ func generateBoltDbFile(dbFileName string, indexFile *os.File) error {
func (m *BoltDbNeedleMap) Get(key uint64) (element *NeedleValue, ok bool) {
bytes := make([]byte, 8)
var data []byte
util.Uint64toBytes(bytes, key)
binary.BigEndian.PutUint64(bytes, key)
err := m.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(boltdbBucket)
if bucket == nil {
@ -89,8 +88,8 @@ func (m *BoltDbNeedleMap) Get(key uint64) (element *NeedleValue, ok bool) {
if err != nil || len(data) != 8 {
return nil, false
}
offset := util.BytesToUint32(data[0:4])
size := util.BytesToUint32(data[4:8])
offset := binary.BigEndian.Uint32(data[0:4])
size := binary.BigEndian.Uint32(data[4:8])
return &NeedleValue{Key: Key(key), Offset: offset, Size: size}, true
}
@ -110,9 +109,9 @@ func (m *BoltDbNeedleMap) Put(key uint64, offset uint32, size uint32) error {
func boltDbWrite(db *bolt.DB,
key uint64, offset uint32, size uint32) error {
bytes := make([]byte, 16)
util.Uint64toBytes(bytes[0:8], key)
util.Uint32toBytes(bytes[8:12], offset)
util.Uint32toBytes(bytes[12:16], size)
binary.BigEndian.PutUint64(bytes[0:8], key)
binary.BigEndian.PutUint32(bytes[8:12], offset)
binary.BigEndian.PutUint32(bytes[12:16], size)
return db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(boltdbBucket)
if err != nil {
@ -128,7 +127,7 @@ func boltDbWrite(db *bolt.DB,
}
func boltDbDelete(db *bolt.DB, key uint64) error {
bytes := make([]byte, 8)
util.Uint64toBytes(bytes, key)
binary.BigEndian.PutUint64(bytes, key)
return db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(boltdbBucket)
if err != nil {

View file

@ -1,12 +1,12 @@
package storage
import (
"encoding/binary"
"fmt"
"os"
"path/filepath"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/util"
"github.com/syndtr/goleveldb/leveldb"
)
@ -72,13 +72,13 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
func (m *LevelDbNeedleMap) Get(key uint64) (element *NeedleValue, ok bool) {
bytes := make([]byte, 8)
util.Uint64toBytes(bytes, key)
binary.BigEndian.PutUint64(bytes, key)
data, err := m.db.Get(bytes, nil)
if err != nil || len(data) != 8 {
return nil, false
}
offset := util.BytesToUint32(data[0:4])
size := util.BytesToUint32(data[4:8])
offset := binary.BigEndian.Uint32(data[0:4])
size := binary.BigEndian.Uint32(data[4:8])
return &NeedleValue{Key: Key(key), Offset: offset, Size: size}, true
}
@ -98,9 +98,9 @@ func (m *LevelDbNeedleMap) Put(key uint64, offset uint32, size uint32) error {
func levelDbWrite(db *leveldb.DB,
key uint64, offset uint32, size uint32) error {
bytes := make([]byte, 16)
util.Uint64toBytes(bytes[0:8], key)
util.Uint32toBytes(bytes[8:12], offset)
util.Uint32toBytes(bytes[12:16], size)
binary.BigEndian.PutUint64(bytes[0:8], key)
binary.BigEndian.PutUint32(bytes[8:12], offset)
binary.BigEndian.PutUint32(bytes[12:16], size)
if err := db.Put(bytes[0:8], bytes[8:16], nil); err != nil {
return fmt.Errorf("failed to write leveldb: %v", err)
}
@ -108,7 +108,7 @@ func levelDbWrite(db *leveldb.DB,
}
func levelDbDelete(db *leveldb.DB, key uint64) error {
bytes := make([]byte, 8)
util.Uint64toBytes(bytes, key)
binary.BigEndian.PutUint64(bytes, key)
return db.Delete(bytes, nil)
}

View file

@ -1,13 +1,13 @@
package storage
import (
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/util"
)
const (
@ -43,11 +43,11 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
switch version {
case Version1:
header := make([]byte, NeedleHeaderSize)
util.Uint32toBytes(header[0:4], n.Cookie)
util.Uint64toBytes(header[4:12], n.Id)
binary.BigEndian.PutUint32(header[0:4], n.Cookie)
binary.BigEndian.PutUint64(header[4:12], n.Id)
n.Size = uint32(len(n.Data))
size = n.Size
util.Uint32toBytes(header[12:16], n.Size)
binary.BigEndian.PutUint32(header[12:16], n.Size)
if _, err = w.Write(header); err != nil {
return
}
@ -55,13 +55,13 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
return
}
padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
binary.BigEndian.PutUint32(header[0:NeedleChecksumSize], n.Checksum.Value())
_, err = w.Write(header[0 : NeedleChecksumSize+padding])
return
case Version2:
header := make([]byte, NeedleHeaderSize)
util.Uint32toBytes(header[0:4], n.Cookie)
util.Uint64toBytes(header[4:12], n.Id)
binary.BigEndian.PutUint32(header[0:4], n.Cookie)
binary.BigEndian.PutUint64(header[4:12], n.Id)
n.DataSize, n.NameSize, n.MimeSize = uint32(len(n.Data)), uint8(len(n.Name)), uint8(len(n.Mime))
if n.DataSize > 0 {
n.Size = 4 + n.DataSize + 1
@ -81,24 +81,24 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
n.Size = 0
}
size = n.DataSize
util.Uint32toBytes(header[12:16], n.Size)
binary.BigEndian.PutUint32(header[12:16], n.Size)
if _, err = w.Write(header); err != nil {
return
}
if n.DataSize > 0 {
util.Uint32toBytes(header[0:4], n.DataSize)
binary.BigEndian.PutUint32(header[0:4], n.DataSize)
if _, err = w.Write(header[0:4]); err != nil {
return
}
if _, err = w.Write(n.Data); err != nil {
return
}
util.Uint8toBytes(header[0:1], n.Flags)
header[0] = n.Flags
if _, err = w.Write(header[0:1]); err != nil {
return
}
if n.HasName() {
util.Uint8toBytes(header[0:1], n.NameSize)
header[0] = n.NameSize
if _, err = w.Write(header[0:1]); err != nil {
return
}
@ -107,7 +107,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
}
}
if n.HasMime() {
util.Uint8toBytes(header[0:1], n.MimeSize)
header[0] = n.MimeSize
if _, err = w.Write(header[0:1]); err != nil {
return
}
@ -116,7 +116,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
}
}
if n.HasLastModifiedDate() {
util.Uint64toBytes(header[0:8], n.LastModified)
binary.BigEndian.PutUint64(header[0:8], n.LastModified)
if _, err = w.Write(header[8-LastModifiedBytesLength : 8]); err != nil {
return
}
@ -129,7 +129,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
}
}
padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
binary.BigEndian.PutUint32(header[0:NeedleChecksumSize], n.Checksum.Value())
_, err = w.Write(header[0 : NeedleChecksumSize+padding])
return n.DataSize, err
}
@ -158,7 +158,7 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version
case Version2:
n.readNeedleDataVersion2(bytes[NeedleHeaderSize : NeedleHeaderSize+int(n.Size)])
}
checksum := util.BytesToUint32(bytes[NeedleHeaderSize+size : NeedleHeaderSize+size+NeedleChecksumSize])
checksum := binary.BigEndian.Uint32(bytes[NeedleHeaderSize+size : NeedleHeaderSize+size+NeedleChecksumSize])
newChecksum := NewCRC(n.Data)
if checksum != newChecksum.Value() {
return errors.New("CRC error! Data On Disk Corrupted")
@ -167,14 +167,14 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version
return nil
}
func (n *Needle) ParseNeedleHeader(bytes []byte) {
n.Cookie = util.BytesToUint32(bytes[0:4])
n.Id = util.BytesToUint64(bytes[4:12])
n.Size = util.BytesToUint32(bytes[12:NeedleHeaderSize])
n.Cookie = binary.BigEndian.Uint32(bytes[0:4])
n.Id = binary.BigEndian.Uint64(bytes[4:12])
n.Size = binary.BigEndian.Uint32(bytes[12:NeedleHeaderSize])
}
func (n *Needle) readNeedleDataVersion2(bytes []byte) {
index, lenBytes := 0, len(bytes)
if index < lenBytes {
n.DataSize = util.BytesToUint32(bytes[index : index+4])
n.DataSize = binary.BigEndian.Uint32(bytes[index : index+4])
index = index + 4
if int(n.DataSize)+index > lenBytes {
// this if clause is due to bug #87 and #93, fixed in v0.69
@ -199,7 +199,7 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) {
index = index + int(n.MimeSize)
}
if index < lenBytes && n.HasLastModifiedDate() {
n.LastModified = util.BytesToUint64(bytes[index : index+LastModifiedBytesLength])
n.LastModified = uint64(bytes[index])<<32 | uint64(binary.BigEndian.Uint32(bytes[index+1:index+LastModifiedBytesLength]))
index = index + LastModifiedBytesLength
}
if index < lenBytes && n.HasTtl() {
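The LastModified field stays a truncated big-endian integer: Append encodes the full uint64 into header[0:8] and writes only header[8-LastModifiedBytesLength:8], and readNeedleDataVersion2 rebuilds the value as one high byte shifted left by 32 bits plus a 4-byte Uint32, which implies LastModifiedBytesLength is 5. A round-trip sketch of that 5-byte encoding (illustrative value):

package main

import (
	"encoding/binary"
	"fmt"
)

const lastModifiedBytesLength = 5

func main() {
	ts := uint64(0x0123456789) // illustrative value that needs the fifth byte

	// write side: encode the full uint64, keep only the low 5 bytes
	header := make([]byte, 8)
	binary.BigEndian.PutUint64(header, ts)
	onDisk := header[8-lastModifiedBytesLength : 8]

	// read side: high byte shifted by 32, then the remaining 4 bytes
	got := uint64(onDisk[0])<<32 | uint64(binary.BigEndian.Uint32(onDisk[1:lastModifiedBytesLength]))
	fmt.Println(got == ts) // true
}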

go/storage/needle_test.go (new file, 45 lines added)
View file

@ -0,0 +1,45 @@
package storage
import "testing"
func TestParseKeyHash(t *testing.T) {
testcases := []struct {
KeyHash string
ID uint64
Cookie uint32
Err bool
}{
// normal
{"4ed4c8116e41", 0x4ed4, 0xc8116e41, false},
// cookie with leading zeros
{"4ed401116e41", 0x4ed4, 0x01116e41, false},
// odd length
{"ed400116e41", 0xed4, 0x00116e41, false},
// uint
{"fed4c8114ed4c811f0116e41", 0xfed4c8114ed4c811, 0xf0116e41, false},
// err: too short
{"4ed4c811", 0, 0, true},
// err: too long
{"4ed4c8114ed4c8114ed4c8111", 0, 0, true},
// err: invalid character
{"helloworld", 0, 0, true},
}
for _, tc := range testcases {
if id, cookie, err := ParseKeyHash(tc.KeyHash); err != nil && !tc.Err {
t.Fatalf("Parse %s error: %v", tc.KeyHash, err)
} else if err == nil && tc.Err {
t.Fatalf("Parse %s expected error got nil", tc.KeyHash)
} else if id != tc.ID || cookie != tc.Cookie {
t.Fatalf("Parse %s wrong result. Expected: (%d, %d) got: (%d, %d)", tc.KeyHash, tc.ID, tc.Cookie, id, cookie)
}
}
}
func BenchmarkParseKeyHash(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
ParseKeyHash("4ed44ed44ed44ed4c8116e41")
}
}

View file

@ -1,11 +1,11 @@
package storage
import (
"encoding/binary"
"fmt"
"os"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/util"
)
const (
@ -35,7 +35,7 @@ func (s *SuperBlock) Bytes() []byte {
header[0] = byte(s.version)
header[1] = s.ReplicaPlacement.Byte()
s.Ttl.ToBytes(header[2:4])
util.Uint16toBytes(header[4:6], s.CompactRevision)
binary.BigEndian.PutUint16(header[4:6], s.CompactRevision)
return header
}
@ -76,6 +76,6 @@ func ParseSuperBlock(header []byte) (superBlock SuperBlock, err error) {
err = fmt.Errorf("cannot read replica type: %s", err.Error())
}
superBlock.Ttl = LoadTTLFromBytes(header[2:4])
superBlock.CompactRevision = util.BytesToUint16(header[4:6])
superBlock.CompactRevision = binary.BigEndian.Uint16(header[4:6])
return
}
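The super block keeps its fixed layout: version in byte 0, replica placement in byte 1, TTL in bytes 2:4, and now the compact revision as a big-endian uint16 in bytes 4:6. A small sketch of that layout, under the assumption that the header is 8 bytes (the allocation is not shown in this hunk):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	header := make([]byte, 8) // assumed SuperBlockSize; not shown in the hunk
	header[0] = 2             // version
	header[1] = 0x10          // ReplicaPlacement.Byte(), illustrative
	// header[2:4] would carry Ttl.ToBytes(...)
	binary.BigEndian.PutUint16(header[4:6], 7) // CompactRevision

	fmt.Println(header[0], binary.BigEndian.Uint16(header[4:6])) // 2 7
}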

View file

@ -1,45 +0,0 @@
package util
// big endian
func BytesToUint64(b []byte) (v uint64) {
length := uint(len(b))
for i := uint(0); i < length-1; i++ {
v += uint64(b[i])
v <<= 8
}
v += uint64(b[length-1])
return
}
func BytesToUint32(b []byte) (v uint32) {
length := uint(len(b))
for i := uint(0); i < length-1; i++ {
v += uint32(b[i])
v <<= 8
}
v += uint32(b[length-1])
return
}
func BytesToUint16(b []byte) (v uint16) {
v += uint16(b[0])
v <<= 8
v += uint16(b[1])
return
}
func Uint64toBytes(b []byte, v uint64) {
for i := uint(0); i < 8; i++ {
b[7-i] = byte(v >> (i * 8))
}
}
func Uint32toBytes(b []byte, v uint32) {
for i := uint(0); i < 4; i++ {
b[3-i] = byte(v >> (i * 8))
}
}
func Uint16toBytes(b []byte, v uint16) {
b[0] = byte(v >> 8)
b[1] = byte(v)
}
func Uint8toBytes(b []byte, v uint8) {
b[0] = byte(v)
}
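The deleted helpers differ from encoding/binary in one respect worth noting: BytesToUint64 and BytesToUint32 accepted slices of any non-zero length, while binary.BigEndian.Uint64 and Uint32 panic on slices shorter than 8 and 4 bytes. That is presumably why ParseKeyHash, which used to decode a variable-width key slice, was rewritten on top of strconv.ParseUint rather than converted mechanically. A small sketch of the two paths for a short key (the helper is copied from the deleted file):

package main

import (
	"fmt"
	"strconv"
)

func bytesToUint64(b []byte) (v uint64) { // copy of the deleted util.BytesToUint64
	length := uint(len(b))
	for i := uint(0); i < length-1; i++ {
		v += uint64(b[i])
		v <<= 8
	}
	v += uint64(b[length-1])
	return
}

func main() {
	fmt.Println(bytesToUint64([]byte{0x4e, 0xd4})) // 20180: a 2-byte input was fine
	key, _ := strconv.ParseUint("4ed4", 16, 64)    // the replacement path for short hex keys
	fmt.Println(key)                               // 20180
}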