change count to uint64 to fix #109
fix https://github.com/chrislusf/weed-fs/issues/109
parent c37a20178e
commit 3ece066700
go/compress/delta_binary_pack32.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+package compress
+
+import (
+    "github.com/reducedb/encoding/cursor"
+    "github.com/reducedb/encoding/delta/bp32"
+)
+
+// Compress compresses in[]int32 to out[]int32
+func Compress32(in []int32) (out []int32, err error) {
+    out = make([]int32, len(in)*2)
+    inpos := cursor.New()
+    outpos := cursor.New()
+
+    if err = bp32.New().Compress(in, inpos, len(in), out, outpos); err != nil {
+        return nil, err
+    }
+
+    return out[:outpos.Get()], nil
+}
+
+// Uncompress uncompresses in[]int32 to out[]int32
+func Uncompress32(in []int32, buffer []int32) (out []int32, err error) {
+    out = buffer
+    inpos := cursor.New()
+    outpos := cursor.New()
+
+    if err = bp32.New().Uncompress(in, inpos, len(in), out, outpos); err != nil {
+        return nil, err
+    }
+
+    return out[:outpos.Get()], nil
+}
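For orientation, a minimal round-trip sketch of how the two new helpers might be used. The import path and the block-aligned input size are assumptions, not something this commit specifies (BP32 encodes integers in fixed-size blocks, typically 128 values):

package main

import (
    "fmt"

    "github.com/chrislusf/weed-fs/go/compress" // assumed import path for the new package
)

func main() {
    // a gently increasing sequence, sized to a whole number of BP32 blocks
    in := make([]int32, 256)
    for i := range in {
        in[i] = int32(i * 3)
    }

    packed, err := compress.Compress32(in)
    if err != nil {
        panic(err)
    }

    // the caller supplies the output buffer; it must be large enough for the decoded values
    restored, err := compress.Uncompress32(packed, make([]int32, len(in)))
    if err != nil {
        panic(err)
    }

    fmt.Printf("original=%d packed=%d restored=%d\n", len(in), len(packed), len(restored))
}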
@@ -15,13 +15,13 @@ type AssignResult struct {
     Fid       string `json:"fid,omitempty"`
     Url       string `json:"url,omitempty"`
     PublicUrl string `json:"publicUrl,omitempty"`
-    Count     int    `json:"count,omitempty"`
+    Count     uint64 `json:"count,omitempty"`
     Error     string `json:"error,omitempty"`
 }
 
-func Assign(server string, count int, replication string, collection string, ttl string) (*AssignResult, error) {
+func Assign(server string, count uint64, replication string, collection string, ttl string) (*AssignResult, error) {
     values := make(url.Values)
-    values.Add("count", strconv.Itoa(count))
+    values.Add("count", strconv.FormatUint(count, 10))
     if replication != "" {
         values.Add("replication", replication)
     }
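For orientation, a hedged usage sketch of the new Assign signature; the operation package name, master address, and replication string here are assumptions for illustration. The switch from strconv.Itoa to strconv.FormatUint follows directly from the type change: Itoa accepts only an int, so a uint64 count has to be formatted explicitly in base 10.

// ask the master to reserve 5 file ids in one call; count is now a uint64
ret, err := operation.Assign("localhost:9333", 5, "000", "", "")
if err != nil {
    log.Fatal(err)
}
fmt.Println("fid:", ret.Fid, "count:", ret.Count)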
@@ -43,7 +43,7 @@ func SubmitFiles(master string, files []FilePart,
     for index, file := range files {
         results[index].FileName = file.FileName
     }
-    ret, err := Assign(master, len(files), replication, collection, ttl)
+    ret, err := Assign(master, uint64(len(files)), replication, collection, ttl)
     if err != nil {
         for index, _ := range files {
             results[index].Error = err.Error()
@@ -15,7 +15,7 @@ func NewMemorySequencer() (m *MemorySequencer) {
     return
 }
 
-func (m *MemorySequencer) NextFileId(count int) (uint64, int) {
+func (m *MemorySequencer) NextFileId(count uint64) (uint64, uint64) {
     m.sequenceLock.Lock()
     defer m.sequenceLock.Unlock()
     ret := m.counter
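The remainder of the method falls outside this hunk; below is a minimal sketch of how a uint64-based sequencer of this shape typically reserves a contiguous range. Only the locking and the first assignment appear in the diff, so the counter arithmetic and return values are assumptions:

// sketch: hand out `count` consecutive file ids starting at the current counter
func (m *MemorySequencer) NextFileId(count uint64) (uint64, uint64) {
    m.sequenceLock.Lock()
    defer m.sequenceLock.Unlock()
    ret := m.counter
    m.counter += count // uint64 arithmetic avoids overflowing a platform int on very large counts
    return ret, count
}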
@@ -1,7 +1,7 @@
 package sequence
 
 type Sequencer interface {
-    NextFileId(count int) (uint64, int)
+    NextFileId(count uint64) (uint64, uint64)
     SetMax(uint64)
     Peek() uint64
 }
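Since the interface and its in-memory implementation change in lockstep, a compile-time assertion (not part of this commit, just a sketch) is one way to catch a signature mismatch between the two:

// in package sequence: compilation fails if MemorySequencer stops satisfying Sequencer
var _ Sequencer = (*MemorySequencer)(nil)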
@@ -115,7 +115,7 @@ func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
     return vl.GetActiveVolumeCount(option) > 0
 }
 
-func (t *Topology) PickForWrite(count int, option *VolumeGrowOption) (string, int, *DataNode, error) {
+func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
     vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl).PickForWrite(count, option)
     if err != nil || datanodes.Length() == 0 {
         return "", 0, nil, errors.New("No writable volumes available!")
@@ -87,7 +87,7 @@ func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
     return
 }
 
-func (vl *VolumeLayout) PickForWrite(count int, option *VolumeGrowOption) (*storage.VolumeId, int, *VolumeLocationList, error) {
+func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*storage.VolumeId, uint64, *VolumeLocationList, error) {
     len_writers := len(vl.writables)
     if len_writers <= 0 {
         glog.V(0).Infoln("No more writable volumes!")
@@ -69,8 +69,8 @@ func (ms *MasterServer) volumeLookupHandler(w http.ResponseWriter, r *http.Request) {
 
 func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) {
     stats.AssignRequest()
-    requestedCount, e := strconv.Atoi(r.FormValue("count"))
-    if e != nil {
+    requestedCount, e := strconv.ParseUint(r.FormValue("count"), 10, 64)
+    if e != nil || requestedCount == 0 {
         requestedCount = 1
     }
 
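The same parse-with-fallback pattern in isolation, as a standalone sketch rather than code from the repository: a missing, malformed, or zero count query parameter degrades to a single assignment instead of failing the request.

// parseCount mirrors the handler above: default to 1 unless a positive count is given
func parseCount(raw string) uint64 {
    n, err := strconv.ParseUint(raw, 10, 64)
    if err != nil || n == 0 {
        return 1
    }
    return n
}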