Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
add configurable volume size limit
git-svn-id: https://weed-fs.googlecode.com/svn/trunk@33 282b0af5-e82d-9cf1-ede4-77906d7719d0
parent 0eff4311f5
commit 298fdb4603
@@ -18,6 +18,7 @@ var (
 	capacity = flag.Int("capacity", 100, "maximum number of volumes to hold")
 	mapper *directory.Mapper
 	IsDebug = flag.Bool("debug", false, "verbose debug information")
+	volumeSizeLimitMB = flag.Uint("volumeSizeLimitMB", 32*1024, "Default Volume Size in MegaBytes")
 )
 
 func dirLookupHandler(w http.ResponseWriter, r *http.Request) {

@@ -69,12 +70,11 @@ func writeJson(w http.ResponseWriter, r *http.Request, obj interface{}) {
 		fmt.Fprint(w, string(bytes))
 		w.Write([]uint8(")"))
 	}
-	//log.Println("JSON Response", string(bytes))
 }
 
 func main() {
 	flag.Parse()
-	mapper = directory.NewMapper(*metaFolder, "directory")
+	mapper = directory.NewMapper(*metaFolder, "directory", uint32(*volumeSizeLimitMB*1024*1024))
 	http.HandleFunc("/dir/assign", dirAssignHandler)
 	http.HandleFunc("/dir/lookup", dirLookupHandler)
 	http.HandleFunc("/dir/join", dirJoinHandler)
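The two hunks above wire the new flag into the master: the value is given in megabytes and converted to bytes before being handed to directory.NewMapper. A minimal standalone sketch of that conversion (an illustration, not the actual master source; only the flag name and its default come from the diff):

package main

import (
	"flag"
	"fmt"
)

var volumeSizeLimitMB = flag.Uint("volumeSizeLimitMB", 32*1024, "Default Volume Size in MegaBytes")

func main() {
	flag.Parse()
	// The commit narrows the byte count to uint32, matching NewMapper's parameter type.
	// uint32 tops out at 4294967295 bytes (about 4096 MB), so larger MB values wrap
	// around when narrowed this way; the conversion itself is what the diff shows.
	limitBytes := uint32(*volumeSizeLimitMB * 1024 * 1024)
	fmt.Println("volume size limit in bytes:", limitBytes)
}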
@@ -12,7 +12,6 @@ import (
 )
 
 const (
-	ChunkSizeLimit = 32 * 1024 * 1024 //32G, can not be more than max(uint32)*8
 	FileIdSaveInterval = 10000
 )
 

@@ -36,15 +35,18 @@ type Mapper struct {
 
 	FileIdSequence uint64
 	fileIdCounter  uint64
+
+	volumeSizeLimit uint32
 }
 
 func NewMachine(server, publicUrl string, volumes []storage.VolumeInfo) *Machine {
 	return &Machine{Server: MachineInfo{Url: server, PublicUrl: publicUrl}, Volumes: volumes}
 }
 
-func NewMapper(dirname string, filename string) (m *Mapper) {
+func NewMapper(dirname string, filename string, volumeSizeLimit uint32) (m *Mapper) {
 	m = &Mapper{dir: dirname, fileName: filename}
 	m.vid2machineId = make(map[uint32]int)
+	m.volumeSizeLimit = volumeSizeLimit
 	m.Writers = *new([]int)
 	m.Machines = *new([]*Machine)
 
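The struct and constructor changes above replace the package-level ChunkSizeLimit constant with a per-Mapper field supplied by the caller. A simplified sketch of that pattern (field and parameter names follow the diff; everything else is illustrative, not the real directory package):

package main

import "fmt"

// Mapper here is a stripped-down stand-in for directory.Mapper.
type Mapper struct {
	dir, fileName   string
	volumeSizeLimit uint32 // configured limit in bytes, set once at construction
}

func NewMapper(dirname, filename string, volumeSizeLimit uint32) *Mapper {
	m := &Mapper{dir: dirname, fileName: filename}
	m.volumeSizeLimit = volumeSizeLimit
	return m
}

func main() {
	m := NewMapper("/tmp", "directory", 1024*1024*1024) // a 1 GB limit
	fmt.Println("limit in bytes:", m.volumeSizeLimit)
}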
@@ -116,7 +118,7 @@ func (m *Mapper) Add(machine Machine){
 	var writers []int
 	for machine_index, machine_entry := range m.Machines {
 		for _, v := range machine_entry.Volumes {
-			if v.Size < ChunkSizeLimit {
+			if v.Size < int64(m.volumeSizeLimit) {
 				writers = append(writers, machine_index)
 			}
 		}
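The final hunk is the behavioral core of the change: a machine stays eligible for writes as long as it has a volume below the configured limit, which now comes from the mapper instead of the removed ChunkSizeLimit constant. A self-contained sketch of that selection rule (VolumeInfo, Machine, and pickWriters are simplified stand-ins, not the real types):

package main

import "fmt"

type VolumeInfo struct {
	Size int64 // current volume size in bytes
}

type Machine struct {
	Volumes []VolumeInfo
}

// pickWriters mirrors the loop in Mapper.Add: a machine index is recorded
// once per volume that is still under the size limit.
func pickWriters(machines []*Machine, volumeSizeLimit uint32) []int {
	var writers []int
	for machineIndex, machineEntry := range machines {
		for _, v := range machineEntry.Volumes {
			if v.Size < int64(volumeSizeLimit) {
				writers = append(writers, machineIndex)
			}
		}
	}
	return writers
}

func main() {
	machines := []*Machine{
		{Volumes: []VolumeInfo{{Size: 10 * 1024 * 1024}}},  // 10 MB, under the limit
		{Volumes: []VolumeInfo{{Size: 200 * 1024 * 1024}}}, // 200 MB, over a 100 MB limit
	}
	fmt.Println(pickWriters(machines, 100*1024*1024)) // prints [0]
}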