Lots of fixes:

1. sending 404 if not found
2. handle node-up, node-down, max-volume-count changes, and volume-becoming-full events
This commit is contained in:
Chris Lu 2012-09-20 02:11:08 -07:00
parent eae0080d75
commit a1bc529db6
9 changed files with 156 additions and 129 deletions

View file

@ -1,127 +1,150 @@
package main package main
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors" "errors"
"flag" "fmt"
"fmt" "io"
"io" "io/ioutil"
"io/ioutil" "mime/multipart"
"mime/multipart" "net/http"
"net/http" "net/url"
"net/url" "os"
"os" "pkg/util"
"pkg/util" "strconv"
"strconv"
) )
var uploadReplication *string
func init() { func init() {
cmdUpload.Run = runUpload // break init cycle cmdUpload.Run = runUpload // break init cycle
IsDebug = cmdUpload.Flag.Bool("debug", false, "verbose debug information") IsDebug = cmdUpload.Flag.Bool("debug", false, "verbose debug information")
server = cmdUpload.Flag.String("server", "localhost:9333", "weedfs master location") server = cmdUpload.Flag.String("server", "localhost:9333", "weedfs master location")
uploadReplication = cmdUpload.Flag.String("replication", "00", "replication type(00,01,10,11)")
} }
var cmdUpload = &Command{ var cmdUpload = &Command{
UsageLine: "upload -server=localhost:9333 file1 file2 file2", UsageLine: "upload -server=localhost:9333 file1 [file2 file3]",
Short: "upload a set of files, using consecutive file keys", Short: "upload one or a list of files",
Long: `upload a set of files, using consecutive file keys. Long: `upload one or a list of files.
It uses consecutive file keys for the list of files.
e.g. If the file1 uses key k, file2 can be read via k_1 e.g. If the file1 uses key k, file2 can be read via k_1
`, `,
} }
// AssignResult is the master's reply to a /dir/assign2 request: the file id
// to upload under, the volume server locations, and how many consecutive
// keys were granted. On failure Count is 0 and Error carries the reason.
//
// NOTE(review): the previous declaration used bare tags such as "fid", which
// are not in the canonical key:"value" form, so encoding/json ignored them
// and matched fields case-insensitively by name. The tags below spell the
// same lowercase keys in the form the json package actually reads.
type AssignResult struct {
	Fid       string `json:"fid"`
	Url       string `json:"url"`
	PublicUrl string `json:"publicUrl"`
	Count     int    `json:"count"`
	Error     string `json:"error"`
}
func assign(count int) (*AssignResult, error) { func assign(count int) (*AssignResult, error) {
values := make(url.Values) values := make(url.Values)
values.Add("count", strconv.Itoa(count)) values.Add("count", strconv.Itoa(count))
jsonBlob, err := util.Post("http://"+*server+"/dir/assign", values) values.Add("replication", *uploadReplication)
if err != nil { jsonBlob, err := util.Post("http://"+*server+"/dir/assign2", values)
return nil, err if *IsDebug {
} fmt.Println("debug", *IsDebug, "assign result :", string(jsonBlob))
var ret AssignResult }
err = json.Unmarshal(jsonBlob, &ret) if err != nil {
if err != nil { return nil, err
return nil, err }
} var ret AssignResult
if ret.Count <= 0 { err = json.Unmarshal(jsonBlob, &ret)
return nil, errors.New(ret.Error) if err != nil {
} return nil, err
return &ret, nil }
if ret.Count <= 0 {
return nil, errors.New(ret.Error)
}
return &ret, nil
} }
// UploadResult carries the volume server's reply to a file upload; Size is
// the number of bytes stored.
type UploadResult struct {
	Size int
}
func upload(filename string, uploadUrl string) (int, string) { func upload(filename string, uploadUrl string) (int, string) {
body_buf := bytes.NewBufferString("") if *IsDebug {
body_writer := multipart.NewWriter(body_buf) fmt.Println("Start uploading file:", filename)
file_writer, err := body_writer.CreateFormFile("file", filename) }
if err != nil { body_buf := bytes.NewBufferString("")
panic(err.Error()) body_writer := multipart.NewWriter(body_buf)
} file_writer, err := body_writer.CreateFormFile("file", filename)
fh, err := os.Open(filename) if err != nil {
if err != nil { if *IsDebug {
panic(err.Error()) fmt.Println("Failed to create form file:", filename)
} }
io.Copy(file_writer, fh) panic(err.Error())
content_type := body_writer.FormDataContentType() }
body_writer.Close() fh, err := os.Open(filename)
resp, err := http.Post(uploadUrl, content_type, body_buf) if err != nil {
if err != nil { if *IsDebug {
panic(err.Error()) fmt.Println("Failed to open file:", filename)
} }
defer resp.Body.Close() panic(err.Error())
resp_body, err := ioutil.ReadAll(resp.Body) }
if err != nil { io.Copy(file_writer, fh)
panic(err.Error()) content_type := body_writer.FormDataContentType()
} body_writer.Close()
var ret UploadResult resp, err := http.Post(uploadUrl, content_type, body_buf)
err = json.Unmarshal(resp_body, &ret) if err != nil {
if err != nil { if *IsDebug {
panic(err.Error()) fmt.Println("Failed to upload file to", uploadUrl)
} }
//fmt.Println("Uploaded " + strconv.Itoa(ret.Size) + " Bytes to " + uploadUrl) panic(err.Error())
return ret.Size, uploadUrl }
defer resp.Body.Close()
resp_body, err := ioutil.ReadAll(resp.Body)
if *IsDebug {
fmt.Println("Upload response:", string(resp_body))
}
if err != nil {
panic(err.Error())
}
var ret UploadResult
err = json.Unmarshal(resp_body, &ret)
if err != nil {
panic(err.Error())
}
//fmt.Println("Uploaded " + strconv.Itoa(ret.Size) + " Bytes to " + uploadUrl)
return ret.Size, uploadUrl
} }
type SubmitResult struct { type SubmitResult struct {
Fid string "fid" Fid string "fid"
Size int "size" Size int "size"
} }
func submit(files []string)([]SubmitResult) { func submit(files []string) []SubmitResult {
ret, err := assign(len(files)) ret, err := assign(len(files))
if err != nil { if err != nil {
panic(err) panic(err)
} }
results := make([]SubmitResult, len(files)) results := make([]SubmitResult, len(files))
for index, file := range files { for index, file := range files {
fid := ret.Fid fid := ret.Fid
if index > 0 { if index > 0 {
fid = fid + "_" + strconv.Itoa(index) fid = fid + "_" + strconv.Itoa(index)
} }
uploadUrl := "http://" + ret.PublicUrl + "/" + fid uploadUrl := "http://" + ret.PublicUrl + "/" + fid
results[index].Size, _ = upload(file, uploadUrl) results[index].Size, _ = upload(file, uploadUrl)
results[index].Fid = fid results[index].Fid = fid
} }
return results return results
} }
func runUpload(cmd *Command, args []string) bool { func runUpload(cmd *Command, args []string) bool {
if len(cmdUpload.Flag.Args()) == 0 { *IsDebug = true
return false if len(cmdUpload.Flag.Args()) == 0 {
} return false
results := submit(flag.Args()) }
bytes, _ := json.Marshal(results) results := submit(args)
fmt.Print(string(bytes)) bytes, _ := json.Marshal(results)
return true fmt.Print(string(bytes))
return true
} }

View file

@ -28,7 +28,7 @@ var cmdVolume = &Command{
var ( var (
vport = cmdVolume.Flag.Int("port", 8080, "http listen port") vport = cmdVolume.Flag.Int("port", 8080, "http listen port")
volumeFolder = cmdVolume.Flag.String("dir", "/tmp", "data directory to store files") volumeFolder = cmdVolume.Flag.String("dir", "/tmp", "data directory to store files")
volumes = cmdVolume.Flag.String("volumes", "", "comma-separated list, or ranges of volume ids") volumes = cmdVolume.Flag.String("volumes", "", "comma-separated list, or ranges of volume ids")
publicUrl = cmdVolume.Flag.String("publicUrl", "localhost:8080", "public url to serve data read") publicUrl = cmdVolume.Flag.String("publicUrl", "localhost:8080", "public url to serve data read")
masterNode = cmdVolume.Flag.String("mserver", "localhost:9333", "master directory server to store mappings") masterNode = cmdVolume.Flag.String("mserver", "localhost:9333", "master directory server to store mappings")
@ -88,15 +88,16 @@ func GetHandler(w http.ResponseWriter, r *http.Request) {
} }
cookie := n.Cookie cookie := n.Cookie
count, e := store.Read(volumeId, n) count, e := store.Read(volumeId, n)
if e != nil { if *IsDebug {
w.WriteHeader(404) log.Println("read bytes", count, "error", e)
} }
if *IsDebug { if e != nil || count <= 0 {
log.Println("read bytes", count, "error", e) w.WriteHeader(404)
return
} }
if n.Cookie != cookie { if n.Cookie != cookie {
log.Println("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent()) log.Println("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
w.WriteHeader(404) w.WriteHeader(404)
return return
} }
if ext != "" { if ext != "" {
@ -161,6 +162,7 @@ func DeleteHandler(w http.ResponseWriter, r *http.Request) {
writeJson(w, r, m) writeJson(w, r, m)
} }
func parseURLPath(path string) (vid, fid, ext string) { func parseURLPath(path string) (vid, fid, ext string) {
sepIndex := strings.LastIndex(path, "/") sepIndex := strings.LastIndex(path, "/")
commaIndex := strings.LastIndex(path[sepIndex:], ",") commaIndex := strings.LastIndex(path[sepIndex:], ",")
if commaIndex <= 0 { if commaIndex <= 0 {
@ -181,17 +183,17 @@ func parseURLPath(path string) (vid, fid, ext string) {
} }
func runVolume(cmd *Command, args []string) bool { func runVolume(cmd *Command, args []string) bool {
fileInfo, err := os.Stat(*volumeFolder) fileInfo, err := os.Stat(*volumeFolder)
//TODO: now default to 1G, this value should come from server? //TODO: now default to 1G, this value should come from server?
if err!=nil{ if err != nil {
log.Fatalf("No Existing Folder:%s", *volumeFolder) log.Fatalf("No Existing Folder:%s", *volumeFolder)
} }
if !fileInfo.IsDir() { if !fileInfo.IsDir() {
log.Fatalf("Volume Folder should not be a file:%s", *volumeFolder) log.Fatalf("Volume Folder should not be a file:%s", *volumeFolder)
} }
perm:=fileInfo.Mode().Perm() perm := fileInfo.Mode().Perm()
log.Println("Volume Folder permission:", perm) log.Println("Volume Folder permission:", perm)
store = storage.NewStore(*vport, *publicUrl, *volumeFolder, *maxVolumeCount, *volumes) store = storage.NewStore(*vport, *publicUrl, *volumeFolder, *maxVolumeCount, *volumes)
defer store.Close() defer store.Close()
http.HandleFunc("/", storeHandler) http.HandleFunc("/", storeHandler)
@ -199,7 +201,6 @@ func runVolume(cmd *Command, args []string) bool {
http.HandleFunc("/admin/assign_volume", assignVolumeHandler) http.HandleFunc("/admin/assign_volume", assignVolumeHandler)
http.HandleFunc("/admin/set_volume_locations_list", setVolumeLocationsHandler) http.HandleFunc("/admin/set_volume_locations_list", setVolumeLocationsHandler)
go func() { go func() {
for { for {
store.Join(*masterNode) store.Join(*masterNode)

View file

@ -126,7 +126,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(count int, repType storage.Replicatio
func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, repType storage.ReplicationType, servers ...*topology.DataNode) error { func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, repType storage.ReplicationType, servers ...*topology.DataNode) error {
for _, server := range servers { for _, server := range servers {
if err := AllocateVolume(server, vid, repType); err == nil { if err := AllocateVolume(server, vid, repType); err == nil {
vi := storage.VolumeInfo{Id: vid, Size: 0} vi := storage.VolumeInfo{Id: vid, Size: 0, RepType:repType}
server.AddOrUpdateVolume(vi) server.AddOrUpdateVolume(vi)
topo.RegisterVolumeLayout(&vi, server) topo.RegisterVolumeLayout(&vi, server)
fmt.Println("Created Volume", vid, "on", server) fmt.Println("Created Volume", vid, "on", server)

View file

@ -38,6 +38,7 @@ func (dc *DataCenter) GetOrCreateRack(ip string) *Rack {
func (dc *DataCenter) ToMap() interface{}{ func (dc *DataCenter) ToMap() interface{}{
m := make(map[string]interface{}) m := make(map[string]interface{})
m["Max"] = dc.GetMaxVolumeCount()
m["Free"] = dc.FreeSpace() m["Free"] = dc.FreeSpace()
var racks []interface{} var racks []interface{}
for _, c := range dc.Children() { for _, c := range dc.Children() {

View file

@ -49,8 +49,8 @@ func (dn *DataNode) ToMap() interface{} {
ret["Ip"] = dn.Ip ret["Ip"] = dn.Ip
ret["Port"] = dn.Port ret["Port"] = dn.Port
ret["Volumes"] = dn.GetActiveVolumeCount() ret["Volumes"] = dn.GetActiveVolumeCount()
ret["MaxVolumeCount"] = dn.GetMaxVolumeCount() ret["Max"] = dn.GetMaxVolumeCount()
ret["FreeVolumeCount"] = dn.FreeSpace() ret["Free"] = dn.FreeSpace()
ret["PublicUrl"] = dn.PublicUrl ret["PublicUrl"] = dn.PublicUrl
return ret return ret
} }

View file

@ -32,10 +32,10 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol
if dn.MatchLocation(ip, port) { if dn.MatchLocation(ip, port) {
dn.LastSeen = time.Now().Unix() dn.LastSeen = time.Now().Unix()
if dn.Dead { if dn.Dead {
dn.Dead = false dn.Dead = false
r.GetTopology().chanRecoveredDataNodes <- dn r.GetTopology().chanRecoveredDataNodes <- dn
dn.UpAdjustMaxVolumeCountDelta(maxVolumeCount - dn.maxVolumeCount)
} }
dn.UpAdjustMaxVolumeCountDelta(maxVolumeCount - dn.maxVolumeCount)
return dn return dn
} }
} }
@ -51,6 +51,7 @@ func (r *Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol
func (rack *Rack) ToMap() interface{} { func (rack *Rack) ToMap() interface{} {
m := make(map[string]interface{}) m := make(map[string]interface{})
m["Max"] = rack.GetMaxVolumeCount()
m["Free"] = rack.FreeSpace() m["Free"] = rack.FreeSpace()
var dns []interface{} var dns []interface{}
for _, c := range rack.Children() { for _, c := range rack.Children() {

View file

@ -23,8 +23,6 @@ type Topology struct {
chanDeadDataNodes chan *DataNode chanDeadDataNodes chan *DataNode
chanRecoveredDataNodes chan *DataNode chanRecoveredDataNodes chan *DataNode
chanFullVolumes chan *storage.VolumeInfo chanFullVolumes chan *storage.VolumeInfo
chanIncomplemteVolumes chan *storage.VolumeInfo
chanRecoveredVolumes chan *storage.VolumeInfo
} }
func NewTopology(id string, dirname string, filename string, volumeSizeLimit uint64, pulse int) *Topology { func NewTopology(id string, dirname string, filename string, volumeSizeLimit uint64, pulse int) *Topology {
@ -42,8 +40,6 @@ func NewTopology(id string, dirname string, filename string, volumeSizeLimit uin
t.chanDeadDataNodes = make(chan *DataNode) t.chanDeadDataNodes = make(chan *DataNode)
t.chanRecoveredDataNodes = make(chan *DataNode) t.chanRecoveredDataNodes = make(chan *DataNode)
t.chanFullVolumes = make(chan *storage.VolumeInfo) t.chanFullVolumes = make(chan *storage.VolumeInfo)
t.chanIncomplemteVolumes = make(chan *storage.VolumeInfo)
t.chanRecoveredVolumes = make(chan *storage.VolumeInfo)
return t return t
} }
@ -124,6 +120,7 @@ func (t *Topology) GetOrCreateDataCenter(ip string) *DataCenter {
func (t *Topology) ToMap() interface{} { func (t *Topology) ToMap() interface{} {
m := make(map[string]interface{}) m := make(map[string]interface{})
m["Max"] = t.GetMaxVolumeCount()
m["Free"] = t.FreeSpace() m["Free"] = t.FreeSpace()
var dcs []interface{} var dcs []interface{}
for _, c := range t.Children() { for _, c := range t.Children() {

View file

@ -18,10 +18,6 @@ func (t *Topology) StartRefreshWritableVolumes() {
go func() { go func() {
for { for {
select { select {
case v := <-t.chanIncomplemteVolumes:
fmt.Println("Volume", v, "is incomplete!")
case v := <-t.chanRecoveredVolumes:
fmt.Println("Volume", v, "is recovered!")
case v := <-t.chanFullVolumes: case v := <-t.chanFullVolumes:
t.SetVolumeCapacityFull(v) t.SetVolumeCapacityFull(v)
fmt.Println("Volume", v, "is full!") fmt.Println("Volume", v, "is full!")
@ -38,6 +34,9 @@ func (t *Topology) StartRefreshWritableVolumes() {
func (t *Topology) SetVolumeCapacityFull(volumeInfo *storage.VolumeInfo) { func (t *Topology) SetVolumeCapacityFull(volumeInfo *storage.VolumeInfo) {
vl := t.GetVolumeLayout(volumeInfo.RepType) vl := t.GetVolumeLayout(volumeInfo.RepType)
vl.SetVolumeCapacityFull(volumeInfo.Id) vl.SetVolumeCapacityFull(volumeInfo.Id)
for _, dn := range vl.vid2location[volumeInfo.Id].list {
dn.UpAdjustActiveVolumeCountDelta(-1)
}
} }
func (t *Topology) UnRegisterDataNode(dn *DataNode) { func (t *Topology) UnRegisterDataNode(dn *DataNode) {
for _, v := range dn.volumes { for _, v := range dn.volumes {
@ -45,6 +44,9 @@ func (t *Topology) UnRegisterDataNode(dn *DataNode) {
vl := t.GetVolumeLayout(v.RepType) vl := t.GetVolumeLayout(v.RepType)
vl.SetVolumeUnavailable(dn, v.Id) vl.SetVolumeUnavailable(dn, v.Id)
} }
dn.UpAdjustActiveVolumeCountDelta(-dn.GetActiveVolumeCount())
dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount())
dn.Parent().UnlinkChildNode(dn.Id())
} }
func (t *Topology) RegisterRecoveredDataNode(dn *DataNode) { func (t *Topology) RegisterRecoveredDataNode(dn *DataNode) {
for _, v := range dn.volumes { for _, v := range dn.volumes {

View file

@ -76,16 +76,18 @@ func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
} }
func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId) bool { func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId) bool {
if vl.vid2location[vid].Remove(dn) { if vl.vid2location[vid].Remove(dn) {
if vl.vid2location[vid].Length() < vl.repType.GetCopyCount() { if vl.vid2location[vid].Length() < vl.repType.GetCopyCount() {
return vl.removeFromWritable(vid) fmt.Println("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
} return vl.removeFromWritable(vid)
} }
return false }
return false
} }
func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid storage.VolumeId) bool { func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid storage.VolumeId) bool {
if vl.vid2location[vid].Add(dn) { if vl.vid2location[vid].Add(dn) {
if vl.vid2location[vid].Length() >= vl.repType.GetCopyCount() { if vl.vid2location[vid].Length() >= vl.repType.GetCopyCount() {
fmt.Println("Volume", vid, "becomes writable")
return vl.setVolumeWritable(vid) return vl.setVolumeWritable(vid)
} }
} }