Merge pull request #345 from hxiaodon/master

recover filer read handler logic
Chris Lu authored 2016-07-20 21:28:22 -07:00, committed by GitHub
commit 40ba6d2a6f

@@ -1,25 +1,27 @@
 package weed_server
 
 import (
+	"io"
 	"net/http"
+	"net/url"
 	"strconv"
 	"strings"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/operation"
 	ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui"
+	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/syndtr/goleveldb/leveldb"
 )
 
-func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
+// listDirectoryHandler lists directories and folders under a directory
+// files are sorted by name and paginated via "lastFileName" and "limit".
+// sub directories are listed on the first page, when "lastFileName"
+// is empty.
+func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) {
 	if !strings.HasSuffix(r.URL.Path, "/") {
 		return
 	}
-
-	if fs.disableDirListing {
-		w.WriteHeader(http.StatusMethodNotAllowed)
-		return
-	}
-
 	limit, limit_err := strconv.Atoi(r.FormValue("limit"))
 	if limit_err != nil {
 		limit = 100
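
The doc comment added in this hunk pins down the listing contract: entries come back sorted by name, pages are driven by the "limit" and "lastFileName" query parameters, and sub directories are only included on the first page (empty "lastFileName"). Below is a minimal client-side sketch of fetching one page under those assumptions; the filer address "localhost:8888" and the directory "/some/dir/" are placeholders, and the response is printed raw because its format is not part of this diff.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
)

// listPage fetches one page of a filer directory listing.
// dir must end with "/" so the filer treats the request as a listing.
func listPage(filerAddr, dir, lastFileName string, limit int) (string, error) {
	q := url.Values{}
	q.Set("limit", strconv.Itoa(limit))
	// lastFileName is left out on the first page; per the handler's doc
	// comment, that is also the only page that includes sub directories.
	if lastFileName != "" {
		q.Set("lastFileName", lastFileName)
	}
	resp, err := http.Get("http://" + filerAddr + dir + "?" + q.Encode())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	// Placeholder address and path; adjust to the actual deployment.
	page, err := listPage("localhost:8888", "/some/dir/", "", 100)
	if err != nil {
		fmt.Println("listing failed:", err)
		return
	}
	fmt.Println(page)
}

Subsequent pages would pass the name of the last file seen on the previous page as "lastFileName".
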
@@ -73,3 +75,66 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 	}
 	ui.StatusTpl.Execute(w, args)
 }
+
+func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) {
+	if strings.HasSuffix(r.URL.Path, "/") {
+		if fs.disableDirListing {
+			w.WriteHeader(http.StatusMethodNotAllowed)
+			return
+		}
+		fs.listDirectoryHandler(w, r)
+		return
+	}
+
+	fileId, err := fs.filer.FindFile(r.URL.Path)
+	if err == leveldb.ErrNotFound {
+		glog.V(3).Infoln("Not found in db", r.URL.Path)
+		w.WriteHeader(http.StatusNotFound)
+		return
+	}
+
+	urlLocation, err := operation.LookupFileId(fs.getMasterNode(), fileId)
+	if err != nil {
+		glog.V(1).Infof("operation LookupFileId %s failed, err is %s", fileId, err.Error())
+		w.WriteHeader(http.StatusNotFound)
+		return
+	}
+	urlString := urlLocation
+	if fs.redirectOnRead {
+		http.Redirect(w, r, urlString, http.StatusFound)
+		return
+	}
+
+	u, _ := url.Parse(urlString)
+	q := u.Query()
+	for key, values := range r.URL.Query() {
+		for _, value := range values {
+			q.Add(key, value)
+		}
+	}
+	u.RawQuery = q.Encode()
+	request := &http.Request{
+		Method:        r.Method,
+		URL:           u,
+		Proto:         r.Proto,
+		ProtoMajor:    r.ProtoMajor,
+		ProtoMinor:    r.ProtoMinor,
+		Header:        r.Header,
+		Body:          r.Body,
+		Host:          r.Host,
+		ContentLength: r.ContentLength,
+	}
+	glog.V(3).Infoln("retrieving from", u)
+	resp, do_err := util.Do(request)
+	if do_err != nil {
+		glog.V(0).Infoln("failing to connect to volume server", do_err.Error())
+		writeJsonError(w, r, http.StatusInternalServerError, do_err)
+		return
+	}
+	defer resp.Body.Close()
+	for k, v := range resp.Header {
+		w.Header()[k] = v
+	}
+	w.WriteHeader(resp.StatusCode)
+	io.Copy(w, resp.Body)
+}
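
The restored GetOrHeadHandler gives single-file reads two behaviors: it looks up the file id in the filer store, resolves a volume server URL via the master, and then either redirects the client with a 302 (when redirectOnRead is set) or proxies the volume server response itself, copying status code, headers, and body. Below is a small client sketch for telling the two apart; the filer address and file path are placeholders, and automatic redirect following is disabled so the 302 stays visible.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Do not follow redirects automatically, so a 302 from a filer running
	// with redirectOnRead is returned to us instead of being chased.
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	// Placeholder filer address and file path.
	resp, err := client.Get("http://localhost:8888/some/dir/file.txt")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusFound:
		// redirectOnRead: the Location header points at the volume server.
		fmt.Println("redirected to volume server:", resp.Header.Get("Location"))
	case http.StatusNotFound:
		// Unknown path in the filer store, or the volume lookup failed.
		fmt.Println("not found")
	default:
		// Proxy mode: the filer copied the volume server's status, headers,
		// and body straight through.
		fmt.Println("served via filer proxy, status", resp.StatusCode)
	}
}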