Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00.
fix directory creation, directory listing
This commit is contained in:
parent
e31c514b00
commit
ab4ddb1e0e
|
@@ -9,6 +9,7 @@ import (
|
|||
"time"
|
||||
"os"
|
||||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
||||
"github.com/chrislusf/seaweedfs/weed/glog"
|
||||
)
|
||||
|
||||
type Filer struct {
|
||||
|
@@ -44,22 +45,23 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
|
|||
dirPath := "/" + filepath.Join(dirParts[:i]...)
|
||||
// fmt.Printf("%d directory: %+v\n", i, dirPath)
|
||||
|
||||
dirFound := false
|
||||
|
||||
// first check local cache
|
||||
dirEntry := f.cacheGetDirectory(dirPath)
|
||||
|
||||
// not found, check the store directly
|
||||
if dirEntry == nil {
|
||||
glog.V(4).Infof("find uncached directory: %s", dirPath)
|
||||
var dirFindErr error
|
||||
dirFound, dirEntry, dirFindErr = f.FindEntry(FullPath(dirPath))
|
||||
_, dirEntry, dirFindErr = f.FindEntry(FullPath(dirPath))
|
||||
if dirFindErr != nil {
|
||||
return fmt.Errorf("findDirectory %s: %v", dirPath, dirFindErr)
|
||||
}
|
||||
}else{
|
||||
glog.V(4).Infof("found cached directory: %s", dirPath)
|
||||
}
|
||||
|
||||
// no such existing directory
|
||||
if !dirFound {
|
||||
if dirEntry == nil {
|
||||
|
||||
// create the directory
|
||||
now := time.Now()
|
||||
|
@@ -75,6 +77,7 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
|
|||
},
|
||||
}
|
||||
|
||||
glog.V(2).Infof("create directory: %s", dirPath)
|
||||
mkdirErr := f.store.InsertEntry(dirEntry)
|
||||
if mkdirErr != nil {
|
||||
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
|
||||
|
|
|
@@ -77,11 +77,13 @@ func (filer *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFil
|
|||
}
|
||||
entry := item.(Entry).Entry
|
||||
// println("checking", entry.FullPath)
|
||||
|
||||
if entry.FullPath == fullpath {
|
||||
// skipping the current directory
|
||||
// println("skipping the folder", entry.FullPath)
|
||||
return true
|
||||
}
|
||||
|
||||
dir, name := entry.FullPath.DirAndName()
|
||||
if name == startFileName {
|
||||
if inclusive {
|
||||
|
@@ -90,11 +92,13 @@ func (filer *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFil
|
|||
}
|
||||
return true
|
||||
}
|
||||
if !strings.HasPrefix(dir, string(fullpath)) {
|
||||
// println("directory is:", dir, "fullpath:", fullpath)
|
||||
|
||||
// only iterate the same prefix
|
||||
if !strings.HasPrefix(string(entry.FullPath), string(fullpath)) {
|
||||
// println("breaking from", entry.FullPath)
|
||||
return false
|
||||
}
|
||||
|
||||
if dir != string(fullpath) {
|
||||
// this could be items in deeper directories
|
||||
// println("skipping deeper folder", entry.FullPath)
|
||||
|
|
|
@@ -15,7 +15,7 @@ func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
|
|||
fileId := r.FormValue("fileId")
|
||||
fileSize, err := strconv.ParseUint(r.FormValue("fileSize"), 10, 64)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("register %s to %s parse fileSize %s: %v", fileId, path, r.FormValue("fileSize"), err)
|
||||
glog.V(0).Infof("register %s to %s parse fileSize %s: %v", fileId, path, r.FormValue("fileSize"), err)
|
||||
writeJsonError(w, r, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
@@ -30,6 +30,7 @@ func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
|
|||
Mtime: time.Now().UnixNano(),
|
||||
}},
|
||||
}
|
||||
glog.V(2).Infof("register %s to %s parse fileSize %s", fileId, path, r.FormValue("fileSize"))
|
||||
err = fs.filer.CreateEntry(entry)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("register %s to %s error: %v", fileId, path, err)
|
||||
|
|
|
@@ -48,6 +48,8 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
|
|||
lastFileName = entries[len(entries)-1].Name()
|
||||
}
|
||||
|
||||
glog.V(4).Infof("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries))
|
||||
|
||||
args := struct {
|
||||
Path string
|
||||
Entries interface{}
|
||||
|
|
Loading…
Reference in a new issue