copy works, edit somehow still fails

This commit is contained in:
Chris Lu 2018-05-23 03:08:46 -07:00
parent 1675243f29
commit 536559f62d
4 changed files with 252 additions and 180 deletions

View file

@ -114,9 +114,19 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
})
if err == nil {
node := dir.newFile(req.Name, nil)
dir.NodeMap[req.Name] = node
return node, node, nil
file := dir.newFile(req.Name, nil)
dir.NodeMap[req.Name] = file
return file, &FileHandle{
wfs: file.wfs,
dirPath: file.dir.Path,
name: file.Name,
RequestId: req.Header.ID,
NodeId: req.Header.Node,
Uid: req.Uid,
Gid: req.Gid,
attributes: file.attributes,
Chunks: file.Chunks,
}, nil
}
return nil, nil, err

View file

@ -11,20 +11,12 @@ import (
"path/filepath"
"os"
"time"
"bytes"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/filer2"
)
var _ = fs.Node(&File{})
var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})
var _ = fs.Handle(&File{})
var _ = fs.HandleReadAller(&File{})
// var _ = fs.HandleReader(&File{})
var _ = fs.HandleFlusher(&File{})
var _ = fs.HandleWriter(&File{})
var _ = fs.HandleReleaser(&File{})
var _ = fs.NodeSetattrer(&File{})
type File struct {
@ -32,20 +24,18 @@ type File struct {
Name string
dir *Dir
wfs *WFS
isOpened bool
attributes *filer_pb.FuseAttributes
}
func (file *File) Attr(context context.Context, attr *fuse.Attr) error {
if !file.isOpened {
fullPath := filepath.Join(file.dir.Path, file.Name)
item := file.wfs.listDirectoryEntriesCache.Get(fullPath)
if item != nil {
entry := item.Value().(*filer_pb.Entry)
file.Chunks = entry.Chunks
file.attributes = entry.Attributes
glog.V(1).Infof("read cached file %v attributes", file.Name)
glog.V(1).Infof("file attr read cached %v attributes", file.Name)
} else {
err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@ -54,16 +44,17 @@ func (file *File) Attr(context context.Context, attr *fuse.Attr) error {
ParentDir: file.dir.Path,
}
glog.V(1).Infof("read file: %v", request)
resp, err := client.GetEntryAttributes(context, request)
if err != nil {
glog.V(0).Infof("read file %v: %v", request, err)
glog.V(0).Infof("file attr read file %v: %v", request, err)
return err
}
file.attributes = resp.Attributes
file.Chunks = resp.Chunks
glog.V(1).Infof("file attr %v %+v: %d", fullPath, file.attributes, filer2.TotalSize(file.Chunks))
return nil
})
@ -71,7 +62,6 @@ func (file *File) Attr(context context.Context, attr *fuse.Attr) error {
return err
}
}
}
attr.Mode = os.FileMode(file.attributes.FileMode)
attr.Size = filer2.TotalSize(file.Chunks)
@ -84,19 +74,29 @@ func (file *File) Attr(context context.Context, attr *fuse.Attr) error {
}
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
fullPath := filepath.Join(file.dir.Path, file.Name)
fmt.Printf("Open %v %+v\n", fullPath, req)
file.isOpened = true
glog.V(3).Infof("file open %v %+v", fullPath, req)
return file, nil
return &FileHandle{
wfs: file.wfs,
dirPath: file.dir.Path,
name: file.Name,
RequestId: req.Header.ID,
NodeId: req.Header.Node,
Uid: req.Uid,
Gid: req.Gid,
attributes: file.attributes,
Chunks: file.Chunks,
}, nil
}
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
fullPath := filepath.Join(file.dir.Path, file.Name)
fmt.Printf("Setattr %v %+v\n", fullPath, req)
glog.V(3).Infof("file setattr %v %+v", fullPath, req)
if req.Valid.Size() {
if req.Size == 0 {
@ -125,134 +125,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
}
func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {
fmt.Printf("read all file %+v/%v\n", file.dir.Path, file.Name)
if len(file.Chunks) == 0 {
glog.V(0).Infof("empty file %v/%v", file.dir.Path, file.Name)
return
}
err = file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
// FIXME: need to either use Read() or implement differently
chunks, _ := filer2.CompactFileChunks(file.Chunks)
glog.V(1).Infof("read file %v/%v %d/%d chunks", file.dir.Path, file.Name, len(chunks), len(file.Chunks))
request := &filer_pb.GetFileContentRequest{
FileId: chunks[0].FileId,
}
glog.V(1).Infof("read file content %d chunk %s [%d,%d): %v", len(chunks),
chunks[0].FileId, chunks[0].Offset, chunks[0].Offset+int64(chunks[0].Size), request)
resp, err := client.GetFileContent(ctx, request)
if err != nil {
return err
}
content = resp.Content
return nil
})
return content, err
}
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filer
fmt.Printf("flush file %+v\n", req)
return nil
}
func (file *File) Flush(ctx context.Context, req *fuse.FlushRequest) error {
// fflush works at file level
// send the data to the OS
glog.V(3).Infof("file flush %v", req)
if len(file.Chunks) == 0 {
glog.V(2).Infof("%x file %s/%s flush skipping empty: %v", file, file.dir.Path, file.Name, req)
return nil
}
err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.UpdateEntryRequest{
Directory: file.dir.Path,
Entry: &filer_pb.Entry{
Name: file.Name,
Attributes: file.attributes,
Chunks: file.Chunks,
},
}
glog.V(1).Infof("%s/%s set chunks: %v", file.dir.Path, file.Name, len(file.Chunks))
if _, err := client.UpdateEntry(ctx, request); err != nil {
return fmt.Errorf("update file: %v", err)
}
return nil
})
return err
}
func (file *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
// write the request to volume servers
// fmt.Printf("write file %+v\n", req)
var fileId, host string
if err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
request := &filer_pb.AssignVolumeRequest{
Count: 1,
Replication: "000",
Collection: "",
}
glog.V(1).Infof("assign volume: %v", request)
resp, err := client.AssignVolume(ctx, request)
if err != nil {
return err
}
fileId, host = resp.FileId, resp.Url
return nil
}); err != nil {
return fmt.Errorf("filer assign volume: %v", err)
}
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
bufReader := bytes.NewReader(req.Data)
uploadResult, err := operation.Upload(fileUrl, file.Name, bufReader, false, "application/octet-stream", nil, "")
if err != nil {
return fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
return fmt.Errorf("upload result: %v", uploadResult.Error)
}
resp.Size = int(uploadResult.Size)
file.Chunks = append(file.Chunks, &filer_pb.FileChunk{
FileId: fileId,
Offset: req.Offset,
Size: uint64(uploadResult.Size),
Mtime: time.Now().UnixNano(),
})
glog.V(1).Infof("uploaded %s/%s to: %v, [%d,%d)", file.dir.Path, file.Name, fileUrl, req.Offset, req.Offset+int64(resp.Size))
return nil
}
func (file *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
fmt.Printf("release file %+v\n", req)
file.isOpened = false
glog.V(3).Infof("fsync file %+v\n", req)
return nil
}

176
weed/filesys/filehandle.go Normal file
View file

@ -0,0 +1,176 @@
package filesys
import (
"bazil.org/fuse/fs"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/filer2"
"context"
"github.com/chrislusf/seaweedfs/weed/glog"
"bazil.org/fuse"
"bytes"
"github.com/chrislusf/seaweedfs/weed/operation"
"time"
)
// FileHandle is one open instance of a file. It carries the identity of
// the opening request plus the metadata (attributes, chunk list) that
// ReadAll/Write/Flush operate on.
type FileHandle struct {
	// cache file has been written to
	dirty bool // set by Write after an upload; cleared by Flush once UpdateEntry succeeds

	cachePath string // NOTE(review): not used in the code visible here — confirm intent

	handle uint64 // NOTE(review): not used in the code visible here — confirm intent

	wfs     *WFS   // shared filesystem state; provides withFilerClient
	dirPath string // directory part of the file path
	name    string // file name within dirPath

	RequestId fuse.RequestID // unique ID for request
	NodeId    fuse.NodeID    // file or directory the request is about
	Uid       uint32         // user ID of process making request
	Gid       uint32         // group ID of process making request

	attributes *filer_pb.FuseAttributes // cached attributes, sent to the filer on Flush
	Chunks     []*filer_pb.FileChunk    // chunks accumulated so far; extended by Write
}
var _ = fs.Handle(&FileHandle{})
var _ = fs.HandleReadAller(&FileHandle{})
// var _ = fs.HandleReader(&FileHandle{})
var _ = fs.HandleFlusher(&FileHandle{})
var _ = fs.HandleWriter(&FileHandle{})
var _ = fs.HandleReleaser(&FileHandle{})
// ReadAll returns the whole file content by fetching it from the filer.
// An empty chunk list yields (nil, nil).
func (fh *FileHandle) ReadAll(ctx context.Context) (content []byte, err error) {

	glog.V(3).Infof("read all fh %+v/%v", fh.dirPath, fh.name)

	if len(fh.Chunks) == 0 {
		glog.V(0).Infof("empty fh %v/%v", fh.dirPath, fh.name)
		return
	}

	err = fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		// FIXME: need to either use Read() or implement differently
		compacted, _ := filer2.CompactFileChunks(fh.Chunks)

		glog.V(1).Infof("read fh %v/%v %d/%d chunks", fh.dirPath, fh.name, len(compacted), len(fh.Chunks))

		// only the first compacted chunk is fetched (see FIXME above)
		first := compacted[0]
		request := &filer_pb.GetFileContentRequest{
			FileId: first.FileId,
		}

		glog.V(1).Infof("read fh content %d chunk %s [%d,%d): %v", len(compacted),
			first.FileId, first.Offset, first.Offset+int64(first.Size), request)

		reply, readErr := client.GetFileContent(ctx, request)
		if readErr != nil {
			return readErr
		}

		content = reply.Content
		return nil
	})

	return content, err
}
// Write uploads the request payload to a freshly assigned volume server
// and appends the resulting chunk to the handle; the new chunk list is
// persisted to the filer later, in Flush.
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {

	// write the request to volume servers
	// glog.V(3).Infof("write fh %+v", req)

	var assignedFileId, volumeHost string

	// step 1: ask the filer for a volume location to write to
	assignErr := fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		assignRequest := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: "000",
			Collection:  "",
		}

		glog.V(1).Infof("assign volume: %v", assignRequest)
		assignResp, err := client.AssignVolume(ctx, assignRequest)
		if err != nil {
			return err
		}

		assignedFileId, volumeHost = assignResp.FileId, assignResp.Url
		return nil
	})
	if assignErr != nil {
		return fmt.Errorf("filer assign volume: %v", assignErr)
	}

	// step 2: upload the data to the assigned volume server
	fileUrl := fmt.Sprintf("http://%s/%s", volumeHost, assignedFileId)
	uploadResult, uploadErr := operation.Upload(fileUrl, fh.name, bytes.NewReader(req.Data), false, "application/octet-stream", nil, "")
	if uploadErr != nil {
		return fmt.Errorf("upload data: %v", uploadErr)
	}
	if uploadResult.Error != "" {
		return fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	// step 3: record the new chunk and mark the handle dirty for Flush
	resp.Size = int(uploadResult.Size)

	fh.Chunks = append(fh.Chunks, &filer_pb.FileChunk{
		FileId: assignedFileId,
		Offset: req.Offset,
		Size:   uint64(uploadResult.Size),
		Mtime:  time.Now().UnixNano(),
	})

	glog.V(1).Infof("uploaded %s/%s to: %v, [%d,%d)", fh.dirPath, fh.name, fileUrl, req.Offset, req.Offset+int64(resp.Size))

	fh.dirty = true

	return nil
}
// Release is invoked when the kernel closes this handle. Nothing is
// persisted here — written chunks are sent to the filer in Flush — so
// this only logs the event.
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	glog.V(3).Infof("release fh %+v/%v", fh.dirPath, fh.name)
	return nil
}
// Flush - experimenting with uploading at flush, this slows operations down till it has been
// completely flushed
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
	// fflush works at fh level
	// send the data to the OS
	glog.V(3).Infof("fh flush %v", req)

	// nothing written since the last successful flush
	if !fh.dirty {
		return nil
	}

	if len(fh.Chunks) == 0 {
		glog.V(2).Infof("fh %s/%s flush skipping empty: %v", fh.dirPath, fh.name, req)
		return nil
	}

	flushErr := fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		updateRequest := &filer_pb.UpdateEntryRequest{
			Directory: fh.dirPath,
			Entry: &filer_pb.Entry{
				Name:       fh.name,
				Attributes: fh.attributes,
				Chunks:     fh.Chunks,
			},
		}

		glog.V(1).Infof("%s/%s set chunks: %v", fh.dirPath, fh.name, len(fh.Chunks))

		if _, updateErr := client.UpdateEntry(ctx, updateRequest); updateErr != nil {
			return fmt.Errorf("update fh: %v", updateErr)
		}

		return nil
	})

	// keep dirty set on failure so a later flush retries the update
	if flushErr == nil {
		fh.dirty = false
	}
	return flushErr
}

View file

@ -80,7 +80,7 @@ func (fs *FilerServer) GetEntryAttributes(ctx context.Context, req *filer_pb.Get
attributes.Gid = entry.Gid
attributes.Mtime = entry.Mtime.Unix()
glog.V(0).Infof("GetEntryAttributes %v size %d chunks %d: %+v", fullpath, attributes.FileSize, len(entry.Chunks), attributes)
glog.V(3).Infof("GetEntryAttributes %v size %d chunks %d: %+v", fullpath, attributes.FileSize, len(entry.Chunks), attributes)
return &filer_pb.GetEntryAttributesResponse{
Attributes: attributes,
@ -139,19 +139,29 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks)
err = fs.filer.UpdateEntry(&filer2.Entry{
newEntry := &filer2.Entry{
FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)),
Attr: filer2.Attr{
Mtime: time.Unix(req.Entry.Attributes.Mtime, 0),
Crtime: time.Unix(req.Entry.Attributes.Mtime, 0),
Mode: os.FileMode(req.Entry.Attributes.FileMode),
Uid: req.Entry.Attributes.Uid,
Gid: req.Entry.Attributes.Gid,
},
Attr: entry.Attr,
Chunks: chunks,
})
}
if err == nil {
glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v",
fullpath, entry.Attr, len(entry.Chunks), entry.Chunks,
req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks)
if req.Entry.Attributes != nil {
if req.Entry.Attributes.Mtime != 0 {
newEntry.Attr.Mtime = time.Unix(req.Entry.Attributes.Mtime, 0)
}
if req.Entry.Attributes.FileMode != 0 {
newEntry.Attr.Mode = os.FileMode(req.Entry.Attributes.FileMode)
}
newEntry.Attr.Uid = req.Entry.Attributes.Uid
newEntry.Attr.Gid = req.Entry.Attributes.Gid
}
if err = fs.filer.UpdateEntry(newEntry); err == nil {
for _, garbage := range unusedChunks {
glog.V(0).Infof("deleting %s old chunk: %v, [%d, %d)", fullpath, garbage.FileId, garbage.Offset, garbage.Offset+int64(garbage.Size))
operation.DeleteFile(fs.master, garbage.FileId, fs.jwt(garbage.FileId))