seaweedfs/weed/filesys/file.go

package filesys

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

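// The blank assignments below are compile-time checks that *File implements
// the fuse Node and Handle interfaces this package relies on.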
var _ = fs.Node(&File{})
// var _ = fs.NodeOpener(&File{})
var _ = fs.NodeFsyncer(&File{})

var _ = fs.Handle(&File{})
var _ = fs.HandleReadAller(&File{})
// var _ = fs.HandleReader(&File{})
var _ = fs.HandleFlusher(&File{})
var _ = fs.HandleWriter(&File{})

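// File represents a filer entry exposed through the FUSE mount. It keeps the
// list of chunks that back its content plus references to its parent
// directory and the owning WFS instance.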
type File struct {
	Chunks []*filer_pb.FileChunk
	Name   string
	dir    *Dir
	wfs    *WFS
}

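// Attr answers stat-like requests. It serves attributes from the directory
// listing cache when possible and otherwise asks the filer over gRPC.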
func (file *File) Attr(context context.Context, attr *fuse.Attr) error {

	fullPath := filepath.Join(file.dir.Path, file.Name)

	item := file.wfs.listDirectoryEntriesCache.Get(fullPath)
	var attributes *filer_pb.FuseAttributes
	if item != nil {
		attributes = item.Value().(*filer_pb.FuseAttributes)
		glog.V(1).Infof("read cached file %v attributes", file.Name)
	} else {
		err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

			request := &filer_pb.GetFileAttributesRequest{
				Name:      file.Name,
				ParentDir: file.dir.Path,
			}

			glog.V(1).Infof("read file size: %v", request)
			resp, err := client.GetFileAttributes(context, request)
			if err != nil {
				return err
			}

			attributes = resp.Attributes

			return nil
		})

		if err != nil {
			return err
		}
	}

	attr.Mode = os.FileMode(attributes.FileMode)
	attr.Size = attributes.FileSize
	attr.Mtime = time.Unix(attributes.Mtime, 0)
	attr.Gid = attributes.Gid
	attr.Uid = attributes.Uid

	return nil
}

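// ReadAll returns the file content fetched from the filer. As the FIXME
// below notes, only the first chunk is read for now.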
func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {

	if len(file.Chunks) == 0 {
		return
	}

	err = file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		// FIXME: need to either use Read() or implement differently
		request := &filer_pb.GetFileContentRequest{
			FileId: file.Chunks[0].FileId,
		}

		glog.V(1).Infof("read file content: %v", request)
		resp, err := client.GetFileContent(ctx, request)
		if err != nil {
			return err
		}

		content = resp.Content

		return nil
	})

	return content, err
}

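// Fsync is invoked when the OS flushes its buffers for this file. The chunk
// list should eventually be written to the filer here; it is currently a
// no-op.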
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	// fsync works at OS level
	// write the file chunks to the filer
	fmt.Printf("fsync file %+v\n", req)
	return nil
}

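// Flush is called at file level (e.g. on close) and persists the accumulated
// chunk list to the filer via AppendFileChunks.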
func (file *File) Flush(ctx context.Context, req *fuse.FlushRequest) error {
	// fflush works at file level
	// send the data to the OS
	fmt.Printf("flush file %+v\n", req)

	err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AppendFileChunksRequest{
			Directory: file.dir.Path,
			Entry: &filer_pb.Entry{
				Name:   file.Name,
				Chunks: file.Chunks,
			},
		}

		glog.V(1).Infof("append chunks: %v", request)
		if _, err := client.AppendFileChunks(ctx, request); err != nil {
			return fmt.Errorf("append file chunks: %v", err)
		}

		return nil
	})

	return err
}

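// Write uploads each write request as a separate chunk: it asks the filer to
// assign a volume and file id, uploads req.Data to that volume server, and
// records the resulting chunk (file id, offset, size) in file.Chunks.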
func (file *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	// write the request to volume servers
	fmt.Printf("write file %+v\n", req)

	var fileId, host string

	if err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: "000",
			Collection:  "",
		}

		glog.V(1).Infof("assign volume: %v", request)
		resp, err := client.AssignVolume(ctx, request)
		if err != nil {
			return err
		}

		fileId, host = resp.FileId, resp.Url

		return nil
	}); err != nil {
		return fmt.Errorf("filer assign volume: %v", err)
	}

	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)

	bufReader := bytes.NewReader(req.Data)
	uploadResult, err := operation.Upload(fileUrl, file.Name, bufReader, false, "application/octet-stream", nil, "")
	if err != nil {
		return fmt.Errorf("upload data: %v", err)
	}
	if uploadResult.Error != "" {
		return fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	glog.V(1).Infof("uploaded %s/%s to: %v", file.dir.Path, file.Name, fileUrl)

	file.Chunks = append(file.Chunks, &filer_pb.FileChunk{
		FileId: fileId,
		Offset: req.Offset,
		Size:   uint64(uploadResult.Size),
	})

	// report the accepted byte count so the kernel does not see a short write
	resp.Size = len(req.Data)

	return nil
}