cp file can work

1. Consolidate chunk types to filer_pb.FileChunk
2. Dir: add file create and mkdir
3. File: add flush and write

Updates still have issues.
This commit is contained in:
Chris Lu 2018-05-16 00:08:44 -07:00
parent c7a71d35b0
commit b303a02461
14 changed files with 619 additions and 102 deletions
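
To illustrate item 1 (not part of this commit, just a minimal sketch against the packages touched below): an entry now carries the protobuf chunk type directly, so the same []*filer_pb.FileChunk value flows through the filer store, the gRPC API, and the FUSE client. The file id and sizes here are made-up examples.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// One chunk type everywhere: the protobuf-generated filer_pb.FileChunk.
	chunk := &filer_pb.FileChunk{
		FileId: "3,01637037d6", // example fid, illustrative only
		Offset: 0,
		Size:   1024,
	}

	// filer2.Entry stores []*filer_pb.FileChunk instead of its own chunk type.
	entry := &filer2.Entry{
		FullPath: filer2.FullPath("/home/chris/hello.txt"),
		Chunks:   []*filer_pb.FileChunk{chunk},
	}

	// filer2.Chunks is defined over the same slice type, so it can sum the sizes directly.
	fmt.Println("total size:", filer2.Chunks(entry.Chunks).TotalSize())
}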

View file

@@ -3,6 +3,7 @@ package embedded
 import (
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )

 type EmbeddedStore struct {
@@ -25,7 +26,7 @@ func (filer *EmbeddedStore) AddDirectoryLink(directory *filer2.Entry, delta int3
 	return nil
 }

-func (filer *EmbeddedStore) AppendFileChunk(fullpath filer2.FullPath, fileChunk filer2.FileChunk) (err error) {
+func (filer *EmbeddedStore) AppendFileChunk(fullpath filer2.FullPath, fileChunks []*filer_pb.FileChunk) (err error) {
 	return nil
 }

View file

@@ -1,6 +1,8 @@
 package filer2

-type Chunks []FileChunk
+import "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+
+type Chunks []*filer_pb.FileChunk

 func (chunks Chunks) TotalSize() (size uint64) {
 	for _, c := range chunks {

View file

@@ -8,6 +8,7 @@ import (
 	"path/filepath"
 	"time"
 	"os"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )

 type Filer struct {
@@ -105,8 +106,8 @@ func (f *Filer) CreateEntry(entry *Entry) (error) {
 	return nil
 }

-func (f *Filer) AppendFileChunk(p FullPath, c FileChunk) (err error) {
-	return f.store.AppendFileChunk(p, c)
+func (f *Filer) AppendFileChunk(p FullPath, chunks []*filer_pb.FileChunk) (err error) {
+	return f.store.AppendFileChunk(p, chunks)
 }

 func (f *Filer) FindEntry(p FullPath) (found bool, entry *Entry, err error) {

View file

@@ -5,9 +5,9 @@ import (
 	"os"
 	"time"
 	"path/filepath"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )

-type FileId string //file id in SeaweedFS
 type FullPath string

 func NewFullPath(dir, name string) FullPath {
@@ -51,18 +51,12 @@ type Entry struct {
 	Attr

 	// the following is for files
-	Chunks []FileChunk `json:"chunks,omitempty"`
-}
-
-type FileChunk struct {
-	Fid    FileId `json:"fid,omitempty"`
-	Offset int64  `json:"offset,omitempty"`
-	Size   uint64 `json:"size,omitempty"` // size in bytes
+	Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"`
 }

 type AbstractFiler interface {
 	CreateEntry(*Entry) (error)
-	AppendFileChunk(FullPath, FileChunk) (err error)
+	AppendFileChunk(FullPath, []*filer_pb.FileChunk) (err error)
 	FindEntry(FullPath) (found bool, fileEntry *Entry, err error)
 	DeleteEntry(FullPath) (fileEntry *Entry, err error)
@@ -74,7 +68,7 @@ var ErrNotFound = errors.New("filer: no entry is found in filer store")

 type FilerStore interface {
 	InsertEntry(*Entry) (error)
-	AppendFileChunk(FullPath, FileChunk) (err error)
+	AppendFileChunk(FullPath, []*filer_pb.FileChunk) (err error)
 	FindEntry(FullPath) (found bool, entry *Entry, err error)
 	DeleteEntry(FullPath) (fileEntry *Entry, err error)
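
A small, hypothetical helper (not in this commit) written against the updated FilerStore interface above; it only uses the methods the interface declares and assumes some concrete store such as the in-memory one:

package example

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// AppendChunks looks up an entry and appends protobuf chunks through the
// updated FilerStore interface, which now takes a []*filer_pb.FileChunk.
func AppendChunks(store filer2.FilerStore, p filer2.FullPath, chunks ...*filer_pb.FileChunk) error {
	found, _, err := store.FindEntry(p)
	if err != nil {
		return err
	}
	if !found {
		return fmt.Errorf("no entry at %s", p)
	}
	return store.AppendFileChunk(p, chunks)
}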

View file

@@ -6,6 +6,7 @@ import (
 	"strings"
 	"fmt"
 	"time"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )

 type MemDbStore struct {
@@ -32,13 +33,15 @@ func (filer *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) {
 	return nil
 }

-func (filer *MemDbStore) AppendFileChunk(fullpath filer2.FullPath, fileChunk filer2.FileChunk) (err error) {
+func (filer *MemDbStore) AppendFileChunk(fullpath filer2.FullPath, fileChunks []*filer_pb.FileChunk) (err error) {
 	found, entry, err := filer.FindEntry(fullpath)
 	if !found {
 		return fmt.Errorf("No such file: %s", fullpath)
 	}
-	entry.Chunks = append(entry.Chunks, fileChunk)
+	entry.Chunks = append(entry.Chunks, fileChunks...)
 	entry.Mtime = time.Now()
+	println("appending to entry", entry.Name(), len(entry.Chunks))
+	filer.tree.ReplaceOrInsert(Entry{entry})
 	return nil
 }

View file

@@ -9,7 +9,6 @@ import (
 	"bazil.org/fuse/fs"
 	"bazil.org/fuse"
-	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"time"
@@ -27,16 +26,77 @@ func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error {
 	return nil
 }

+func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
+	resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
+
+	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+		request := &filer_pb.CreateEntryRequest{
+			Directory: dir.Path,
+			Entry: &filer_pb.Entry{
+				Name:        req.Name,
+				IsDirectory: req.Mode&os.ModeDir > 0,
+				Attributes: &filer_pb.FuseAttributes{
+					Mtime:    time.Now().Unix(),
+					FileMode: uint32(req.Mode),
+					Uid:      req.Uid,
+					Gid:      req.Gid,
+				},
+			},
+		}
+
+		glog.V(1).Infof("create: %v", request)
+		if _, err := client.CreateEntry(ctx, request); err != nil {
+			return fmt.Errorf("create file: %v", err)
+		}
+
+		return nil
+	})
+
+	if err == nil {
+		node := &File{Name: req.Name, dir: dir, wfs: dir.wfs}
+		dir.NodeMap[req.Name] = node
+		return node, node, nil
+	}
+
+	return nil, nil, err
+}
+
 func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
 	dir.NodeMapLock.Lock()
 	defer dir.NodeMapLock.Unlock()

-	fmt.Printf("mkdir %+v\n", req)
-
-	node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs}
-	dir.NodeMap[req.Name] = node
-
-	return node, nil
+	err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+		request := &filer_pb.CreateEntryRequest{
+			Directory: dir.Path,
+			Entry: &filer_pb.Entry{
+				Name:        req.Name,
+				IsDirectory: true,
+				Attributes: &filer_pb.FuseAttributes{
+					Mtime:    time.Now().Unix(),
+					FileMode: uint32(req.Mode),
+					Uid:      req.Uid,
+					Gid:      req.Gid,
+				},
+			},
+		}
+
+		glog.V(1).Infof("mkdir: %v", request)
+		if _, err := client.CreateEntry(ctx, request); err != nil {
+			return fmt.Errorf("make dir: %v", err)
+		}
+
+		return nil
+	})
+
+	if err == nil {
+		node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs}
+		dir.NodeMap[req.Name] = node
+		return node, nil
+	}
+
+	return nil, err
 }

 func (dir *Dir) Lookup(ctx context.Context, name string) (node fs.Node, err error) {
@@ -75,13 +135,13 @@ func (dir *Dir) Lookup(ctx context.Context, name string) (node fs.Node, err erro
 		if entry.IsDirectory {
 			node = &Dir{Path: path.Join(dir.Path, name), wfs: dir.wfs}
 		} else {
-			node = &File{FileId: filer.FileId(entry.FileId), Name: name, dir: dir, wfs: dir.wfs}
+			node = &File{Chunks: entry.Chunks, Name: name, dir: dir, wfs: dir.wfs}
 		}

 		dir.NodeMap[name] = node
 		return node, nil
 	}

-	return nil, err
+	return nil, fuse.ENOENT
 }

 func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {

View file

@@ -5,13 +5,14 @@ import (
 	"fmt"

 	"bazil.org/fuse"
-	"github.com/chrislusf/seaweedfs/weed/filer"
 	"bazil.org/fuse/fs"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"path/filepath"
 	"os"
 	"time"
+	"bytes"
+	"github.com/chrislusf/seaweedfs/weed/operation"
 )

 var _ = fs.Node(&File{})
@@ -20,10 +21,11 @@ var _ = fs.Node(&File{})
 var _ = fs.Handle(&File{})
 var _ = fs.HandleReadAller(&File{})
 // var _ = fs.HandleReader(&File{})
+var _ = fs.HandleFlusher(&File{})
 var _ = fs.HandleWriter(&File{})

 type File struct {
-	FileId filer.FileId
+	Chunks []*filer_pb.FileChunk
 	Name   string
 	dir    *Dir
 	wfs    *WFS
@@ -71,10 +73,15 @@ func (file *File) Attr(context context.Context, attr *fuse.Attr) error {

 func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {

+	if len(file.Chunks) == 0 {
+		return
+	}
+
 	err = file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

+		// FIXME: need to either use Read() or implement differently
 		request := &filer_pb.GetFileContentRequest{
-			FileId: string(file.FileId),
+			FileId: file.Chunks[0].FileId,
 		}

 		glog.V(1).Infof("read file content: %v", request)
@@ -91,7 +98,75 @@ func (file *File) ReadAll(ctx context.Context) (content []byte, err error) {
 	return content, err
 }

+func (file *File) Flush(ctx context.Context, req *fuse.FlushRequest) error {
+	// write the file chunks to the filer
+	fmt.Printf("flush file %+v\n", req)
+
+	err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+		request := &filer_pb.AppendFileChunksRequest{
+			Directory: file.dir.Path,
+			Entry: &filer_pb.Entry{
+				Name:   file.Name,
+				Chunks: file.Chunks,
+			},
+		}
+
+		glog.V(1).Infof("append chunks: %v", request)
+		if _, err := client.AppendFileChunks(ctx, request); err != nil {
+			return fmt.Errorf("create file: %v", err)
+		}
+
+		return nil
+	})
+
+	return err
+}
+
 func (file *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
+	// write the request to volume servers
 	fmt.Printf("write file %+v\n", req)

+	var fileId, host string
+
+	if err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+		request := &filer_pb.AssignVolumeRequest{
+			Count:       1,
+			Replication: "000",
+			Collection:  "",
+		}
+
+		glog.V(1).Infof("assign volume: %v", request)
+		resp, err := client.AssignVolume(ctx, request)
+		if err != nil {
+			return err
+		}
+
+		fileId, host = resp.FileId, resp.Url
+
+		return nil
+	}); err != nil {
+		return fmt.Errorf("filer assign volume: %v", err)
+	}
+
+	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
+	bufReader := bytes.NewReader(req.Data)
+	uploadResult, err := operation.Upload(fileUrl, file.Name, bufReader, false, "application/octet-stream", nil, "")
+	if err != nil {
+		return fmt.Errorf("upload data: %v", err)
+	}
+	if uploadResult.Error != "" {
+		return fmt.Errorf("upload result: %v", uploadResult.Error)
+	}
+
+	glog.V(1).Infof("uploaded %s/%s to: %v", file.dir.Path, file.Name, fileUrl)
+
+	file.Chunks = append(file.Chunks, &filer_pb.FileChunk{
+		FileId: fileId,
+		Offset: req.Offset,
+		Size:   uint64(uploadResult.Size),
+	})
+
 	return nil
 }
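
A rough sketch of the write path introduced above, assuming a file id and volume server host already returned by AssignVolume; operation.Upload and the FileChunk fields are the same ones used in the diff, but this helper itself is illustrative and not part of the commit:

package example

import (
	"bytes"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// uploadChunk posts one buffer to the assigned volume server and returns the
// chunk record that Flush would later send to the filer via AppendFileChunks.
func uploadChunk(fileId, host string, offset int64, data []byte) (*filer_pb.FileChunk, error) {
	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)

	uploadResult, err := operation.Upload(fileUrl, "chunk", bytes.NewReader(data), false, "application/octet-stream", nil, "")
	if err != nil {
		return nil, fmt.Errorf("upload data: %v", err)
	}
	if uploadResult.Error != "" {
		return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
	}

	return &filer_pb.FileChunk{
		FileId: fileId,
		Offset: offset,
		Size:   uint64(uploadResult.Size),
	}, nil
}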

View file

@@ -52,7 +52,7 @@ func Assign(server string, r *VolumeAssignRequest) (*AssignResult, error) {
 	}

 	jsonBlob, err := util.Post("http://"+server+"/dir/assign", values)
-	glog.V(2).Info("assign result :", string(jsonBlob))
+	glog.V(2).Infof("assign result from %s : %s", server, string(jsonBlob))
 	if err != nil {
 		return nil, err
 	}

View file

@@ -18,9 +18,18 @@ service SeaweedFiler {
    rpc GetFileContent (GetFileContentRequest) returns (GetFileContentResponse) {
    }

+   rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {
+   }
+
+   rpc AppendFileChunks (AppendFileChunksRequest) returns (AppendFileChunksResponse) {
+   }
+
    rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) {
    }

+   rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
+   }
+
 }

 //////////////////////////////////////////////////
@@ -45,10 +54,16 @@ message ListEntriesResponse {
 message Entry {
     string name = 1;
     bool is_directory = 2;
-    string file_id = 3;
+    repeated FileChunk chunks = 3;
     FuseAttributes attributes = 4;
 }

+message FileChunk {
+    string file_id = 1;
+    int64 offset = 2;
+    uint64 size = 3;
+}
+
 message FuseAttributes {
     uint64 file_size = 1;
     int64 mtime = 2;
@@ -75,6 +90,14 @@ message GetFileContentResponse {
     bytes content = 1;
 }

+message CreateEntryRequest {
+    string directory = 1;
+    Entry entry = 2;
+}
+
+message CreateEntryResponse {
+}
+
 message DeleteEntryRequest {
     string directory = 1;
     string name = 2;
@@ -83,3 +106,23 @@ message DeleteEntryRequest {
 message DeleteEntryResponse {
 }
+
+message AssignVolumeRequest {
+    int32 count = 1;
+    string collection = 2;
+    string replication = 3;
+}
+
+message AssignVolumeResponse {
+    string file_id = 1;
+    string url = 2;
+    string public_url = 3;
+    int32 count = 4;
+}
+
+message AppendFileChunksRequest {
+    string directory = 1;
+    Entry entry = 2;
+}
+
+message AppendFileChunksResponse {
+}
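
A hedged usage sketch of the new RPCs (not part of this commit; the address, directory, and file name are placeholders, and the generated filer_pb client constructor is assumed): reserve a file id, then record the resulting chunk on an entry.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// Placeholder filer gRPC address.
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	ctx := context.Background()

	// Reserve a file id (and the volume server URL to upload the bytes to).
	assigned, err := client.AssignVolume(ctx, &filer_pb.AssignVolumeRequest{
		Count:       1,
		Replication: "000",
	})
	if err != nil {
		log.Fatal(err)
	}

	// After uploading the data to assigned.Url, record the chunk on the entry.
	_, err = client.AppendFileChunks(ctx, &filer_pb.AppendFileChunksRequest{
		Directory: "/some/dir",
		Entry: &filer_pb.Entry{
			Name: "hello.txt",
			Chunks: []*filer_pb.FileChunk{
				{FileId: assigned.FileId, Offset: 0, Size: 11},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}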

View file

@@ -14,13 +14,20 @@ It has these top-level messages:
 	ListEntriesRequest
 	ListEntriesResponse
 	Entry
+	FileChunk
 	FuseAttributes
 	GetFileAttributesRequest
 	GetFileAttributesResponse
 	GetFileContentRequest
 	GetFileContentResponse
+	CreateEntryRequest
+	CreateEntryResponse
 	DeleteEntryRequest
 	DeleteEntryResponse
+	AssignVolumeRequest
+	AssignVolumeResponse
+	AppendFileChunksRequest
+	AppendFileChunksResponse
 */
 package filer_pb
@@ -119,7 +126,7 @@ func (m *ListEntriesResponse) GetEntries() []*Entry {
 type Entry struct {
 	Name        string          `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	IsDirectory bool            `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"`
-	FileId      string          `protobuf:"bytes,3,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
+	Chunks      []*FileChunk    `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"`
 	Attributes  *FuseAttributes `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"`
 }
@@ -142,11 +149,11 @@ func (m *Entry) GetIsDirectory() bool {
 	return false
 }

-func (m *Entry) GetFileId() string {
+func (m *Entry) GetChunks() []*FileChunk {
 	if m != nil {
-		return m.FileId
+		return m.Chunks
 	}
-	return ""
+	return nil
 }

 func (m *Entry) GetAttributes() *FuseAttributes {
@@ -156,6 +163,38 @@ func (m *Entry) GetAttributes() *FuseAttributes {
 	return nil
 }

+type FileChunk struct {
+	FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
+	Offset int64  `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"`
+	Size   uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
+}
+
+func (m *FileChunk) Reset()                    { *m = FileChunk{} }
+func (m *FileChunk) String() string            { return proto.CompactTextString(m) }
+func (*FileChunk) ProtoMessage()               {}
+func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *FileChunk) GetFileId() string {
+	if m != nil {
+		return m.FileId
+	}
+	return ""
+}
+
+func (m *FileChunk) GetOffset() int64 {
+	if m != nil {
+		return m.Offset
+	}
+	return 0
+}
+
+func (m *FileChunk) GetSize() uint64 {
+	if m != nil {
+		return m.Size
+	}
+	return 0
+}
+
 type FuseAttributes struct {
 	FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
 	Mtime    int64  `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"`
@@ -167,7 +206,7 @@ type FuseAttributes struct {
 func (m *FuseAttributes) Reset()                    { *m = FuseAttributes{} }
 func (m *FuseAttributes) String() string            { return proto.CompactTextString(m) }
 func (*FuseAttributes) ProtoMessage()               {}
-func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }

 func (m *FuseAttributes) GetFileSize() uint64 {
 	if m != nil {
@@ -213,7 +252,7 @@ type GetFileAttributesRequest struct {
 func (m *GetFileAttributesRequest) Reset()                    { *m = GetFileAttributesRequest{} }
 func (m *GetFileAttributesRequest) String() string            { return proto.CompactTextString(m) }
 func (*GetFileAttributesRequest) ProtoMessage()               {}
-func (*GetFileAttributesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*GetFileAttributesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }

 func (m *GetFileAttributesRequest) GetName() string {
 	if m != nil {
@@ -243,7 +282,7 @@ type GetFileAttributesResponse struct {
 func (m *GetFileAttributesResponse) Reset()                    { *m = GetFileAttributesResponse{} }
 func (m *GetFileAttributesResponse) String() string            { return proto.CompactTextString(m) }
 func (*GetFileAttributesResponse) ProtoMessage()               {}
-func (*GetFileAttributesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*GetFileAttributesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }

 func (m *GetFileAttributesResponse) GetAttributes() *FuseAttributes {
 	if m != nil {
@@ -259,7 +298,7 @@ type GetFileContentRequest struct {
 func (m *GetFileContentRequest) Reset()                    { *m = GetFileContentRequest{} }
 func (m *GetFileContentRequest) String() string            { return proto.CompactTextString(m) }
 func (*GetFileContentRequest) ProtoMessage()               {}
-func (*GetFileContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*GetFileContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }

 func (m *GetFileContentRequest) GetFileId() string {
 	if m != nil {
@@ -275,7 +314,7 @@ type GetFileContentResponse struct {
 func (m *GetFileContentResponse) Reset()                    { *m = GetFileContentResponse{} }
 func (m *GetFileContentResponse) String() string            { return proto.CompactTextString(m) }
 func (*GetFileContentResponse) ProtoMessage()               {}
-func (*GetFileContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (*GetFileContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }

 func (m *GetFileContentResponse) GetContent() []byte {
 	if m != nil {
@@ -284,6 +323,38 @@ func (m *GetFileContentResponse) GetContent() []byte {
 	return nil
 }

+type CreateEntryRequest struct {
+	Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+	Entry     *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+}
+
+func (m *CreateEntryRequest) Reset()                    { *m = CreateEntryRequest{} }
+func (m *CreateEntryRequest) String() string            { return proto.CompactTextString(m) }
+func (*CreateEntryRequest) ProtoMessage()               {}
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *CreateEntryRequest) GetDirectory() string {
+	if m != nil {
+		return m.Directory
+	}
+	return ""
+}
+
+func (m *CreateEntryRequest) GetEntry() *Entry {
+	if m != nil {
+		return m.Entry
+	}
+	return nil
+}
+
+type CreateEntryResponse struct {
+}
+
+func (m *CreateEntryResponse) Reset()                    { *m = CreateEntryResponse{} }
+func (m *CreateEntryResponse) String() string            { return proto.CompactTextString(m) }
+func (*CreateEntryResponse) ProtoMessage()               {}
+func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
 type DeleteEntryRequest struct {
 	Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
 	Name      string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
@@ -293,7 +364,7 @@ type DeleteEntryRequest struct {
 func (m *DeleteEntryRequest) Reset()                    { *m = DeleteEntryRequest{} }
 func (m *DeleteEntryRequest) String() string            { return proto.CompactTextString(m) }
 func (*DeleteEntryRequest) ProtoMessage()               {}
-func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }

 func (m *DeleteEntryRequest) GetDirectory() string {
 	if m != nil {
@@ -322,7 +393,111 @@ type DeleteEntryResponse struct {
 func (m *DeleteEntryResponse) Reset()                    { *m = DeleteEntryResponse{} }
 func (m *DeleteEntryResponse) String() string            { return proto.CompactTextString(m) }
 func (*DeleteEntryResponse) ProtoMessage()               {}
-func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+type AssignVolumeRequest struct {
+	Count       int32  `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
+	Collection  string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
+	Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
+}
+
+func (m *AssignVolumeRequest) Reset()                    { *m = AssignVolumeRequest{} }
+func (m *AssignVolumeRequest) String() string            { return proto.CompactTextString(m) }
+func (*AssignVolumeRequest) ProtoMessage()               {}
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *AssignVolumeRequest) GetCount() int32 {
+	if m != nil {
+		return m.Count
+	}
+	return 0
+}
+
+func (m *AssignVolumeRequest) GetCollection() string {
+	if m != nil {
+		return m.Collection
+	}
+	return ""
+}
+
+func (m *AssignVolumeRequest) GetReplication() string {
+	if m != nil {
+		return m.Replication
+	}
+	return ""
+}
+
+type AssignVolumeResponse struct {
+	FileId    string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
+	Url       string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
+	PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
+	Count     int32  `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+}
+
+func (m *AssignVolumeResponse) Reset()                    { *m = AssignVolumeResponse{} }
+func (m *AssignVolumeResponse) String() string            { return proto.CompactTextString(m) }
+func (*AssignVolumeResponse) ProtoMessage()               {}
+func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+func (m *AssignVolumeResponse) GetFileId() string {
+	if m != nil {
+		return m.FileId
+	}
+	return ""
+}
+
+func (m *AssignVolumeResponse) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *AssignVolumeResponse) GetPublicUrl() string {
+	if m != nil {
+		return m.PublicUrl
+	}
+	return ""
+}
+
+func (m *AssignVolumeResponse) GetCount() int32 {
+	if m != nil {
+		return m.Count
+	}
+	return 0
+}
+
+type AppendFileChunksRequest struct {
+	Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+	Entry     *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+}
+
+func (m *AppendFileChunksRequest) Reset()                    { *m = AppendFileChunksRequest{} }
+func (m *AppendFileChunksRequest) String() string            { return proto.CompactTextString(m) }
+func (*AppendFileChunksRequest) ProtoMessage()               {}
+func (*AppendFileChunksRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *AppendFileChunksRequest) GetDirectory() string {
+	if m != nil {
+		return m.Directory
+	}
+	return ""
+}
+
+func (m *AppendFileChunksRequest) GetEntry() *Entry {
+	if m != nil {
+		return m.Entry
+	}
+	return nil
+}
+
+type AppendFileChunksResponse struct {
+}
+
+func (m *AppendFileChunksResponse) Reset()                    { *m = AppendFileChunksResponse{} }
+func (m *AppendFileChunksResponse) String() string            { return proto.CompactTextString(m) }
+func (*AppendFileChunksResponse) ProtoMessage()               {}
+func (*AppendFileChunksResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
 func init() {
 	proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
@@ -330,13 +505,20 @@ func init() {
 	proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest")
 	proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse")
 	proto.RegisterType((*Entry)(nil), "filer_pb.Entry")
+	proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk")
 	proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes")
 	proto.RegisterType((*GetFileAttributesRequest)(nil), "filer_pb.GetFileAttributesRequest")
 	proto.RegisterType((*GetFileAttributesResponse)(nil), "filer_pb.GetFileAttributesResponse")
 	proto.RegisterType((*GetFileContentRequest)(nil), "filer_pb.GetFileContentRequest")
 	proto.RegisterType((*GetFileContentResponse)(nil), "filer_pb.GetFileContentResponse")
+	proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest")
+	proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
 	proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
 	proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
+	proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest")
+	proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse")
+	proto.RegisterType((*AppendFileChunksRequest)(nil), "filer_pb.AppendFileChunksRequest")
+	proto.RegisterType((*AppendFileChunksResponse)(nil), "filer_pb.AppendFileChunksResponse")
 }

 // Reference imports to suppress errors if they are not otherwise used.
@@ -354,7 +536,10 @@ type SeaweedFilerClient interface {
 	ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error)
 	GetFileAttributes(ctx context.Context, in *GetFileAttributesRequest, opts ...grpc.CallOption) (*GetFileAttributesResponse, error)
 	GetFileContent(ctx context.Context, in *GetFileContentRequest, opts ...grpc.CallOption) (*GetFileContentResponse, error)
+	CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
+	AppendFileChunks(ctx context.Context, in *AppendFileChunksRequest, opts ...grpc.CallOption) (*AppendFileChunksResponse, error)
 	DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
+	AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
 }

 type seaweedFilerClient struct {
@@ -401,6 +586,24 @@ func (c *seaweedFilerClient) GetFileContent(ctx context.Context, in *GetFileCont
 	return out, nil
 }

+func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) {
+	out := new(CreateEntryResponse)
+	err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *seaweedFilerClient) AppendFileChunks(ctx context.Context, in *AppendFileChunksRequest, opts ...grpc.CallOption) (*AppendFileChunksResponse, error) {
+	out := new(AppendFileChunksResponse)
+	err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendFileChunks", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) {
 	out := new(DeleteEntryResponse)
 	err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...)
@@ -410,6 +613,15 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq
 	return out, nil
 }

+func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) {
+	out := new(AssignVolumeResponse)
+	err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // Server API for SeaweedFiler service

 type SeaweedFilerServer interface {
@@ -417,7 +629,10 @@ type SeaweedFilerServer interface {
 	ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error)
 	GetFileAttributes(context.Context, *GetFileAttributesRequest) (*GetFileAttributesResponse, error)
 	GetFileContent(context.Context, *GetFileContentRequest) (*GetFileContentResponse, error)
+	CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
+	AppendFileChunks(context.Context, *AppendFileChunksRequest) (*AppendFileChunksResponse, error)
 	DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
+	AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
 }

 func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -496,6 +711,42 @@ func _SeaweedFiler_GetFileContent_Handler(srv interface{}, ctx context.Context,
 	return interceptor(ctx, in, info, handler)
 }

+func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateEntryRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SeaweedFilerServer).CreateEntry(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/filer_pb.SeaweedFiler/CreateEntry",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SeaweedFilerServer).CreateEntry(ctx, req.(*CreateEntryRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _SeaweedFiler_AppendFileChunks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AppendFileChunksRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SeaweedFilerServer).AppendFileChunks(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/filer_pb.SeaweedFiler/AppendFileChunks",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SeaweedFilerServer).AppendFileChunks(ctx, req.(*AppendFileChunksRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
 	in := new(DeleteEntryRequest)
 	if err := dec(in); err != nil {
@@ -514,6 +765,24 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec
 	return interceptor(ctx, in, info, handler)
 }

+func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AssignVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SeaweedFilerServer).AssignVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/filer_pb.SeaweedFiler/AssignVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SeaweedFilerServer).AssignVolume(ctx, req.(*AssignVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "filer_pb.SeaweedFiler",
 	HandlerType: (*SeaweedFilerServer)(nil),
@@ -534,10 +803,22 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 			MethodName: "GetFileContent",
 			Handler:    _SeaweedFiler_GetFileContent_Handler,
 		},
+		{
+			MethodName: "CreateEntry",
+			Handler:    _SeaweedFiler_CreateEntry_Handler,
+		},
+		{
+			MethodName: "AppendFileChunks",
+			Handler:    _SeaweedFiler_AppendFileChunks_Handler,
+		},
 		{
 			MethodName: "DeleteEntry",
 			Handler:    _SeaweedFiler_DeleteEntry_Handler,
 		},
+		{
+			MethodName: "AssignVolume",
+			Handler:    _SeaweedFiler_AssignVolume_Handler,
+		},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Metadata: "filer.proto",
@@ -546,39 +827,53 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }

 var fileDescriptor0 = []byte{
-	// 532 bytes of a gzipped FileDescriptorProto
+	// 762 bytes of a gzipped FileDescriptorProto
 	// ... (regenerated gzipped descriptor bytes)
 }

View file

@@ -9,6 +9,8 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"path/filepath"
 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"time"
+	"os"
 )

 func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {
@@ -21,16 +23,11 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
 		return nil, fmt.Errorf("%s not found under %s", req.Name, req.Directory)
 	}

-	var fileId string
-	if !entry.IsDirectory() && len(entry.Chunks) > 0 {
-		fileId = string(entry.Chunks[0].Fid)
-	}
-
 	return &filer_pb.LookupDirectoryEntryResponse{
 		Entry: &filer_pb.Entry{
 			Name:        req.Name,
 			IsDirectory: entry.IsDirectory(),
-			FileId:      fileId,
+			Chunks:      entry.Chunks,
 		},
 	}, nil
 }
@@ -44,16 +41,12 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie
 	resp := &filer_pb.ListEntriesResponse{}
 	for _, entry := range entries {
-		var fileId string
-		if !entry.IsDirectory() && len(entry.Chunks) > 0 {
-			fileId = string(entry.Chunks[0].Fid)
-		}
-
 		glog.V(0).Infof("%s attr=%v size=%d", entry.Name(), entry.Attr, filer2.Chunks(entry.Chunks).TotalSize())
 		resp.Entries = append(resp.Entries, &filer_pb.Entry{
 			Name:        entry.Name(),
 			IsDirectory: entry.IsDirectory(),
-			FileId:      fileId,
+			Chunks:      entry.Chunks,
 			Attributes: &filer_pb.FuseAttributes{
 				FileSize: filer2.Chunks(entry.Chunks).TotalSize(),
 				Mtime:    entry.Mtime.Unix(),
@@ -106,15 +99,63 @@ func (fs *FilerServer) GetFileContent(ctx context.Context, req *filer_pb.GetFile
 	}, nil
 }

+func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
+	err = fs.filer.CreateEntry(&filer2.Entry{
+		FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)),
+		Attr: filer2.Attr{
+			Mtime:  time.Unix(req.Entry.Attributes.Mtime, 0),
+			Crtime: time.Unix(req.Entry.Attributes.Mtime, 0),
+			Mode:   os.FileMode(req.Entry.Attributes.FileMode),
+			Uid:    req.Entry.Attributes.Uid,
+			Gid:    req.Entry.Attributes.Gid,
+		},
+	})
+
+	if err == nil {
+	}
+
+	return &filer_pb.CreateEntryResponse{}, err
+}
+
+func (fs *FilerServer) AppendFileChunks(ctx context.Context, req *filer_pb.AppendFileChunksRequest) (*filer_pb.AppendFileChunksResponse, error) {
+	err := fs.filer.AppendFileChunk(
+		filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)),
+		req.Entry.Chunks,
+	)
+
+	return &filer_pb.AppendFileChunksResponse{}, err
+}
+
 func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
 	entry, err := fs.filer.DeleteEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name)))
 	if err == nil {
 		for _, chunk := range entry.Chunks {
-			fid := string(chunk.Fid)
-			if err = operation.DeleteFile(fs.getMasterNode(), fid, fs.jwt(fid)); err != nil {
-				glog.V(0).Infof("deleting file %s: %v", fid, err)
+			if err = operation.DeleteFile(fs.getMasterNode(), chunk.FileId, fs.jwt(chunk.FileId)); err != nil {
+				glog.V(0).Infof("deleting file %s: %v", chunk.FileId, err)
 			}
 		}
 	}
 	return &filer_pb.DeleteEntryResponse{}, err
 }
+
+func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {
+
+	assignResult, err := operation.Assign(fs.master, &operation.VolumeAssignRequest{
+		Count:       uint64(req.Count),
+		Replication: req.Replication,
+		Collection:  req.Collection,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("assign volume: %v", err)
+	}
+	if assignResult.Error != "" {
+		return nil, fmt.Errorf("assign volume result: %v", assignResult.Error)
+	}
+
+	return &filer_pb.AssignVolumeResponse{
+		FileId:    assignResult.Fid,
+		Count:     int32(assignResult.Count),
+		Url:       assignResult.Url,
+		PublicUrl: assignResult.PublicUrl,
+	}, err
+}
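
The AssignVolume handler above is a thin proxy to the master. A standalone sketch of the underlying operation.Assign call (the master address is a placeholder; the request and result fields are the ones used in the handler) might look like this:

package example

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

// assignOne asks the master for a single file id, mirroring what the filer's
// AssignVolume gRPC handler does with operation.Assign.
func assignOne(master string) (fid, url string, err error) {
	assignResult, err := operation.Assign(master, &operation.VolumeAssignRequest{
		Count:       1,
		Replication: "000",
	})
	if err != nil {
		return "", "", fmt.Errorf("assign volume: %v", err)
	}
	if assignResult.Error != "" {
		return "", "", fmt.Errorf("assign volume result: %v", assignResult.Error)
	}
	return assignResult.Fid, assignResult.Url, nil
}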

View file

@@ -6,6 +6,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"strconv"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )

 func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
@@ -22,9 +23,9 @@ func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
 		Attr: filer2.Attr{
 			Mode: 0660,
 		},
-		Chunks: []filer2.FileChunk{{
-			Fid:  filer2.FileId(fileId),
+		Chunks: []*filer_pb.FileChunk{{
+			FileId: fileId,
 			Size: fileSize,
 		}},
 	}
 	err = fs.filer.CreateEntry(entry)

View file

@@ -98,7 +98,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 	}

 	// FIXME pick the right fid
-	fileId := string(entry.Chunks[0].Fid)
+	fileId := entry.Chunks[0].FileId

 	urlLocation, err := operation.LookupFileId(fs.getMasterNode(), fileId)
 	if err != nil {

View file

@@ -23,6 +23,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/filer2"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )

 type FilerPostResult struct {
@@ -80,7 +81,7 @@ func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Reques
 		glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
 		writeJsonError(w, r, http.StatusInternalServerError, err)
 	} else if found {
-		fileId = string(entry.Chunks[0].Fid)
+		fileId = entry.Chunks[0].FileId
 		urlLocation, err = operation.LookupFileId(fs.getMasterNode(), fileId)
 		if err != nil {
 			glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, err.Error())
@@ -318,7 +319,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
 	// also delete the old fid unless PUT operation
 	if r.Method != "PUT" {
 		if found, entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil && found {
-			oldFid := string(entry.Chunks[0].Fid)
+			oldFid := entry.Chunks[0].FileId
 			operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
 		} else if err != nil && err != filer.ErrNotFound {
 			glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
@@ -331,9 +332,9 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
 		Attr: filer2.Attr{
 			Mode: 0660,
 		},
-		Chunks: []filer2.FileChunk{{
-			Fid:  filer2.FileId(fileId),
+		Chunks: []*filer_pb.FileChunk{{
+			FileId: fileId,
 			Size: uint64(r.ContentLength),
 		}},
 	}
 	if db_err := fs.filer.CreateEntry(entry); db_err != nil {
@@ -415,7 +416,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
 		fileName = path.Base(fileName)
 	}

-	var fileChunks []filer2.FileChunk
+	var fileChunks []*filer_pb.FileChunk

 	totalBytesRead := int64(0)
 	tmpBufferSize := int32(1024 * 1024)
@@ -455,8 +456,8 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
 			// Save to chunk manifest structure
 			fileChunks = append(fileChunks,
-				filer2.FileChunk{
-					Fid:    filer2.FileId(fileId),
+				&filer_pb.FileChunk{
+					FileId: fileId,
 					Offset: chunkOffset,
 					Size:   uint64(chunkBufOffset),
 				},
@@ -483,7 +484,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
 	if r.Method != "PUT" {
 		if found, entry, err := fs.filer.FindEntry(filer2.FullPath(path)); found && err == nil {
 			for _, chunk := range entry.Chunks {
-				oldFid := string(chunk.Fid)
+				oldFid := chunk.FileId
 				operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
 			}
 		} else if err != nil {
@@ -535,7 +536,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 	if entry != nil && !entry.IsDirectory() {
 		for _, chunk := range entry.Chunks {
-			oldFid := string(chunk.Fid)
+			oldFid := chunk.FileId
 			operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
 		}
 	}
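
To make the auto-chunking bookkeeping above concrete, a tiny hypothetical helper (not in the commit) showing how running offsets line up with []*filer_pb.FileChunk; real uploads and file id assignment are elided:

package example

import "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"

// chunkPlan splits a total length into fixed-size chunk records with running
// offsets, the same shape doAutoChunk builds while streaming an upload.
// The file ids would come from successive volume assignments; here they are given.
func chunkPlan(fileIds []string, totalSize, chunkSize int64) (chunks []*filer_pb.FileChunk) {
	var offset int64
	for _, fid := range fileIds {
		if offset >= totalSize {
			break
		}
		size := chunkSize
		if remaining := totalSize - offset; remaining < size {
			size = remaining
		}
		chunks = append(chunks, &filer_pb.FileChunk{
			FileId: fid,
			Offset: offset,
			Size:   uint64(size),
		})
		offset += size
	}
	return chunks
}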