mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00

file handler directly read from volume servers

This mostly works fine now! Next: need to cache files to local disk.

This commit is contained in:
parent 00d0274fd7
commit d773e11c7a
@@ -52,7 +52,14 @@ func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []
 	return
 }
 
-func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*filer_pb.FileChunk) {
+type ChunkView struct {
+	FileId      string
+	Offset      int64
+	Size        uint64
+	LogicOffset int64
+}
+
+func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
 
 	visibles := nonOverlappingVisibleIntervals(chunks)
 
@@ -60,10 +67,11 @@ func ReadFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views
 
 	for _, chunk := range visibles {
 		if chunk.start <= offset && offset < chunk.stop {
-			views = append(views, &filer_pb.FileChunk{
+			views = append(views, &ChunkView{
 				FileId: chunk.fileId,
 				Offset: offset - chunk.start, // offset is the data starting location in this file id
 				Size:   uint64(min(chunk.stop, stop) - offset),
+				LogicOffset: offset,
 			})
 			offset = min(chunk.stop, stop)
 		}
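For orientation, here is a small usage sketch of the new ChunkView mapping. It is not part of the commit: it assumes the filer2 and filer_pb packages exactly as they appear in this diff, and the Mtime values are made up (they only matter when chunks overlap). It mirrors test case 0 in the test diff below: a 250-byte read across three visible chunks yields one ChunkView per chunk, where Offset/Size say what to fetch from each file id and LogicOffset says where those bytes land in the read buffer.

package main

// Illustrative sketch only (not from this commit): map a logical read range
// onto per-chunk fetches using the new ChunkView type.
import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// Three non-overlapping chunks covering [0,250); Mtime values are placeholders.
	chunks := []*filer_pb.FileChunk{
		{FileId: "abc", Offset: 0, Size: 100, Mtime: 123},
		{FileId: "asdf", Offset: 100, Size: 100, Mtime: 134},
		{FileId: "fsad", Offset: 200, Size: 50, Mtime: 145},
	}

	// Read 250 bytes starting at file offset 0.
	views := filer2.ReadFromChunks(chunks, 0, 250)
	for _, v := range views {
		// FileId/Offset/Size: what to fetch from the volume server.
		// LogicOffset: where those bytes belong in the caller's buffer.
		fmt.Printf("fetch %s [%d,%d) -> buffer offset %d\n",
			v.FileId, v.Offset, v.Offset+int64(v.Size), v.LogicOffset)
	}
}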
@@ -151,7 +151,7 @@ func TestChunksReading(t *testing.T) {
 		Chunks   []*filer_pb.FileChunk
 		Offset   int64
 		Size     int
-		Expected []*filer_pb.FileChunk
+		Expected []*ChunkView
 	}{
 		// case 0: normal
 		{
@@ -162,10 +162,10 @@ func TestChunksReading(t *testing.T) {
 			},
 			Offset: 0,
 			Size:   250,
-			Expected: []*filer_pb.FileChunk{
-				{Offset: 0, Size: 100, FileId: "abc"},
-				{Offset: 0, Size: 100, FileId: "asdf"},
-				{Offset: 0, Size: 50, FileId: "fsad"},
+			Expected: []*ChunkView{
+				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
+				{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
+				{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200},
 			},
 		},
 		// case 1: updates overwrite full chunks
@@ -176,8 +176,8 @@ func TestChunksReading(t *testing.T) {
 			},
 			Offset: 50,
 			Size:   100,
-			Expected: []*filer_pb.FileChunk{
-				{Offset: 50, Size: 100, FileId: "asdf"},
+			Expected: []*ChunkView{
+				{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50},
 			},
 		},
 		// case 2: updates overwrite part of previous chunks
@@ -188,9 +188,9 @@ func TestChunksReading(t *testing.T) {
 			},
 			Offset: 25,
 			Size:   50,
-			Expected: []*filer_pb.FileChunk{
-				{Offset: 25, Size: 25, FileId: "asdf"},
-				{Offset: 0, Size: 25, FileId: "abc"},
+			Expected: []*ChunkView{
+				{Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
+				{Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
 			},
 		},
 		// case 3: updates overwrite full chunks
@@ -202,9 +202,9 @@ func TestChunksReading(t *testing.T) {
 			},
 			Offset: 0,
 			Size:   200,
-			Expected: []*filer_pb.FileChunk{
-				{Offset: 0, Size: 50, FileId: "asdf"},
-				{Offset: 0, Size: 150, FileId: "xxxx"},
+			Expected: []*ChunkView{
+				{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0},
+				{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50},
 			},
 		},
 		// case 4: updates far away from prev chunks
@@ -216,8 +216,8 @@ func TestChunksReading(t *testing.T) {
 			},
 			Offset: 0,
 			Size:   400,
-			Expected: []*filer_pb.FileChunk{
-				{Offset: 0, Size: 200, FileId: "asdf"},
+			Expected: []*ChunkView{
+				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
 				// {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
 			},
 		},
@@ -231,9 +231,9 @@ func TestChunksReading(t *testing.T) {
 			},
 			Offset: 0,
 			Size:   220,
-			Expected: []*filer_pb.FileChunk{
-				{Offset: 0, Size: 200, FileId: "asdf"},
-				{Offset: 0, Size: 20, FileId: "abc"},
+			Expected: []*ChunkView{
+				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
+				{Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
 			},
 		},
 		// case 6: same updates
@@ -245,8 +245,8 @@ func TestChunksReading(t *testing.T) {
 			},
 			Offset: 0,
 			Size:   100,
-			Expected: []*filer_pb.FileChunk{
-				{Offset: 0, Size: 100, FileId: "abc"},
+			Expected: []*ChunkView{
+				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
 			},
 		},
 	}
@@ -269,6 +269,10 @@ func TestChunksReading(t *testing.T) {
 				t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
 					i, x, chunk.FileId, testcase.Expected[x].FileId)
 			}
+			if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
+				t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
+					i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
+			}
 		}
 		if len(chunks) != len(testcase.Expected) {
 			t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
@@ -11,6 +11,9 @@ import (
 	"bytes"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"time"
+	"strings"
+	"sync"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 type FileHandle struct {
@@ -33,46 +36,94 @@ type FileHandle struct {
 }
 
 var _ = fs.Handle(&FileHandle{})
-var _ = fs.HandleReadAller(&FileHandle{})
-// var _ = fs.HandleReader(&FileHandle{})
+// var _ = fs.HandleReadAller(&FileHandle{})
+var _ = fs.HandleReader(&FileHandle{})
 var _ = fs.HandleFlusher(&FileHandle{})
 var _ = fs.HandleWriter(&FileHandle{})
 var _ = fs.HandleReleaser(&FileHandle{})
 
-func (fh *FileHandle) ReadAll(ctx context.Context) (content []byte, err error) {
+func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
 
-	glog.V(3).Infof("%v/%v read all fh ", fh.dirPath, fh.name)
+	glog.V(3).Infof("%v/%v read fh: [%d,%d)", fh.dirPath, fh.name, req.Offset, req.Offset+int64(req.Size))
 
 	if len(fh.Chunks) == 0 {
 		glog.V(0).Infof("empty fh %v/%v", fh.dirPath, fh.name)
-		return
+		return fmt.Errorf("empty file %v/%v", fh.dirPath, fh.name)
 	}
 
-	err = fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	buff := make([]byte, req.Size)
 
-		// FIXME: need to either use Read() or implement differently
-		chunks, _ := filer2.CompactFileChunks(fh.Chunks)
-		glog.V(1).Infof("read fh %v/%v %d/%d chunks", fh.dirPath, fh.name, len(chunks), len(fh.Chunks))
-		for i, chunk := range chunks {
-			glog.V(1).Infof("read fh %v/%v %d/%d chunk %s [%d,%d)", fh.dirPath, fh.name, i, len(chunks), chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size))
-		}
-		request := &filer_pb.GetFileContentRequest{
-			FileId: chunks[0].FileId,
-		}
+	chunkViews := filer2.ReadFromChunks(fh.Chunks, req.Offset, req.Size)
 
-		glog.V(1).Infof("read fh content %d chunk %s [%d,%d): %v", len(chunks),
-			chunks[0].FileId, chunks[0].Offset, chunks[0].Offset+int64(chunks[0].Size), request)
-		resp, err := client.GetFileContent(ctx, request)
+	var vids []string
+	for _, chunkView := range chunkViews {
+		vids = append(vids, volumeId(chunkView.FileId))
+	}
+
+	vid2Locations := make(map[string]*filer_pb.Locations)
+
+	err := fh.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+		glog.V(4).Infof("read fh lookup volume id locations: %v", vids)
+		resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
+			VolumeIds: vids,
+		})
 		if err != nil {
 			return err
 		}
 
-		content = resp.Content
+		vid2Locations = resp.LocationsMap
 
 		return nil
 	})
 
-	return content, err
+	if err != nil {
+		glog.V(3).Infof("%v/%v read fh lookup volume ids: %v", fh.dirPath, fh.name, err)
+		return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err)
+	}
+
+	var totalRead int64
+	var wg sync.WaitGroup
+	for _, chunkView := range chunkViews {
+		wg.Add(1)
+		go func(chunkView *filer2.ChunkView) {
+			defer wg.Done()
+
+			glog.V(3).Infof("read fh reading chunk: %+v", chunkView)
+
+			locations := vid2Locations[volumeId(chunkView.FileId)]
+			if locations == nil || len(locations.Locations) == 0 {
+				glog.V(0).Infof("failed to locate %s", chunkView.FileId)
+				err = fmt.Errorf("failed to locate %s", chunkView.FileId)
+				return
+			}
+
+			var n int64
+			n, err = util.ReadUrl(
+				fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId),
+				chunkView.Offset,
+				int(chunkView.Size),
+				buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)])
+
+			if err != nil {
+				glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.dirPath, fh.name, locations.Locations[0].Url, chunkView.FileId, n, err)
+				err = fmt.Errorf("failed to read http://%s/%s: %v",
+					locations.Locations[0].Url, chunkView.FileId, err)
+				return
+			}
+
+			glog.V(3).Infof("read fh read %d bytes: %+v", n, chunkView)
+			totalRead += n
+
+		}(chunkView)
+	}
+	wg.Wait()
+
+	resp.Data = buff[:totalRead]
+
+	return err
 }
 
 // Write to the file handle
@@ -179,3 +230,11 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
 
 	return err
 }
+
+func volumeId(fileId string) string {
+	lastCommaIndex := strings.LastIndex(fileId, ",")
+	if lastCommaIndex > 0 {
+		return fileId[:lastCommaIndex]
+	}
+	return fileId
+}
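The new Read path above is a scatter-gather: ReadFromChunks positions each chunk at LogicOffset-req.Offset inside one response buffer, a goroutine per chunk fetches its byte range from a volume server, and a WaitGroup joins the fetches before resp.Data is set. Below is a minimal, self-contained sketch of that pattern with the HTTP fetches faked out; unlike the committed code it collects per-chunk byte counts and joins them after Wait instead of sharing a single totalRead/err across goroutines, purely to keep the sketch race-free.

package main

// Minimal sketch (not from the commit) of the scatter-gather pattern used by
// FileHandle.Read: each chunk is written concurrently into a disjoint slice of
// one buffer, positioned by its logical offset; a WaitGroup joins the workers.
import (
	"fmt"
	"sync"
)

type chunk struct {
	logicOffset int64  // where the chunk's bytes belong in the file
	data        []byte // stands in for the bytes a volume server would return
}

func main() {
	readOffset := int64(0)
	chunks := []chunk{
		{0, []byte("hello ")},
		{6, []byte("volume ")},
		{13, []byte("servers")},
	}

	buff := make([]byte, 20)
	results := make([]int64, len(chunks)) // per-chunk byte counts, no shared counter

	var wg sync.WaitGroup
	for i, c := range chunks {
		wg.Add(1)
		go func(i int, c chunk) {
			defer wg.Done()
			start := c.logicOffset - readOffset
			n := copy(buff[start:start+int64(len(c.data))], c.data)
			results[i] = int64(n)
		}(i, c)
	}
	wg.Wait()

	var total int64
	for _, n := range results {
		total += n
	}
	fmt.Printf("%q (%d bytes)\n", buff[:total], total)
}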
@@ -15,9 +15,6 @@ service SeaweedFiler {
     rpc GetEntryAttributes (GetEntryAttributesRequest) returns (GetEntryAttributesResponse) {
     }
 
-    rpc GetFileContent (GetFileContentRequest) returns (GetFileContentResponse) {
-    }
-
     rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) {
     }
 
@@ -30,6 +27,9 @@ service SeaweedFiler {
     rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
     }
 
+    rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) {
+    }
+
 }
 
 //////////////////////////////////////////////////
@@ -100,6 +100,13 @@ message CreateEntryRequest {
 message CreateEntryResponse {
 }
 
+message UpdateEntryRequest {
+    string directory = 1;
+    Entry entry = 2;
+}
+message UpdateEntryResponse {
+}
+
 message DeleteEntryRequest {
     string directory = 1;
     string name = 2;
@@ -122,9 +129,18 @@ message AssignVolumeResponse {
     int32 count = 4;
 }
 
-message UpdateEntryRequest {
-    string directory = 1;
-    Entry entry = 2;
+message LookupVolumeRequest {
+    repeated string volume_ids = 1;
 }
-message UpdateEntryResponse {
+
+message Locations {
+    repeated Location locations = 1;
+}
+
+message Location {
+    string url = 1;
+    string public_url = 2;
+}
+
+message LookupVolumeResponse {
+    map<string, Locations> locations_map = 1;
 }
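For context, this is roughly how a gRPC client would call the new LookupVolume RPC. It is an illustration, not part of the commit: the filer address, port, and volume id are placeholders, and NewSeaweedFilerClient is the standard protoc-generated constructor for the SeaweedFilerClient interface shown further down in the generated code.

package main

// Illustrative client call for the new LookupVolume RPC (not from the commit).
import (
	"context"
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder filer gRPC address.
	conn, err := grpc.Dial("localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
		VolumeIds: []string{"3"}, // placeholder volume id
	})
	if err != nil {
		log.Fatal(err)
	}
	for vid, locs := range resp.LocationsMap {
		for _, loc := range locs.Locations {
			fmt.Printf("volume %s -> %s (public %s)\n", vid, loc.Url, loc.PublicUrl)
		}
	}
}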
@@ -22,12 +22,16 @@ It has these top-level messages:
 	GetFileContentResponse
 	CreateEntryRequest
 	CreateEntryResponse
+	UpdateEntryRequest
+	UpdateEntryResponse
 	DeleteEntryRequest
 	DeleteEntryResponse
 	AssignVolumeRequest
 	AssignVolumeResponse
-	UpdateEntryRequest
-	UpdateEntryResponse
+	LookupVolumeRequest
+	Locations
+	Location
+	LookupVolumeResponse
 */
 package filer_pb
@@ -371,6 +375,38 @@ func (m *CreateEntryResponse) String() string { return proto.CompactT
 func (*CreateEntryResponse) ProtoMessage()    {}
 func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
 
+type UpdateEntryRequest struct {
+	Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
+	Entry     *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+}
+
+func (m *UpdateEntryRequest) Reset()                    { *m = UpdateEntryRequest{} }
+func (m *UpdateEntryRequest) String() string            { return proto.CompactTextString(m) }
+func (*UpdateEntryRequest) ProtoMessage()               {}
+func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *UpdateEntryRequest) GetDirectory() string {
+	if m != nil {
+		return m.Directory
+	}
+	return ""
+}
+
+func (m *UpdateEntryRequest) GetEntry() *Entry {
+	if m != nil {
+		return m.Entry
+	}
+	return nil
+}
+
+type UpdateEntryResponse struct {
+}
+
+func (m *UpdateEntryResponse) Reset()                    { *m = UpdateEntryResponse{} }
+func (m *UpdateEntryResponse) String() string            { return proto.CompactTextString(m) }
+func (*UpdateEntryResponse) ProtoMessage()               {}
+func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
 type DeleteEntryRequest struct {
 	Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
 	Name      string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
@@ -380,7 +416,7 @@ type DeleteEntryRequest struct {
 func (m *DeleteEntryRequest) Reset()                    { *m = DeleteEntryRequest{} }
 func (m *DeleteEntryRequest) String() string            { return proto.CompactTextString(m) }
 func (*DeleteEntryRequest) ProtoMessage()               {}
-func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
 
 func (m *DeleteEntryRequest) GetDirectory() string {
 	if m != nil {
@@ -409,7 +445,7 @@ type DeleteEntryResponse struct {
 func (m *DeleteEntryResponse) Reset()                    { *m = DeleteEntryResponse{} }
 func (m *DeleteEntryResponse) String() string            { return proto.CompactTextString(m) }
 func (*DeleteEntryResponse) ProtoMessage()               {}
-func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
 
 type AssignVolumeRequest struct {
 	Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
@@ -420,7 +456,7 @@ type AssignVolumeRequest struct {
 func (m *AssignVolumeRequest) Reset()                    { *m = AssignVolumeRequest{} }
 func (m *AssignVolumeRequest) String() string            { return proto.CompactTextString(m) }
 func (*AssignVolumeRequest) ProtoMessage()               {}
-func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
 
 func (m *AssignVolumeRequest) GetCount() int32 {
 	if m != nil {
@@ -453,7 +489,7 @@ type AssignVolumeResponse struct {
 func (m *AssignVolumeResponse) Reset()                    { *m = AssignVolumeResponse{} }
 func (m *AssignVolumeResponse) String() string            { return proto.CompactTextString(m) }
 func (*AssignVolumeResponse) ProtoMessage()               {}
-func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
 
 func (m *AssignVolumeResponse) GetFileId() string {
 	if m != nil {
@@ -483,37 +519,77 @@ func (m *AssignVolumeResponse) GetCount() int32 {
 	return 0
 }
 
-type UpdateEntryRequest struct {
-	Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
-	Entry     *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
+type LookupVolumeRequest struct {
+	VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"`
 }
 
-func (m *UpdateEntryRequest) Reset()                    { *m = UpdateEntryRequest{} }
-func (m *UpdateEntryRequest) String() string            { return proto.CompactTextString(m) }
-func (*UpdateEntryRequest) ProtoMessage()               {}
-func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (m *LookupVolumeRequest) Reset()                    { *m = LookupVolumeRequest{} }
+func (m *LookupVolumeRequest) String() string            { return proto.CompactTextString(m) }
+func (*LookupVolumeRequest) ProtoMessage()               {}
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
 
-func (m *UpdateEntryRequest) GetDirectory() string {
+func (m *LookupVolumeRequest) GetVolumeIds() []string {
 	if m != nil {
-		return m.Directory
-	}
-	return ""
-}
-
-func (m *UpdateEntryRequest) GetEntry() *Entry {
-	if m != nil {
-		return m.Entry
+		return m.VolumeIds
 	}
 	return nil
 }
 
-type UpdateEntryResponse struct {
+type Locations struct {
+	Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"`
 }
 
-func (m *UpdateEntryResponse) Reset()                    { *m = UpdateEntryResponse{} }
-func (m *UpdateEntryResponse) String() string            { return proto.CompactTextString(m) }
-func (*UpdateEntryResponse) ProtoMessage()               {}
-func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (m *Locations) Reset()                    { *m = Locations{} }
+func (m *Locations) String() string            { return proto.CompactTextString(m) }
+func (*Locations) ProtoMessage()               {}
+func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *Locations) GetLocations() []*Location {
+	if m != nil {
+		return m.Locations
+	}
+	return nil
+}
+
+type Location struct {
+	Url       string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
+	PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"`
+}
+
+func (m *Location) Reset()                    { *m = Location{} }
+func (m *Location) String() string            { return proto.CompactTextString(m) }
+func (*Location) ProtoMessage()               {}
+func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+func (m *Location) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *Location) GetPublicUrl() string {
+	if m != nil {
+		return m.PublicUrl
+	}
+	return ""
+}
+
+type LookupVolumeResponse struct {
+	LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *LookupVolumeResponse) Reset()                    { *m = LookupVolumeResponse{} }
+func (m *LookupVolumeResponse) String() string            { return proto.CompactTextString(m) }
+func (*LookupVolumeResponse) ProtoMessage()               {}
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations {
+	if m != nil {
+		return m.LocationsMap
+	}
+	return nil
+}
+
 func init() {
 	proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest")
@@ -529,12 +605,16 @@ func init() {
 	proto.RegisterType((*GetFileContentResponse)(nil), "filer_pb.GetFileContentResponse")
 	proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest")
 	proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse")
+	proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
+	proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
 	proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest")
 	proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse")
 	proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest")
 	proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse")
-	proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest")
-	proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse")
+	proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest")
+	proto.RegisterType((*Locations)(nil), "filer_pb.Locations")
+	proto.RegisterType((*Location)(nil), "filer_pb.Location")
+	proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse")
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -551,11 +631,11 @@ type SeaweedFilerClient interface {
 	LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error)
 	ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error)
 	GetEntryAttributes(ctx context.Context, in *GetEntryAttributesRequest, opts ...grpc.CallOption) (*GetEntryAttributesResponse, error)
-	GetFileContent(ctx context.Context, in *GetFileContentRequest, opts ...grpc.CallOption) (*GetFileContentResponse, error)
 	CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error)
 	UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error)
 	DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error)
 	AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error)
+	LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error)
 }
 
 type seaweedFilerClient struct {
@@ -593,15 +673,6 @@ func (c *seaweedFilerClient) GetEntryAttributes(ctx context.Context, in *GetEntr
 	return out, nil
 }
 
-func (c *seaweedFilerClient) GetFileContent(ctx context.Context, in *GetFileContentRequest, opts ...grpc.CallOption) (*GetFileContentResponse, error) {
-	out := new(GetFileContentResponse)
-	err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFileContent", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
 func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) {
 	out := new(CreateEntryResponse)
 	err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...)
@@ -638,17 +709,26 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR
 	return out, nil
 }
 
+func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) {
+	out := new(LookupVolumeResponse)
+	err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // Server API for SeaweedFiler service
 
 type SeaweedFilerServer interface {
 	LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error)
 	ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error)
 	GetEntryAttributes(context.Context, *GetEntryAttributesRequest) (*GetEntryAttributesResponse, error)
-	GetFileContent(context.Context, *GetFileContentRequest) (*GetFileContentResponse, error)
 	CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error)
 	UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error)
 	DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error)
 	AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error)
+	LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error)
 }
 
 func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@@ -709,24 +789,6 @@ func _SeaweedFiler_GetEntryAttributes_Handler(srv interface{}, ctx context.Conte
 	return interceptor(ctx, in, info, handler)
 }
 
-func _SeaweedFiler_GetFileContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(GetFileContentRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(SeaweedFilerServer).GetFileContent(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/filer_pb.SeaweedFiler/GetFileContent",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(SeaweedFilerServer).GetFileContent(ctx, req.(*GetFileContentRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
 func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
 	in := new(CreateEntryRequest)
 	if err := dec(in); err != nil {
@@ -799,6 +861,24 @@ func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, de
 	return interceptor(ctx, in, info, handler)
 }
 
+func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LookupVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SeaweedFilerServer).LookupVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/filer_pb.SeaweedFiler/LookupVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SeaweedFilerServer).LookupVolume(ctx, req.(*LookupVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 	ServiceName: "filer_pb.SeaweedFiler",
 	HandlerType: (*SeaweedFilerServer)(nil),
@@ -815,10 +895,6 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 			MethodName: "GetEntryAttributes",
 			Handler:    _SeaweedFiler_GetEntryAttributes_Handler,
 		},
-		{
-			MethodName: "GetFileContent",
-			Handler:    _SeaweedFiler_GetFileContent_Handler,
-		},
 		{
 			MethodName: "CreateEntry",
 			Handler:    _SeaweedFiler_CreateEntry_Handler,
@@ -835,6 +911,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 			MethodName: "AssignVolume",
 			Handler:    _SeaweedFiler_AssignVolume_Handler,
 		},
+		{
+			MethodName: "LookupVolume",
+			Handler:    _SeaweedFiler_LookupVolume_Handler,
+		},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Metadata: "filer.proto",
@@ -843,53 +923,61 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
 
 var fileDescriptor0 = []byte{
-	// 763 bytes of a gzipped FileDescriptorProto
+	// 890 bytes of a gzipped FileDescriptorProto
 	// (regenerated gzipped descriptor bytes; raw hex omitted here)
 }
@@ -3,7 +3,6 @@ package weed_server
 import (
 	"context"
 	"github.com/chrislusf/seaweedfs/weed/operation"
-	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer2"
@@ -88,20 +87,31 @@ func (fs *FilerServer) GetEntryAttributes(ctx context.Context, req *filer_pb.Get
 	}, nil
 }
 
-func (fs *FilerServer) GetFileContent(ctx context.Context, req *filer_pb.GetFileContentRequest) (*filer_pb.GetFileContentResponse, error) {
+func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) {
 
-	server, err := operation.LookupFileId(fs.getMasterNode(), req.FileId)
-	if err != nil {
-		return nil, err
-	}
-	content, err := util.Get(server)
+	lookupResult, err := operation.LookupVolumeIds(fs.getMasterNode(), req.VolumeIds)
 	if err != nil {
 		return nil, err
 	}
 
-	return &filer_pb.GetFileContentResponse{
-		Content: content,
-	}, nil
+	resp := &filer_pb.LookupVolumeResponse{
+		LocationsMap: make(map[string]*filer_pb.Locations),
+	}
+
+	for vid, locations := range lookupResult {
+		var locs []*filer_pb.Location
+		for _, loc := range locations.Locations {
+			locs = append(locs, &filer_pb.Location{
+				Url:       loc.Url,
+				PublicUrl: loc.PublicUrl,
+			})
+		}
+		resp.LocationsMap[vid] = &filer_pb.Locations{
+			Locations: locs,
+		}
+	}
+
+	return resp, nil
 }
 
 func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
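The handler above only reshapes the master's lookup result into the new protobuf types; the caller then picks a replica and fetches the chunk directly. The sketch below shows that consumption step (illustrative only, not from the commit; the volume id, file id, and URLs are placeholders, and volumeIdOf simply mirrors the volumeId helper added in the FUSE file handle).

package main

// Illustrative only: given a LookupVolume reply shaped like the handler above
// produces, pick a replica and build the URL that the read path fetches.
import (
	"fmt"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// volumeIdOf mirrors the volumeId helper from the file-handle change:
// "3,01637037d6" -> "3".
func volumeIdOf(fileId string) string {
	if i := strings.LastIndex(fileId, ","); i > 0 {
		return fileId[:i]
	}
	return fileId
}

func main() {
	// Shape of a LookupVolume reply; values are placeholders.
	resp := &filer_pb.LookupVolumeResponse{
		LocationsMap: map[string]*filer_pb.Locations{
			"3": {Locations: []*filer_pb.Location{{Url: "127.0.0.1:8080", PublicUrl: "127.0.0.1:8080"}}},
		},
	}

	fileId := "3,01637037d6" // placeholder chunk file id
	locs := resp.LocationsMap[volumeIdOf(fileId)]
	if locs == nil || len(locs.Locations) == 0 {
		fmt.Println("no location for", fileId)
		return
	}
	fmt.Printf("fetch http://%s/%s\n", locs.Locations[0].Url, fileId)
}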
@@ -183,3 +183,33 @@ func NormalizeUrl(url string) string {
 	}
 	return "http://" + url
 }
+
+func ReadUrl(fileUrl string, offset int64, size int, buf []byte) (n int64, e error) {
+
+	req, _ := http.NewRequest("GET", fileUrl, nil)
+	req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)))
+
+	r, err := client.Do(req)
+	if err != nil {
+		return 0, err
+	}
+	defer r.Body.Close()
+	if r.StatusCode >= 400 {
+		return 0, fmt.Errorf("%s: %s", fileUrl, r.Status)
+	}
+
+	var i, m int
+	for {
+		m, err = r.Body.Read(buf[i:cap(buf)])
+		i += m
+		n += int64(m)
+		if err == io.EOF {
+			return n, nil
+		}
+		if e != nil {
+			return n, e
+		}
+	}
+
+}
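A possible caller of the new ReadUrl helper, illustrative only: the volume-server URL, file id, offset, and size are placeholders. This is essentially what FileHandle.Read does per chunk: issue a ranged GET against http://<volume server>/<file id> and fill a caller-supplied buffer slice.

package main

// Illustrative use of util.ReadUrl (not from the commit); values are placeholders.
import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	// Buffer sized to the requested range, mirroring the FUSE read path above.
	buf := make([]byte, 1024)

	// Read 1024 bytes starting at offset 4096 of the blob behind this file id.
	n, err := util.ReadUrl("http://127.0.0.1:8080/3,01637037d6", 4096, 1024, buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", n)
}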