consistent 64bit size

Chris Lu 2020-03-22 01:37:46 -07:00
parent 2bdd936fb6
commit ae2ee379c0
11 changed files with 14 additions and 14 deletions
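
The diff below makes chunk sizes consistently int64 instead of Go's platform-dependent int, which is only 32 bits wide on 32-bit builds. A minimal sketch of the truncation that a 32-bit int invites (illustrative values, not from this commit):

    package main

    import "fmt"

    func main() {
        var fileSize int64 = 5 << 30 // a 5 GiB file

        // Converting to a 32-bit integer keeps only the low 32 bits,
        // so the size silently wraps to 1 GiB; int behaves the same
        // way on any platform where int is 32 bits wide.
        fmt.Println(fileSize)        // 5368709120
        fmt.Println(int32(fileSize)) // 1073741824
    }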


@@ -75,7 +75,7 @@ type ChunkView struct {
     isGzipped bool
 }
-func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) {
+func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
     visibles := NonOverlappingVisibleIntervals(chunks)
@@ -83,9 +83,9 @@ func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views
 }
-func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) {
+func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
-    stop := offset + int64(size)
+    stop := offset + size
     for _, chunk := range visibles {


@@ -218,7 +218,7 @@ func TestChunksReading(t *testing.T) {
     testcases := []struct {
         Chunks   []*filer_pb.FileChunk
         Offset   int64
-        Size     int
+        Size     int64
         Expected []*ChunkView
     }{
         // case 0: normal


@@ -14,7 +14,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/wdclient"
 )
-func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error {
+func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
     chunkViews := ViewFromChunks(chunks, offset, size)
@@ -61,7 +61,7 @@ var _ = io.ReadSeeker(&ChunkStreamReader{})
 func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
-    chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
+    chunkViews := ViewFromChunks(chunks, 0, math.MaxInt64)
     return &ChunkStreamReader{
         chunkViews: chunkViews,
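
math.MaxInt32 here was the "whole file" sentinel forced by the old int parameter; with an int64 size it becomes math.MaxInt64, lifting the 2 GiB cap. One caveat worth noting, sketched below using Go's defined wrap-around for signed arithmetic: the new sentinel only composes safely with offset 0, which is what this and the later call sites pass, since stop := offset + size would wrap for any positive offset.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        size := int64(math.MaxInt64) // the "read everything" sentinel

        var offset int64
        fmt.Println(offset + size) // 9223372036854775807: safe at offset 0

        offset = 1
        fmt.Println(offset + size) // wraps to -9223372036854775808
    }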


@@ -90,7 +90,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
         fh.f.reader = nil
     }
     if fh.f.reader == nil {
-        chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
+        chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
         fh.f.reader = filer2.NewChunkStreamReaderFromClient(fh.f.wfs, chunkViews)
     }


@@ -96,7 +96,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error {
     }
     totalSize := filer2.TotalSize(entry.Chunks)
-    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
     // Create a URL that references a to-be-created blob in your
     // Azure Storage account's container.


@@ -85,7 +85,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
     }
     totalSize := filer2.TotalSize(entry.Chunks)
-    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
     bucket, err := g.client.Bucket(context.Background(), g.bucket)
     if err != nil {


@@ -90,7 +90,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error {
     }
     totalSize := filer2.TotalSize(entry.Chunks)
-    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
     wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background())


@@ -103,7 +103,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error {
     }
     totalSize := filer2.TotalSize(entry.Chunks)
-    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize))
+    chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize))
     parts := make([]*s3.CompletedPart, len(chunkViews))
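
The four replication sinks (Azure, B2, GCS, S3) all make the same one-line change: the entry's total size is widened to int64 rather than narrowed to int. A hedged sketch of why the direction of the conversion matters, assuming filer2.TotalSize returns an unsigned 64-bit sum of chunk sizes:

    package main

    import "fmt"

    func main() {
        var totalSize uint64 = 3 << 30 // stand-in for filer2.TotalSize(entry.Chunks)

        // Old: int(totalSize). Where int is 32 bits this wraps negative,
        // as int32 shows. New: int64(totalSize) is exact for any object
        // smaller than 8 EiB.
        fmt.Println(int32(totalSize)) // -1073741824
        fmt.Println(int64(totalSize)) // 3221225472
    }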


@@ -102,7 +102,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
     }
     processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
-        return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, int(size))
+        return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
     })
 }


@@ -499,7 +499,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
         f.reader = nil
     }
     if f.reader == nil {
-        chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
+        chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt64)
         f.reader = filer2.NewChunkStreamReaderFromClient(f.fs, chunkViews)
     }


@@ -54,7 +54,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
         return err
     }
-    return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32)
+    return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 })