Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
Fix FUSE server buffer leaks in file gaps (#3472)
* Fix FUSE server buffer leaks in file gaps

  This change zeros read buffers when encountering file gaps during file/chunk reads in FUSE mounts. It prevents leaking internal buffers of the FUSE server, which could otherwise reveal metadata, directory listings, file contents, and other data related to FUSE API calls. The issue was that buffers are reused, but when a file gap was found the buffer was not zeroed accordingly, so the existing data in the buffer was kept and returned.

* Move zero logic into its own method
parent c7892bc7c4
commit 3f758820c1
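Before the diff: the following is a minimal, self-contained Go sketch of the idea described in the commit message, assuming a hypothetical sparseReader type with its own chunk and zero helpers (none of these names are the SeaweedFS code, which is shown in the diff below). The point it illustrates: when a reused read buffer covers a byte range that no chunk backs, that range has to be zeroed explicitly, otherwise stale bytes left in the buffer from earlier reads are handed back to the caller.

// A minimal, self-contained sketch of the gap-zeroing idea described in the
// commit message above. sparseReader, chunk and zero are hypothetical names,
// not the SeaweedFS implementation; they only illustrate why a reused read
// buffer must be cleared across file gaps.
package main

import "fmt"

// chunk is a hypothetical piece of file data stored at a logical offset.
type chunk struct {
    offset int64
    data   []byte
}

// sparseReader is a hypothetical reader over non-contiguous chunks of a file.
type sparseReader struct {
    chunks   []chunk // sorted by offset, non-overlapping
    fileSize int64
}

// ReadAt fills p starting at file offset off. Regions of the requested range
// that no chunk covers are explicitly zeroed; without the zero calls, whatever
// the caller left in p (for example data from a previous read) would be
// returned for those gaps.
func (r *sparseReader) ReadAt(p []byte, off int64) int {
    n := 0
    pos := off
    end := off + int64(len(p))
    if end > r.fileSize {
        end = r.fileSize
    }
    for _, c := range r.chunks {
        chunkEnd := c.offset + int64(len(c.data))
        if chunkEnd <= pos || c.offset >= end {
            continue // chunk is entirely outside the requested range
        }
        if pos < c.offset {
            // gap before this chunk: clear it instead of leaking stale bytes
            n += zero(p, pos-off, c.offset-pos)
            pos = c.offset
        }
        copied := copy(p[pos-off:end-off], c.data[pos-c.offset:])
        n += copied
        pos += int64(copied)
    }
    if pos < end {
        // trailing gap after the last chunk, or a fully sparse file
        n += zero(p, pos-off, end-pos)
    }
    return n
}

// zero clears length bytes of buffer starting at start, clamped to the buffer.
func zero(buffer []byte, start, length int64) int {
    stop := start + length
    if stop > int64(len(buffer)) {
        stop = int64(len(buffer))
    }
    for o := start; o < stop; o++ {
        buffer[o] = 0
    }
    return int(stop - start)
}

func main() {
    r := &sparseReader{
        chunks:   []chunk{{offset: 2, data: []byte{1}}, {offset: 7, data: []byte{1, 1}}},
        fileSize: 9,
    }
    p := []byte{2, 2, 2, 2, 2, 2, 2, 2, 2} // reused buffer holding stale bytes
    n := r.ReadAt(p, 0)
    fmt.Println(n, p) // 9 [0 0 1 0 0 0 0 1 1]
}

Running main prints 9 [0 0 1 0 0 0 0 1 1]; removing the two zero calls would leave the stale sentinel value 2 in the gap positions, which is exactly the kind of leak the commit fixes and the new TestReaderAtGappedChunksDoNotLeak / TestReaderAtSparseFileDoesNotLeak tests below check for.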
@@ -127,10 +127,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
             nextChunks = c.chunkViews[i+1:]
         }
         if startOffset < chunk.LogicOffset {
-            gap := int(chunk.LogicOffset - startOffset)
+            gap := chunk.LogicOffset - startOffset
             glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.LogicOffset)
-            n += int(min(int64(gap), remaining))
-            startOffset, remaining = chunk.LogicOffset, remaining-int64(gap)
+            c.zero(p, startOffset-offset, gap)
+            n += int(min(gap, remaining))
+            startOffset, remaining = chunk.LogicOffset, remaining-gap
             if remaining <= 0 {
                 break
             }
@@ -154,10 +155,19 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
     // glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
 
-    if err == nil && remaining > 0 && c.fileSize > startOffset {
-        delta := int(min(remaining, c.fileSize-startOffset))
-        glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
-        n += delta
+    // zero the remaining bytes if a gap exists at the end of the last chunk (or a fully sparse file)
+    if err == nil && remaining > 0 {
+        var delta int64
+        if c.fileSize > startOffset {
+            delta = min(remaining, c.fileSize-startOffset)
+            startOffset -= offset
+        } else {
+            delta = remaining
+            startOffset = max(startOffset-offset, startOffset-remaining-offset)
+        }
+        glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize)
+        c.zero(p, startOffset, delta)
+        n += int(delta)
     }
 
     if err == nil && offset+int64(len(p)) >= c.fileSize {
@@ -195,3 +205,12 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next
     c.lastChunkFid = chunkView.FileId
     return
 }
+
+func (c *ChunkReadAt) zero(buffer []byte, start, length int64) {
+    end := min(start+length, int64(len(buffer)))
+
+    // zero the bytes
+    for o := start; o < end; o++ {
+        buffer[o] = 0
+    }
+}
@@ -1,7 +1,7 @@
 package filer
 
 import (
-    "fmt"
+    "bytes"
     "io"
     "math"
     "strconv"
@@ -75,29 +75,28 @@ func TestReaderAt(t *testing.T) {
         readerPattern: NewReaderPattern(),
     }
 
-    testReadAt(t, readerAt, 0, 10, 10, io.EOF)
-    testReadAt(t, readerAt, 0, 12, 10, io.EOF)
-    testReadAt(t, readerAt, 2, 8, 8, io.EOF)
-    testReadAt(t, readerAt, 3, 6, 6, nil)
+    testReadAt(t, readerAt, 0, 10, 10, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 0, 12, 12, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 2, 8, 8, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 3, 6, 6, nil, nil, nil)
 
 }
 
-func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expected int, expectedErr error) {
-    data := make([]byte, size)
+func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expectedN int, expectedErr error, data, expectedData []byte) {
+    if data == nil {
+        data = make([]byte, size)
+    }
     n, err := readerAt.doReadAt(data, offset)
 
-    for _, d := range data {
-        fmt.Printf("%x", d)
-    }
-    fmt.Println()
-
-    if expected != n {
-        t.Errorf("unexpected read size: %d, expect: %d", n, expected)
+    if expectedN != n {
+        t.Errorf("unexpected read size: %d, expect: %d", n, expectedN)
     }
     if err != expectedErr {
         t.Errorf("unexpected read error: %v, expect: %v", err, expectedErr)
     }
+    if expectedData != nil && !bytes.Equal(data, expectedData) {
+        t.Errorf("unexpected read data: %v, expect: %v", data, expectedData)
+    }
 }
 
 func TestReaderAt0(t *testing.T) {
@@ -125,12 +124,12 @@ func TestReaderAt0(t *testing.T) {
         readerPattern: NewReaderPattern(),
     }
 
-    testReadAt(t, readerAt, 0, 10, 10, io.EOF)
-    testReadAt(t, readerAt, 3, 16, 7, io.EOF)
-    testReadAt(t, readerAt, 3, 5, 5, nil)
+    testReadAt(t, readerAt, 0, 10, 10, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 3, 16, 7, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 3, 5, 5, nil, nil, nil)
 
-    testReadAt(t, readerAt, 11, 5, 0, io.EOF)
-    testReadAt(t, readerAt, 10, 5, 0, io.EOF)
+    testReadAt(t, readerAt, 11, 5, 5, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 10, 5, 5, io.EOF, nil, nil)
 
 }
 
@@ -153,13 +152,54 @@ func TestReaderAt1(t *testing.T) {
         readerPattern: NewReaderPattern(),
     }
 
-    testReadAt(t, readerAt, 0, 20, 20, io.EOF)
-    testReadAt(t, readerAt, 1, 7, 7, nil)
-    testReadAt(t, readerAt, 0, 1, 1, nil)
-    testReadAt(t, readerAt, 18, 4, 2, io.EOF)
-    testReadAt(t, readerAt, 12, 4, 4, nil)
-    testReadAt(t, readerAt, 4, 20, 16, io.EOF)
-    testReadAt(t, readerAt, 4, 10, 10, nil)
-    testReadAt(t, readerAt, 1, 10, 10, nil)
+    testReadAt(t, readerAt, 0, 20, 20, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 1, 7, 7, nil, nil, nil)
+    testReadAt(t, readerAt, 0, 1, 1, nil, nil, nil)
+    testReadAt(t, readerAt, 18, 4, 2, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 12, 4, 4, nil, nil, nil)
+    testReadAt(t, readerAt, 4, 20, 16, io.EOF, nil, nil)
+    testReadAt(t, readerAt, 4, 10, 10, nil, nil, nil)
+    testReadAt(t, readerAt, 1, 10, 10, nil, nil, nil)
 
 }
+
+func TestReaderAtGappedChunksDoNotLeak(t *testing.T) {
+    visibles := []VisibleInterval{
+        {
+            start:     2,
+            stop:      3,
+            fileId:    "1",
+            chunkSize: 5,
+        },
+        {
+            start:     7,
+            stop:      9,
+            fileId:    "1",
+            chunkSize: 4,
+        },
+    }
+
+    readerAt := &ChunkReadAt{
+        chunkViews:    ViewFromVisibleIntervals(visibles, 0, math.MaxInt64),
+        readerLock:    sync.Mutex{},
+        fileSize:      9,
+        readerCache:   newReaderCache(3, &mockChunkCache{}, nil),
+        readerPattern: NewReaderPattern(),
+    }
+
+    testReadAt(t, readerAt, 0, 9, 9, io.EOF, []byte{2, 2, 2, 2, 2, 2, 2, 2, 2}, []byte{0, 0, 1, 0, 0, 0, 0, 1, 1})
+    testReadAt(t, readerAt, 1, 8, 8, io.EOF, []byte{2, 2, 2, 2, 2, 2, 2, 2}, []byte{0, 1, 0, 0, 0, 0, 1, 1})
+}
+
+func TestReaderAtSparseFileDoesNotLeak(t *testing.T) {
+    readerAt := &ChunkReadAt{
+        chunkViews:    ViewFromVisibleIntervals([]VisibleInterval{}, 0, math.MaxInt64),
+        readerLock:    sync.Mutex{},
+        fileSize:      3,
+        readerCache:   newReaderCache(3, &mockChunkCache{}, nil),
+        readerPattern: NewReaderPattern(),
+    }
+
+    testReadAt(t, readerAt, 0, 3, 3, io.EOF, []byte{2, 2, 2}, []byte{0, 0, 0})
+    testReadAt(t, readerAt, 1, 2, 2, io.EOF, []byte{2, 2}, []byte{0, 0})
+}