mirror of
https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
Merge pull request #1568 from binbinshi/master
sometimes restarting the volume server causes an out-of-memory error
This commit is contained in:
commit
73d924f1ee
|
@ -61,6 +61,18 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err
|
|||
|
||||
func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size Size) (lastAppendAtNs uint64, err error) {
|
||||
n := new(needle.Needle)
|
||||
// case: node total memory 8g, set volumeLimitSize=2048 , save 10 files, every file size 2.2g or more , when we restart the volume server , while see out of memory error
|
||||
// fix: When the size of the last file exceeds 10M, consider directly returning the last modify time
|
||||
if size > 10 * 1024 * 1024 {
|
||||
bytes , err := needle.ReadNeedleBlob(datFile, offset+int64(size), 0, v);
|
||||
if err == nil {
|
||||
if v == needle.Version3 {
|
||||
tsOffset := NeedleHeaderSize + 0 + needle.NeedleChecksumSize
|
||||
n.AppendAtNs = util.BytesToUint64(bytes[tsOffset : tsOffset+TimestampSize])
|
||||
}
|
||||
}
|
||||
return n.AppendAtNs, err
|
||||
}
|
||||
if err = n.ReadData(datFile, offset, size, v); err != nil {
|
||||
return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue