seaweedfs/weed/storage/volume_read_write.go

package storage

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/storage/backend"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
)

var ErrorNotFound = errors.New("not found")

// isFileUnchanged checks whether the needle to be written is the same as the last one.
// It requires serialized access within the same volume.
func (v *Volume) isFileUnchanged(n *needle.Needle) bool {
	if v.Ttl.String() != "" {
		return false
	}
	nv, ok := v.nm.Get(n.Id)
	if ok && !nv.Offset.IsZero() && nv.Size.IsValid() {
		oldNeedle := new(needle.Needle)
		err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
		if err != nil {
			glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToAcutalOffset(), nv.Size, err)
			return false
		}
		if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) {
			n.DataSize = oldNeedle.DataSize
			return true
		}
	}
	return false
}

// Destroy removes everything related to this volume
func (v *Volume) Destroy() (err error) {
	if v.isCompacting {
		err = fmt.Errorf("volume %d is compacting", v.Id)
		return
	}
	close(v.asyncRequestsChan)
	storageName, storageKey := v.RemoteStorageNameKey()
	if v.HasRemoteFile() && storageName != "" && storageKey != "" {
		if backendStorage, found := backend.BackendStorages[storageName]; found {
			backendStorage.DeleteFile(storageKey)
		}
	}
	v.Close()
	os.Remove(v.FileName() + ".dat")
	os.Remove(v.FileName() + ".idx")
	os.Remove(v.FileName() + ".vif")
	os.Remove(v.FileName() + ".sdx")
	os.Remove(v.FileName() + ".cpd")
	os.Remove(v.FileName() + ".cpx")
	os.RemoveAll(v.FileName() + ".ldb")
	return
}
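
// asyncRequestAppend queues a write or delete request onto the volume's async
// request channel, to be batched and flushed by the goroutine started in startWorker.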
func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) {
	v.asyncRequestsChan <- request
}
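
// syncWrite appends the needle to the volume's .dat file while holding the data
// file access lock: it rejects writes that would exceed MaxPossibleVolumeSize,
// short-circuits unchanged content, verifies the cookie of any existing needle,
// and then updates the needle map and last-modified bookkeeping.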
func (v *Volume) syncWrite(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
	actualSize := needle.GetActualSize(Size(len(n.Data)), v.Version())

	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
		err = fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
		return
	}
	if v.isFileUnchanged(n) {
		size = Size(n.DataSize)
		isUnchanged = true
		return
	}

	// check whether existing needle cookie matches
	nv, ok := v.nm.Get(n.Id)
	if ok {
		existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset())
		if existingNeedleReadErr != nil {
			err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
			return
		}
		if existingNeedle.Cookie != n.Cookie {
			glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
			err = fmt.Errorf("mismatching cookie %x", n.Cookie)
			return
		}
	}

	// append to dat file
	n.AppendAtNs = uint64(time.Now().UnixNano())
	if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil {
		return
	}
	v.lastAppendAtNs = n.AppendAtNs

	// add to needle map
	if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
		if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
			glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
		}
	}
	if v.lastModifiedTsSeconds < n.LastModified {
		v.lastModifiedTsSeconds = n.LastModified
	}
	return
}
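
// writeNeedle2 is the entry point for writing a needle: it inherits the volume's
// TTL when the needle has none, then either writes synchronously or, when fsync
// is requested, goes through the async worker so the write is batched with a Sync.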
func (v *Volume) writeNeedle2(n *needle.Needle, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) {
	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
	if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL {
		n.SetHasTtl()
		n.Ttl = v.Ttl
	}

	if !fsync {
		return v.syncWrite(n)
	} else {
		asyncRequest := needle.NewAsyncRequest(n, true)
		// use len(n.Data) here instead of n.Size, because n.Size is not populated until n.Append()
		asyncRequest.ActualSize = needle.GetActualSize(Size(len(n.Data)), v.Version())

		v.asyncRequestAppend(asyncRequest)

		offset, _, isUnchanged, err = asyncRequest.WaitComplete()

		return
	}
}
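
// doWriteRequest performs the actual append for a queued write request. The
// background worker already holds dataFileAccessLock, so this mirrors syncWrite
// without taking the lock or checking the volume size limit itself.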
func (v *Volume) doWriteRequest(n *needle.Needle) (offset uint64, size Size, isUnchanged bool, err error) {
	// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
	if v.isFileUnchanged(n) {
		size = Size(n.DataSize)
		isUnchanged = true
		return
	}

	// check whether existing needle cookie matches
	nv, ok := v.nm.Get(n.Id)
	if ok {
		existingNeedle, _, _, existingNeedleReadErr := needle.ReadNeedleHeader(v.DataBackend, v.Version(), nv.Offset.ToAcutalOffset())
		if existingNeedleReadErr != nil {
			err = fmt.Errorf("reading existing needle: %v", existingNeedleReadErr)
			return
		}
		if existingNeedle.Cookie != n.Cookie {
			glog.V(0).Infof("write cookie mismatch: existing %x, new %x", existingNeedle.Cookie, n.Cookie)
			err = fmt.Errorf("mismatching cookie %x", n.Cookie)
			return
		}
	}

	// append to dat file
	n.AppendAtNs = uint64(time.Now().UnixNano())
	if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil {
		return
	}
	v.lastAppendAtNs = n.AppendAtNs

	// add to needle map
	if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
		if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
			glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
		}
	}
	if v.lastModifiedTsSeconds < n.LastModified {
		v.lastModifiedTsSeconds = n.LastModified
	}
	return
}
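
// syncDelete appends a tombstone needle (empty data) for n.Id and marks the entry
// as deleted in the needle map, while holding the data file access lock. It returns
// the size of the deleted needle so callers can account for reclaimable space.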
func (v *Volume) syncDelete(n *needle.Needle) (Size, error) {
	glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
	actualSize := needle.GetActualSize(0, v.Version())

	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	if MaxPossibleVolumeSize < v.nm.ContentSize()+uint64(actualSize) {
		err := fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.nm.ContentSize())
		return 0, err
	}

	nv, ok := v.nm.Get(n.Id)
	// fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
	if ok && nv.Size.IsValid() {
		size := nv.Size
		n.Data = nil
		n.AppendAtNs = uint64(time.Now().UnixNano())
		offset, _, _, err := n.Append(v.DataBackend, v.Version())
		if err != nil {
			return size, err
		}
		v.lastAppendAtNs = n.AppendAtNs
		if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
			return size, err
		}
		return size, err
	}
	return 0, nil
}
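
// deleteNeedle2 is the entry point for deleting a needle. The delete record is
// currently always written without fsync (see the todo below), so the async path
// is never taken.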
func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) {
	// todo: the delete record is currently appended without fsync; it may need fsync in the future
	fsync := false
	if !fsync {
		return v.syncDelete(n)
	} else {
		asyncRequest := needle.NewAsyncRequest(n, false)
		asyncRequest.ActualSize = needle.GetActualSize(0, v.Version())

		v.asyncRequestAppend(asyncRequest)

		_, size, _, err := asyncRequest.WaitComplete()

		return Size(size), err
	}
}
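
// doDeleteRequest performs the actual tombstone append for a queued delete request.
// Like doWriteRequest, it assumes the background worker already holds dataFileAccessLock.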
func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) {
	glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
	nv, ok := v.nm.Get(n.Id)
	// fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
	if ok && nv.Size.IsValid() {
		size := nv.Size
		n.Data = nil
		n.AppendAtNs = uint64(time.Now().UnixNano())
		offset, _, _, err := n.Append(v.DataBackend, v.Version())
		if err != nil {
			return size, err
		}
		v.lastAppendAtNs = n.AppendAtNs
		if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
			return size, err
		}
		return size, err
	}
	return 0, nil
}

// readNeedle fills in the Needle content by looking up n.Id from the NeedleMapper.
// It returns the number of bytes read, or ErrorNotFound when the needle does not
// exist or its TTL has expired.
func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, error) {
	v.dataFileAccessLock.RLock()
	defer v.dataFileAccessLock.RUnlock()

	nv, ok := v.nm.Get(n.Id)
	if !ok || nv.Offset.IsZero() {
		return -1, ErrorNotFound
	}
	readSize := nv.Size
	if readSize.IsDeleted() {
		if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize {
			glog.V(3).Infof("reading deleted %s", n.String())
			readSize = -readSize
		} else {
			return -1, errors.New("already deleted")
		}
	}
	if readSize == 0 {
		return 0, nil
	}
	err := n.ReadData(v.DataBackend, nv.Offset.ToAcutalOffset(), readSize, v.Version())
	if err != nil {
		return 0, err
	}
	bytesRead := len(n.Data)
	if !n.HasTtl() {
		return bytesRead, nil
	}
	ttlMinutes := n.Ttl.Minutes()
	if ttlMinutes == 0 {
		return bytesRead, nil
	}
	if !n.HasLastModifiedDate() {
		return bytesRead, nil
	}
	if uint64(time.Now().Unix()) < n.LastModified+uint64(ttlMinutes*60) {
		return bytesRead, nil
	}
	return -1, ErrorNotFound
}
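
// startWorker launches the background goroutine that drains asyncRequestsChan in
// batches (up to 128 requests or about 4MB per batch), applies each write or delete
// under dataFileAccessLock, then issues a single Sync. If the Sync fails, the data
// file is truncated back to its pre-batch size and the affected requests are failed.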
func (v *Volume) startWorker() {
	go func() {
		chanClosed := false
		for {
			// channel closed, the goroutine will exit
			if chanClosed {
				break
			}
			currentRequests := make([]*needle.AsyncRequest, 0, 128)
			currentBytesToWrite := int64(0)
			for {
				request, ok := <-v.asyncRequestsChan
				// volume may be closed
				if !ok {
					chanClosed = true
					break
				}
				if MaxPossibleVolumeSize < v.ContentSize()+uint64(currentBytesToWrite+request.ActualSize) {
					request.Complete(0, 0, false,
						fmt.Errorf("volume size limit %d exceeded! current size is %d", MaxPossibleVolumeSize, v.ContentSize()))
					break
				}
				currentRequests = append(currentRequests, request)
				currentBytesToWrite += request.ActualSize
				// submit at most 4M bytes or 128 requests at one time to decrease request delay.
				// it also needs to break when there is no data in the channel to avoid an io hang.
				if currentBytesToWrite >= 4*1024*1024 || len(currentRequests) >= 128 || len(v.asyncRequestsChan) == 0 {
					break
				}
			}

			if len(currentRequests) == 0 {
				continue
			}

			v.dataFileAccessLock.Lock()
			end, _, e := v.DataBackend.GetStat()
			if e != nil {
				for i := 0; i < len(currentRequests); i++ {
					currentRequests[i].Complete(0, 0, false,
						fmt.Errorf("cannot read current volume position: %v", e))
				}
				v.dataFileAccessLock.Unlock()
				continue
			}

			for i := 0; i < len(currentRequests); i++ {
				if currentRequests[i].IsWriteRequest {
					offset, size, isUnchanged, err := v.doWriteRequest(currentRequests[i].N)
					currentRequests[i].UpdateResult(offset, uint64(size), isUnchanged, err)
				} else {
					size, err := v.doDeleteRequest(currentRequests[i].N)
					currentRequests[i].UpdateResult(0, uint64(size), false, err)
				}
			}

			// if sync fails, the data is not reliable; mark the completed requests as failed and roll back
			if err := v.DataBackend.Sync(); err != nil {
				// todo: this may generate dirty data or cause data inconsistency, maybe weed needs to panic?
				if te := v.DataBackend.Truncate(end); te != nil {
					glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te)
				}
				for i := 0; i < len(currentRequests); i++ {
					if currentRequests[i].IsSucceed() {
						currentRequests[i].UpdateResult(0, 0, false, err)
					}
				}
			}

			for i := 0; i < len(currentRequests); i++ {
				currentRequests[i].Submit()
			}
			v.dataFileAccessLock.Unlock()
		}
	}()
}
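
// VolumeFileScanner is implemented by callers of ScanVolumeFile to walk a .dat
// file: VisitSuperBlock sees the super block, ReadNeedleBody controls whether
// needle bodies are loaded, and VisitNeedle is called once per needle entry.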
type VolumeFileScanner interface {
	VisitSuperBlock(super_block.SuperBlock) error
	ReadNeedleBody() bool
	VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error
}
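
// A minimal sketch of a scanner implementation, kept here as a comment for
// illustration only (the needleCounter type and its field are hypothetical and
// not part of this package):
//
//	type needleCounter struct {
//		count int
//	}
//
//	func (s *needleCounter) VisitSuperBlock(sb super_block.SuperBlock) error { return nil }
//	func (s *needleCounter) ReadNeedleBody() bool                           { return false }
//	func (s *needleCounter) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
//		s.count++
//		return nil
//	}
//
// Such a scanner could then be passed to ScanVolumeFile(dirname, collection, id, needleMapKind, &needleCounter{}).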

// ScanVolumeFile loads the volume's .dat file without its index and walks every
// needle entry, invoking the given VolumeFileScanner callbacks.
func ScanVolumeFile(dirname string, collection string, id needle.VolumeId,
	needleMapKind NeedleMapType,
	volumeFileScanner VolumeFileScanner) (err error) {
	var v *Volume
	if v, err = loadVolumeWithoutIndex(dirname, collection, id, needleMapKind); err != nil {
		return fmt.Errorf("failed to load volume %d: %v", id, err)
	}
	if v.volumeInfo.Version == 0 {
		if err = volumeFileScanner.VisitSuperBlock(v.SuperBlock); err != nil {
			return fmt.Errorf("failed to process volume %d super block: %v", id, err)
		}
	}
	defer v.Close()

	version := v.Version()

	offset := int64(v.SuperBlock.BlockSize())

	return ScanVolumeFileFrom(version, v.DataBackend, offset, volumeFileScanner)
}
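
// ScanVolumeFileFrom iterates needle entries starting at the given offset,
// calling VisitNeedle for each one until io.EOF or an error is encountered.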
func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorageFile, offset int64, volumeFileScanner VolumeFileScanner) (err error) {
	n, nh, rest, e := needle.ReadNeedleHeader(datBackend, version, offset)
	if e != nil {
		if e == io.EOF {
			return nil
		}
		return fmt.Errorf("cannot read %s at offset %d: %v", datBackend.Name(), offset, e)
	}
	for n != nil {
		var needleBody []byte
		if volumeFileScanner.ReadNeedleBody() {
			if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil {
				glog.V(0).Infof("cannot read needle body: %v", err)
				// err = fmt.Errorf("cannot read needle body: %v", err)
				// return
			}
		}
		err := volumeFileScanner.VisitNeedle(n, offset, nh, needleBody)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			glog.V(0).Infof("visit needle error: %v", err)
			return fmt.Errorf("visit needle error: %v", err)
		}
		offset += NeedleHeaderSize + rest
		glog.V(4).Infof("==> new entry offset %d", offset)
		if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil {
			if err == io.EOF {
				return nil
			}
			return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err)
		}
		glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest)
	}
	return nil
}