seaweedfs/weed/filer/filer_notify_append.go

package filer

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
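
// appendToFile uploads data as a single chunk and appends that chunk to the
// entry at targetFile, creating the entry with default attributes if it does
// not exist yet.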
func (f *Filer) appendToFile(targetFile string, data []byte) error {
	assignResult, uploadResult, err2 := f.assignAndUpload(targetFile, data)
	if err2 != nil {
		return err2
	}

	// find out existing entry
	fullpath := util.FullPath(targetFile)
	entry, err := f.FindEntry(context.Background(), fullpath)
	var offset int64 = 0
	if err == filer_pb.ErrNotFound {
		entry = &Entry{
			FullPath: fullpath,
			Attr: Attr{
				Crtime: time.Now(),
				Mtime:  time.Now(),
				Mode:   os.FileMode(0644),
				Uid:    OS_UID,
				Gid:    OS_GID,
			},
		}
	} else if err != nil {
		return fmt.Errorf("find %s: %v", fullpath, err)
	} else {
		offset = int64(TotalSize(entry.GetChunks()))
	}

	// append to existing chunks
	entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset, time.Now().UnixNano()))

	// update the entry
	err = f.CreateEntry(context.Background(), entry, false, false, nil, false, f.MaxFilenameLength)

	return err
}
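
// assignAndUpload requests a volume assignment from the master, using the
// storage rule matched for targetFile, then uploads data to the assigned
// volume server and returns both the assignment and the upload result.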
func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
	// assign a volume location
	rule := f.FilerConf.MatchStorageRule(targetFile)
	assignRequest := &operation.VolumeAssignRequest{
		Count: 1,
		// util.Nvl returns the first non-empty string, so an explicit
		// metaLog collection/replication overrides the matched storage rule.
		Collection:          util.Nvl(f.metaLogCollection, rule.Collection),
		Replication:         util.Nvl(f.metaLogReplication, rule.Replication),
		WritableVolumeCount: rule.VolumeGrowthCount,
	}

	assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest)
	if err != nil {
		return nil, nil, fmt.Errorf("AssignVolume: %v", err)
	}
	if assignResult.Error != "" {
		return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
	}

	// upload data
	targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
	uploadOption := &operation.UploadOption{
		UploadUrl:         targetUrl,
		Filename:          "",
		Cipher:            f.Cipher,
		IsInputCompressed: false,
		MimeType:          "",
		PairMap:           nil,
		Jwt:               assignResult.Auth,
	}
	uploadResult, err := operation.UploadData(data, uploadOption)
	if err != nil {
		return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
	}
	// println("uploaded to", targetUrl)

	return assignResult, uploadResult, nil
}