seaweedfs/weed/server/webdav_server.go

618 lines
15 KiB
Go
Raw Normal View History

package weed_server
import (
"context"
2019-05-03 07:24:35 +00:00
"fmt"
"io"
"os"
"path"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/util/buffered_writer"
2019-12-07 15:56:05 +00:00
"golang.org/x/net/webdav"
"google.golang.org/grpc"
"github.com/seaweedfs/seaweedfs/weed/operation"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
2019-12-07 15:56:05 +00:00
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/security"
)
// WebDavOption holds the configuration for a WebDAV server backed by a
// SeaweedFS filer.
type WebDavOption struct {
	Filer          pb.ServerAddress // address of the backing filer
	FilerRootPath  string           // filer directory exposed as the WebDAV root ("" means "/")
	DomainName     string
	BucketsPath    string
	GrpcDialOption grpc.DialOption // gRPC options used when dialing the filer
	Collection     string          // collection assigned to newly written files
	Replication    string          // replication setting for newly written files
	DiskType       string
	Uid            uint32 // uid stamped on entries created through WebDAV
	Gid            uint32 // gid stamped on entries created through WebDAV
	Cipher         bool   // whether uploaded content is encrypted
	CacheDir       string // local directory for the tiered chunk cache
	CacheSizeMB    int64  // on-disk chunk cache size limit
}
// WebDavServer wraps a golang.org/x/net/webdav Handler with a
// SeaweedFS-filer-backed file system.
type WebDavServer struct {
	option         *WebDavOption
	secret         security.SigningKey
	filer          *filer.Filer
	grpcDialOption grpc.DialOption
	Handler        *webdav.Handler // HTTP handler implementing the WebDAV protocol
}
// max returns the larger of x and y.
func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}
func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) {
2019-05-03 07:24:35 +00:00
fs, _ := NewWebDavFileSystem(option)
// Fix no set filer.path , accessing "/" returns "//"
if option.FilerRootPath == "/" {
option.FilerRootPath = ""
}
ws = &WebDavServer{
option: option,
grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"),
Handler: &webdav.Handler{
FileSystem: fs,
LockSystem: webdav.NewMemLS(),
},
}
return ws, nil
}
// adapted from https://github.com/mattn/davfs/blob/master/plugin/mysql/mysql.go

// WebDavFileSystem implements webdav.FileSystem on top of a SeaweedFS filer.
type WebDavFileSystem struct {
	option         *WebDavOption
	secret         security.SigningKey
	grpcDialOption grpc.DialOption
	chunkCache     *chunk_cache.TieredChunkCache // local read cache for file chunks
	signature      int32                         // random signature attached to mutations from this server
}
// FileInfo implements os.FileInfo for filer entries.
type FileInfo struct {
	name         string
	size         int64
	mode         os.FileMode
	modifiedTime time.Time
	isDirectory  bool
}
// Name returns the stored name (a full path when produced by stat).
func (fi *FileInfo) Name() string { return fi.name }

// Size returns the file size in bytes.
func (fi *FileInfo) Size() int64 { return fi.size }

// Mode returns the file mode bits.
func (fi *FileInfo) Mode() os.FileMode { return fi.mode }

// ModTime returns the last modification time.
func (fi *FileInfo) ModTime() time.Time { return fi.modifiedTime }

// IsDir reports whether the entry is a directory.
func (fi *FileInfo) IsDir() bool { return fi.isDirectory }

// Sys returns nil; no underlying data source is exposed.
func (fi *FileInfo) Sys() interface{} { return nil }
// WebDavFile implements webdav.File for one open file or directory.
type WebDavFile struct {
	fs               *WebDavFileSystem
	name             string // full path within the filer
	isDirectory      bool
	off              int64 // current read/write/readdir offset
	entry            *filer_pb.Entry
	visibleIntervals *filer.IntervalList[*filer.VisibleInterval] // resolved chunk visibility, built lazily for reads
	reader           io.ReaderAt
	bufWriter        *buffered_writer.BufferedWriteCloser // buffers writes; flushed as chunks
}
func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
2020-04-12 04:12:41 +00:00
cacheUniqueId := util.Md5String([]byte("webdav" + string(option.Filer) + util.Version()))[0:8]
cacheDir := path.Join(option.CacheDir, cacheUniqueId)
os.MkdirAll(cacheDir, os.FileMode(0755))
chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
2019-05-03 07:24:35 +00:00
return &WebDavFileSystem{
option: option,
2020-04-12 04:12:41 +00:00
chunkCache: chunkCache,
signature: util.RandomInt32(),
2019-05-03 07:24:35 +00:00
}, nil
}
2020-04-29 20:26:02 +00:00
// compile-time check that WebDavFileSystem satisfies filer_pb.FilerClient.
var _ = filer_pb.FilerClient(&WebDavFileSystem{})

// WithFilerClient dials the filer over gRPC and invokes fn with a connected client.
func (fs *WebDavFileSystem) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error {

	return pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error {
		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
		return fn(client)
	}, fs.option.Filer.ToGrpcAddress(), false, fs.option.GrpcDialOption)

}
// AdjustedUrl returns the volume location URL unchanged; no address
// rewriting is applied for WebDAV access.
func (fs *WebDavFileSystem) AdjustedUrl(location *filer_pb.Location) string {
	return location.Url
}
// GetDataCenter returns the preferred data center; empty means no preference.
func (fs *WebDavFileSystem) GetDataCenter() string {
	return ""
}
func clearName(name string) (string, error) {
slashed := strings.HasSuffix(name, "/")
name = path.Clean(name)
if !strings.HasSuffix(name, "/") && slashed {
name += "/"
}
if !strings.HasPrefix(name, "/") {
return "", os.ErrInvalid
}
return name, nil
}
2019-05-03 07:24:35 +00:00
// Mkdir creates a directory entry on the filer.
// It returns os.ErrExist when the path already exists.
// NOTE(review): unlike OpenFile/Stat, FilerRootPath is not prefixed here —
// confirm this is intended.
func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm os.FileMode) error {

	glog.V(2).Infof("WebDavFileSystem.Mkdir %v", fullDirPath)

	// directories are handled with a trailing slash
	if !strings.HasSuffix(fullDirPath, "/") {
		fullDirPath += "/"
	}

	var err error
	if fullDirPath, err = clearName(fullDirPath); err != nil {
		return err
	}

	_, err = fs.stat(ctx, fullDirPath)
	if err == nil {
		return os.ErrExist
	}

	return fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {

		dir, name := util.FullPath(fullDirPath).DirAndName()

		request := &filer_pb.CreateEntryRequest{
			Directory: dir,
			Entry: &filer_pb.Entry{
				Name:        name,
				IsDirectory: true,
				Attributes: &filer_pb.FuseAttributes{
					Mtime:    time.Now().Unix(),
					Crtime:   time.Now().Unix(),
					FileMode: uint32(perm | os.ModeDir),
					Uid:      fs.option.Uid,
					Gid:      fs.option.Gid,
				},
			},
			// signature lets other subscribers recognize changes made by this server
			Signatures: []int32{fs.signature},
		}

		glog.V(1).Infof("mkdir: %v", request)
		if err := filer_pb.CreateEntry(client, request); err != nil {
			return fmt.Errorf("mkdir %s/%s: %v", dir, name, err)
		}

		return nil
	})
}
// OpenFile opens — and with os.O_CREATE, creates or truncates — a file entry.
// The configured FilerRootPath is prefixed onto the request path first.
func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) {
	// Add filer.path
	fullFilePath = fs.option.FilerRootPath + fullFilePath

	glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag)

	var err error
	if fullFilePath, err = clearName(fullFilePath); err != nil {
		return nil, err
	}

	if flag&os.O_CREATE != 0 {
		// file should not have / suffix.
		if strings.HasSuffix(fullFilePath, "/") {
			return nil, os.ErrInvalid
		}
		_, err = fs.stat(ctx, fullFilePath)
		if err == nil {
			// an existing entry blocks O_EXCL; otherwise replace it (truncate semantics)
			if flag&os.O_EXCL != 0 {
				return nil, os.ErrExist
			}
			fs.removeAll(ctx, fullFilePath)
		}

		dir, name := util.FullPath(fullFilePath).DirAndName()
		err = fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
			if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
				Directory: dir,
				Entry: &filer_pb.Entry{
					Name:        name,
					IsDirectory: perm&os.ModeDir > 0,
					Attributes: &filer_pb.FuseAttributes{
						Mtime:    time.Now().Unix(),
						Crtime:   time.Now().Unix(),
						FileMode: uint32(perm),
						Uid:      fs.option.Uid,
						Gid:      fs.option.Gid,
						TtlSec:   0,
					},
				},
				Signatures: []int32{fs.signature},
			}); err != nil {
				return fmt.Errorf("create %s: %v", fullFilePath, err)
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
		return &WebDavFile{
			fs:          fs,
			name:        fullFilePath,
			isDirectory: false,
			bufWriter:   buffered_writer.NewBufferedWriteCloser(4 * 1024 * 1024),
		}, nil
	}

	fi, err := fs.stat(ctx, fullFilePath)
	if err != nil {
		return nil, os.ErrNotExist
	}
	if !strings.HasSuffix(fullFilePath, "/") && fi.IsDir() {
		fullFilePath += "/"
	}
	// NOTE(review): isDirectory is always false here even when fi.IsDir();
	// directory-ness appears to be conveyed via the trailing slash — confirm.
	return &WebDavFile{
		fs:          fs,
		name:        fullFilePath,
		isDirectory: false,
		bufWriter:   buffered_writer.NewBufferedWriteCloser(4 * 1024 * 1024),
	}, nil
}
// removeAll deletes the entry at fullFilePath from the filer.
func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) error {

	var err error
	if fullFilePath, err = clearName(fullFilePath); err != nil {
		return err
	}

	dir, name := util.FullPath(fullFilePath).DirAndName()

	// NOTE(review): the boolean flags map to filer_pb.Remove's parameters
	// (first one is presumably isDeleteData=true) — verify against its signature.
	return filer_pb.Remove(fs, dir, name, true, false, false, false, []int32{fs.signature})
}
// RemoveAll implements webdav.FileSystem by delegating to removeAll.
// NOTE(review): FilerRootPath is not prefixed here, unlike OpenFile/Stat — confirm.
func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error {

	glog.V(2).Infof("WebDavFileSystem.RemoveAll %v", name)

	return fs.removeAll(ctx, name)
}
func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) error {
glog.V(2).Infof("WebDavFileSystem.Rename %v to %v", oldName, newName)
var err error
if oldName, err = clearName(oldName); err != nil {
return err
}
if newName, err = clearName(newName); err != nil {
return err
}
2019-05-03 07:24:35 +00:00
of, err := fs.stat(ctx, oldName)
if err != nil {
return os.ErrExist
}
2019-05-03 21:12:51 +00:00
if of.IsDir() {
if strings.HasSuffix(oldName, "/") {
oldName = strings.TrimRight(oldName, "/")
}
if strings.HasSuffix(newName, "/") {
newName = strings.TrimRight(newName, "/")
}
}
2019-05-03 07:24:35 +00:00
_, err = fs.stat(ctx, newName)
if err == nil {
return os.ErrExist
}
2020-03-23 07:01:34 +00:00
oldDir, oldBaseName := util.FullPath(oldName).DirAndName()
newDir, newBaseName := util.FullPath(newName).DirAndName()
2019-05-03 07:24:35 +00:00
return fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
2019-05-03 07:24:35 +00:00
request := &filer_pb.AtomicRenameEntryRequest{
OldDirectory: oldDir,
OldName: oldBaseName,
NewDirectory: newDir,
NewName: newBaseName,
}
_, err := client.AtomicRenameEntry(ctx, request)
if err != nil {
return fmt.Errorf("renaming %s/%s => %s/%s: %v", oldDir, oldBaseName, newDir, newBaseName, err)
}
return nil
})
}
2019-05-03 07:24:35 +00:00
// stat looks up the filer entry for fullFilePath and converts it to os.FileInfo.
func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.FileInfo, error) {

	var err error
	if fullFilePath, err = clearName(fullFilePath); err != nil {
		return nil, err
	}

	fullpath := util.FullPath(fullFilePath)

	var fi FileInfo
	entry, err := filer_pb.GetEntry(fs, fullpath)
	// the nil-entry check precedes the error check, so any lookup failure
	// with a nil entry surfaces as os.ErrNotExist
	if entry == nil {
		return nil, os.ErrNotExist
	}
	if err != nil {
		return nil, err
	}

	fi.size = int64(filer.FileSize(entry))
	fi.name = string(fullpath)
	fi.mode = os.FileMode(entry.Attributes.FileMode)
	fi.modifiedTime = time.Unix(entry.Attributes.Mtime, 0)
	fi.isDirectory = entry.IsDirectory

	// the root is always presented as a directory with a fresh mtime
	if fi.name == "/" {
		fi.modifiedTime = time.Now()
		fi.isDirectory = true
	}
	return &fi, nil
}
// Stat implements webdav.FileSystem; it prefixes FilerRootPath before stat-ing.
func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) {
	// Add filer.path
	name = fs.option.FilerRootPath + name

	glog.V(2).Infof("WebDavFileSystem.Stat %v", name)

	return fs.stat(ctx, name)
}
func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
2019-05-03 07:55:52 +00:00
2022-08-21 02:18:12 +00:00
fileId, uploadResult, flushErr, _ := operation.UploadWithRetry(
f.fs,
&filer_pb.AssignVolumeRequest{
Count: 1,
Replication: f.fs.option.Replication,
Collection: f.fs.option.Collection,
DiskType: f.fs.option.DiskType,
Path: name,
},
&operation.UploadOption{
Filename: f.name,
Cipher: f.fs.option.Cipher,
IsInputCompressed: false,
MimeType: "",
PairMap: nil,
},
func(host, fileId string) string {
return fmt.Sprintf("http://%s/%s", host, fileId)
},
reader,
)
2019-05-03 07:55:52 +00:00
2020-12-01 23:32:27 +00:00
if flushErr != nil {
2022-08-21 02:18:12 +00:00
glog.V(0).Infof("upload data %v: %v", f.name, flushErr)
return nil, fmt.Errorf("upload data: %v", flushErr)
2019-05-03 07:55:52 +00:00
}
if uploadResult.Error != "" {
2022-08-21 02:18:12 +00:00
glog.V(0).Infof("upload failure %v: %v", f.name, flushErr)
return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
2020-12-01 23:32:27 +00:00
}
more solid weed mount (#4089) * compare chunks by timestamp * fix slab clearing error * fix test compilation * move oldest chunk to sealed, instead of by fullness * lock on fh.entryViewCache * remove verbose logs * revert slat clearing * less logs * less logs * track write and read by timestamp * remove useless logic * add entry lock on file handle release * use mem chunk only, swap file chunk has problems * comment out code that maybe used later * add debug mode to compare data read and write * more efficient readResolvedChunks with linked list * small optimization * fix test compilation * minor fix on writer * add SeparateGarbageChunks * group chunks into sections * turn off debug mode * fix tests * fix tests * tmp enable swap file chunk * Revert "tmp enable swap file chunk" This reverts commit 985137ec472924e4815f258189f6ca9f2168a0a7. * simple refactoring * simple refactoring * do not re-use swap file chunk. Sealed chunks should not be re-used. * comment out debugging facilities * either mem chunk or swap file chunk is fine now * remove orderedMutex as *semaphore.Weighted not found impactful * optimize size calculation for changing large files * optimize performance to avoid going through the long list of chunks * still problems with swap file chunk * rename * tiny optimization * swap file chunk save only successfully read data * fix * enable both mem and swap file chunk * resolve chunks with range * rename * fix chunk interval list * also change file handle chunk group when adding chunks * pick in-active chunk with time-decayed counter * fix compilation * avoid nil with empty fh.entry * refactoring * rename * rename * refactor visible intervals to *list.List * refactor chunkViews to *list.List * add IntervalList for generic interval list * change visible interval to use IntervalList in generics * cahnge chunkViews to *IntervalList[*ChunkView] * use NewFileChunkSection to create * rename variables * refactor * fix renaming leftover * renaming * renaming * add 
insert interval * interval list adds lock * incrementally add chunks to readers Fixes: 1. set start and stop offset for the value object 2. clone the value object 3. use pointer instead of copy-by-value when passing to interval.Value 4. use insert interval since adding chunk could be out of order * fix tests compilation * fix tests compilation
2023-01-03 07:20:45 +00:00
return uploadResult.ToPbFileChunk(fileId, offset, tsNs), nil
2020-12-01 23:32:27 +00:00
}
// Write buffers buf through bufWriter. Buffered segments are flushed as
// chunks via saveDataAsChunk, and the entry is persisted to the filer when
// the writer is closed.
func (f *WebDavFile) Write(buf []byte) (int, error) {

	glog.V(2).Infof("WebDavFileSystem.Write %v", f.name)

	dir, _ := util.FullPath(f.name).DirAndName()

	var getErr error
	ctx := context.Background()
	// lazily load the entry on first write
	if f.entry == nil {
		f.entry, getErr = filer_pb.GetEntry(f.fs, util.FullPath(f.name))
	}
	if f.entry == nil {
		return 0, getErr
	}
	if getErr != nil {
		return 0, getErr
	}

	// install flush/close hooks once per open file
	if f.bufWriter.FlushFunc == nil {
		// flush uploads one buffered segment as a chunk at the given offset
		f.bufWriter.FlushFunc = func(data []byte, offset int64) (flushErr error) {

			var chunk *filer_pb.FileChunk
			chunk, flushErr = f.saveDataAsChunk(util.NewBytesReader(data), f.name, offset, time.Now().UnixNano())

			if flushErr != nil {
				return fmt.Errorf("%s upload result: %v", f.name, flushErr)
			}

			f.entry.Content = nil
			f.entry.Chunks = append(f.entry.GetChunks(), chunk)

			return flushErr
		}

		// close manifests the chunk list if needed, then updates the filer entry
		f.bufWriter.CloseFunc = func() error {

			manifestedChunks, manifestErr := filer.MaybeManifestize(f.saveDataAsChunk, f.entry.GetChunks())
			if manifestErr != nil {
				// not good, but should be ok
				glog.V(0).Infof("file %s close MaybeManifestize: %v", f.name, manifestErr)
			} else {
				f.entry.Chunks = manifestedChunks
			}

			flushErr := f.fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
				f.entry.Attributes.Mtime = time.Now().Unix()

				request := &filer_pb.UpdateEntryRequest{
					Directory:  dir,
					Entry:      f.entry,
					Signatures: []int32{f.fs.signature},
				}

				if _, err := client.UpdateEntry(ctx, request); err != nil {
					return fmt.Errorf("update %s: %v", f.name, err)
				}
				return nil
			})
			return flushErr
		}
	}

	written, err := f.bufWriter.Write(buf)

	if err == nil {
		// grow the recorded size if this write extended the file
		f.entry.Attributes.FileSize = uint64(max(f.off+int64(written), int64(f.entry.Attributes.FileSize)))
		glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf)))
		f.off += int64(written)
	}

	return written, err
}
// Close flushes buffered writes and drops the cached entry and read state.
func (f *WebDavFile) Close() error {

	glog.V(2).Infof("WebDavFileSystem.Close %v", f.name)

	// closing the buffered writer triggers the final flush and entry update
	err := f.bufWriter.Close()

	if f.entry != nil {
		f.entry = nil
		f.visibleIntervals = nil
	}

	return err
}
// Read reads from the file at the current offset, lazily resolving the
// entry's chunks into visible intervals and a chunk-cache-backed reader.
func (f *WebDavFile) Read(p []byte) (readSize int, err error) {

	glog.V(2).Infof("WebDavFileSystem.Read %v", f.name)

	// lazily load the entry on first read
	if f.entry == nil {
		f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name))
	}
	if f.entry == nil {
		return 0, err
	}
	if err != nil {
		return 0, err
	}

	fileSize := int64(filer.FileSize(f.entry))
	if fileSize == 0 {
		return 0, io.EOF
	}

	// resolve which chunk byte ranges are visible, once per open file
	if f.visibleIntervals == nil {
		f.visibleIntervals, _ = filer.NonOverlappingVisibleIntervals(filer.LookupFn(f.fs), f.entry.GetChunks(), 0, fileSize)
		f.reader = nil
	}
	if f.reader == nil {
		chunkViews := filer.ViewFromVisibleIntervals(f.visibleIntervals, 0, fileSize)
		f.reader = filer.NewChunkReaderAtFromClient(filer.LookupFn(f.fs), chunkViews, f.fs.chunkCache, fileSize)
	}

	readSize, err = f.reader.ReadAt(p, f.off)

	glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize))
	f.off += int64(readSize)

	if err != nil && err != io.EOF {
		glog.Errorf("file read %s: %v", f.name, err)
	}

	return
}
func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) {
2019-05-03 07:24:35 +00:00
glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count)
2020-03-23 07:01:34 +00:00
dir, _ := util.FullPath(f.name).DirAndName()
2019-05-03 07:24:35 +00:00
err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) error {
2019-05-03 07:24:35 +00:00
fi := FileInfo{
2022-09-15 06:06:44 +00:00
size: int64(filer.FileSize(entry)),
name: entry.Name,
mode: os.FileMode(entry.Attributes.FileMode),
modifiedTime: time.Unix(entry.Attributes.Mtime, 0),
2022-09-15 06:06:44 +00:00
isDirectory: entry.IsDirectory,
2019-05-03 07:24:35 +00:00
}
if !strings.HasSuffix(fi.name, "/") && fi.IsDir() {
fi.name += "/"
}
glog.V(4).Infof("entry: %v", fi.name)
ret = append(ret, &fi)
return nil
2019-05-03 07:24:35 +00:00
})
old := f.off
if old >= int64(len(ret)) {
if count > 0 {
return nil, io.EOF
}
return nil, nil
}
if count > 0 {
f.off += int64(count)
if f.off > int64(len(ret)) {
f.off = int64(len(ret))
}
} else {
f.off = int64(len(ret))
old = 0
}
return ret[old:f.off], nil
}
func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) {
glog.V(2).Infof("WebDavFile.Seek %v %v %v", f.name, offset, whence)
2019-05-03 07:24:35 +00:00
ctx := context.Background()
var err error
switch whence {
2020-09-16 07:37:57 +00:00
case io.SeekStart:
f.off = 0
2020-09-16 07:37:57 +00:00
case io.SeekEnd:
2019-05-03 07:24:35 +00:00
if fi, err := f.fs.stat(ctx, f.name); err != nil {
return 0, err
} else {
f.off = fi.Size()
}
}
f.off += offset
return f.off, err
}
// Stat returns file info for this open file by stat-ing its path on the filer.
func (f *WebDavFile) Stat() (os.FileInfo, error) {

	glog.V(2).Infof("WebDavFile.Stat %v", f.name)

	ctx := context.Background()
	return f.fs.stat(ctx, f.name)
}