diff --git a/.github/workflows/binaries_dev.yml b/.github/workflows/binaries_dev.yml
index a61846127..f906ed2bb 100644
--- a/.github/workflows/binaries_dev.yml
+++ b/.github/workflows/binaries_dev.yml
@@ -17,14 +17,13 @@ jobs:
 
     steps:
       - name: Delete old release assets
-        uses: mknejp/delete-release-assets@a8aaab13272b1eaac16cc46dddd3f725b97ee05a # v1
+        uses: mknejp/delete-release-assets@v1
         with:
           token: ${{ github.token }}
           tag: dev
           fail-if-no-assets: false
           assets: |
             weed-*
-          fail-if-no-release: false
 
   build_dev_linux_windows:
     permissions:
diff --git a/k8s/helm_charts2/templates/filer-statefulset.yaml b/k8s/helm_charts2/templates/filer-statefulset.yaml
index 94003819f..21a4256be 100644
--- a/k8s/helm_charts2/templates/filer-statefulset.yaml
+++ b/k8s/helm_charts2/templates/filer-statefulset.yaml
@@ -46,7 +46,7 @@ spec:
       imagePullSecrets:
        - name: {{ .Values.global.imagePullSecrets }}
      {{- end }}
-      serviceAccountName: seaweefds-rw-sa #hack for delete pod master after migration
+      serviceAccountName: seaweedfs-rw-sa #hack for delete pod master after migration
      terminationGracePeriodSeconds: 60
      {{- if .Values.filer.priorityClassName }}
      priorityClassName: {{ .Values.filer.priorityClassName | quote }}
diff --git a/k8s/helm_charts2/templates/service-account.yaml b/k8s/helm_charts2/templates/service-account.yaml
index 978452ca4..22c29b56a 100644
--- a/k8s/helm_charts2/templates/service-account.yaml
+++ b/k8s/helm_charts2/templates/service-account.yaml
@@ -3,7 +3,7 @@
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: seaweefds-rw-cr
+  name: seaweedfs-rw-cr
 rules:
   - apiGroups: [""]
     resources: ["pods"]
@@ -12,18 +12,18 @@ rules:
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: seaweefds-rw-sa
+  name: seaweedfs-rw-sa
   namespace: {{ .Release.Namespace }}
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: system:serviceaccount:seaweefds-rw-sa:default
+  name: system:serviceaccount:seaweedfs-rw-sa:default
 subjects:
   - kind: ServiceAccount
-    name: seaweefds-rw-sa
+    name: seaweedfs-rw-sa
     namespace: {{ .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: seaweefds-rw-cr
+  name: seaweedfs-rw-cr
diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml
index c82de8da0..860d8b291 100644
--- a/weed/command/scaffold/filer.toml
+++ b/weed/command/scaffold/filer.toml
@@ -337,3 +337,11 @@ pdaddrs = "localhost:2379"
 deleterange_concurrency = 1
 # Enable 1PC
 enable_1pc = false
+# Set the CA certificate path
+ca_path=""
+# Set the certificate path
+cert_path=""
+# Set the private key path
+key_path=""
+# The name list used to verify the cn name
+verify_cn=""
\ No newline at end of file
diff --git a/weed/command/scaffold/shell.toml b/weed/command/scaffold/shell.toml
index 288ae2efe..0213708a4 100644
--- a/weed/command/scaffold/shell.toml
+++ b/weed/command/scaffold/shell.toml
@@ -3,8 +3,6 @@ default = "c1"
 
 [cluster.c1]
 master = "localhost:9333" # comma-separated master servers
-filer = "localhost:8888" # filer host and port
 
 [cluster.c2]
 master = ""
-filer = ""
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
index 48b344bf8..4c732ddcd 100644
--- a/weed/filer/filechunks.go
+++ b/weed/filer/filechunks.go
@@ -3,11 +3,9 @@ package filer
 import (
 	"bytes"
 	"fmt"
-	"math"
-	"sync"
-
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"golang.org/x/exp/slices"
+	"math"
 
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -54,11 +52,11 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
 	if len(chunks) == 1 {
 		return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
 	}
-	md5_digests := [][]byte{}
+	var md5Digests [][]byte
 	for _, c := range chunks {
-		md5_digests = append(md5_digests, util.Base64Md5ToBytes(c.ETag))
+		md5Digests = append(md5Digests, util.Base64Md5ToBytes(c.ETag))
 	}
-	return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
+	return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5Digests, nil)), len(chunks))
 }
 
 func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
@@ -189,12 +187,6 @@ func logPrintf(name string, visibles []VisibleInterval) {
 	*/
 }
 
-var bufPool = sync.Pool{
-	New: func() interface{} {
-		return new(VisibleInterval)
-	},
-}
-
 func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
 
 	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index 7d9997761..b938083d8 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -164,6 +164,10 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
 func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, nextChunkViews []*ChunkView, offset uint64) (n int, err error) {
 
 	if c.readerPattern.IsRandomMode() {
+		n, err := c.readerCache.chunkCache.ReadChunkAt(buffer, chunkView.FileId, offset)
+		if n > 0 {
+			return n, err
+		}
 		return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
 	}
diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go
index bce97cc49..c319f6c78 100644
--- a/weed/filer/reader_cache.go
+++ b/weed/filer/reader_cache.go
@@ -19,6 +19,7 @@ type ReaderCache struct {
 
 type SingleChunkCacher struct {
 	sync.RWMutex
+	cond           *sync.Cond
 	parent         *ReaderCache
 	chunkFileId    string
 	data           []byte
@@ -140,6 +141,7 @@ func newSingleChunkCacher(parent *ReaderCache, fileId string, cipherKey []byte,
 		chunkSize:   chunkSize,
 		shouldCache: shouldCache,
 	}
+	t.cond = sync.NewCond(t)
 	return t
 }
 
@@ -168,6 +170,7 @@ func (s *SingleChunkCacher) startCaching() {
 	if s.shouldCache {
 		s.parent.chunkCache.SetChunk(s.chunkFileId, s.data)
 	}
+	s.cond.Broadcast()
 
 	return
 }
@@ -183,6 +186,10 @@ func (s *SingleChunkCacher) readChunkAt(buf []byte, offset int64) (int, error) {
 	s.RLock()
 	defer s.RUnlock()
 
+	for s.completedTime.IsZero() {
+		s.cond.Wait()
+	}
+
 	if s.err != nil {
 		return 0, s.err
 	}
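Note on the reader_cache.go hunks above: readChunkAt now blocks on a sync.Cond (using the cacher itself as the Locker) until the background fetch marks the chunk complete and broadcasts. Below is a minimal, self-contained sketch of that wait/broadcast pattern; the chunkFill type and its fields are illustrative names rather than SeaweedFS types, and it uses a plain Mutex instead of the RWMutex in the patch for simplicity.

// Sketch of the wait/broadcast pattern introduced in SingleChunkCacher.
// All names here are illustrative, not SeaweedFS APIs.
package main

import (
	"fmt"
	"sync"
)

type chunkFill struct {
	sync.Mutex
	cond *sync.Cond
	done bool
	data []byte
}

func newChunkFill() *chunkFill {
	c := &chunkFill{}
	c.cond = sync.NewCond(c) // the struct itself is the Locker, as in the patch
	return c
}

// fill runs in the background and wakes any blocked readers when finished,
// mirroring startCaching calling cond.Broadcast().
func (c *chunkFill) fill() {
	c.Lock()
	defer c.Unlock()
	c.data = []byte("chunk bytes")
	c.done = true
	c.cond.Broadcast()
}

// read blocks until fill has completed, mirroring readChunkAt waiting
// for completedTime to be set.
func (c *chunkFill) read() []byte {
	c.Lock()
	defer c.Unlock()
	for !c.done {
		c.cond.Wait()
	}
	return c.data
}

func main() {
	c := newChunkFill()
	go c.fill()
	fmt.Printf("%s\n", c.read())
}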
diff --git a/weed/filer/tikv/tikv_store.go b/weed/filer/tikv/tikv_store.go
index f8932663d..ca6794f9c 100644
--- a/weed/filer/tikv/tikv_store.go
+++ b/weed/filer/tikv/tikv_store.go
@@ -15,6 +15,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/tikv/client-go/v2/config"
 	"github.com/tikv/client-go/v2/txnkv"
 )
 
@@ -38,21 +39,25 @@ func (store *TikvStore) GetName() string {
 }
 
 func (store *TikvStore) Initialize(config util.Configuration, prefix string) error {
-	pdAddrs := []string{}
-	pdAddrsStr := config.GetString(prefix + "pdaddrs")
-	for _, item := range strings.Split(pdAddrsStr, ",") {
-		pdAddrs = append(pdAddrs, strings.TrimSpace(item))
-	}
+	ca := config.GetString(prefix + "ca_path")
+	cert := config.GetString(prefix + "cert_path")
+	key := config.GetString(prefix + "key_path")
+	verify_cn := strings.Split(config.GetString(prefix+"verify_cn"), ",")
+	pdAddrs := strings.Split(config.GetString(prefix+"pdaddrs"), ",")
+
 	drc := config.GetInt(prefix + "deleterange_concurrency")
 	if drc <= 0 {
 		drc = 1
 	}
 	store.onePC = config.GetBool(prefix + "enable_1pc")
 	store.deleteRangeConcurrency = drc
-	return store.initialize(pdAddrs)
+	return store.initialize(ca, cert, key, verify_cn, pdAddrs)
 }
 
-func (store *TikvStore) initialize(pdAddrs []string) error {
+func (store *TikvStore) initialize(ca, cert, key string, verify_cn, pdAddrs []string) error {
+	config.UpdateGlobal(func(conf *config.Config) {
+		conf.Security = config.NewSecurity(ca, cert, key, verify_cn)
+	})
 	client, err := txnkv.NewClient(pdAddrs)
 	store.client = client
 	return err
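Note on the tikv_store.go hunk above: the new scaffold keys (ca_path, cert_path, key_path, verify_cn) are pushed into client-go's process-global security settings before the transactional client is created. Below is a standalone sketch of that wiring using the same config.UpdateGlobal / config.NewSecurity / txnkv.NewClient calls as the patch; the certificate paths, CN list, and PD address are placeholders.

// Standalone sketch of the TLS wiring the patch adds to TikvStore.initialize.
// File paths and addresses below are placeholders, not defaults.
package main

import (
	"log"

	"github.com/tikv/client-go/v2/config"
	"github.com/tikv/client-go/v2/txnkv"
)

func main() {
	// Install the mTLS material globally before the client is created,
	// as the new initialize() does.
	config.UpdateGlobal(func(conf *config.Config) {
		conf.Security = config.NewSecurity(
			"/etc/tikv/ca.pem",      // ca_path
			"/etc/tikv/client.pem",  // cert_path
			"/etc/tikv/client.key",  // key_path
			[]string{"tikv-server"}, // verify_cn
		)
	})

	client, err := txnkv.NewClient([]string{"localhost:2379"})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}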
diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go
index 8a464782a..cdeb9b927 100644
--- a/weed/iamapi/iamapi_server.go
+++ b/weed/iamapi/iamapi_server.go
@@ -6,6 +6,8 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"net/http"
+
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -17,7 +19,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"github.com/gorilla/mux"
 	"google.golang.org/grpc"
-	"net/http"
 )
 
 type IamS3ApiConfig interface {
@@ -117,10 +118,10 @@ func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) {
 		}
 		return nil
 	})
-	if err != nil {
+	if err != nil && err != filer_pb.ErrNotFound {
 		return err
 	}
-	if buf.Len() == 0 {
+	if err == filer_pb.ErrNotFound || buf.Len() == 0 {
 		policies.Policies = make(map[string]PolicyDocument)
 		return nil
 	}
diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go
index 49918c104..0dfcbd7f6 100644
--- a/weed/mount/filehandle.go
+++ b/weed/mount/filehandle.go
@@ -6,7 +6,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"golang.org/x/exp/slices"
-	"io"
 	"sync"
 )
 
@@ -24,7 +23,7 @@ type FileHandle struct {
 	dirtyMetadata  bool
 	dirtyPages     *PageWriter
 	entryViewCache []filer.VisibleInterval
-	reader         io.ReaderAt
+	reader         *filer.ChunkReadAt
 	contentType    string
 	handle         uint64
 	sync.Mutex
@@ -99,8 +98,15 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
 	fh.entryViewCache = nil
 }
 
+func (fh *FileHandle) CloseReader() {
+	if fh.reader != nil {
+		fh.reader.Close()
+	}
+}
+
 func (fh *FileHandle) Release() {
 	fh.dirtyPages.Destroy()
+	fh.CloseReader()
 }
 
 func lessThan(a, b *filer_pb.FileChunk) bool {
diff --git a/weed/mount/filehandle_read.go b/weed/mount/filehandle_read.go
index 88ab8612c..45fc10a0b 100644
--- a/weed/mount/filehandle_read.go
+++ b/weed/mount/filehandle_read.go
@@ -62,21 +62,19 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 		if chunkResolveErr != nil {
 			return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
 		}
-		fh.reader = nil
+		fh.CloseReader()
 	}
 
-	reader := fh.reader
-	if reader == nil {
+	if fh.reader == nil {
 		chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, fileSize)
 		glog.V(4).Infof("file handle read %s [%d,%d) from %d views", fileFullPath, offset, offset+int64(len(buff)), len(chunkViews))
 		for _, chunkView := range chunkViews {
 			glog.V(4).Infof(" read %s [%d,%d) from chunk %+v", fileFullPath, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.FileId)
 		}
-		reader = filer.NewChunkReaderAtFromClient(fh.wfs.LookupFn(), chunkViews, fh.wfs.chunkCache, fileSize)
+		fh.reader = filer.NewChunkReaderAtFromClient(fh.wfs.LookupFn(), chunkViews, fh.wfs.chunkCache, fileSize)
 	}
-	fh.reader = reader
 
-	totalRead, err := reader.ReadAt(buff, offset)
+	totalRead, err := fh.reader.ReadAt(buff, offset)
 
 	if err != nil && err != io.EOF {
 		glog.Errorf("file handle read %s: %v", fileFullPath, err)
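Note on the two mount changes above: FileHandle.reader is now the concrete *filer.ChunkReadAt, so the handle can close it on release or whenever the chunk list changes, and readFromChunks rebuilds it lazily on the next read. Below is a sketch of that build-lazily / reuse / close-and-rebuild lifecycle; handle, chunkReader, and invalidate are illustrative names and not the SeaweedFS API.

// Sketch of the reader lifecycle established by the mount changes above.
// All names here are illustrative.
package main

import (
	"fmt"
	"sync"
)

type chunkReader struct {
	generation int
}

func (r *chunkReader) ReadAt(p []byte, off int64) (int, error) {
	return copy(p, fmt.Sprintf("gen %d @ %d", r.generation, off)), nil
}

func (r *chunkReader) Close() error { return nil }

type handle struct {
	sync.Mutex
	reader     *chunkReader
	generation int
}

// invalidate mirrors what happens when the chunk list changes: close the old
// reader and let the next read rebuild one from fresh chunk views.
func (h *handle) invalidate() {
	h.Lock()
	defer h.Unlock()
	if h.reader != nil {
		h.reader.Close()
		h.reader = nil
	}
	h.generation++
}

func (h *handle) readAt(p []byte, off int64) (int, error) {
	h.Lock()
	defer h.Unlock()
	if h.reader == nil { // lazy rebuild, as in readFromChunks
		h.reader = &chunkReader{generation: h.generation}
	}
	return h.reader.ReadAt(p, off)
}

func main() {
	h := &handle{}
	buf := make([]byte, 32)
	n, _ := h.readAt(buf, 0)
	fmt.Println(string(buf[:n]))
	h.invalidate()
	n, _ = h.readAt(buf, 0)
	fmt.Println(string(buf[:n]))
}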
diff --git a/weed/mount/inode_to_path.go b/weed/mount/inode_to_path.go
index fa17a9261..29635efca 100644
--- a/weed/mount/inode_to_path.go
+++ b/weed/mount/inode_to_path.go
@@ -152,7 +152,7 @@ func (i *InodeToPath) RemovePath(path util.FullPath) {
 	}
 }
 
-func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (replacedInode uint64) {
+func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (sourceInode, targetInode uint64) {
 	i.Lock()
 	defer i.Unlock()
 	sourceInode, sourceFound := i.path2inode[sourcePath]
@@ -178,7 +178,7 @@ func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (replacedIn
 	} else {
 		glog.Errorf("MovePath %s to %s: sourceInode %d not found", sourcePath, targetPath, sourceInode)
 	}
-	return targetInode
+	return
 }
 
 func (i *InodeToPath) Forget(inode, nlookup uint64, onForgetDir func(dir util.FullPath)) {
diff --git a/weed/mount/weedfs_rename.go b/weed/mount/weedfs_rename.go
index 23caa48cd..0c7de0bbb 100644
--- a/weed/mount/weedfs_rename.go
+++ b/weed/mount/weedfs_rename.go
@@ -233,10 +233,17 @@ func (wfs *WFS) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR
 		oldPath := oldParent.Child(oldName)
 		newPath := newParent.Child(newName)
 
-		replacedInode := wfs.inodeToPath.MovePath(oldPath, newPath)
-		// invalidate attr and data
-		if replacedInode > 0 {
-			wfs.fuseServer.InodeNotify(replacedInode, 0, -1)
+		sourceInode, targetInode := wfs.inodeToPath.MovePath(oldPath, newPath)
+		if sourceInode != 0 {
+			if fh, foundFh := wfs.fhmap.inode2fh[sourceInode]; foundFh && fh.entry != nil {
+				fh.entry.Name = newName
+			}
+			// invalidate attr and data
+			wfs.fuseServer.InodeNotify(sourceInode, 0, -1)
+		}
+		if targetInode != 0 {
+			// invalidate attr and data
+			wfs.fuseServer.InodeNotify(targetInode, 0, -1)
 		}
 
 	} else if resp.EventNotification.OldEntry != nil {
diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go
index da710234b..745379e7c 100644
--- a/weed/server/filer_grpc_server_sub_meta.go
+++ b/weed/server/filer_grpc_server_sub_meta.go
@@ -263,6 +263,9 @@ func (fs *FilerServer) addClient(clientType string, clientAddress string, client
 	if clientId != 0 {
 		fs.knownListenersLock.Lock()
 		_, alreadyKnown = fs.knownListeners[clientId]
+		if !alreadyKnown {
+			fs.knownListeners[clientId] = struct{}{}
+		}
 		fs.knownListenersLock.Unlock()
 	}
 	return
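Note on the filer_grpc_server_sub_meta.go hunk above: a first-time clientId is now recorded in knownListeners while the lock is still held, so the membership check and the insert form one atomic step. Below is a small sketch of that check-and-register idiom; registry and its fields are illustrative names, not SeaweedFS types.

// Sketch of the check-and-register idiom used by the addClient fix:
// the lookup and the insert happen under the same lock, so an id is
// recorded exactly once. Names are illustrative.
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.Mutex
	known map[int32]struct{}
}

// register reports whether the id was already known, and records it if not.
func (r *registry) register(id int32) (alreadyKnown bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	_, alreadyKnown = r.known[id]
	if !alreadyKnown {
		r.known[id] = struct{}{}
	}
	return
}

func main() {
	r := &registry{known: make(map[int32]struct{})}
	fmt.Println(r.register(7)) // false: first time seen
	fmt.Println(r.register(7)) // true: already registered
}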
diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go
index e53aa2853..3850b8916 100644
--- a/weed/topology/topology_vacuum.go
+++ b/weed/topology/topology_vacuum.go
@@ -185,10 +185,13 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float
 		for _, vl := range c.storageType2VolumeLayout.Items() {
 			if vl != nil {
 				volumeLayout := vl.(*VolumeLayout)
-				if volumeId > 0 && volumeLayout.Lookup(needle.VolumeId(volumeId)) == nil {
-					continue
+				if volumeId > 0 {
+					if volumeLayout.Lookup(needle.VolumeId(volumeId)) != nil {
+						t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
+					}
+				} else {
+					t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
 				}
-				t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
 			}
 		}
 	}
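Note on the topology_vacuum.go hunk above: with an explicit volume id, only the volume layout that actually contains that id is vacuumed, while a zero id still vacuums every layout. Below is a small sketch of that selection logic; layout and vacuum are illustrative names, not the SeaweedFS topology code.

// Sketch of the selection logic after the topology_vacuum.go change.
// Names are illustrative.
package main

import "fmt"

type layout struct {
	name    string
	volumes map[uint32]bool
}

func (l *layout) contains(id uint32) bool { return l.volumes[id] }

func vacuum(layouts []*layout, volumeId uint32) {
	for _, l := range layouts {
		if volumeId > 0 {
			if l.contains(volumeId) {
				fmt.Println("vacuum", l.name, "for volume", volumeId)
			}
		} else {
			fmt.Println("vacuum", l.name)
		}
	}
}

func main() {
	layouts := []*layout{
		{name: "hdd", volumes: map[uint32]bool{1: true, 2: true}},
		{name: "ssd", volumes: map[uint32]bool{3: true}},
	}
	vacuum(layouts, 3) // only the "ssd" layout
	vacuum(layouts, 0) // every layout
}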