Merge branch 'master' into messaging
Commit 08f2dcc532

.github/workflows/binaries_dev.yml (vendored), 3 changes
@@ -17,14 +17,13 @@ jobs:
     steps:
 
       - name: Delete old release assets
-        uses: mknejp/delete-release-assets@a8aaab13272b1eaac16cc46dddd3f725b97ee05a # v1
+        uses: mknejp/delete-release-assets@v1
         with:
           token: ${{ github.token }}
           tag: dev
           fail-if-no-assets: false
           assets: |
             weed-*
-          fail-if-no-release: false
 
   build_dev_linux_windows:
     permissions:

@@ -46,7 +46,7 @@ spec:
       imagePullSecrets:
         - name: {{ .Values.global.imagePullSecrets }}
       {{- end }}
-      serviceAccountName: seaweefds-rw-sa #hack for delete pod master after migration
+      serviceAccountName: seaweedfs-rw-sa #hack for delete pod master after migration
       terminationGracePeriodSeconds: 60
       {{- if .Values.filer.priorityClassName }}
       priorityClassName: {{ .Values.filer.priorityClassName | quote }}

@@ -3,7 +3,7 @@
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: seaweefds-rw-cr
+  name: seaweedfs-rw-cr
 rules:
   - apiGroups: [""]
     resources: ["pods"]

@@ -12,18 +12,18 @@ rules:
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: seaweefds-rw-sa
+  name: seaweedfs-rw-sa
   namespace: {{ .Release.Namespace }}
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: system:serviceaccount:seaweefds-rw-sa:default
+  name: system:serviceaccount:seaweedfs-rw-sa:default
 subjects:
   - kind: ServiceAccount
-    name: seaweefds-rw-sa
+    name: seaweedfs-rw-sa
     namespace: {{ .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: seaweefds-rw-cr
+  name: seaweedfs-rw-cr

@@ -337,3 +337,11 @@ pdaddrs = "localhost:2379"
 deleterange_concurrency = 1
 # Enable 1PC
 enable_1pc = false
+# Set the CA certificate path
+ca_path=""
+# Set the certificate path
+cert_path=""
+# Set the private key path
+key_path=""
+# The name list used to verify the cn name
+verify_cn=""

@@ -3,8 +3,6 @@ default = "c1"
 
 [cluster.c1]
 master = "localhost:9333"  # comma-separated master servers
-filer = "localhost:8888"   # filer host and port
 
 [cluster.c2]
 master = ""
-filer = ""

@@ -3,11 +3,9 @@ package filer
 import (
 	"bytes"
 	"fmt"
-	"math"
-	"sync"
-
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"golang.org/x/exp/slices"
+	"math"
 
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"

@@ -54,11 +52,11 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
 	if len(chunks) == 1 {
 		return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
 	}
-	md5_digests := [][]byte{}
+	var md5Digests [][]byte
 	for _, c := range chunks {
-		md5_digests = append(md5_digests, util.Base64Md5ToBytes(c.ETag))
+		md5Digests = append(md5Digests, util.Base64Md5ToBytes(c.ETag))
 	}
-	return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
+	return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5Digests, nil)), len(chunks))
 }
 
 func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {

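The md5_digests to md5Digests rename above touches the multi-chunk branch of ETagChunks, which follows the S3 multipart convention: hash the concatenation of the per-part binary MD5 digests and append the part count. A minimal standalone sketch of that convention (the helper names here are illustrative, not the SeaweedFS util functions):

package main

import (
	"crypto/md5"
	"fmt"
)

// multipartETag mimics the S3 multipart ETag convention:
// md5 over the concatenated binary MD5 digests of each part,
// rendered as hex and suffixed with the part count.
func multipartETag(parts [][]byte) string {
	if len(parts) == 1 {
		return fmt.Sprintf("%x", md5.Sum(parts[0]))
	}
	var joined []byte
	for _, p := range parts {
		sum := md5.Sum(p) // per-part digest
		joined = append(joined, sum[:]...)
	}
	return fmt.Sprintf("%x-%d", md5.Sum(joined), len(parts))
}

func main() {
	fmt.Println(multipartETag([][]byte{[]byte("part-1"), []byte("part-2")}))
}
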
@@ -189,12 +187,6 @@ func logPrintf(name string, visibles []VisibleInterval) {
 	*/
 }
 
-var bufPool = sync.Pool{
-	New: func() interface{} {
-		return new(VisibleInterval)
-	},
-}
-
 func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
 
 	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)

@@ -164,6 +164,10 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, nextChunkViews []*ChunkView, offset uint64) (n int, err error) {
 
 	if c.readerPattern.IsRandomMode() {
+		n, err := c.readerCache.chunkCache.ReadChunkAt(buffer, chunkView.FileId, offset)
+		if n > 0 {
+			return n, err
+		}
 		return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
 	}
 

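The random-read branch above now consults the shared chunk cache before falling back to a ranged fetch from the volume server. A rough sketch of that cache-first pattern, with a hypothetical Cache interface and fetch function standing in for readerCache.chunkCache and fetchChunkRange:

package main

import "fmt"

// Cache is a stand-in for the chunk cache that is consulted first.
type Cache interface {
	ReadAt(buf []byte, fileId string, offset uint64) (int, error)
}

// fetchFn is a stand-in for the ranged remote fetch used on a cache miss.
type fetchFn func(buf []byte, fileId string, offset int64) (int, error)

// readChunkSlice tries the cache first and only fetches remotely on a miss.
func readChunkSlice(buf []byte, fileId string, offset uint64, cache Cache, fetch fetchFn) (int, error) {
	if n, err := cache.ReadAt(buf, fileId, offset); n > 0 {
		return n, err // cache hit
	}
	return fetch(buf, fileId, int64(offset)) // cache miss: go to the volume server
}

// mapCache is a toy in-memory cache for the usage example below.
type mapCache map[string][]byte

func (m mapCache) ReadAt(buf []byte, fileId string, offset uint64) (int, error) {
	data, ok := m[fileId]
	if !ok || offset >= uint64(len(data)) {
		return 0, nil
	}
	return copy(buf, data[offset:]), nil
}

func main() {
	cache := mapCache{"3,abc": []byte("hello world")}
	fetch := func(buf []byte, fileId string, offset int64) (int, error) {
		return copy(buf, "fetched"), nil
	}
	buf := make([]byte, 5)
	n, _ := readChunkSlice(buf, "3,abc", 6, cache, fetch)
	fmt.Println(string(buf[:n])) // "world"
}
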
@@ -19,6 +19,7 @@ type ReaderCache struct {
 
 type SingleChunkCacher struct {
 	sync.RWMutex
+	cond        *sync.Cond
 	parent      *ReaderCache
 	chunkFileId string
 	data        []byte

@@ -140,6 +141,7 @@ func newSingleChunkCacher(parent *ReaderCache, fileId string, cipherKey []byte,
 		chunkSize:   chunkSize,
 		shouldCache: shouldCache,
 	}
+	t.cond = sync.NewCond(t)
 	return t
 }
 

@@ -168,6 +170,7 @@ func (s *SingleChunkCacher) startCaching() {
 	if s.shouldCache {
 		s.parent.chunkCache.SetChunk(s.chunkFileId, s.data)
 	}
+	s.cond.Broadcast()
 
 	return
 }

@@ -183,6 +186,10 @@ func (s *SingleChunkCacher) readChunkAt(buf []byte, offset int64) (int, error) {
 	s.RLock()
 	defer s.RUnlock()
 
+	for s.completedTime.IsZero() {
+		s.cond.Wait()
+	}
+
 	if s.err != nil {
 		return 0, s.err
 	}

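The three SingleChunkCacher changes above add a sync.Cond so that readers block until the background download has finished instead of seeing an incomplete buffer: the goroutine that fills the chunk calls Broadcast, and readChunkAt waits in a loop on the completion predicate. A minimal, generic sketch of that wait/broadcast pattern (standard library only, not the SeaweedFS types):

package main

import (
	"fmt"
	"sync"
	"time"
)

// asyncResult holds data produced by a background goroutine.
type asyncResult struct {
	mu   sync.Mutex
	cond *sync.Cond
	data []byte
	done bool
}

func newAsyncResult() *asyncResult {
	r := &asyncResult{}
	r.cond = sync.NewCond(&r.mu) // condition variable bound to the mutex
	return r
}

// fill simulates the background caching goroutine.
func (r *asyncResult) fill(data []byte) {
	r.mu.Lock()
	r.data = data
	r.done = true
	r.mu.Unlock()
	r.cond.Broadcast() // wake every waiting reader
}

// read blocks until fill has run, then returns a copy of the data.
func (r *asyncResult) read() []byte {
	r.mu.Lock()
	defer r.mu.Unlock()
	for !r.done { // always re-check the predicate after waking
		r.cond.Wait()
	}
	return append([]byte(nil), r.data...)
}

func main() {
	r := newAsyncResult()
	go func() {
		time.Sleep(10 * time.Millisecond)
		r.fill([]byte("chunk bytes"))
	}()
	fmt.Println(string(r.read()))
}
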
@@ -15,6 +15,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/tikv/client-go/v2/config"
 	"github.com/tikv/client-go/v2/txnkv"
 )
 

@@ -38,21 +39,25 @@ func (store *TikvStore) GetName() string {
 }
 
 func (store *TikvStore) Initialize(config util.Configuration, prefix string) error {
-	pdAddrs := []string{}
-	pdAddrsStr := config.GetString(prefix + "pdaddrs")
-	for _, item := range strings.Split(pdAddrsStr, ",") {
-		pdAddrs = append(pdAddrs, strings.TrimSpace(item))
-	}
+	ca := config.GetString(prefix + "ca_path")
+	cert := config.GetString(prefix + "cert_path")
+	key := config.GetString(prefix + "key_path")
+	verify_cn := strings.Split(config.GetString(prefix+"verify_cn"), ",")
+	pdAddrs := strings.Split(config.GetString(prefix+"pdaddrs"), ",")
 
 	drc := config.GetInt(prefix + "deleterange_concurrency")
 	if drc <= 0 {
 		drc = 1
 	}
 	store.onePC = config.GetBool(prefix + "enable_1pc")
 	store.deleteRangeConcurrency = drc
-	return store.initialize(pdAddrs)
+	return store.initialize(ca, cert, key, verify_cn, pdAddrs)
 }
 
-func (store *TikvStore) initialize(pdAddrs []string) error {
+func (store *TikvStore) initialize(ca, cert, key string, verify_cn, pdAddrs []string) error {
+	config.UpdateGlobal(func(conf *config.Config) {
+		conf.Security = config.NewSecurity(ca, cert, key, verify_cn)
+	})
 	client, err := txnkv.NewClient(pdAddrs)
 	store.client = client
 	return err

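The Initialize/initialize rewrite above wires the new filer.toml TLS keys (ca_path, cert_path, key_path, verify_cn) into the tikv client-go v2 global security config before opening the transactional client. A compacted sketch of that sequence, assuming the same client-go v2 packages used in the diff; the endpoints and certificate paths below are placeholders:

package main

import (
	"strings"

	"github.com/tikv/client-go/v2/config"
	"github.com/tikv/client-go/v2/txnkv"
)

// openTikv applies mTLS settings globally, then connects to the PD endpoints.
func openTikv(caPath, certPath, keyPath, verifyCN, pdAddrCSV string) (*txnkv.Client, error) {
	config.UpdateGlobal(func(conf *config.Config) {
		// NewSecurity takes the CA, cert, key paths and the allowed CN list.
		conf.Security = config.NewSecurity(caPath, certPath, keyPath, strings.Split(verifyCN, ","))
	})
	return txnkv.NewClient(strings.Split(pdAddrCSV, ","))
}

func main() {
	// Placeholder endpoint and empty paths; an empty CA path effectively disables TLS.
	client, err := openTikv("", "", "", "", "localhost:2379")
	if err != nil {
		panic(err)
	}
	defer client.Close()
}
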
@@ -6,6 +6,8 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"net/http"
+
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"

@@ -17,7 +19,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"github.com/gorilla/mux"
 	"google.golang.org/grpc"
-	"net/http"
 )
 
 type IamS3ApiConfig interface {

@@ -117,10 +118,10 @@ func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) {
 		}
 		return nil
 	})
-	if err != nil {
+	if err != nil && err != filer_pb.ErrNotFound {
 		return err
 	}
-	if buf.Len() == 0 {
+	if err == filer_pb.ErrNotFound || buf.Len() == 0 {
 		policies.Policies = make(map[string]PolicyDocument)
 		return nil
 	}

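The GetPolicies change above treats filer_pb.ErrNotFound as "no policies stored yet" rather than a hard failure, falling back to an empty policy map. A small sketch of that load-or-default pattern, using a stand-in loader and error rather than the SeaweedFS types:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for filer_pb.ErrNotFound.
var errNotFound = errors.New("not found")

// loadOrDefault returns the stored document, or an empty map when nothing
// has been written yet; only unexpected errors are propagated.
func loadOrDefault(load func() (map[string]string, error)) (map[string]string, error) {
	doc, err := load()
	if err != nil && !errors.Is(err, errNotFound) {
		return nil, err // a real failure
	}
	if errors.Is(err, errNotFound) || len(doc) == 0 {
		return map[string]string{}, nil // first use: start empty
	}
	return doc, nil
}

func main() {
	doc, err := loadOrDefault(func() (map[string]string, error) { return nil, errNotFound })
	fmt.Println(doc, err) // map[] <nil>
}
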
@@ -6,7 +6,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"golang.org/x/exp/slices"
-	"io"
 	"sync"
 )
 

@@ -24,7 +23,7 @@ type FileHandle struct {
 	dirtyMetadata  bool
 	dirtyPages     *PageWriter
 	entryViewCache []filer.VisibleInterval
-	reader         io.ReaderAt
+	reader         *filer.ChunkReadAt
 	contentType    string
 	handle         uint64
 	sync.Mutex

@@ -99,8 +98,15 @@ func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) {
 	fh.entryViewCache = nil
 }
 
+func (fh *FileHandle) CloseReader() {
+	if fh.reader != nil {
+		fh.reader.Close()
+	}
+}
+
 func (fh *FileHandle) Release() {
 	fh.dirtyPages.Destroy()
+	fh.CloseReader()
 }
 
 func lessThan(a, b *filer_pb.FileChunk) bool {

@@ -62,21 +62,19 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 		if chunkResolveErr != nil {
 			return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
 		}
-		fh.reader = nil
+		fh.CloseReader()
 	}
 
-	reader := fh.reader
-	if reader == nil {
+	if fh.reader == nil {
 		chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, fileSize)
 		glog.V(4).Infof("file handle read %s [%d,%d) from %d views", fileFullPath, offset, offset+int64(len(buff)), len(chunkViews))
 		for _, chunkView := range chunkViews {
 			glog.V(4).Infof(" read %s [%d,%d) from chunk %+v", fileFullPath, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.FileId)
 		}
-		reader = filer.NewChunkReaderAtFromClient(fh.wfs.LookupFn(), chunkViews, fh.wfs.chunkCache, fileSize)
+		fh.reader = filer.NewChunkReaderAtFromClient(fh.wfs.LookupFn(), chunkViews, fh.wfs.chunkCache, fileSize)
 	}
-	fh.reader = reader
 
-	totalRead, err := reader.ReadAt(buff, offset)
+	totalRead, err := fh.reader.ReadAt(buff, offset)
 
 	if err != nil && err != io.EOF {
 		glog.Errorf("file handle read %s: %v", fileFullPath, err)

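Taken together, the FileHandle changes above cache one *filer.ChunkReadAt per handle, rebuild it only when the chunk view is invalidated, and close the old reader instead of silently dropping it. A generic sketch of that lazily-built, explicitly-closed reader, with hypothetical types rather than the SeaweedFS ones:

package main

import (
	"fmt"
	"io"
	"strings"
)

// handle caches a reader that is expensive to build and must be closed.
type handle struct {
	reader io.ReadCloser
	build  func() io.ReadCloser
}

// closeReader releases the cached reader, if any.
func (h *handle) closeReader() {
	if h.reader != nil {
		h.reader.Close()
		h.reader = nil
	}
}

// invalidate is called when the underlying chunks have changed.
func (h *handle) invalidate() { h.closeReader() }

// read lazily (re)builds the reader and reuses it across calls.
func (h *handle) read(p []byte) (int, error) {
	if h.reader == nil {
		h.reader = h.build()
	}
	return h.reader.Read(p)
}

func main() {
	h := &handle{build: func() io.ReadCloser { return io.NopCloser(strings.NewReader("chunk data")) }}
	buf := make([]byte, 5)
	n, _ := h.read(buf)
	fmt.Println(string(buf[:n]))
	h.invalidate() // the next read rebuilds the reader
}
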
@@ -152,7 +152,7 @@ func (i *InodeToPath) RemovePath(path util.FullPath) {
 	}
 }
 
-func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (replacedInode uint64) {
+func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (sourceInode, targetInode uint64) {
 	i.Lock()
 	defer i.Unlock()
 	sourceInode, sourceFound := i.path2inode[sourcePath]

@@ -178,7 +178,7 @@ func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (replacedIn
 	} else {
 		glog.Errorf("MovePath %s to %s: sourceInode %d not found", sourcePath, targetPath, sourceInode)
 	}
-	return targetInode
+	return
 }
 
 func (i *InodeToPath) Forget(inode, nlookup uint64, onForgetDir func(dir util.FullPath)) {

@@ -233,10 +233,17 @@ func (wfs *WFS) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR
 		oldPath := oldParent.Child(oldName)
 		newPath := newParent.Child(newName)
 
-		replacedInode := wfs.inodeToPath.MovePath(oldPath, newPath)
-		// invalidate attr and data
-		if replacedInode > 0 {
-			wfs.fuseServer.InodeNotify(replacedInode, 0, -1)
+		sourceInode, targetInode := wfs.inodeToPath.MovePath(oldPath, newPath)
+		if sourceInode != 0 {
+			if fh, foundFh := wfs.fhmap.inode2fh[sourceInode]; foundFh && fh.entry != nil {
+				fh.entry.Name = newName
+			}
+			// invalidate attr and data
+			wfs.fuseServer.InodeNotify(sourceInode, 0, -1)
+		}
+		if targetInode != 0 {
+			// invalidate attr and data
+			wfs.fuseServer.InodeNotify(targetInode, 0, -1)
 		}
 
 	} else if resp.EventNotification.OldEntry != nil {

@@ -263,6 +263,9 @@ func (fs *FilerServer) addClient(clientType string, clientAddress string, client
 	if clientId != 0 {
 		fs.knownListenersLock.Lock()
 		_, alreadyKnown = fs.knownListeners[clientId]
+		if !alreadyKnown {
+			fs.knownListeners[clientId] = struct{}{}
+		}
 		fs.knownListenersLock.Unlock()
 	}
 	return

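The addClient fix above actually records the clientId in knownListeners while the lock is still held, so the "already known" check and the insertion happen as one atomic step. A minimal sketch of that check-then-insert-under-lock pattern:

package main

import (
	"fmt"
	"sync"
)

// registry remembers which client ids have been seen.
type registry struct {
	mu    sync.Mutex
	known map[int32]struct{}
}

// register reports whether id was already known and records it if not,
// all under one lock so concurrent callers cannot both observe "unknown".
func (r *registry) register(id int32) (alreadyKnown bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	_, alreadyKnown = r.known[id]
	if !alreadyKnown {
		r.known[id] = struct{}{}
	}
	return alreadyKnown
}

func main() {
	r := &registry{known: map[int32]struct{}{}}
	fmt.Println(r.register(42)) // false: first time seen
	fmt.Println(r.register(42)) // true: already known
}
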
@@ -185,11 +185,14 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float
 		for _, vl := range c.storageType2VolumeLayout.Items() {
 			if vl != nil {
 				volumeLayout := vl.(*VolumeLayout)
-				if volumeId > 0 && volumeLayout.Lookup(needle.VolumeId(volumeId)) == nil {
-					continue
-				}
-				t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
+				if volumeId > 0 {
+					if volumeLayout.Lookup(needle.VolumeId(volumeId)) != nil {
+						t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
+					}
+				} else {
+					t.vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
+				}
 			}
 		}
 	}
 }
