This commit is contained in:
chrislu 2022-09-16 23:54:39 -07:00
commit 3fc261d27c
20 changed files with 31 additions and 17 deletions

View file

@ -132,6 +132,7 @@ func init() {
serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files") serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers") serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
serverOptions.v.hasSlowRead = cmdServer.Flag.Bool("volume.hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.") serverOptions.v.hasSlowRead = cmdServer.Flag.Bool("volume.hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
serverOptions.v.readBufferSize = cmdServer.Flag.Int("volume.readBufferSize", 1024 * 1024, "<experimental> larger values can optimize query performance but will increase some memory usage. Use with hasSlowRead normally.")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port") s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")

View file

@ -67,6 +67,7 @@ type VolumeServerOptions struct {
// pulseSeconds *int // pulseSeconds *int
inflightUploadDataTimeout *time.Duration inflightUploadDataTimeout *time.Duration
hasSlowRead *bool hasSlowRead *bool
readBufferSize *int
} }
func init() { func init() {
@ -98,6 +99,7 @@ func init() {
v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files") v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers") v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
v.hasSlowRead = cmdVolume.Flag.Bool("hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.") v.hasSlowRead = cmdVolume.Flag.Bool("hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
v.readBufferSize = cmdVolume.Flag.Int("readBufferSize", 1024 * 1024, "<experimental> larger values can optimize query performance but will increase some memory usage. Use with hasSlowRead normally.")
} }
var cmdVolume = &Command{ var cmdVolume = &Command{
@ -246,6 +248,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
int64(*v.concurrentDownloadLimitMB)*1024*1024, int64(*v.concurrentDownloadLimitMB)*1024*1024,
*v.inflightUploadDataTimeout, *v.inflightUploadDataTimeout,
*v.hasSlowRead, *v.hasSlowRead,
*v.readBufferSize,
) )
// starting grpc server // starting grpc server
grpcS := v.startGrpcService(volumeServer) grpcS := v.startGrpcService(volumeServer)

View file

@ -195,6 +195,8 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
httpStatus := http.StatusInternalServerError httpStatus := http.StatusInternalServerError
if err == filer_pb.ErrNotFound { if err == filer_pb.ErrNotFound {
httpStatus = http.StatusNoContent httpStatus = http.StatusNoContent
writeJsonQuiet(w, r, httpStatus, nil)
return
} }
writeJsonError(w, r, httpStatus, err) writeJsonError(w, r, httpStatus, err)
return return

View file

@ -29,6 +29,7 @@ type VolumeServer struct {
inFlightDownloadDataLimitCond *sync.Cond inFlightDownloadDataLimitCond *sync.Cond
inflightUploadDataTimeout time.Duration inflightUploadDataTimeout time.Duration
hasSlowRead bool hasSlowRead bool
readBufferSize int
SeedMasterNodes []pb.ServerAddress SeedMasterNodes []pb.ServerAddress
currentMaster pb.ServerAddress currentMaster pb.ServerAddress
@ -66,6 +67,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
concurrentDownloadLimit int64, concurrentDownloadLimit int64,
inflightUploadDataTimeout time.Duration, inflightUploadDataTimeout time.Duration,
hasSlowRead bool, hasSlowRead bool,
readBufferSize int,
) *VolumeServer { ) *VolumeServer {
v := util.GetViper() v := util.GetViper()
@ -96,6 +98,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
concurrentDownloadLimit: concurrentDownloadLimit, concurrentDownloadLimit: concurrentDownloadLimit,
inflightUploadDataTimeout: inflightUploadDataTimeout, inflightUploadDataTimeout: inflightUploadDataTimeout,
hasSlowRead: hasSlowRead, hasSlowRead: hasSlowRead,
readBufferSize: readBufferSize,
} }
vs.SeedMasterNodes = masterNodes vs.SeedMasterNodes = masterNodes

View file

@ -116,8 +116,9 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
cookie := n.Cookie cookie := n.Cookie
readOption := &storage.ReadOption{ readOption := &storage.ReadOption{
ReadDeleted: r.FormValue("readDeleted") == "true", ReadDeleted: r.FormValue("readDeleted") == "true",
HasSlowRead: vs.hasSlowRead, HasSlowRead: vs.hasSlowRead,
ReadBufferSize: vs.readBufferSize,
} }
var count int var count int

View file

@ -98,7 +98,7 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
return fmt.Errorf("parse replication %s: %v", *replication, err) return fmt.Errorf("parse replication %s: %v", *replication, err)
} }
if *volumeGrowthCount%rp.GetCopyCount() != 0 { if *volumeGrowthCount%rp.GetCopyCount() != 0 {
return fmt.Errorf("volumeGrowthCount %d should be devided by replication copy count %d", *volumeGrowthCount, rp.GetCopyCount()) return fmt.Errorf("volumeGrowthCount %d should be divided by replication copy count %d", *volumeGrowthCount, rp.GetCopyCount())
} }
} }

View file

@ -21,7 +21,7 @@ func (c *commandLock) Name() string {
func (c *commandLock) Help() string { func (c *commandLock) Help() string {
return `lock in order to exclusively manage the cluster return `lock in order to exclusively manage the cluster
This is a blocking operation if there is alread another lock. This is a blocking operation if there is already another lock.
` `
} }

View file

@ -96,7 +96,7 @@ After caching the file content, the entry.RemoteEntry will be
remoteEntry.LastLocalSyncTsNs == time.Now.UnixNano() remoteEntry.LastLocalSyncTsNs == time.Now.UnixNano()
Attributes.FileSize = uint64(remoteEntry.RemoteSize) Attributes.FileSize = uint64(remoteEntry.RemoteSize)
Attributes.Mtime = remoteEntry.RemoteMtime Attributes.Mtime = remoteEntry.RemoteMtime
chunks = non-emtpy chunks = non-empty
When "weed filer.remote.sync" to upload local changes to remote, the criteria is: When "weed filer.remote.sync" to upload local changes to remote, the criteria is:

View file

@ -285,7 +285,7 @@ func TestCircuitBreakerShell(t *testing.T) {
t.Error(err) t.Error(err)
} }
if !reflect.DeepEqual(actual, expect) { if !reflect.DeepEqual(actual, expect) {
t.Fatal("result of s3 circuit breaker shell command is unexpect!") t.Fatal("result of s3 circuit breaker shell command is unexpected!")
} }
} }
} }

View file

@ -60,7 +60,7 @@ func (c *commandS3CleanUploads) Do(args []string, commandEnv *CommandEnv, writer
for _, bucket := range buckets { for _, bucket := range buckets {
if err := c.cleanupUploads(commandEnv, writer, filerBucketsPath, bucket, *uploadedTimeAgo, signingKey); err != nil { if err := c.cleanupUploads(commandEnv, writer, filerBucketsPath, bucket, *uploadedTimeAgo, signingKey); err != nil {
fmt.Fprintf(writer, fmt.Sprintf("failed cleanup uploads for backet %s: %v", bucket, err)) fmt.Fprintf(writer, fmt.Sprintf("failed cleanup uploads for bucket %s: %v", bucket, err))
} }
} }

View file

@ -153,7 +153,7 @@ func allocate(hMapFile windows.Handle, offset uint64, length uint64, write bool)
mBuffer := MemoryBuffer{} mBuffer := MemoryBuffer{}
//align memory allocations to the minium virtal memory allocation size //align memory allocations to the minium virtual memory allocation size
dwSysGran := systemInfo.dwAllocationGranularity dwSysGran := systemInfo.dwAllocationGranularity
start := (offset / uint64(dwSysGran)) * uint64(dwSysGran) start := (offset / uint64(dwSysGran)) * uint64(dwSysGran)

View file

@ -15,7 +15,7 @@ func TestParseFileIdFromString(t *testing.T) {
fidStr1 = "100, 12345678" fidStr1 = "100, 12345678"
_, err = ParseFileIdFromString(fidStr1) _, err = ParseFileIdFromString(fidStr1)
if err == nil { if err == nil {
t.Errorf("%s : needlId invalid syntax", fidStr1) t.Errorf("%s : needleId invalid syntax", fidStr1)
} }
fidStr1 = "100,123456789" fidStr1 = "100,123456789"

View file

@ -195,7 +195,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
} }
contentType := part.Header.Get("Content-Type") contentType := part.Header.Get("Content-Type")
if contentType != "" && contentType != "application/octet-stream" && mtype != contentType { if contentType != "" && contentType != "application/octet-stream" && mtype != contentType {
pu.MimeType = contentType // only return mime type if not deductable pu.MimeType = contentType // only return mime type if not deducible
mtype = contentType mtype = contentType
} }

View file

@ -128,7 +128,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u
return return
} }
if offset >= MaxPossibleVolumeSize && n.Size.IsValid() { if offset >= MaxPossibleVolumeSize && n.Size.IsValid() {
err = fmt.Errorf("Volume Size %d Exeededs %d", offset, MaxPossibleVolumeSize) err = fmt.Errorf("Volume Size %d Exceeded %d", offset, MaxPossibleVolumeSize)
return return
} }

View file

@ -44,6 +44,10 @@ type ReadOption struct {
// * read requests should complete asap, not blocking other requests. // * read requests should complete asap, not blocking other requests.
// * write requests may see high latency when downloading large files. // * write requests may see high latency when downloading large files.
HasSlowRead bool HasSlowRead bool
// increasing ReadBufferSize can reduce the number of lock acquisitions and shorten read P99 latency.
// but will increase memory usage a bit. Use with hasSlowRead normally.
ReadBufferSize int
} }
/* /*
@ -58,7 +62,7 @@ type Store struct {
GrpcPort int GrpcPort int
PublicUrl string PublicUrl string
Locations []*DiskLocation Locations []*DiskLocation
dataCenter string // optional informaton, overwriting master setting if exists dataCenter string // optional information, overwriting master setting if exists
rack string // optional information, overwriting master setting if exists rack string // optional information, overwriting master setting if exists
connected bool connected bool
NeedleMapKind NeedleMapKind NeedleMapKind NeedleMapKind

View file

@ -339,7 +339,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
ecVolume.ShardLocationsLock.RLock() ecVolume.ShardLocationsLock.RLock()
for shardId, locations := range ecVolume.ShardLocations { for shardId, locations := range ecVolume.ShardLocations {
// skip currnent shard or empty shard // skip current shard or empty shard
if shardId == shardIdToRecover { if shardId == shardIdToRecover {
continue continue
} }

View file

@ -10,7 +10,7 @@ import (
func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
if v := s.findVolume(volumeId); v != nil { if v := s.findVolume(volumeId); v != nil {
glog.V(3).Infof("volumd %d garbage level: %f", volumeId, v.garbageLevel()) glog.V(3).Infof("volume %d garbage level: %f", volumeId, v.garbageLevel())
return v.garbageLevel(), nil return v.garbageLevel(), nil
} }
return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId) return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)

View file

@ -135,7 +135,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
actualOffset += int64(MaxPossibleVolumeSize) actualOffset += int64(MaxPossibleVolumeSize)
} }
buf := mem.Allocate(min(1024*1024, int(size))) buf := mem.Allocate(min(readOption.ReadBufferSize, int(size)))
defer mem.Free(buf) defer mem.Free(buf)
// read needle data // read needle data

View file

@ -167,7 +167,7 @@ func (v *Volume) CommitCompact() error {
if e = v.load(true, false, v.needleMapKind, 0); e != nil { if e = v.load(true, false, v.needleMapKind, 0); e != nil {
return e return e
} }
glog.V(3).Infof("Finish commiting volume %d", v.Id) glog.V(3).Infof("Finish committing volume %d", v.Id)
return nil return nil
} }

View file

@ -81,7 +81,7 @@ func removeVolumeFiles(filename string) {
// compaction // compaction
os.Remove(filename + ".cpd") os.Remove(filename + ".cpd")
os.Remove(filename + ".cpx") os.Remove(filename + ".cpx")
// level db indx file // level db index file
os.RemoveAll(filename + ".ldb") os.RemoveAll(filename + ".ldb")
// marker for damaged or incomplete volume // marker for damaged or incomplete volume
os.Remove(filename + ".note") os.Remove(filename + ".note")