s3: increase timeout limit

https://github.com/chrislusf/seaweedfs/issues/2541
chrislu 2021-12-29 22:21:02 -08:00
parent d351541757
commit 5788bf2270
3 changed files with 25 additions and 4 deletions


@@ -192,7 +192,7 @@ func (s3opt *S3Options) startS3Server() bool {
 	httpS := &http.Server{Handler: router}
 
 	listenAddress := fmt.Sprintf("%s:%d", *s3opt.bindIp, *s3opt.port)
-	s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
+	s3ApiListener, err := util.NewListener(listenAddress, time.Duration(30)*time.Second)
 	if err != nil {
 		glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
 	}
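
The one-line change above raises the connection timeout handed to util.NewListener from 10 to 30 seconds, giving the slow clients reported in issue 2541 more headroom before the server cuts them off. As a minimal sketch of how such a deadline-enforcing listener can be built, assuming the deadline is reset on every read so only idle connections time out; the names here (timeoutConn, newListener) are illustrative, not the actual weed/util implementation:

package listener

import (
	"net"
	"time"
)

// timeoutConn is a hypothetical wrapper: it pushes the read deadline
// forward before each read, so an active connection stays alive while
// an idle one is dropped after the configured timeout.
type timeoutConn struct {
	net.Conn
	timeout time.Duration
}

func (c *timeoutConn) Read(p []byte) (int, error) {
	if err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)); err != nil {
		return 0, err
	}
	return c.Conn.Read(p)
}

// timeoutListener wraps every accepted connection in a timeoutConn.
type timeoutListener struct {
	net.Listener
	timeout time.Duration
}

func (l *timeoutListener) Accept() (net.Conn, error) {
	conn, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	return &timeoutConn{Conn: conn, timeout: l.timeout}, nil
}

func newListener(addr string, timeout time.Duration) (net.Listener, error) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	return &timeoutListener{Listener: l, timeout: timeout}, nil
}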


@@ -364,7 +364,9 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) {
 		statusCode = proxyResponse.StatusCode
 	}
 	w.WriteHeader(statusCode)
-	io.Copy(w, proxyResponse.Body)
+	if n, err := io.Copy(w, proxyResponse.Body); err != nil {
+		glog.V(1).Infof("passthrough response read %d bytes: %v", n, err)
+	}
 	return statusCode
 }
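
This hunk stops discarding io.Copy's result when proxying a response body: the status line has already been written, so a mid-stream failure (typically a client dropping the connection during a slow transfer) can only be surfaced via the verbose log. A self-contained sketch of the pattern, with a hypothetical failAfter writer standing in for the dropped connection:

package main

import (
	"errors"
	"io"
	"log"
	"strings"
)

// failAfter is not part of the commit: it accepts a few bytes and
// then fails, simulating a client that disconnects mid-download.
type failAfter struct{ remaining int }

func (f *failAfter) Write(p []byte) (int, error) {
	if len(p) > f.remaining {
		n := f.remaining
		f.remaining = 0
		return n, errors.New("connection reset by peer")
	}
	f.remaining -= len(p)
	return len(p), nil
}

func main() {
	// io.Copy reports how many bytes were written before the failure,
	// which is exactly what the new glog.V(1) line records.
	n, err := io.Copy(&failAfter{remaining: 5}, strings.NewReader("hello, world"))
	if err != nil {
		log.Printf("passthrough copied %d bytes: %v", n, err)
	}
}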


@@ -392,11 +392,30 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) {
 }
 
 func CloseResponse(resp *http.Response) {
-	io.Copy(io.Discard, resp.Body)
+	reader := &CountingReader{reader: resp.Body}
+	io.Copy(io.Discard, reader)
 	resp.Body.Close()
+	if reader.BytesRead > 0 {
+		glog.V(1).Infof("response leftover %d bytes", reader.BytesRead)
+	}
 }
 
 func CloseRequest(req *http.Request) {
-	io.Copy(io.Discard, req.Body)
+	reader := &CountingReader{reader: req.Body}
+	io.Copy(io.Discard, reader)
 	req.Body.Close()
+	if reader.BytesRead > 0 {
+		glog.V(1).Infof("request leftover %d bytes", reader.BytesRead)
+	}
 }
+
+type CountingReader struct {
+	reader    io.Reader
+	BytesRead int
+}
+
+func (r *CountingReader) Read(p []byte) (n int, err error) {
+	n, err = r.reader.Read(p)
+	r.BytesRead += n
+	return n, err
+}
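
Draining the body before Close is what lets Go's HTTP transport reuse the underlying keep-alive connection; the new CountingReader makes it visible when a caller abandoned a body early. A runnable usage sketch of the type added above, where the strings.Reader stands in for a partially consumed body:

package main

import (
	"fmt"
	"io"
	"strings"
)

// CountingReader, as introduced by this commit: it forwards reads to
// the wrapped reader and tallies how many bytes passed through.
type CountingReader struct {
	reader    io.Reader
	BytesRead int
}

func (r *CountingReader) Read(p []byte) (n int, err error) {
	n, err = r.reader.Read(p)
	r.BytesRead += n
	return n, err
}

func main() {
	// Stand-in for an http.Response body the handler never consumed.
	body := strings.NewReader("leftover payload that nobody read")

	counting := &CountingReader{reader: body}
	io.Copy(io.Discard, counting)

	if counting.BytesRead > 0 {
		// Same condition as in CloseResponse/CloseRequest above.
		fmt.Printf("response leftover %d bytes\n", counting.BytesRead)
	}
}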