avoid http error: superfluous response.WriteHeader

https://github.com/chrislusf/seaweedfs/issues/1838
Konstantin Lebedev 2021-03-15 18:52:59 +05:00
parent 9f00f95bfb
commit 7194a5e7bf
6 changed files with 50 additions and 14 deletions
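The error in the title comes from Go's net/http server: once a handler has written any body bytes, the status line and headers have already been sent, so a later WriteHeader call (for example through http.Error after a chunk read fails mid-stream) is ignored and logged as superfluous. This commit avoids that by probing every chunk URL before the filer starts writing the response body. Below is a minimal standalone reproduction of the warning; it is not SeaweedFS code, and the handler and listen address are placeholders.

package main

import (
	"errors"
	"log"
	"net/http"
)

// Writing body bytes implicitly sends a 200 status; a later http.Error can no
// longer change it, and the server logs "http: superfluous response.WriteHeader call".
func streamHandler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("chunk 1 data")) // headers are flushed here with status 200

	chunkErr := errors.New("volume server for chunk 2 is unreachable") // simulated failure
	if chunkErr != nil {
		http.Error(w, chunkErr.Error(), http.StatusInternalServerError) // triggers the warning
	}
}

func main() {
	http.HandleFunc("/", streamHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}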

go.mod

@@ -38,7 +38,7 @@ require (
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.4
github.com/gorilla/websocket v1.4.1 // indirect
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/json-iterator/go v1.1.10
@@ -88,7 +88,7 @@ require (
gocloud.dev/pubsub/rabbitpubsub v0.20.0
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
-golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
golang.org/x/tools v0.0.0-20200608174601-1b747fd94509
google.golang.org/api v0.26.0

go.sum

@@ -959,6 +959,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
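The golang.org/x/sync requirement is bumped and loses its // indirect marker because the filer streaming code below now imports golang.org/x/sync/errgroup directly; go.sum records the hashes of the new version. As a reminder of the primitive being pulled in, here is a small self-contained sketch of errgroup.Group: run several probes concurrently and let Wait return the first non-nil error. The URLs are placeholders, not real volume servers.

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	urls := []string{"http://127.0.0.1:8081/3,01637037d6", "http://127.0.0.1:8082/3,01637037d6"}

	g := new(errgroup.Group)
	for _, u := range urls {
		u := u // copy for the closure (pre-Go 1.22 range semantics)
		g.Go(func() error {
			resp, err := http.Get(u)
			if err != nil {
				return err
			}
			return resp.Body.Close()
		})
	}
	if err := g.Wait(); err != nil { // first error from any probe, if one failed
		fmt.Println("pre-check failed:", err)
		return
	}
	fmt.Println("all chunk URLs reachable")
}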

weed/filer/filechunk_manifest.go

@@ -91,10 +91,10 @@ func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string,
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return nil, err
}
-return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0)
+return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, false, 0, 0)
}
-func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) ([]byte, error) {
+func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, isCheck bool, offset int64, size int) ([]byte, error) {
var err error
var buffer bytes.Buffer
@@ -102,7 +102,7 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 {
for _, urlString := range urlStrings {
-shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
+shouldRetry, err = util.FastReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, isCheck, offset, size, func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {
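fetchChunk and the other callers now pass an extra boolean, isCheck, which retriedFetchChunkData simply threads through to util.FastReadUrlAsStream so a caller can ask for a cheap reachability probe instead of the chunk body. For orientation, here is a self-contained sketch of the retry shape this function keeps: walk every replica URL, and if the attempt fails, back off with a growing wait and try the list again. The function name, the placeholder URL, and the 10 second cap (standing in for util.RetryWaitTime) are illustrative assumptions, not SeaweedFS code.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// fetchFromReplicas mirrors the loop structure shown above: try every replica
// URL in turn, and on failure sleep with a growing wait before retrying the
// whole list, until maxWait is reached.
func fetchFromReplicas(urls []string, maxWait time.Duration) ([]byte, error) {
	var lastErr error
	for waitTime := time.Second; waitTime < maxWait; waitTime += waitTime / 2 {
		for _, u := range urls {
			resp, err := http.Get(u)
			if err != nil {
				lastErr = err
				continue
			}
			data, err := io.ReadAll(resp.Body)
			resp.Body.Close()
			if err == nil && resp.StatusCode < 400 {
				return data, nil
			}
			lastErr = fmt.Errorf("status %d: %v", resp.StatusCode, err)
		}
		time.Sleep(waitTime) // reached only when every replica failed this round
	}
	return nil, fmt.Errorf("all replicas failed: %v", lastErr)
}

func main() {
	_, err := fetchFromReplicas([]string{"http://127.0.0.1:8080/3,01637037d6"}, 10*time.Second)
	fmt.Println(err)
}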

weed/filer/stream.go

@@ -3,6 +3,7 @@ package filer
import (
"bytes"
"fmt"
+"golang.org/x/sync/errgroup"
"io"
"math"
"strings"
@@ -33,16 +34,32 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
fileId2Url[chunkView.FileId] = urlStrings
}
-for _, chunkView := range chunkViews {
+for idx, chunkView := range chunkViews {
urlStrings := fileId2Url[chunkView.FileId]
-data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
+// Pre-check all chunkViews urls
+gErr := new(errgroup.Group)
+if len(chunkViews) > 1 && idx == 0 {
+CheckAllChunkViews(chunkViews[1:], &fileId2Url, gErr)
+}
+data, err := retriedFetchChunkData(
+urlStrings,
+chunkView.CipherKey,
+chunkView.IsGzipped,
+chunkView.IsFullChunk(),
+false,
+chunkView.Offset,
+int(chunkView.Size),
+)
if err != nil {
glog.Errorf("read chunk: %v", err)
return fmt.Errorf("read chunk: %v", err)
}
+if err := gErr.Wait(); err != nil {
+glog.Errorf("check all chunks: %v", err)
+return fmt.Errorf("check all chunks: %v", err)
+}
_, err = w.Write(data)
if err != nil {
glog.Errorf("write chunk: %v", err)
@@ -54,6 +71,22 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
}
+func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) {
+for _, chunkView := range chunkViews {
+gErr.Go(func() error {
+_, err := retriedFetchChunkData(
+(*fileId2Url)[chunkView.FileId],
+chunkView.CipherKey,
+chunkView.IsGzipped,
+chunkView.IsFullChunk(),
+true,
+chunkView.Offset,
+int(chunkView.Size))
+return err
+})
+}
+}
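One hedged aside on the closures above, an observation about the Go pattern rather than part of the commit: under the loop-variable semantics of Go versions current in 2021 (before Go 1.22), every goroutine started by gErr.Go captures the same chunkView range variable, so by the time the goroutines run they may all probe the last chunk. The conventional guard is to shadow the variable once per iteration, as in this sketch of the same loop:

for _, chunkView := range chunkViews {
	chunkView := chunkView // per-iteration copy so each goroutine checks its own chunk
	gErr.Go(func() error {
		_, err := retriedFetchChunkData(
			(*fileId2Url)[chunkView.FileId],
			chunkView.CipherKey,
			chunkView.IsGzipped,
			chunkView.IsFullChunk(),
			true,
			chunkView.Offset,
			int(chunkView.Size))
		return err
	})
}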
// ---------------- ReadAllReader ----------------------------------
func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
@@ -73,7 +106,7 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
return nil, err
}
-data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
+data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), false, chunkView.Offset, int(chunkView.Size))
if err != nil {
return nil, err
}
@@ -185,7 +218,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
var buffer bytes.Buffer
var shouldRetry bool
for _, urlString := range urlStrings {
-shouldRetry, err = util.FastReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+shouldRetry, err = util.FastReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), false, chunkView.Offset, int(chunkView.Size), func(data []byte) {
buffer.Write(data)
})
if !shouldRetry {

weed/replication/repl_util/replication_util.go

@@ -20,7 +20,7 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
var shouldRetry bool
for _, fileUrl := range fileUrls {
-shouldRetry, err = util.FastReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
+shouldRetry, err = util.FastReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), false, chunk.Offset, int(chunk.Size), func(data []byte) {
writeErr = writeFunc(data)
})
if err != nil {

weed/util/fasthttp_util.go

@@ -72,12 +72,11 @@ func FastGet(url string) ([]byte, bool, error) {
return out, false, nil
}
-func FastReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) (retryable bool, err error) {
+func FastReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, isCheck bool, offset int64, size int, fn func(data []byte)) (retryable bool, err error) {
if cipherKey != nil {
return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, isFullChunk, offset, size, fn)
}
req := fasthttp.AcquireRequest()
res := fasthttp.AcquireResponse()
defer fasthttp.ReleaseRequest(req)
@@ -85,7 +84,9 @@ func FastReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool
req.SetRequestURIBytes([]byte(fileUrl))
-if isFullChunk {
+if isCheck {
+req.Header.Add("Range", "bytes=0-1")
+} else if isFullChunk {
req.Header.Add("Accept-Encoding", "gzip")
} else {
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))