mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2024-01-19 02:48:24 +00:00)

commit 6769d07604
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.4.3</version>
+    <version>1.4.4</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.4.3</version>
+    <version>1.4.4</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.4.3</version>
+    <version>1.4.4</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -15,6 +15,7 @@ public class ChunkCache {
         }
         this.cache = CacheBuilder.newBuilder()
                 .maximumSize(maxEntries)
+                .weakValues()
                 .expireAfterAccess(1, TimeUnit.HOURS)
                 .build();
     }
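Aside: .weakValues() lets the JVM garbage-collect cached chunk bytes under memory pressure instead of pinning them until eviction. For orientation, a rough Go sketch of the two bounds the builder already configured (size cap plus one-hour access expiry); weak values themselves have no direct Go analogue, and all names below are hypothetical:

package main

import (
	"fmt"
	"sync"
	"time"
)

// chunkCache sketches what CacheBuilder sets up above: a size-bounded
// map whose entries expire one hour after last access. Guava's
// weakValues() extra (letting the GC reclaim values under memory
// pressure) has no direct Go analogue.
type chunkCache struct {
	mu      sync.Mutex
	max     int
	ttl     time.Duration
	entries map[string]cacheEntry
}

type cacheEntry struct {
	data     []byte
	lastSeen time.Time
}

func (c *chunkCache) getChunk(fileId string) []byte {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.entries[fileId]
	if !ok || time.Since(e.lastSeen) > c.ttl {
		delete(c.entries, fileId) // expired or missing
		return nil
	}
	e.lastSeen = time.Now()
	c.entries[fileId] = e
	return e.data
}

func (c *chunkCache) setChunk(fileId string, data []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.entries) >= c.max {
		for k := range c.entries { // crude eviction; Guava evicts by recency
			delete(c.entries, k)
			break
		}
	}
	c.entries[fileId] = cacheEntry{data: data, lastSeen: time.Now()}
}

func main() {
	c := &chunkCache{max: 1000, ttl: time.Hour, entries: map[string]cacheEntry{}}
	c.setChunk("3,0123456789", []byte("chunk bytes"))
	fmt.Println(string(c.getChunk("3,0123456789")))
}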
@@ -76,8 +76,11 @@ public class FileChunkManifest {
             LOG.debug("doFetchFullChunkData:{}", chunkView);
             chunkData = SeaweedRead.doFetchFullChunkData(chunkView, locations);
         }
+        if(chunk.getIsChunkManifest()){
+            // only cache manifest chunks
             LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length);
             SeaweedRead.chunkCache.setChunk(chunkView.fileId, chunkData);
+        }

         return chunkData;
@@ -18,10 +18,14 @@ public class Gzip {
         return compressed;
     }

-    public static byte[] decompress(byte[] compressed) throws IOException {
+    public static byte[] decompress(byte[] compressed) {
+        try {
             ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
             GZIPInputStream gis = new GZIPInputStream(bis);
             return readAll(gis);
+        } catch (Exception e) {
+            return compressed;
+        }
     }

     private static byte[] readAll(InputStream input) throws IOException {
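The new decompress swallows decode failures and hands back the original bytes, treating "not actually gzip" as a soft condition rather than an error. A minimal Go sketch of the same fallback behavior (hypothetical helper name, not part of this commit):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

// decompressOrPassThrough mirrors the Java change: any gzip decoding
// error returns the input unchanged instead of propagating an exception.
func decompressOrPassThrough(compressed []byte) []byte {
	gr, err := gzip.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return compressed // not gzip, or a corrupt header
	}
	defer gr.Close()
	plain, err := ioutil.ReadAll(gr)
	if err != nil {
		return compressed // truncated or corrupt body
	}
	return plain
}

func main() {
	fmt.Println(string(decompressOrPassThrough([]byte("already plain"))))
}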
@@ -1,7 +1,10 @@
 package seaweedfs.client;

+import org.apache.http.Header;
+import org.apache.http.HeaderElement;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHeaders;
+import org.apache.http.client.entity.GzipDecompressingEntity;
 import org.apache.http.client.methods.CloseableHttpResponse;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.util.EntityUtils;
@@ -78,7 +81,7 @@ public class SeaweedRead {
         HttpGet request = new HttpGet(
                 String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));

-        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
+        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip");

         byte[] data = null;
@@ -87,6 +90,18 @@
         try {
             HttpEntity entity = response.getEntity();

+            Header contentEncodingHeader = entity.getContentEncoding();
+
+            if (contentEncodingHeader != null) {
+                HeaderElement[] encodings = contentEncodingHeader.getElements();
+                for (int i = 0; i < encodings.length; i++) {
+                    if (encodings[i].getName().equalsIgnoreCase("gzip")) {
+                        entity = new GzipDecompressingEntity(entity);
+                        break;
+                    }
+                }
+            }
+
             data = EntityUtils.toByteArray(entity);

             EntityUtils.consume(entity);
@@ -96,10 +111,6 @@
             request.releaseConnection();
         }

-        if (chunkView.isCompressed) {
-            data = Gzip.decompress(data);
-        }
-
         if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
             try {
                 data = SeaweedCipher.decrypt(data, chunkView.cipherKey);
@@ -108,6 +119,10 @@
             }
         }

+        if (chunkView.isCompressed) {
+            data = Gzip.decompress(data);
+        }
+
         LOG.debug("doFetchFullChunkData fid:{} chunkData.length:{}", chunkView.fileId, data.length);

         return data;
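The net effect of the last two hunks is an ordering fix: chunks are compressed before they are encrypted on write, so a reader must decrypt first and decompress second. A small Go sketch of that order, with decrypt as a hypothetical stand-in for SeaweedCipher.decrypt:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

// decrypt is a placeholder for SeaweedCipher.decrypt; identity here
// just to keep the sketch runnable.
func decrypt(data, cipherKey []byte) ([]byte, error) { return data, nil }

// readChunk shows the corrected order from this commit: decrypt the
// raw bytes, then undo the compression that was applied first on write.
func readChunk(raw, cipherKey []byte, isCompressed bool) ([]byte, error) {
	data := raw
	if len(cipherKey) != 0 {
		var err error
		if data, err = decrypt(data, cipherKey); err != nil {
			return nil, err
		}
	}
	if isCompressed {
		gr, err := gzip.NewReader(bytes.NewReader(data))
		if err != nil {
			return nil, err
		}
		defer gr.Close()
		return ioutil.ReadAll(gr)
	}
	return data, nil
}

func main() {
	out, _ := readChunk([]byte("plain"), nil, false)
	fmt.Println(string(out))
}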
@@ -127,7 +127,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
@@ -127,7 +127,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
@@ -57,7 +57,7 @@ func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error)
 	if isCompressed {
 		var err error
 		if buffer, err = util.DecompressData(buffer); err != nil {
-			return nil, err
+			glog.V(0).Infof("fail to decompress chunk manifest: %v", err)
 		}
 	}
 	cm := ChunkManifest{}
@@ -26,6 +26,8 @@ var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")

 func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {

+	// println(r.Method + " " + r.URL.Path)
+
 	stats.VolumeServerRequestCounter.WithLabelValues("get").Inc()
 	start := time.Now()
 	defer func() { stats.VolumeServerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) }()
@@ -142,7 +144,6 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 		}
 	}

-	if ext != ".gz" && ext != ".zst" {
 	if n.IsCompressed() {
 		if _, _, _, shouldResize := shouldResizeImages(ext, r); shouldResize {
 			if n.Data, err = util.DecompressData(n.Data); err != nil {
@@ -158,7 +159,6 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 			}
 		}
 	}
-	}

 	rs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r)

@@ -8,6 +8,8 @@ import (
 func NewDiskStatus(path string) (disk *volume_server_pb.DiskStatus) {
 	disk = &volume_server_pb.DiskStatus{Dir: path}
 	fillInDiskStatus(disk)
-	glog.V(2).Infof("read disk size: %v", disk)
+	if disk.PercentUsed > 95 {
+		glog.V(0).Infof("disk status: %v", disk)
+	}
 	return
 }
@@ -44,7 +44,7 @@ type Needle struct {
 }

 func (n *Needle) String() (str string) {
-	str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime)
+	str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s Compressed:%v", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime, n.IsCompressed())
 	return
 }

@@ -81,6 +81,7 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit
 		}
 	}
 	if pu.IsGzipped {
+		// println(r.URL.Path, "is set to compressed", pu.FileName, pu.IsGzipped, "dataSize", pu.OriginalDataSize)
 		n.SetIsCompressed()
 	}
 	if n.LastModified == 0 {
@@ -54,7 +54,7 @@ func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) {

 	pu.OriginalDataSize = len(pu.Data)
 	pu.UncompressedData = pu.Data
-	// println("received data", len(pu.Data), "isGzipped", pu.IsCompressed, "mime", pu.MimeType, "name", pu.FileName)
+	// println("received data", len(pu.Data), "isGzipped", pu.IsGzipped, "mime", pu.MimeType, "name", pu.FileName)
 	if pu.IsGzipped {
 		if unzipped, e := util.DecompressData(pu.Data); e == nil {
 			pu.OriginalDataSize = len(unzipped)
@@ -72,7 +72,7 @@ func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) {
 			mimeType = ""
 		}
 		if shouldBeCompressed, iAmSure := util.IsCompressableFileType(ext, mimeType); mimeType == "" && !iAmSure || shouldBeCompressed && iAmSure {
-			// println("ext", ext, "iAmSure", iAmSure, "shouldGzip", shouldGzip, "mimeType", pu.MimeType)
+			// println("ext", ext, "iAmSure", iAmSure, "shouldBeCompressed", shouldBeCompressed, "mimeType", pu.MimeType)
 			if compressedData, err := util.GzipData(pu.Data); err == nil {
 				if len(compressedData)*10 < len(pu.Data)*9 {
 					pu.Data = compressedData
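The context line len(compressedData)*10 < len(pu.Data)*9 is the keep-it-compressed heuristic: the gzipped form is stored only when it is at least 10% smaller than the input, using integer math instead of a float ratio. A standalone sketch of the same check (hypothetical function name):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// worthCompressing reports whether gzip saves at least 10%, the same
// integer comparison ParseUpload uses: compressed*10 < original*9.
func worthCompressing(data []byte) ([]byte, bool) {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write(data)
	w.Close() // flush before measuring
	if buf.Len()*10 < len(data)*9 {
		return buf.Bytes(), true
	}
	return data, false
}

func main() {
	_, ok := worthCompressing(bytes.Repeat([]byte("seaweedfs "), 100))
	fmt.Println("compressed kept:", ok) // repetitive input compresses well
}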
@@ -39,7 +39,7 @@ func DecompressData(input []byte) ([]byte, error) {
 	if IsZstdContent(input) {
 		return unzstdData(input)
 	}
-	return nil, fmt.Errorf("unsupported compression")
+	return input, fmt.Errorf("unsupported compression")
 }

 func ungzipData(input []byte) ([]byte, error) {
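Returning the input alongside the error turns "unsupported compression" into a soft failure: callers that only log, like the LoadChunkManifest hunk above, still end up with usable bytes. A runnable sketch of that contract, with a stand-in for util.DecompressData:

package main

import (
	"fmt"
	"log"
)

// decompressData stands in for util.DecompressData after this commit:
// on unsupported input it returns the input itself together with the
// error. (The real code checks gzip/zstd magic bytes first.)
func decompressData(input []byte) ([]byte, error) {
	return input, fmt.Errorf("unsupported compression")
}

func main() {
	buffer := []byte("plain bytes")
	buffer, err := decompressData(buffer)
	if err != nil {
		// soft failure: log and keep going with the original bytes,
		// the same pattern LoadChunkManifest adopts in this commit
		log.Printf("fail to decompress: %v", err)
	}
	fmt.Println(string(buffer)) // still usable either way
}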
@@ -68,14 +68,28 @@ func Post(url string, values url.Values) ([]byte, error) {
 // github.com/chrislusf/seaweedfs/unmaintained/repeated_vacuum/repeated_vacuum.go
 // may need increasing http.Client.Timeout
 func Get(url string) ([]byte, error) {
-	r, err := client.Get(url)
+
+	request, err := http.NewRequest("GET", url, nil)
+	request.Header.Add("Accept-Encoding", "gzip")
+
+	response, err := client.Do(request)
 	if err != nil {
 		return nil, err
 	}
-	defer r.Body.Close()
-	b, err := ioutil.ReadAll(r.Body)
-	if r.StatusCode >= 400 {
-		return nil, fmt.Errorf("%s: %s", url, r.Status)
+	defer response.Body.Close()
+
+	var reader io.ReadCloser
+	switch response.Header.Get("Content-Encoding") {
+	case "gzip":
+		reader, err = gzip.NewReader(response.Body)
+		defer reader.Close()
+	default:
+		reader = response.Body
+	}
+
+	b, err := ioutil.ReadAll(reader)
+	if response.StatusCode >= 400 {
+		return nil, fmt.Errorf("%s: %s", url, response.Status)
 	}
 	if err != nil {
 		return nil, err
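One caveat worth knowing: Go's http.Transport only decompresses gzip transparently when it added Accept-Encoding itself; once the code sets the header explicitly, as here, the body arrives still compressed and must be unwrapped by hand. The diff inlines that unwrapping three times; a self-contained version as a helper (hypothetical name wrapGzipReader, example URL assumed):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// wrapGzipReader is equivalent to the switch this commit inlines in
// Get, ReadUrlAsStream, and ReadUrlAsReaderCloser: when we ask for
// gzip ourselves, net/http will not decompress for us, so we wrap the
// body based on the response's Content-Encoding.
func wrapGzipReader(resp *http.Response) (io.ReadCloser, error) {
	if resp.Header.Get("Content-Encoding") == "gzip" {
		return gzip.NewReader(resp.Body)
	}
	return resp.Body, nil
}

func main() {
	// assumed volume server URL and file id, for illustration only
	req, _ := http.NewRequest("GET", "http://localhost:8080/3,0123456789", nil)
	req.Header.Add("Accept-Encoding", "gzip")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	reader, err := wrapGzipReader(resp)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer reader.Close()
	b, _ := ioutil.ReadAll(reader)
	fmt.Println(len(b), "bytes")
}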
@@ -269,7 +283,9 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is
 		return err
 	}

-	if !isFullChunk {
+	if isFullChunk {
+		req.Header.Add("Accept-Encoding", "gzip")
+	} else {
 		req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
 	}

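HTTP byte ranges are inclusive on both ends, which is why the Range header subtracts one: a read of size bytes at offset requests bytes=offset through offset+size-1. For example:

package main

import "fmt"

func main() {
	// 16 bytes starting at offset 1024 -> "bytes=1024-1039" (inclusive end)
	offset, size := int64(1024), 16
	fmt.Printf("bytes=%d-%d\n", offset, offset+int64(size)-1)
}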
@@ -282,13 +298,23 @@
 		return fmt.Errorf("%s: %s", fileUrl, r.Status)
 	}

+	var reader io.ReadCloser
+	contentEncoding := r.Header.Get("Content-Encoding")
+	switch contentEncoding {
+	case "gzip":
+		reader, err = gzip.NewReader(r.Body)
+		defer reader.Close()
+	default:
+		reader = r.Body
+	}
+
 	var (
 		m int
 	)
 	buf := make([]byte, 64*1024)

 	for {
-		m, err = r.Body.Read(buf)
+		m, err = reader.Read(buf)
 		fn(buf[:m])
 		if err == io.EOF {
 			return nil
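The loop keeps the io.Reader contract in mind: a Read may return data together with io.EOF, so buf[:m] goes to the callback before the error is inspected. A self-contained demonstration of the same pattern:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.Reader may return n > 0 together with io.EOF, so the diff's
	// loop hands buf[:m] to the callback before checking err; same here.
	reader := strings.NewReader("streamed chunk bytes")
	buf := make([]byte, 8)
	var out []byte
	for {
		m, err := reader.Read(buf)
		out = append(out, buf[:m]...) // consume data first
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
	}
	fmt.Println(string(out))
}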
@@ -312,7 +338,7 @@ func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool
 	if isContentCompressed {
 		decryptedData, err = DecompressData(decryptedData)
 		if err != nil {
-			return fmt.Errorf("unzip decrypt %s: %v", fileUrl, err)
+			glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err)
 		}
 	}
 	if len(decryptedData) < int(offset)+size {
@@ -334,6 +360,8 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
 	}
 	if rangeHeader != "" {
 		req.Header.Add("Range", rangeHeader)
+	} else {
+		req.Header.Add("Accept-Encoding", "gzip")
 	}

 	r, err := client.Do(req)
@@ -344,7 +372,17 @@
 		return nil, fmt.Errorf("%s: %s", fileUrl, r.Status)
 	}

-	return r.Body, nil
+	var reader io.ReadCloser
+	contentEncoding := r.Header.Get("Content-Encoding")
+	switch contentEncoding {
+	case "gzip":
+		reader, err = gzip.NewReader(r.Body)
+		defer reader.Close()
+	default:
+		reader = r.Body
+	}
+
+	return reader, nil
 }

 func CloseResponse(resp *http.Response) {