filer: add back image resizing capability

This commit is contained in:
Chris Lu 2020-03-20 20:31:11 -07:00
parent f251d03673
commit 308688c8d0
5 changed files with 87 additions and 7 deletions

View file

@ -1,7 +1,10 @@
package filer2 package filer2
import ( import (
"bytes"
"fmt"
"io" "io"
"math"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@ -40,3 +43,58 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
return nil return nil
} }
// ChunkStreamReader presents a file's chunks as a sequential io.Reader,
// fetching one chunk at a time into an in-memory buffer via the master client.
type ChunkStreamReader struct {
	masterClient *wdclient.MasterClient // resolves chunk file ids to volume server URLs (see fetchChunkToBuffer)
	chunkViews   []*ChunkView           // views over the file's chunks, built in NewChunkStreamReader
	logicOffset  int64                  // NOTE(review): not read or written by the methods visible here — confirm intended use
	buffer       bytes.Buffer           // holds the bytes of the most recently fetched chunk
	bufferOffset int64                  // set to -1 by NewChunkStreamReader; not otherwise used in this view of the file
	chunkIndex   int                    // index into chunkViews of the next chunk to fetch
}
var _ = io.ReadSeeker(&ChunkStreamReader{})
// NewChunkStreamReader builds a sequential reader over the given file chunks.
// The views cover the file from offset 0 up to math.MaxInt32 bytes.
func NewChunkStreamReader(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader {
	views := ViewFromChunks(chunks, 0, math.MaxInt32)
	reader := &ChunkStreamReader{
		masterClient: masterClient,
		chunkViews:   views,
		bufferOffset: -1,
	}
	return reader
}
// Read implements io.Reader. When the internal buffer is drained it fetches
// the next chunk; it returns io.EOF once all chunks have been consumed.
//
// Bug fix: the original discarded the error from fetchChunkToBuffer. On a
// lookup or download failure the buffer stayed empty, so bytes.Buffer.Read
// returned io.EOF and the stream was silently truncated. The error is now
// propagated to the caller, and chunkIndex only advances on success.
func (c *ChunkStreamReader) Read(p []byte) (n int, err error) {
	if c.buffer.Len() == 0 {
		if c.chunkIndex >= len(c.chunkViews) {
			return 0, io.EOF
		}
		chunkView := c.chunkViews[c.chunkIndex]
		if err = c.fetchChunkToBuffer(chunkView); err != nil {
			return 0, err
		}
		c.chunkIndex++
	}
	return c.buffer.Read(p)
}
// Seek implements io.Seeker but is not supported: this reader only streams
// chunks forward, so any seek attempt fails.
func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
	err := fmt.Errorf("ChunkStreamReader: seek not supported")
	return 0, err
}
// fetchChunkToBuffer resets the internal buffer and fills it with the bytes
// of the given chunk view, looking up the chunk's URL via the master client
// and streaming the (possibly encrypted/gzipped) data into the buffer.
func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
	urlString, lookupErr := c.masterClient.LookupFileId(chunkView.FileId)
	if lookupErr != nil {
		glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, lookupErr)
		return lookupErr
	}
	c.buffer.Reset()
	readErr := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
		c.buffer.Write(data)
	})
	if readErr != nil {
		glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, readErr)
		return readErr
	}
	return nil
}

View file

@ -131,6 +131,7 @@ func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileRea
for _, chunk := range chunkList { for _, chunk := range chunkList {
totalSize += chunk.Size totalSize += chunk.Size
} }
sort.Sort(ChunkList(chunkList))
return &ChunkedFileReader{ return &ChunkedFileReader{
totalSize: totalSize, totalSize: totalSize,
chunkList: chunkList, chunkList: chunkList,

View file

@ -224,7 +224,7 @@ func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename str
} }
} }
func processRangeRequst(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
rangeReq := r.Header.Get("Range") rangeReq := r.Header.Get("Range")
if rangeReq == "" { if rangeReq == "" {

View file

@ -11,6 +11,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/images"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/stats"
) )
@ -89,8 +90,19 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
totalSize := int64(filer2.TotalSize(entry.Chunks)) totalSize := int64(filer2.TotalSize(entry.Chunks))
processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { if rangeReq := r.Header.Get("Range"); rangeReq == "" {
return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, int(size)) ext := filepath.Ext(filename)
width, height, mode, shouldResize := shouldResizeImages(ext, r)
if shouldResize {
chunkedFileReader := filer2.NewChunkStreamReader(fs.filer.MasterClient, entry.Chunks)
rs, _, _ := images.Resized(ext, chunkedFileReader, width, height, mode)
io.Copy(w, rs)
return
}
}
processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, int(size))
}) })
} }

View file

@ -200,20 +200,29 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string,
func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker { func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker {
rs := originalDataReaderSeeker rs := originalDataReaderSeeker
width, height, mode, shouldResize := shouldResizeImages(ext, r)
if shouldResize {
rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, mode)
}
return rs
}
func shouldResizeImages(ext string, r *http.Request) (width, height int, mode string, shouldResize bool) {
if len(ext) > 0 { if len(ext) > 0 {
ext = strings.ToLower(ext) ext = strings.ToLower(ext)
} }
if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" { if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" {
width, height := 0, 0
if r.FormValue("width") != "" { if r.FormValue("width") != "" {
width, _ = strconv.Atoi(r.FormValue("width")) width, _ = strconv.Atoi(r.FormValue("width"))
} }
if r.FormValue("height") != "" { if r.FormValue("height") != "" {
height, _ = strconv.Atoi(r.FormValue("height")) height, _ = strconv.Atoi(r.FormValue("height"))
} }
rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode"))
} }
return rs mode = r.FormValue("mode")
shouldResize = width > 0 || height > 0
return
} }
func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error { func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {
@ -235,7 +244,7 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
adjustHeadersAfterHEAD(w, r, filename) adjustHeadersAfterHEAD(w, r, filename)
processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
if _, e = rs.Seek(offset, 0); e != nil { if _, e = rs.Seek(offset, 0); e != nil {
return e return e
} }