Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00

Commit a911375798
@@ -1,4 +1,4 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-version: 2.03
+version: 2.04
@@ -4,7 +4,7 @@ global:
   registry: ""
   repository: ""
   imageName: chrislusf/seaweedfs
-  imageTag: "2.03"
+  imageTag: "2.04"
   imagePullPolicy: IfNotPresent
   imagePullSecrets: imagepullsecret
   restartPolicy: Always
@@ -5,7 +5,7 @@
 
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.4.9</version>
+  <version>1.5.0</version>
 
   <parent>
     <groupId>org.sonatype.oss</groupId>
@@ -65,7 +65,7 @@
     <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
-     <version>4.12</version>
+     <version>4.13.1</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
@@ -5,7 +5,7 @@
 
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.4.9</version>
+  <version>1.5.0</version>
 
   <parent>
     <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@
 
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-client</artifactId>
-  <version>1.4.9</version>
+  <version>1.5.0</version>
 
   <parent>
     <groupId>org.sonatype.oss</groupId>
@@ -65,7 +65,7 @@
     <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
-     <version>4.12</version>
+     <version>4.13.1</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
@@ -301,7 +301,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.4.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.5.0</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 </project>
@@ -5,7 +5,7 @@
   <modelVersion>4.0.0</modelVersion>
 
   <properties>
-    <seaweedfs.client.version>1.4.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.5.0</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 
@@ -309,7 +309,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.4.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.5.0</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 </project>
@@ -5,7 +5,7 @@
   <modelVersion>4.0.0</modelVersion>
 
   <properties>
-    <seaweedfs.client.version>1.4.9</seaweedfs.client.version>
+    <seaweedfs.client.version>1.5.0</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 
@@ -28,7 +28,7 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>3.8.1</version>
+      <version>4.13.1</version>
       <scope>test</scope>
     </dependency>
   </dependencies>
@@ -25,7 +25,7 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.12</version>
+      <version>4.13.1</version>
       <scope>test</scope>
     </dependency>
     <dependency>
@@ -32,7 +32,7 @@ func main() {
 	go func() {
 		for {
 			println("vacuum threshold", *garbageThreshold)
-			_, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
+			_, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold))
 			if err != nil {
 				log.Fatalf("vacuum: %v", err)
 			}
@@ -16,7 +16,7 @@ debug_shell:
 
 debug_mount:
 	go build -gcflags="all=-N -l"
-	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
 
 debug_server:
 	go build -gcflags="all=-N -l"
@@ -290,7 +290,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 		}
 		var bytes []byte
 		for _, url := range urls {
-			bytes, err = util.Get(url)
+			bytes, _, err = util.Get(url)
 			if err == nil {
 				break
 			}
@@ -48,7 +48,7 @@ func init() {
 	mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
 	mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
 	mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
-	mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access the file system")
+	mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access volume servers with publicUrl")
 	mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated <local_uid>:<filer_uid>")
 	mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated <local_gid>:<filer_gid>")
@@ -72,11 +72,5 @@ var cmdMount = &Command{
 
   On OS X, it requires OSXFUSE (http://osxfuse.github.com/).
 
-  If the SeaweedFS system runs in a container cluster, e.g. managed by kubernetes or docker compose,
-  the volume servers are not accessible by their own ip addresses.
-  In "outsideContainerClusterMode", the mount will use the filer ip address instead, assuming:
-    * All volume server containers are accessible through the same hostname or IP address as the filer.
-    * All volume server container ports are open external to the cluster.
-
   `,
 }
@@ -97,12 +97,16 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
 
 	var err error
 	var buffer bytes.Buffer
+	var shouldRetry bool
 
 	for waitTime := time.Second; waitTime < ReadWaitTime; waitTime += waitTime / 2 {
 		for _, urlString := range urlStrings {
-			err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
+			shouldRetry, err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
 				buffer.Write(data)
 			})
+			if !shouldRetry {
+				break
+			}
 			if err != nil {
 				glog.V(0).Infof("read %s failed, err: %v", urlString, err)
 				buffer.Reset()
@@ -110,8 +114,8 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
 				break
 			}
 		}
-		if err != nil {
-			glog.V(0).Infof("sleep for %v before retrying reading", waitTime)
+		if err != nil && shouldRetry {
+			glog.V(0).Infof("retry reading in %v", waitTime)
 			time.Sleep(waitTime)
 		} else {
 			break
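Note: the retry loop above now distinguishes permanent failures from transient ones, so a non-retryable response no longer keeps the reader sleeping and retrying. A minimal, self-contained sketch of the pattern; the names fetch and fetchWithBackoff are hypothetical stand-ins for util.ReadUrlAsStream and its caller:

package main

import (
	"errors"
	"fmt"
	"time"
)

// fetch stands in for util.ReadUrlAsStream: the bool reports whether a
// failure is transient (e.g. a 5xx or a connection error) and worth retrying.
func fetch(url string) (retryable bool, err error) {
	return true, errors.New("simulated transient failure")
}

func fetchWithBackoff(urls []string, maxWait time.Duration) error {
	var err error
	var shouldRetry bool
	// grow the wait 1.5x per round, mirroring waitTime += waitTime / 2
	for waitTime := time.Second; waitTime < maxWait; waitTime += waitTime / 2 {
		for _, url := range urls {
			shouldRetry, err = fetch(url)
			if err == nil {
				return nil
			}
			fmt.Printf("read %s failed: %v\n", url, err)
			if !shouldRetry {
				return err // permanent failure: give up immediately
			}
		}
		time.Sleep(waitTime)
	}
	return err
}

func main() {
	fmt.Println(fetchWithBackoff([]string{"http://127.0.0.1:8080/3,01637037d6"}, 5*time.Second))
}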
@@ -75,7 +75,7 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
 		}
 
 		for _, loc := range locations.Locations {
-			volumeServerAddress := filerClient.AdjustedUrl(loc.Url)
+			volumeServerAddress := filerClient.AdjustedUrl(loc)
 			targetUrl := fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
 			targetUrls = append(targetUrls, targetUrl)
 		}
@@ -2,6 +2,7 @@ package filer
 
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"math"
 	"strings"
@@ -35,10 +36,14 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
 
 		data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
 		if err != nil {
-			return err
+			glog.Errorf("read chunk: %v", err)
+			return fmt.Errorf("read chunk: %v", err)
+		}
+		_, err = w.Write(data)
+		if err != nil {
+			glog.Errorf("write chunk: %v", err)
+			return fmt.Errorf("write chunk: %v", err)
 		}
-		w.Write(data)
 
 	}
 
 	return nil
@@ -174,10 +179,14 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
 		return err
 	}
 	var buffer bytes.Buffer
+	var shouldRetry bool
 	for _, urlString := range urlStrings {
-		err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
+		shouldRetry, err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) {
 			buffer.Write(data)
 		})
+		if !shouldRetry {
+			break
+		}
 		if err != nil {
 			glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
 			buffer.Reset()
@@ -234,7 +234,11 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
 
 	fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
 	dirPath := util.FullPath(dir.FullPath())
-	meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, util.FullPath(dirPath))
+	visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
+	if visitErr != nil {
+		glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
+		return nil, fuse.EIO
+	}
 	cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
 	if cacheErr == filer_pb.ErrNotFound {
 		return nil, fuse.ENOENT
@@ -296,7 +300,10 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 	}
 
 	dirPath := util.FullPath(dir.FullPath())
-	meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
+	if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
+		glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
+		return nil, fuse.EIO
+	}
 	listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
 	if listErr != nil {
 		glog.Errorf("list meta cache: %v", listErr)
@@ -2,6 +2,7 @@ package meta_cache
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"sync"
 
@@ -22,10 +23,10 @@ type MetaCache struct {
 	uidGidMapper *UidGidMapper
 }
 
-func NewMetaCache(dbFolder string, uidGidMapper *UidGidMapper) *MetaCache {
+func NewMetaCache(dbFolder string, baseDir util.FullPath, uidGidMapper *UidGidMapper) *MetaCache {
 	return &MetaCache{
 		localStore:      openMetaStore(dbFolder),
-		visitedBoundary: bounded_tree.NewBoundedTree(),
+		visitedBoundary: bounded_tree.NewBoundedTree(baseDir),
 		uidGidMapper:    uidGidMapper,
 	}
 }
@@ -116,6 +117,10 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full
 	mc.RLock()
 	defer mc.RUnlock()
 
+	if !mc.visitedBoundary.HasVisited(dirPath) {
+		return nil, fmt.Errorf("unsynchronized dir: %v", dirPath)
+	}
+
 	entries, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit)
 	if err != nil {
 		return nil, err
@@ -3,6 +3,8 @@ package meta_cache
 import (
 	"context"
 	"fmt"
+	"strings"
+	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -10,25 +12,34 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) {
+func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) error {
 
-	mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
+	return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
 
 		glog.V(4).Infof("ReadDirAllEntries %s ...", path)
 
-		err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
-			entry := filer.FromPbEntry(string(dirPath), pbEntry)
-			if err := mc.doInsertEntry(context.Background(), entry); err != nil {
-				glog.V(0).Infof("read %s: %v", entry.FullPath, err)
-				return err
-			}
-			if entry.IsDirectory() {
-				childDirectories = append(childDirectories, entry.Name())
-			}
-			return nil
-		})
-		if err != nil {
-			err = fmt.Errorf("list %s: %v", dirPath, err)
+		for waitTime := time.Second; waitTime < filer.ReadWaitTime; waitTime += waitTime / 2 {
+			err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
+				entry := filer.FromPbEntry(string(dirPath), pbEntry)
+				if err := mc.doInsertEntry(context.Background(), entry); err != nil {
+					glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+					return err
+				}
+				if entry.IsDirectory() {
+					childDirectories = append(childDirectories, entry.Name())
+				}
+				return nil
+			})
+			if err == nil {
+				break
+			}
+			if strings.Contains(err.Error(), "transport: ") {
+				glog.V(0).Infof("ReadDirAllEntries %s: %v. Retry in %v", path, err, waitTime)
+				time.Sleep(waitTime)
+				continue
+			}
+			err = fmt.Errorf("list %s: %v", dirPath, err)
+			break
 		}
 		return
 	})
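Note: listing a directory from the filer is now retried with backoff, but only when the failure is a gRPC connectivity problem (the error text contains "transport: "); any other error surfaces immediately. A standalone sketch of that filter, with hypothetical names (listOnce, listWithRetry):

package main

import (
	"errors"
	"fmt"
	"strings"
	"time"
)

// listOnce stands in for one filer_pb.ReadDirAllEntries round trip.
func listOnce(dir string) error {
	return errors.New("rpc error: code = Unavailable desc = transport: connection refused")
}

func listWithRetry(dir string, maxWait time.Duration) error {
	var err error
	for waitTime := time.Second; waitTime < maxWait; waitTime += waitTime / 2 {
		if err = listOnce(dir); err == nil {
			return nil
		}
		// only transient connectivity problems are retried; anything else
		// (bad request, not found, ...) is returned to the caller at once
		if strings.Contains(err.Error(), "transport: ") {
			fmt.Printf("list %s: %v, retry in %v\n", dir, err, waitTime)
			time.Sleep(waitTime)
			continue
		}
		return fmt.Errorf("list %s: %v", dir, err)
	}
	return err
}

func main() {
	fmt.Println(listWithRetry("/buckets", 4*time.Second))
}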
@@ -92,7 +92,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
 	}
 
-	wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), option.UidGidMapper)
+	wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), util.FullPath(option.FilerMountRootPath), option.UidGidMapper)
 	startTime := time.Now()
 	go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano())
 	grace.OnInterrupt(func() {
@@ -68,7 +68,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
 	}
 	for _, loc := range locations.Locations {
 		lr.Locations = append(lr.Locations, operation.Location{
-			Url:       wfs.AdjustedUrl(loc.Url),
+			Url:       wfs.AdjustedUrl(loc),
 			PublicUrl: loc.PublicUrl,
 		})
 	}
@@ -1,9 +1,6 @@
 package filesys
 
 import (
-	"fmt"
-	"strings"
-
 	"google.golang.org/grpc"
 
 	"github.com/chrislusf/seaweedfs/weed/pb"
@@ -26,15 +23,9 @@ func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) erro
 
 }
 
-func (wfs *WFS) AdjustedUrl(hostAndPort string) string {
-	if !wfs.option.OutsideContainerClusterMode {
-		return hostAndPort
+func (wfs *WFS) AdjustedUrl(location *filer_pb.Location) string {
+	if wfs.option.OutsideContainerClusterMode {
+		return location.PublicUrl
 	}
-	commaIndex := strings.Index(hostAndPort, ":")
-	if commaIndex < 0 {
-		return hostAndPort
-	}
-	filerCommaIndex := strings.Index(wfs.option.FilerGrpcAddress, ":")
-	return fmt.Sprintf("%s:%s", wfs.option.FilerGrpcAddress[:filerCommaIndex], hostAndPort[commaIndex+1:])
-
+	return location.Url
 }
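Note: AdjustedUrl now receives the whole *filer_pb.Location instead of a bare host:port, so the mount can select the volume server's advertised publicUrl when running outside the container cluster, rather than rewriting the host with the filer's address. A hedged sketch of the two behaviors; Location here is a local stand-in for the protobuf type, and the addresses are examples:

package main

import "fmt"

// Location mirrors the relevant fields of filer_pb.Location.
type Location struct {
	Url       string // address reachable inside the cluster network
	PublicUrl string // address advertised for clients outside the cluster
}

// adjustedUrl reproduces the mount's new selection logic: outside the
// container cluster, volume servers are only reachable via PublicUrl.
func adjustedUrl(loc Location, outsideContainerClusterMode bool) string {
	if outsideContainerClusterMode {
		return loc.PublicUrl
	}
	return loc.Url
}

func main() {
	loc := Location{Url: "10.0.0.5:8080", PublicUrl: "volumes.example.com:8080"}
	fmt.Println(adjustedUrl(loc, false)) // 10.0.0.5:8080
	fmt.Println(adjustedUrl(loc, true))  // volumes.example.com:8080
}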
@@ -38,8 +38,12 @@ func (wfs *WFS) saveDataAsChunk(dir string) filer.SaveDataAsChunkFunctionType {
 			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
 		}
 
-		fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
-		host = wfs.AdjustedUrl(host)
+		fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth)
+		loc := &filer_pb.Location{
+			Url:       resp.Url,
+			PublicUrl: resp.PublicUrl,
+		}
+		host = wfs.AdjustedUrl(loc)
 		collection, replication = resp.Collection, resp.Replication
 
 		return nil
@@ -108,6 +108,6 @@ func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient
 
 }
 
-func (broker *MessageBroker) AdjustedUrl(hostAndPort string) string {
-	return hostAndPort
+func (broker *MessageBroker) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
 }
@@ -21,7 +21,7 @@ var (
 
 type FilerClient interface {
 	WithFilerClient(fn func(SeaweedFilerClient) error) error
-	AdjustedUrl(hostAndPort string) string
+	AdjustedUrl(location *Location) string
 }
 
 func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) {
@@ -19,7 +19,7 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
 	var writeErr error
 
 	for _, fileUrl := range fileUrls {
-		err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
+		_, err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) {
 			writeErr = writeFunc(data)
 		})
 		if err != nil {
@@ -124,6 +124,6 @@ func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error)
 	}, fs.grpcAddress, fs.grpcDialOption)
 
 }
-func (fs *FilerSink) AdjustedUrl(hostAndPort string) string {
-	return hostAndPort
+func (fs *FilerSink) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
 }
@@ -111,8 +111,8 @@ func (fs *FilerSource) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) erro
 
 }
 
-func (fs *FilerSource) AdjustedUrl(hostAndPort string) string {
-	return hostAndPort
+func (fs *FilerSource) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
 }
 
 func volumeId(fileId string) string {
@@ -50,8 +50,8 @@ func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) err
 	}, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption)
 
 }
-func (s3a *S3ApiServer) AdjustedUrl(hostAndPort string) string {
-	return hostAndPort
+func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
 }
 
 // If none of the http routes match respond with MethodNotAllowed
@@ -274,7 +274,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
 		ra := ranges[0]
 		w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
 		w.Header().Set("Content-Range", ra.contentRange(totalSize))
-		// w.WriteHeader(http.StatusPartialContent)
+		w.WriteHeader(http.StatusPartialContent)
 
 		err = writeFn(w, ra.start, ra.length)
 		if err != nil {
@@ -315,7 +315,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
 	if w.Header().Get("Content-Encoding") == "" {
 		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
 	}
-	// w.WriteHeader(http.StatusPartialContent)
+	w.WriteHeader(http.StatusPartialContent)
 	if _, err := io.CopyN(w, sendContent, sendSize); err != nil {
 		http.Error(w, "Internal Error", http.StatusInternalServerError)
 		return
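Note: both branches of processRangeRequest now actually emit 206 Partial Content; with the WriteHeader call commented out, Go's http package sent an implicit 200 OK on the first body write even though Content-Range was set. A minimal illustration of that default, using a fixed example range rather than real Range-header parsing:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func rangeHandler(w http.ResponseWriter, r *http.Request) {
	body := []byte("hello, range requests")
	start, length := 7, 5 // fixed example range; real code parses the Range header
	w.Header().Set("Content-Length", fmt.Sprintf("%d", length))
	w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, start+length-1, len(body)))
	// must happen before the first body write, or Go defaults to 200 OK
	w.WriteHeader(http.StatusPartialContent)
	w.Write(body[start : start+length])
}

func main() {
	rec := httptest.NewRecorder()
	rangeHandler(rec, httptest.NewRequest("GET", "/blob", nil))
	fmt.Println(rec.Code) // 206
}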
@@ -118,8 +118,8 @@ func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient)
 	}, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption)
 
 }
-func (fs *WebDavFileSystem) AdjustedUrl(hostAndPort string) string {
-	return hostAndPort
+func (fs *WebDavFileSystem) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
 }
 
 func clearName(name string) (string, error) {
@@ -9,6 +9,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
 	"io"
+	"os"
 	"sort"
 )
 
@@ -151,6 +152,11 @@ func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEc
 
 	for i := 0; i < len(otherNodes); i++ {
 		emptyNode := otherNodes[i]
+		collectionPrefix := ""
+		if ecShardInfo.Collection != "" {
+			collectionPrefix = ecShardInfo.Collection + "_"
+		}
+		fmt.Fprintf(os.Stdout, "moving ec volume %s%d.%d %s => %s\n", collectionPrefix, ecShardInfo.Id, shardId, thisNode.info.Id, emptyNode.info.Id)
 		err = moveMountedShardToEcNode(commandEnv, thisNode, ecShardInfo.Collection, needle.VolumeId(ecShardInfo.Id), shardId, emptyNode, applyChange)
 		if err != nil {
 			return
@@ -102,8 +102,8 @@ func (ce *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error
 
 }
 
-func (ce *CommandEnv) AdjustedUrl(hostAndPort string) string {
-	return hostAndPort
+func (ce *CommandEnv) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
 }
 
 func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) {
@@ -16,13 +16,15 @@ type Node struct {
 type BoundedTree struct {
 	root *Node
 	sync.RWMutex
+	baseDir util.FullPath
 }
 
-func NewBoundedTree() *BoundedTree {
+func NewBoundedTree(baseDir util.FullPath) *BoundedTree {
 	return &BoundedTree{
 		root: &Node{
 			Name: "/",
 		},
+		baseDir: baseDir,
 	}
 }
 
@@ -32,21 +34,29 @@ type VisitNodeFunc func(path util.FullPath) (childDirectories []string, err erro
 // No action if the directory has been visited before or does not exist.
 // A leaf node, which has no children, represents a directory not visited.
 // A non-leaf node or a non-existing node represents a directory already visited, or does not need to visit.
-func (t *BoundedTree) EnsureVisited(p util.FullPath, visitFn VisitNodeFunc) {
+func (t *BoundedTree) EnsureVisited(p util.FullPath, visitFn VisitNodeFunc) (visitErr error) {
 	t.Lock()
 	defer t.Unlock()
 
 	if t.root == nil {
 		return
 	}
+	if t.baseDir != "/" {
+		p = p[len(t.baseDir):]
+	}
 	components := p.Split()
 	// fmt.Printf("components %v %d\n", components, len(components))
-	if canDelete := t.ensureVisited(t.root, util.FullPath("/"), components, 0, visitFn); canDelete {
+	canDelete, err := t.ensureVisited(t.root, t.baseDir, components, 0, visitFn)
+	if err != nil {
+		return err
+	}
+	if canDelete {
 		t.root = nil
 	}
+	return nil
 }
 
-func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, components []string, i int, visitFn VisitNodeFunc) (canDeleteNode bool) {
+func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, components []string, i int, visitFn VisitNodeFunc) (canDeleteNode bool, visitErr error) {
 
 	// println("ensureVisited", currentPath, i)
 
@@ -60,15 +70,20 @@ func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, componen
 	} else {
 		// fmt.Printf("ensure %v\n", currentPath)
 
-		children, err := visitFn(currentPath)
+		filerPath := currentPath
+		if t.baseDir != "/" {
+			filerPath = t.baseDir + filerPath
+		}
+
+		children, err := visitFn(filerPath)
 		if err != nil {
 			glog.V(0).Infof("failed to visit %s: %v", currentPath, err)
-			return
+			return false, err
 		}
 
 		if len(children) == 0 {
 			// fmt.Printf("  canDelete %v without children\n", currentPath)
-			return true
+			return true, nil
 		}
 
 		n.Children = make(map[string]*Node)
@@ -93,19 +108,22 @@ func (t *BoundedTree) ensureVisited(n *Node, currentPath util.FullPath, componen
 	}
 
 	// fmt.Printf("  ensureVisited %v %v\n", currentPath, toVisitNode.Name)
-	if canDelete := t.ensureVisited(toVisitNode, currentPath.Child(components[i]), components, i+1, visitFn); canDelete {
+	canDelete, childVisitErr := t.ensureVisited(toVisitNode, currentPath.Child(components[i]), components, i+1, visitFn)
+	if childVisitErr != nil {
+		return false, childVisitErr
+	}
+	if canDelete {
 
 		// fmt.Printf("  delete %v %v\n", currentPath, components[i])
 		delete(n.Children, components[i])
 
 		if len(n.Children) == 0 {
 			// fmt.Printf("  canDelete %v\n", currentPath)
-			return true
+			return true, nil
 		}
 	}
 
-	return false
+	return false, nil
 
 }
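Note: with a mount root other than "/", paths arriving at the bounded tree are filer-absolute while the tree itself is rooted at the mount point, so the hunks above strip the baseDir prefix on the way in (EnsureVisited) and restore it before calling back to the filer (ensureVisited). A toy sketch of just that prefix arithmetic, with hypothetical helper names:

package main

import "fmt"

// trimBase converts a filer-absolute path to one relative to the mount
// root, mirroring p = p[len(t.baseDir):] in EnsureVisited.
func trimBase(baseDir, p string) string {
	if baseDir != "/" {
		return p[len(baseDir):]
	}
	return p
}

// addBase restores the filer-absolute path before visiting the filer,
// mirroring filerPath = t.baseDir + filerPath in ensureVisited.
func addBase(baseDir, p string) string {
	if baseDir != "/" {
		return baseDir + p
	}
	return p
}

func main() {
	fmt.Println(trimBase("/buckets", "/buckets/photos")) // /photos
	fmt.Println(addBase("/buckets", "/photos"))          // /buckets/photos
}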
@@ -52,7 +52,7 @@ func TestBoundedTree(t *testing.T) {
 	//     g
 	//       h
 
-	tree := NewBoundedTree()
+	tree := NewBoundedTree(util.FullPath("/"))
 
 	tree.EnsureVisited(util.FullPath("/a/b/c"), visitFn)
 
@@ -100,7 +100,7 @@ func TestEmptyBoundedTree(t *testing.T) {
 	//     g
 	//       h
 
-	tree := NewBoundedTree()
+	tree := NewBoundedTree(util.FullPath("/"))
 
 	visitFn := func(path util.FullPath) (childDirectories []string, err error) {
 		fmt.Printf("  visit %v ...\n", path)
@@ -5,7 +5,7 @@ import (
 )
 
 var (
-	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 03)
+	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 04)
 	COMMIT  = ""
 )
 
@@ -67,14 +67,14 @@ func Post(url string, values url.Values) ([]byte, error) {
 
 // github.com/chrislusf/seaweedfs/unmaintained/repeated_vacuum/repeated_vacuum.go
 // may need increasing http.Client.Timeout
-func Get(url string) ([]byte, error) {
+func Get(url string) ([]byte, bool, error) {
 
 	request, err := http.NewRequest("GET", url, nil)
 	request.Header.Add("Accept-Encoding", "gzip")
 
 	response, err := client.Do(request)
 	if err != nil {
-		return nil, err
+		return nil, true, err
 	}
 	defer response.Body.Close()
 
@@ -89,12 +89,13 @@ func Get(url string) ([]byte, bool, error) {
 
 	b, err := ioutil.ReadAll(reader)
 	if response.StatusCode >= 400 {
-		return nil, fmt.Errorf("%s: %s", url, response.Status)
+		retryable := response.StatusCode >= 500
+		return nil, retryable, fmt.Errorf("%s: %s", url, response.Status)
 	}
 	if err != nil {
-		return nil, err
+		return nil, false, err
 	}
-	return b, nil
+	return b, false, nil
 }
 
 func Head(url string) (http.Header, error) {
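Note: util.Get now returns (data, retryable, error); the middle value classifies failures so callers can decide whether backing off makes sense: connection-level errors and 5xx responses are retryable, 4xx responses are not. A hedged sketch of the same contract from the caller's side; httpGet is a stand-in, and the master address is just the default example port:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// httpGet mimics the new util.Get contract: the middle return value says
// whether the failure is worth retrying.
func httpGet(url string) ([]byte, bool, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, true, err // network-level errors are retryable
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if resp.StatusCode >= 400 {
		// a 5xx may clear up on its own; a 4xx will not
		return nil, resp.StatusCode >= 500, fmt.Errorf("%s: %s", url, resp.Status)
	}
	if err != nil {
		return nil, false, err
	}
	return b, false, nil
}

func main() {
	_, retryable, err := httpGet("http://localhost:9333/cluster/status")
	fmt.Println(retryable, err)
}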
@@ -207,7 +208,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
 
 	if cipherKey != nil {
 		var n int
-		err := readEncryptedUrl(fileUrl, cipherKey, isContentCompressed, isFullChunk, offset, size, func(data []byte) {
+		_, err := readEncryptedUrl(fileUrl, cipherKey, isContentCompressed, isFullChunk, offset, size, func(data []byte) {
 			n = copy(buf, data)
 		})
 		return int64(n), err
@@ -272,7 +273,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
 	return n, err
 }
 
-func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
+func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) (retryable bool, err error) {
 
 	if cipherKey != nil {
 		return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, isFullChunk, offset, size, fn)
@@ -280,7 +281,7 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is
 
 	req, err := http.NewRequest("GET", fileUrl, nil)
 	if err != nil {
-		return err
+		return false, err
 	}
 
 	if isFullChunk {
@@ -291,11 +292,12 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is
 
 	r, err := client.Do(req)
 	if err != nil {
-		return err
+		return true, err
 	}
 	defer CloseResponse(r)
 	if r.StatusCode >= 400 {
-		return fmt.Errorf("%s: %s", fileUrl, r.Status)
+		retryable = r.StatusCode >= 500
+		return retryable, fmt.Errorf("%s: %s", fileUrl, r.Status)
 	}
 
 	var reader io.ReadCloser
@@ -317,23 +319,23 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is
 		m, err = reader.Read(buf)
 		fn(buf[:m])
 		if err == io.EOF {
-			return nil
+			return false, nil
 		}
 		if err != nil {
-			return err
+			return false, err
 		}
 	}
 
 }
 
-func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
-	encryptedData, err := Get(fileUrl)
+func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) (bool, error) {
+	encryptedData, retryable, err := Get(fileUrl)
 	if err != nil {
-		return fmt.Errorf("fetch %s: %v", fileUrl, err)
+		return retryable, fmt.Errorf("fetch %s: %v", fileUrl, err)
 	}
 	decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey))
 	if err != nil {
-		return fmt.Errorf("decrypt %s: %v", fileUrl, err)
+		return false, fmt.Errorf("decrypt %s: %v", fileUrl, err)
 	}
 	if isContentCompressed {
 		decryptedData, err = DecompressData(decryptedData)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(decryptedData) < int(offset)+size {
|
if len(decryptedData) < int(offset)+size {
|
||||||
return fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size)
|
return false, fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size)
|
||||||
}
|
}
|
||||||
if isFullChunk {
|
if isFullChunk {
|
||||||
fn(decryptedData)
|
fn(decryptedData)
|
||||||
} else {
|
} else {
|
||||||
fn(decryptedData[int(offset) : int(offset)+size])
|
fn(decryptedData[int(offset) : int(offset)+size])
|
||||||
}
|
}
|
||||||
return nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) {
|
func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) {
|
||||||
|
|
|
@@ -54,7 +54,8 @@ func (c *Conn) Read(b []byte) (count int, e error) {
 
 func (c *Conn) Write(b []byte) (count int, e error) {
 	if c.WriteTimeout != 0 {
-		err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout))
+		// minimum 4KB/s
+		err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout * time.Duration(len(b)/40000+1)))
 		if err != nil {
 			return 0, err
 		}
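Note: the write deadline now scales with the buffer size. The multiplier len(b)/40000+1 grants one extra WriteTimeout per 40,000 bytes, so large writes to slow peers are no longer cut off by a fixed deadline; with a 10s WriteTimeout the peer only needs to sustain roughly 4 KB/s. A standalone sketch of the deadline arithmetic, using the same constants as the diff:

package main

import (
	"fmt"
	"time"
)

// scaledDeadline computes how long a write of n bytes is allowed to take:
// one base timeout per started 40,000-byte slice of the buffer.
func scaledDeadline(base time.Duration, n int) time.Duration {
	return base * time.Duration(n/40000+1)
}

func main() {
	base := 10 * time.Second
	for _, n := range []int{1024, 40000, 400000} {
		d := scaledDeadline(base, n)
		fmt.Printf("%7d bytes -> %v (min rate ~%.1f KB/s)\n", n, d, float64(n)/d.Seconds()/1024)
	}
}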