mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
commit ee90edd0e3
.github/workflows/binaries_dev.yml (vendored, 56 changed lines)

@@ -20,16 +20,60 @@ jobs:
           assets: |
             weed-*

-  build_dev:
+  build_dev_linux:
     needs: cleanup
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        goos: [linux, windows, darwin, freebsd]
-        goarch: [amd64, arm64]
-        exclude:
-          - goarch: arm64
-            goos: windows
+        goos: [linux]
+        goarch: [amd64]

     steps:

       - name: Check out code into the Go module directory
         uses: actions/checkout@v2

       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

       - name: Go Release Binaries Large Disk
         uses: wangyoucao577/go-release-action@v1.20
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
           pre_command: export CGO_ENABLED=0
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
           project_path: weed
           binary_name: weed-large-disk
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

       - name: Go Release Binaries Normal Volume Size
         uses: wangyoucao577/go-release-action@v1.20
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
           pre_command: export CGO_ENABLED=0
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
           project_path: weed
           binary_name: weed-normal-disk
           asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

   build_dev_darwin:
     needs: build_dev_linux
     runs-on: ubuntu-latest
     strategy:
       matrix:
         goos: [darwin]
         goarch: [amd64, arm64]

     steps:
@@ -6,25 +6,37 @@ services:
     ports:
       - 9333:9333
       - 19333:19333
-    command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m1"
+    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+    environment:
+      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
+      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
+      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
   master1:
     image: chrislusf/seaweedfs:local
     ports:
       - 9334:9334
       - 19334:19334
-    command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m2"
+    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+    environment:
+      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
+      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
+      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
   master2:
     image: chrislusf/seaweedfs:local
     ports:
       - 9335:9335
       - 19335:19335
-    command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m3"
+    command: "-v=1 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp"
+    environment:
+      WEED_MASTER_VOLUME_GROWTH_COPY_1: 1
+      WEED_MASTER_VOLUME_GROWTH_COPY_2: 2
+      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
   volume1:
     image: chrislusf/seaweedfs:local
     ports:
       - 8080:8080
       - 18080:18080
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1 -disk=ssd1'
+    command: 'volume -dataCenter dc1 -rack v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1'
     depends_on:
       - master0
       - master1

@@ -34,7 +46,7 @@ services:
     ports:
       - 8082:8082
       - 18082:18082
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1 -disk=ssd1'
+    command: 'volume -dataCenter dc2 -rack v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1'
     depends_on:
       - master0
       - master1

@@ -44,7 +56,7 @@ services:
     ports:
       - 8083:8083
       - 18083:18083
-    command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
+    command: 'volume -dataCenter dc3 -rack v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1'
     depends_on:
       - master0
       - master1

@@ -54,7 +66,8 @@ services:
     ports:
       - 8888:8888
       - 18888:18888
-    command: 'filer -master="master0:9333,master1:9334,master2:9335"'
+      - 8111:8111
+    command: 'filer -defaultReplicaPlacement 100 -iam -master="master0:9333,master1:9334,master2:9335"'
     depends_on:
       - master0
       - master1

@@ -65,11 +78,11 @@ services:
     image: chrislusf/seaweedfs:local
     ports:
       - 8333:8333
-    command: 's3 -filer="filer:8888"'
+    command: '-v=9 s3 -filer="filer:8888"'
     depends_on:
       - master0
       - master1
       - master2
       - volume1
       - volume2
       - filer
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.71"
-version: "2.71"
+appVersion: "2.74"
+version: "2.74"
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.8</version>
+    <version>1.6.9</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.8</version>
+    <version>1.6.9</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@

     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.6.8</version>
+    <version>1.6.9</version>

     <parent>
         <groupId>org.sonatype.oss</groupId>
other/java/client/src/main/java/seaweedfs/client/ReadChunks.java (new file, 109 lines)

@@ -0,0 +1,109 @@
package seaweedfs.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class ReadChunks {

    public static List<SeaweedRead.VisibleInterval> readResolvedChunks(List<FilerProto.FileChunk> chunkList) throws IOException {
        List<Point> points = new ArrayList<>(chunkList.size() * 2);
        for (FilerProto.FileChunk chunk : chunkList) {
            points.add(new Point(chunk.getOffset(), chunk, true));
            points.add(new Point(chunk.getOffset() + chunk.getSize(), chunk, false));
        }
        Collections.sort(points, new Comparator<Point>() {
            @Override
            public int compare(Point a, Point b) {
                if (a.x != b.x) {
                    // compare the longs directly; casting (a.x - b.x) to int can overflow
                    return Long.compare(a.x, b.x);
                }
                if (a.ts != b.ts) {
                    return Long.compare(a.ts, b.ts);
                }
                if (!a.isStart) {
                    return -1;
                }
                return 1;
            }
        });

        long prevX = 0;
        List<SeaweedRead.VisibleInterval> visibles = new ArrayList<>();
        ArrayList<Point> queue = new ArrayList<>();
        for (Point point : points) {
            if (point.isStart) {
                if (queue.size() > 0) {
                    int lastIndex = queue.size() - 1;
                    Point lastPoint = queue.get(lastIndex);
                    if (point.x != prevX && lastPoint.ts < point.ts) {
                        addToVisibles(visibles, prevX, lastPoint, point);
                        prevX = point.x;
                    }
                }
                // insert into queue
                for (int i = queue.size(); i >= 0; i--) {
                    if (i == 0 || queue.get(i - 1).ts <= point.ts) {
                        if (i == queue.size()) {
                            prevX = point.x;
                        }
                        queue.add(i, point);
                        break;
                    }
                }
            } else {
                int lastIndex = queue.size() - 1;
                int index = lastIndex;
                Point startPoint = null;
                for (; index >= 0; index--) {
                    startPoint = queue.get(index);
                    if (startPoint.ts == point.ts) {
                        queue.remove(index);
                        break;
                    }
                }
                if (index == lastIndex && startPoint != null) {
                    addToVisibles(visibles, prevX, startPoint, point);
                    prevX = point.x;
                }
            }
        }

        return visibles;

    }

    private static void addToVisibles(List<SeaweedRead.VisibleInterval> visibles, long prevX, Point startPoint, Point point) {
        if (prevX < point.x) {
            FilerProto.FileChunk chunk = startPoint.chunk;
            visibles.add(new SeaweedRead.VisibleInterval(
                    prevX,
                    point.x,
                    chunk.getFileId(),
                    chunk.getMtime(),
                    prevX - chunk.getOffset(),
                    chunk.getOffset() == prevX && chunk.getSize() == prevX - startPoint.x,
                    chunk.getCipherKey().toByteArray(),
                    chunk.getIsCompressed()
            ));
        }
    }

    static class Point {
        long x;
        long ts;
        FilerProto.FileChunk chunk;
        boolean isStart;

        public Point(long x, FilerProto.FileChunk chunk, boolean isStart) {
            this.x = x;
            this.ts = chunk.getMtime();
            this.chunk = chunk;
            this.isStart = isStart;
        }
    }

}
@@ -226,96 +226,8 @@ public class SeaweedRead {

         chunkList = FileChunkManifest.resolveChunkManifest(filerClient, chunkList);

-        FilerProto.FileChunk[] chunks = chunkList.toArray(new FilerProto.FileChunk[0]);
-        Arrays.sort(chunks, new Comparator<FilerProto.FileChunk>() {
-            @Override
-            public int compare(FilerProto.FileChunk a, FilerProto.FileChunk b) {
-                // if just a.getMtime() - b.getMtime(), it will overflow!
-                if (a.getMtime() < b.getMtime()) {
-                    return -1;
-                } else if (a.getMtime() > b.getMtime()) {
-                    return 1;
-                }
-                return 0;
-            }
-        });
+        return ReadChunks.readResolvedChunks(chunkList);

-        List<VisibleInterval> visibles = new ArrayList<>();
-        for (FilerProto.FileChunk chunk : chunks) {
-            List<VisibleInterval> newVisibles = new ArrayList<>();
-            visibles = mergeIntoVisibles(visibles, newVisibles, chunk);
-        }
-
-        return visibles;
     }

-    private static List<VisibleInterval> mergeIntoVisibles(List<VisibleInterval> visibles,
-                                                           List<VisibleInterval> newVisibles,
-                                                           FilerProto.FileChunk chunk) {
-        VisibleInterval newV = new VisibleInterval(
-                chunk.getOffset(),
-                chunk.getOffset() + chunk.getSize(),
-                chunk.getFileId(),
-                chunk.getMtime(),
-                0,
-                true,
-                chunk.getCipherKey().toByteArray(),
-                chunk.getIsCompressed()
-        );
-
-        // easy cases to speed up
-        if (visibles.size() == 0) {
-            visibles.add(newV);
-            return visibles;
-        }
-        if (visibles.get(visibles.size() - 1).stop <= chunk.getOffset()) {
-            visibles.add(newV);
-            return visibles;
-        }
-
-        for (VisibleInterval v : visibles) {
-            if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) {
-                newVisibles.add(new VisibleInterval(
-                        v.start,
-                        chunk.getOffset(),
-                        v.fileId,
-                        v.modifiedTime,
-                        v.chunkOffset,
-                        false,
-                        v.cipherKey,
-                        v.isCompressed
-                ));
-            }
-            long chunkStop = chunk.getOffset() + chunk.getSize();
-            if (v.start < chunkStop && chunkStop < v.stop) {
-                newVisibles.add(new VisibleInterval(
-                        chunkStop,
-                        v.stop,
-                        v.fileId,
-                        v.modifiedTime,
-                        v.chunkOffset + (chunkStop - v.start),
-                        false,
-                        v.cipherKey,
-                        v.isCompressed
-                ));
-            }
-            if (chunkStop <= v.start || v.stop <= chunk.getOffset()) {
-                newVisibles.add(v);
-            }
-        }
-        newVisibles.add(newV);
-
-        // keep everything sorted
-        for (int i = newVisibles.size() - 1; i >= 0; i--) {
-            if (i > 0 && newV.start < newVisibles.get(i - 1).start) {
-                newVisibles.set(i, newVisibles.get(i - 1));
-            } else {
-                newVisibles.set(i, newV);
-                break;
-            }
-        }
-
-        return newVisibles;
-    }
-
     public static String parseVolumeId(String fileId) {
@@ -30,6 +30,8 @@ service SeaweedFiler {

     rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
     }
+    rpc StreamRenameEntry (StreamRenameEntryRequest) returns (stream StreamRenameEntryResponse) {
+    }

     rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
     }

@@ -225,6 +227,18 @@ message AtomicRenameEntryRequest {
 message AtomicRenameEntryResponse {
 }

+message StreamRenameEntryRequest {
+    string old_directory = 1;
+    string old_name = 2;
+    string new_directory = 3;
+    string new_name = 4;
+    repeated int32 signatures = 5;
+}
+message StreamRenameEntryResponse {
+    string directory = 1;
+    EventNotification event_notification = 2;
+    int64 ts_ns = 3;
+}
 message AssignVolumeRequest {
     int32 count = 1;
     string collection = 2;
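StreamRenameEntry is a server-streaming RPC, so a caller drains it with a Recv loop rather than a single request/response call. Below is a minimal sketch, not part of this commit, of how a Go client consumes the stream; it assumes a connected filer_pb.SeaweedFilerClient and uses only the request fields declared above (the paths are hypothetical). The FUSE mount applies the same Recv pattern later in this commit, in weed/filesys/dir_rename.go.

package main

import (
	"context"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// followRename drains the StreamRenameEntry server stream, handling each
// rename event as it arrives. Client connection wiring is assumed.
func followRename(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	stream, err := client.StreamRenameEntry(ctx, &filer_pb.StreamRenameEntryRequest{
		OldDirectory: "/old/dir", // hypothetical paths, for illustration only
		OldName:      "a.txt",
		NewDirectory: "/new/dir",
		NewName:      "a.txt",
	})
	if err != nil {
		return err
	}
	for {
		resp, recvErr := stream.Recv()
		if recvErr == io.EOF {
			return nil // server closed the stream: the rename is fully applied
		}
		if recvErr != nil {
			return recvErr
		}
		// each response carries one create-or-delete EventNotification
		_ = resp.EventNotification
	}
}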
@@ -6,6 +6,7 @@ import org.junit.Test;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Random;

 public class SeaweedReadTest {

@@ -13,17 +14,17 @@ public class SeaweedReadTest {
     public void testNonOverlappingVisibleIntervals() throws IOException {
         List<FilerProto.FileChunk> chunks = new ArrayList<>();
         chunks.add(FilerProto.FileChunk.newBuilder()
-            .setFileId("aaa")
-            .setOffset(0)
-            .setSize(100)
-            .setMtime(1000)
-            .build());
+                .setFileId("aaa")
+                .setOffset(0)
+                .setSize(100)
+                .setMtime(1000)
+                .build());
         chunks.add(FilerProto.FileChunk.newBuilder()
-            .setFileId("bbb")
-            .setOffset(100)
-            .setSize(133)
-            .setMtime(2000)
-            .build());
+                .setFileId("bbb")
+                .setOffset(100)
+                .setSize(133)
+                .setMtime(2000)
+                .build());

         List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks);
         for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
@@ -61,4 +62,106 @@ public class SeaweedReadTest {

     }

+    @Test
+    public void testReadResolvedChunks() throws IOException {
+        List<FilerProto.FileChunk> chunks = new ArrayList<>();
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("a")
+                .setOffset(0)
+                .setSize(100)
+                .setMtime(1)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("b")
+                .setOffset(50)
+                .setSize(100)
+                .setMtime(2)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("c")
+                .setOffset(200)
+                .setSize(50)
+                .setMtime(3)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("d")
+                .setOffset(250)
+                .setSize(50)
+                .setMtime(4)
+                .build());
+        chunks.add(FilerProto.FileChunk.newBuilder()
+                .setFileId("e")
+                .setOffset(175)
+                .setSize(100)
+                .setMtime(5)
+                .build());
+
+        List<SeaweedRead.VisibleInterval> visibleIntervals = ReadChunks.readResolvedChunks(chunks);
+        for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
+            System.out.println("visible:" + visibleInterval);
+        }
+
+        Assert.assertEquals(4, visibleIntervals.size());
+
+        SeaweedRead.VisibleInterval visibleInterval = visibleIntervals.get(0);
+        Assert.assertEquals(visibleInterval.start, 0);
+        Assert.assertEquals(visibleInterval.stop, 50);
+        Assert.assertEquals(visibleInterval.modifiedTime, 1);
+        Assert.assertEquals(visibleInterval.fileId, "a");
+
+        visibleInterval = visibleIntervals.get(1);
+        Assert.assertEquals(visibleInterval.start, 50);
+        Assert.assertEquals(visibleInterval.stop, 150);
+        Assert.assertEquals(visibleInterval.modifiedTime, 2);
+        Assert.assertEquals(visibleInterval.fileId, "b");
+
+        visibleInterval = visibleIntervals.get(2);
+        Assert.assertEquals(visibleInterval.start, 175);
+        Assert.assertEquals(visibleInterval.stop, 275);
+        Assert.assertEquals(visibleInterval.modifiedTime, 5);
+        Assert.assertEquals(visibleInterval.fileId, "e");
+
+        visibleInterval = visibleIntervals.get(3);
+        Assert.assertEquals(visibleInterval.start, 275);
+        Assert.assertEquals(visibleInterval.stop, 300);
+        Assert.assertEquals(visibleInterval.modifiedTime, 4);
+        Assert.assertEquals(visibleInterval.fileId, "d");
+
+    }
+
+    @Test
+    public void testRandomizedReadResolvedChunks() throws IOException {
+        Random random = new Random();
+        int limit = 1024 * 1024;
+        long[] array = new long[limit];
+        List<FilerProto.FileChunk> chunks = new ArrayList<>();
+        for (long ts = 0; ts < 1024; ts++) {
+            int x = random.nextInt(limit);
+            int y = random.nextInt(limit);
+            int size = Math.min(Math.abs(x - y), 1024);
+            chunks.add(randomWrite(array, Math.min(x, y), size, ts));
+        }
+
+        List<SeaweedRead.VisibleInterval> visibleIntervals = ReadChunks.readResolvedChunks(chunks);
+        for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) {
+            System.out.println("visible:" + visibleInterval);
+            for (int i = (int) visibleInterval.start; i < visibleInterval.stop; i++) {
+                Assert.assertEquals(array[i], visibleInterval.modifiedTime);
+            }
+        }
+    }
+
+    private FilerProto.FileChunk randomWrite(long[] array, int start, int size, long ts) {
+        for (int i = start; i < start + size; i++) {
+            array[i] = ts;
+        }
+        return FilerProto.FileChunk.newBuilder()
+                .setFileId("")
+                .setOffset(start)
+                .setSize(size)
+                .setMtime(ts)
+                .build();
+    }
+}
@@ -11,13 +11,13 @@
         <dependency>
             <groupId>com.github.chrislusf</groupId>
             <artifactId>seaweedfs-client</artifactId>
-            <version>1.6.8</version>
+            <version>1.6.9</version>
             <scope>compile</scope>
         </dependency>
         <dependency>
             <groupId>com.github.chrislusf</groupId>
             <artifactId>seaweedfs-hadoop2-client</artifactId>
-            <version>1.6.8</version>
+            <version>1.6.9</version>
             <scope>compile</scope>
         </dependency>
         <dependency>
@@ -301,7 +301,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
@@ -309,7 +309,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>

     <properties>
-        <seaweedfs.client.version>1.6.8</seaweedfs.client.version>
+        <seaweedfs.client.version>1.6.9</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
@@ -2,14 +2,15 @@ package basic

 import (
 	"fmt"
+	"io"
+	"os"
+	"strings"
+	"testing"
+
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
-	"io/ioutil"
-	"os"
-	"strings"
-	"testing"
 )

 var (

@@ -108,8 +109,8 @@ func TestListBucket(t *testing.T) {
 func TestListObjectV2(t *testing.T) {

 	listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
-		Bucket: aws.String(Bucket),
-		Prefix: aws.String("foo"),
+		Bucket:    aws.String(Bucket),
+		Prefix:    aws.String("foo"),
 		Delimiter: aws.String("/"),
 	})
 	if err != nil {

@@ -169,7 +170,7 @@ func TestObjectOp(t *testing.T) {
 		exitErrorf("Unable to get copy object, %v", err)
 	}

-	data, err := ioutil.ReadAll(getObj.Body)
+	data, err := io.ReadAll(getObj.Body)
 	if err != nil {
 		exitErrorf("Unable to read object data, %v", err)
 	}
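Most of the remaining Go hunks in this commit are the same mechanical migration: Go 1.16 deprecated the io/ioutil package, and every helper it provided has a drop-in replacement in io or os. A sketch of the one-for-one mapping the commit applies (illustrative code, not from the repository):

package main

import (
	"io"
	"os"
	"strings"
)

// Go 1.16 moved io/ioutil's helpers into io and os. One behavioral
// difference worth noting: os.ReadDir returns []fs.DirEntry, whereas
// ioutil.ReadDir returned []fs.FileInfo.
func migrate() error {
	data, err := io.ReadAll(strings.NewReader("x")) // was ioutil.ReadAll
	if err != nil {
		return err
	}
	if err := os.WriteFile("/tmp/out", data, 0644); err != nil { // was ioutil.WriteFile
		return err
	}
	dir, err := os.MkdirTemp("", "example") // was ioutil.TempDir
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)
	entries, err := os.ReadDir(dir) // was ioutil.ReadDir
	if err != nil {
		return err
	}
	_ = entries
	_, err = io.Copy(io.Discard, strings.NewReader("drain")) // was ioutil.Discard
	return err
}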
@@ -5,7 +5,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"math/rand"
 	"mime/multipart"

@@ -45,7 +44,7 @@ func main() {
 			defer wg.Done()

 			client := &http.Client{Transport: &http.Transport{
-				MaxIdleConns: 1024,
+				MaxIdleConns:        1024,
 				MaxIdleConnsPerHost: 1024,
 			}}
 			r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))

@@ -128,7 +127,7 @@ func uploadFileToFiler(client *http.Client, data []byte, filename, destination s
 	if err != nil {
 		return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
 	}
-	io.Copy(ioutil.Discard, resp.Body)
+	io.Copy(io.Discard, resp.Body)
 	resp.Body.Close()
 }
@@ -5,7 +5,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"math/rand"
 	"mime/multipart"

@@ -36,7 +35,7 @@ func main() {

 	var fileNames []string

-	files, err := ioutil.ReadDir(*dir)
+	files, err := os.ReadDir(*dir)
 	if err != nil {
 		log.Fatalf("fail to read dir %v: %v", *dir, err)
 	}

@@ -142,7 +141,7 @@ func uploadFileToFiler(client *http.Client, filename, destination string) (size
 	if err != nil {
 		return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
 	}
-	io.Copy(ioutil.Discard, resp.Body)
+	io.Copy(io.Discard, resp.Body)
 	resp.Body.Close()
 }
@@ -2,17 +2,17 @@ package command

 import (
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/pb"
-	"github.com/chrislusf/seaweedfs/weed/security"
-	"google.golang.org/grpc"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"path"
 	"strings"
+
+	"google.golang.org/grpc"
+
 	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

@@ -82,7 +82,7 @@ func downloadToFile(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpti
 	}
 	defer f.Close()
 	if isFileList {
-		content, err := ioutil.ReadAll(rc.Body)
+		content, err := io.ReadAll(rc.Body)
 		if err != nil {
 			return err
 		}

@@ -119,7 +119,7 @@ func fetchContent(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption
 		return "", nil, e
 	}
 	defer util.CloseResponse(rc)
-	content, e = ioutil.ReadAll(rc.Body)
+	content, e = io.ReadAll(rc.Body)
 	return
 }
@@ -3,9 +3,7 @@ package command
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/filer"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"path/filepath"

@@ -16,14 +14,14 @@ import (

 	"google.golang.org/grpc"

-	"github.com/chrislusf/seaweedfs/weed/util/grace"
-
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/chrislusf/seaweedfs/weed/util/grace"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 )

@@ -212,7 +210,7 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
 	}

 	if mode.IsDir() {
-		files, _ := ioutil.ReadDir(fileOrDir)
+		files, _ := os.ReadDir(fileOrDir)
 		for _, subFileOrDir := range files {
 			cleanedDestDirectory := filepath.Clean(destPath + fi.Name())
 			if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), cleanedDestDirectory+"/", fileCopyTaskChan); err != nil {

@@ -339,7 +337,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
 	if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 {

 		mimeType = detectMimeType(f)
-		data, err := ioutil.ReadAll(f)
+		data, err := io.ReadAll(f)
 		if err != nil {
 			return err
 		}
@@ -2,9 +2,10 @@ package command

 import (
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/command/scaffold"
-	"io/ioutil"
 	"os"
 	"path/filepath"
+
+	"github.com/chrislusf/seaweedfs/weed/command/scaffold"
 )

 func init() {

@@ -55,7 +56,7 @@ func runScaffold(cmd *Command, args []string) bool {
 	}

 	if *outputPath != "" {
-		ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
+		os.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
 	} else {
 		fmt.Println(content)
 	}
@@ -1,5 +0,0 @@
-[redis3]
-enabled = true
-address = "localhost:6379"
-password = ""
-database = 0
@@ -225,6 +225,12 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunction

 	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)

+	visibles2 := readResolvedChunks(chunks)
+
+	if true {
+		return visibles2, err
+	}
+
 	sort.Slice(chunks, func(i, j int) bool {
 		if chunks[i].Mtime == chunks[j].Mtime {
 			filer_pb.EnsureFid(chunks[i])

@@ -246,9 +252,26 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunction

 	}

+	if len(visibles) != len(visibles2) {
+		fmt.Printf("different visibles size %d : %d\n", len(visibles), len(visibles2))
+	} else {
+		for i := 0; i < len(visibles); i++ {
+			checkDifference(visibles[i], visibles2[i])
+		}
+	}
+
 	return
 }

+func checkDifference(x, y VisibleInterval) {
+	if x.start != y.start ||
+		x.stop != y.stop ||
+		x.fileId != y.fileId ||
+		x.modifiedTime != y.modifiedTime {
+		fmt.Printf("different visible %+v : %+v\n", x, y)
+	}
+}
+
 // find non-overlapping visible intervals
 // visible interval map to one file chunk
weed/filer/filechunks_read.go (new file, 119 lines)

@@ -0,0 +1,119 @@
package filer

import (
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"sort"
)

func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) {

	var points []*Point
	for _, chunk := range chunks {
		points = append(points, &Point{
			x:       chunk.Offset,
			ts:      chunk.Mtime,
			chunk:   chunk,
			isStart: true,
		})
		points = append(points, &Point{
			x:       chunk.Offset + int64(chunk.Size),
			ts:      chunk.Mtime,
			chunk:   chunk,
			isStart: false,
		})
	}
	sort.Slice(points, func(i, j int) bool {
		if points[i].x != points[j].x {
			return points[i].x < points[j].x
		}
		if points[i].ts != points[j].ts {
			return points[i].ts < points[j].ts
		}
		if !points[i].isStart {
			return true
		}
		return false
	})

	var prevX int64
	var queue []*Point
	for _, point := range points {
		if point.isStart {
			if len(queue) > 0 {
				lastIndex := len(queue) - 1
				lastPoint := queue[lastIndex]
				if point.x != prevX && lastPoint.ts < point.ts {
					visibles = addToVisibles(visibles, prevX, lastPoint, point)
					prevX = point.x
				}
			}
			// insert into queue
			for i := len(queue); i >= 0; i-- {
				if i == 0 || queue[i-1].ts <= point.ts {
					if i == len(queue) {
						prevX = point.x
					}
					queue = addToQueue(queue, i, point)
					break
				}
			}
		} else {
			lastIndex := len(queue) - 1
			index := lastIndex
			var startPoint *Point
			for ; index >= 0; index-- {
				startPoint = queue[index]
				if startPoint.ts == point.ts {
					queue = removeFromQueue(queue, index)
					break
				}
			}
			if index == lastIndex && startPoint != nil {
				visibles = addToVisibles(visibles, prevX, startPoint, point)
				prevX = point.x
			}
		}
	}

	return
}

func removeFromQueue(queue []*Point, index int) []*Point {
	for i := index; i < len(queue)-1; i++ {
		queue[i] = queue[i+1]
	}
	queue = queue[:len(queue)-1]
	return queue
}

func addToQueue(queue []*Point, index int, point *Point) []*Point {
	queue = append(queue, point)
	for i := len(queue) - 1; i > index; i-- {
		queue[i], queue[i-1] = queue[i-1], queue[i]
	}
	return queue
}

func addToVisibles(visibles []VisibleInterval, prevX int64, startPoint *Point, point *Point) []VisibleInterval {
	if prevX < point.x {
		chunk := startPoint.chunk
		visibles = append(visibles, VisibleInterval{
			start:        prevX,
			stop:         point.x,
			fileId:       chunk.GetFileIdString(),
			modifiedTime: chunk.Mtime,
			chunkOffset:  prevX - chunk.Offset,
			chunkSize:    chunk.Size,
			cipherKey:    chunk.CipherKey,
			isGzipped:    chunk.IsCompressed,
		})
	}
	return visibles
}

type Point struct {
	x       int64
	ts      int64
	chunk   *filer_pb.FileChunk
	isStart bool
}
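readResolvedChunks (and its Java twin ReadChunks above) is a sweep-line pass: each chunk contributes a start and a stop event, the 2n events are sorted by offset with ties broken by Mtime, and a queue of currently open chunks is kept ordered by Mtime so the newest write owns each emitted interval. That makes resolution O(n log n) where the replaced pairwise merge was quadratic. The following self-contained sketch, not SeaweedFS code, re-derives the idea on plain intervals; it assumes distinct, non-negative timestamps.

package main

import (
	"fmt"
	"sort"
)

// write models one chunk: bytes [start,stop) written at time ts.
type write struct{ start, stop, ts int64 }

type event struct {
	x, ts   int64
	isStart bool
}

func visibleIntervals(writes []write) (out []write) {
	var events []event
	for _, w := range writes {
		events = append(events, event{w.start, w.ts, true}, event{w.stop, w.ts, false})
	}
	sort.Slice(events, func(i, j int) bool {
		if events[i].x != events[j].x {
			return events[i].x < events[j].x
		}
		if events[i].ts != events[j].ts {
			return events[i].ts < events[j].ts
		}
		return !events[i].isStart // close an old write before opening a new one
	})

	var open []int64 // timestamps of writes covering the sweep position, ascending
	var prevX int64
	for _, e := range events {
		if e.isStart {
			if n := len(open); n > 0 && e.ts > open[n-1] && e.x > prevX {
				// the newest already-open write owned [prevX, e.x)
				out = append(out, write{prevX, e.x, open[n-1]})
			}
			if n := len(open); n == 0 || e.ts > open[n-1] {
				prevX = e.x // this write goes on top and owns bytes from here on
			}
			i := sort.Search(len(open), func(i int) bool { return open[i] > e.ts })
			open = append(open, 0)
			copy(open[i+1:], open[i:])
			open[i] = e.ts
		} else {
			if n := len(open); n > 0 && e.ts == open[n-1] {
				if e.x > prevX {
					out = append(out, write{prevX, e.x, e.ts})
				}
				prevX = e.x // the next-newest open write takes over from here
			}
			i := sort.Search(len(open), func(i int) bool { return open[i] >= e.ts })
			open = append(open[:i], open[i+1:]...)
		}
	}
	return out
}

func main() {
	// same shape as the committed test: write b (ts 2) overlaps the tail of a (ts 1)
	for _, v := range visibleIntervals([]write{{0, 100, 1}, {50, 150, 2}}) {
		fmt.Printf("[%d,%d) ts=%d\n", v.start, v.stop, v.ts) // [0,50) ts=1, then [50,150) ts=2
	}
}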
weed/filer/filechunks_read_test.go (new file, 210 lines)

@@ -0,0 +1,210 @@
package filer

import (
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"math/rand"
	"testing"
)

func TestReadResolvedChunks(t *testing.T) {

	chunks := []*filer_pb.FileChunk{
		{
			FileId: "a",
			Offset: 0,
			Size:   100,
			Mtime:  1,
		},
		{
			FileId: "b",
			Offset: 50,
			Size:   100,
			Mtime:  2,
		},
		{
			FileId: "c",
			Offset: 200,
			Size:   50,
			Mtime:  3,
		},
		{
			FileId: "d",
			Offset: 250,
			Size:   50,
			Mtime:  4,
		},
		{
			FileId: "e",
			Offset: 175,
			Size:   100,
			Mtime:  5,
		},
	}

	visibles := readResolvedChunks(chunks)

	for _, visible := range visibles {
		fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime)
	}

}

func TestRandomizedReadResolvedChunks(t *testing.T) {

	var limit int64 = 1024 * 1024
	array := make([]int64, limit)
	var chunks []*filer_pb.FileChunk
	for ts := int64(0); ts < 1024; ts++ {
		x := rand.Int63n(limit)
		y := rand.Int63n(limit)
		size := x - y
		if size < 0 {
			size = -size
		}
		if size > 1024 {
			size = 1024
		}
		start := x
		if start > y {
			start = y
		}
		chunks = append(chunks, randomWrite(array, start, size, ts))
	}

	visibles := readResolvedChunks(chunks)

	for _, visible := range visibles {
		for i := visible.start; i < visible.stop; i++ {
			if array[i] != visible.modifiedTime {
				t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime)
			}
		}
	}

	// fmt.Printf("visibles %d", len(visibles))

}

func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.FileChunk {
	for i := start; i < start+size; i++ {
		array[i] = ts
	}
	// fmt.Printf("write [%d,%d) %d\n", start, start+size, ts)
	return &filer_pb.FileChunk{
		FileId: "",
		Offset: start,
		Size:   uint64(size),
		Mtime:  ts,
	}
}

func TestSequentialReadResolvedChunks(t *testing.T) {

	var chunkSize int64 = 1024 * 1024 * 2
	var chunks []*filer_pb.FileChunk
	for ts := int64(0); ts < 13; ts++ {
		chunks = append(chunks, &filer_pb.FileChunk{
			FileId: "",
			Offset: chunkSize * ts,
			Size:   uint64(chunkSize),
			Mtime:  1,
		})
	}

	visibles := readResolvedChunks(chunks)

	fmt.Printf("visibles %d", len(visibles))

}

func TestActualReadResolvedChunks(t *testing.T) {

	chunks := []*filer_pb.FileChunk{
		{
			FileId: "5,e7b96fef48",
			Offset: 0,
			Size:   2097152,
			Mtime:  1634447487595823000,
		},
		{
			FileId: "5,e5562640b9",
			Offset: 2097152,
			Size:   2097152,
			Mtime:  1634447487595826000,
		},
		{
			FileId: "5,df033e0fe4",
			Offset: 4194304,
			Size:   2097152,
			Mtime:  1634447487595827000,
		},
		{
			FileId: "7,eb08148a9b",
			Offset: 6291456,
			Size:   2097152,
			Mtime:  1634447487595827000,
		},
		{
			FileId: "7,e0f92d1604",
			Offset: 8388608,
			Size:   2097152,
			Mtime:  1634447487595828000,
		},
		{
			FileId: "7,e33cb63262",
			Offset: 10485760,
			Size:   2097152,
			Mtime:  1634447487595828000,
		},
		{
			FileId: "5,ea98e40e93",
			Offset: 12582912,
			Size:   2097152,
			Mtime:  1634447487595829000,
		},
		{
			FileId: "5,e165661172",
			Offset: 14680064,
			Size:   2097152,
			Mtime:  1634447487595829000,
		},
		{
			FileId: "3,e692097486",
			Offset: 16777216,
			Size:   2097152,
			Mtime:  1634447487595830000,
		},
		{
			FileId: "3,e28e2e3cbd",
			Offset: 18874368,
			Size:   2097152,
			Mtime:  1634447487595830000,
		},
		{
			FileId: "3,e443974d4e",
			Offset: 20971520,
			Size:   2097152,
			Mtime:  1634447487595830000,
		},
		{
			FileId: "2,e815bed597",
			Offset: 23068672,
			Size:   2097152,
			Mtime:  1634447487595831000,
		},
		{
			FileId: "5,e94715199e",
			Offset: 25165824,
			Size:   1974736,
			Mtime:  1634447487595832000,
		},
	}

	visibles := readResolvedChunks(chunks)

	for _, visible := range visibles {
		fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime)
	}

}
@@ -3,6 +3,10 @@ package filer
 import (
 	"bytes"
 	"context"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/wdclient"
+	"google.golang.org/grpc"
 	"io"

 	"github.com/chrislusf/seaweedfs/weed/glog"

@@ -26,6 +30,29 @@ type FilerConf struct {
 	rules ptrie.Trie
 }

+func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) {
+	var buf bytes.Buffer
+	if err := pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		if masterClient != nil {
+			return ReadEntry(masterClient, client, DirectoryEtcSeaweedFS, FilerConfName, &buf)
+		} else {
+			content, err := ReadInsideFiler(client, DirectoryEtcSeaweedFS, FilerConfName)
+			buf = *bytes.NewBuffer(content)
+			return err
+		}
+	}); err != nil && err != filer_pb.ErrNotFound {
+		return nil, fmt.Errorf("read %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
+	}
+
+	fc := NewFilerConf()
+	if buf.Len() > 0 {
+		if err := fc.LoadFromBytes(buf.Bytes()); err != nil {
+			return nil, fmt.Errorf("parse %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
+		}
+	}
+	return fc, nil
+}
+
 func NewFilerConf() (fc *FilerConf) {
 	fc = &FilerConf{
 		rules: ptrie.New(),

@@ -115,6 +142,18 @@ func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf
 	return pathConf
 }

+func (fc *FilerConf) GetCollectionTtls(collection string) (ttls map[string]string) {
+	ttls = make(map[string]string)
+	fc.rules.Walk(func(key []byte, value interface{}) bool {
+		t := value.(*filer_pb.FilerConf_PathConf)
+		if t.Collection == collection {
+			ttls[t.LocationPrefix] = t.GetTtl()
+		}
+		return true
+	})
+	return ttls
+}
+
 // merge if values in b is not empty, merge them into a
 func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
 	a.Collection = util.Nvl(b.Collection, a.Collection)
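The new ReadFilerConf and GetCollectionTtls pair lets other components load the filer's path-based configuration over gRPC and ask which TTL applies to each location prefix mapped to a collection. A hedged usage sketch (the address and dial-option wiring are assumed; the signatures come from the hunk above):

package main

import (
	"fmt"
	"log"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"google.golang.org/grpc"
)

// printCollectionTtls loads the filer configuration and lists the TTL
// configured for every path prefix belonging to the given collection.
// Passing nil for the master client makes ReadFilerConf read the entry
// directly from the filer, per the branch in the hunk above.
func printCollectionTtls(filerAddress pb.ServerAddress, dialOption grpc.DialOption, collection string) {
	fc, err := filer.ReadFilerConf(filerAddress, dialOption, nil)
	if err != nil {
		log.Fatalf("read filer conf: %v", err)
	}
	for prefix, ttl := range fc.GetCollectionTtls(collection) {
		fmt.Printf("%s => ttl %s\n", prefix, ttl)
	}
}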
@@ -253,13 +253,14 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
 		limit = math.MaxInt32 - 1
 	}
 	glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
-	lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, eachEntryFunc)
+	adjustedEntryFunc := func(entry *Entry) bool {
+		fsw.maybeReadHardLink(ctx, entry)
+		filer_pb.AfterEntryDeserialization(entry.Chunks)
+		return eachEntryFunc(entry)
+	}
+	lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, adjustedEntryFunc)
 	if err == ErrUnsupportedListDirectoryPrefixed {
-		lastFileName, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool {
-			fsw.maybeReadHardLink(ctx, entry)
-			filer_pb.AfterEntryDeserialization(entry.Chunks)
-			return eachEntryFunc(entry)
-		})
+		lastFileName, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, adjustedEntryFunc)
 	}
 	return lastFileName, err
 }
@@ -3,7 +3,6 @@ package leveldb
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"

@@ -14,7 +13,7 @@ import (

 func TestCreateAndFind(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
 	defer os.RemoveAll(dir)
 	store := &LevelDBStore{}
 	store.initialize(dir)

@@ -68,7 +67,7 @@ func TestCreateAndFind(t *testing.T) {

 func TestEmptyRoot(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
 	defer os.RemoveAll(dir)
 	store := &LevelDBStore{}
 	store.initialize(dir)

@@ -91,7 +90,7 @@ func TestEmptyRoot(t *testing.T) {

 func BenchmarkInsertEntry(b *testing.B) {
 	testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_bench")
 	defer os.RemoveAll(dir)
 	store := &LevelDBStore{}
 	store.initialize(dir)
@@ -2,7 +2,6 @@ package leveldb

 import (
 	"context"
-	"io/ioutil"
 	"os"
 	"testing"

@@ -12,7 +11,7 @@ import (

 func TestCreateAndFind(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
 	defer os.RemoveAll(dir)
 	store := &LevelDB2Store{}
 	store.initialize(dir, 2)

@@ -66,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {

 func TestEmptyRoot(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
 	defer os.RemoveAll(dir)
 	store := &LevelDB2Store{}
 	store.initialize(dir, 2)
@@ -2,7 +2,6 @@ package leveldb

 import (
 	"context"
-	"io/ioutil"
 	"os"
 	"testing"

@@ -12,7 +11,7 @@ import (

 func TestCreateAndFind(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
 	defer os.RemoveAll(dir)
 	store := &LevelDB3Store{}
 	store.initialize(dir)

@@ -66,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {

 func TestEmptyRoot(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
 	defer os.RemoveAll(dir)
 	store := &LevelDB3Store{}
 	store.initialize(dir)
@@ -1,3 +1,4 @@
+//go:build rocksdb
 // +build rocksdb

 package rocksdb

@@ -5,7 +6,6 @@ package rocksdb
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"

@@ -16,7 +16,7 @@ import (

 func TestCreateAndFind(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
 	defer os.RemoveAll(dir)
 	store := &RocksDBStore{}
 	store.initialize(dir)

@@ -70,7 +70,7 @@ func TestCreateAndFind(t *testing.T) {

 func TestEmptyRoot(t *testing.T) {
 	testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
 	defer os.RemoveAll(dir)
 	store := &RocksDBStore{}
 	store.initialize(dir)

@@ -93,7 +93,7 @@ func TestEmptyRoot(t *testing.T) {

 func BenchmarkInsertEntry(b *testing.B) {
 	testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
-	dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench")
+	dir, _ := os.MkdirTemp("", "seaweedfs_filer_bench")
 	defer os.RemoveAll(dir)
 	store := &RocksDBStore{}
 	store.initialize(dir)
@@ -2,12 +2,10 @@ package filesys

 import (
 	"context"
-	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"math"
-
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
+	"io"

 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"

@@ -23,19 +21,12 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector

 	glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)

-	// find local old entry
-	oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
-	if err != nil {
-		glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
-		return fuse.ENOENT
-	}
-
 	// update remote filer
-	err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()

-		request := &filer_pb.AtomicRenameEntryRequest{
+		request := &filer_pb.StreamRenameEntryRequest{
 			OldDirectory: dir.FullPath(),
 			OldName:      req.OldName,
 			NewDirectory: newDir.FullPath(),

@@ -43,12 +34,28 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
 			Signatures: []int32{dir.wfs.signature},
 		}

-		_, err := client.AtomicRenameEntry(ctx, request)
+		stream, err := client.StreamRenameEntry(ctx, request)
 		if err != nil {
 			glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
 			return fuse.EXDEV
 		}

+		for {
+			resp, recvErr := stream.Recv()
+			if recvErr != nil {
+				if recvErr == io.EOF {
+					break
+				} else {
+					return recvErr
+				}
+			}
+
+			if err = dir.handleRenameResponse(ctx, resp); err != nil {
+				return err
+			}
+
+		}
+
+		return nil
+
 	})

@@ -57,23 +64,25 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
 		return fuse.EIO
 	}

-	err = dir.moveEntry(context.Background(), util.FullPath(dir.FullPath()), oldEntry, util.FullPath(newDir.FullPath()), req.NewName)
-	if err != nil {
-		glog.V(0).Infof("dir local Rename %s => %s : %v", oldPath, newPath, err)
-		return fuse.EIO
-	}
-
 	return nil
 }

-func (dir *Dir) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error {
-
-	oldName := entry.Name()
+func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamRenameEntryResponse) error {
+	// comes from filer StreamRenameEntry, can only be create or delete entry

-	oldPath := oldParent.Child(oldName)
-	newPath := newParent.Child(newName)
-	if err := dir.moveSelfEntry(ctx, oldParent, entry, newParent, newName, func() error {
+	if resp.EventNotification.NewEntry != nil {
+		// with new entry, the old entry name also exists. This is the first step to create new entry
+		newEntry := filer.FromPbEntry(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry)
+		if err := dir.wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, "", newEntry); err != nil {
+			return err
+		}

+		oldParent, newParent := util.FullPath(resp.Directory), util.FullPath(resp.EventNotification.NewParentPath)
+		oldName, newName := resp.EventNotification.OldEntry.Name, resp.EventNotification.NewEntry.Name
+
+		oldPath := oldParent.Child(oldName)
+		newPath := newParent.Child(newName)
 		oldFsNode := NodeWithId(oldPath.AsInode())
 		newFsNode := NodeWithId(newPath.AsInode())
 		newDirNode, found := dir.wfs.Server.FindInternalNode(NodeWithId(newParent.AsInode()))

@@ -110,65 +119,13 @@ func (dir *Dir) moveEntry(ctx context.Context, oldParent util.FullPath, entry *f
 		}
 		dir.wfs.handlesLock.Unlock()

-		if entry.IsDirectory() {
-			if err := dir.moveFolderSubEntries(ctx, oldParent, oldName, newParent, newName); err != nil {
-				return err
-			}
-		}
-		return nil
-	}); err != nil {
-		return fmt.Errorf("fail to move %s => %s: %v", oldPath, newPath, err)
-	}
-
-	return nil
-}
-
-func (dir *Dir) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, oldName string, newParent util.FullPath, newName string) error {
-
-	currentDirPath := oldParent.Child(oldName)
-	newDirPath := newParent.Child(newName)
-
-	glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath)
-
-	var moveErr error
-	listErr := dir.wfs.metaCache.ListDirectoryEntries(ctx, currentDirPath, "", false, int64(math.MaxInt32), func(item *filer.Entry) bool {
-		moveErr = dir.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name())
-		if moveErr != nil {
-			return false
-		}
-		return true
-	})
-	if listErr != nil {
-		return listErr
-	}
-	if moveErr != nil {
-		return moveErr
-	}
-
-	return nil
-}
-
-func (dir *Dir) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error) error {
-
-	newPath := newParent.Child(newName)
-	oldPath := oldParent.Child(entry.Name())
-
-	entry.FullPath = newPath
-	if err := dir.wfs.metaCache.InsertEntry(ctx, entry); err != nil {
-		glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
-		return fuse.EIO
-	}
-
-	if moveFolderSubEntries != nil {
-		if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
-			return moveChildrenErr
-		}
+	} else if resp.EventNotification.OldEntry != nil {
+		// without new entry, only old entry name exists. This is the second step to delete old entry
+		if err := dir.wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, util.NewFullPath(resp.Directory, resp.EventNotification.OldEntry.Name), nil); err != nil {
+			return err
+		}
 	}

-	if err := dir.wfs.metaCache.DeleteEntry(ctx, oldPath); err != nil {
-		glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
-		return fuse.EIO
-	}
-
 	return nil

 }
@@ -2,7 +2,6 @@ package meta_cache

 import (
 	"context"
-	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/filer/leveldb"
 	"github.com/chrislusf/seaweedfs/weed/glog"

@@ -122,7 +121,8 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full
 	//defer mc.RUnlock()

 	if !mc.visitedBoundary.HasVisited(dirPath) {
-		return fmt.Errorf("unsynchronized dir: %v", dirPath)
+		// if this request comes after renaming, it should be fine
+		glog.Warningf("unsynchronized dir: %v", dirPath)
 	}

 	_, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool {
@@ -58,7 +58,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
 	}

 	util.RetryForever("followMetaUpdates", func() error {
-		return pb.WithFilerClientFollowMetadata(client, "mount", dir, lastTsNs, selfSignature, processEventFn, true)
+		return pb.WithFilerClientFollowMetadata(client, "mount", dir, &lastTsNs, selfSignature, processEventFn, true)
 	}, func(err error) bool {
 		glog.Errorf("follow metadata updates: %v", err)
 		return true
@@ -1,7 +1,6 @@
 package images

 import (
-	"io/ioutil"
 	"os"
 	"testing"
 )

@@ -9,11 +8,11 @@ import (
 func TestXYZ(t *testing.T) {
 	fname := "sample1.jpg"

-	dat, _ := ioutil.ReadFile(fname)
+	dat, _ := os.ReadFile(fname)

 	fixed_data := FixJpgOrientation(dat)

-	ioutil.WriteFile("fixed1.jpg", fixed_data, 0644)
+	os.WriteFile("fixed1.jpg", fixed_data, 0644)

 	os.Remove("fixed1.jpg")
@@ -2,7 +2,6 @@ package images

 import (
 	"bytes"
-	"io/ioutil"
 	"os"
 	"testing"
 )

@@ -10,13 +9,13 @@ import (
 func TestResizing(t *testing.T) {
 	fname := "sample2.webp"

-	dat, _ := ioutil.ReadFile(fname)
+	dat, _ := os.ReadFile(fname)

 	resized, _, _ := Resized(".webp", bytes.NewReader(dat), 100, 30, "")
 	buf := new(bytes.Buffer)
 	buf.ReadFrom(resized)

-	ioutil.WriteFile("resized1.png", buf.Bytes(), 0644)
+	os.WriteFile("resized1.png", buf.Bytes(), 0644)

 	os.Remove("resized1.png")
@@ -4,9 +4,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/pb"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"sort"
 	"sync"

@@ -14,6 +12,7 @@ import (
 	"google.golang.org/grpc"

 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

@@ -108,7 +107,7 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (wri
 		return written, err
 	}
 	defer func() {
-		io.Copy(ioutil.Discard, resp.Body)
+		io.Copy(io.Discard, resp.Body)
 		resp.Body.Close()
 	}()
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"mime/multipart"
 	"net/http"

@@ -91,7 +90,7 @@ func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResul
 	if ok {
 		data = bytesReader.Bytes
 	} else {
-		data, err = ioutil.ReadAll(reader)
+		data, err = io.ReadAll(reader)
 		if err != nil {
 			err = fmt.Errorf("read input: %v", err)
 			return

@@ -278,7 +277,7 @@ func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize
 		return &ret, nil
 	}

-	resp_body, ra_err := ioutil.ReadAll(resp.Body)
+	resp_body, ra_err := io.ReadAll(resp.Body)
 	if ra_err != nil {
 		return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err)
 	}
@@ -30,6 +30,8 @@ service SeaweedFiler {

     rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) {
     }
+    rpc StreamRenameEntry (StreamRenameEntryRequest) returns (stream StreamRenameEntryResponse) {
+    }

     rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) {
     }

@@ -225,6 +227,18 @@ message AtomicRenameEntryRequest {
 message AtomicRenameEntryResponse {
 }

+message StreamRenameEntryRequest {
+    string old_directory = 1;
+    string old_name = 2;
+    string new_directory = 3;
+    string new_name = 4;
+    repeated int32 signatures = 5;
+}
+message StreamRenameEntryResponse {
+    string directory = 1;
+    EventNotification event_notification = 2;
+    int64 ts_ns = 3;
+}
 message AssignVolumeRequest {
     int32 count = 1;
     string collection = 2;
(file diff suppressed because it is too large)
@@ -17,7 +17,7 @@ func FollowMetadata(filerAddress ServerAddress, grpcDialOption grpc.DialOption,
 	processEventFn ProcessMetadataFunc, fatalOnError bool) error {

 	err := WithFilerClient(filerAddress, grpcDialOption, makeFunc(clientName,
-		pathPrefix, additionalPathPrefixes, lastTsNs, selfSignature, processEventFn, fatalOnError))
+		pathPrefix, additionalPathPrefixes, &lastTsNs, selfSignature, processEventFn, fatalOnError))
 	if err != nil {
 		return fmt.Errorf("subscribing filer meta change: %v", err)
 	}
@@ -25,7 +25,7 @@ func FollowMetadata(filerAddress ServerAddress, grpcDialOption grpc.DialOption,
 }

 func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient,
-	clientName string, pathPrefix string, lastTsNs int64, selfSignature int32,
+	clientName string, pathPrefix string, lastTsNs *int64, selfSignature int32,
 	processEventFn ProcessMetadataFunc, fatalOnError bool) error {

 	err := filerClient.WithFilerClient(makeFunc(clientName,
@@ -37,7 +37,7 @@ func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient,
 	return nil
 }

-func makeFunc(clientName string, pathPrefix string, additionalPathPrefixes []string, lastTsNs int64, selfSignature int32,
+func makeFunc(clientName string, pathPrefix string, additionalPathPrefixes []string, lastTsNs *int64, selfSignature int32,
 	processEventFn ProcessMetadataFunc, fatalOnError bool) func(client filer_pb.SeaweedFilerClient) error {
 	return func(client filer_pb.SeaweedFilerClient) error {
 		ctx, cancel := context.WithCancel(context.Background())
@@ -46,7 +46,7 @@ func makeFunc(clientName string, pathPrefix string, additionalPathPrefixes []str
 			ClientName:   clientName,
 			PathPrefix:   pathPrefix,
 			PathPrefixes: additionalPathPrefixes,
-			SinceNs:      lastTsNs,
+			SinceNs:      *lastTsNs,
 			Signature:    selfSignature,
 		})
 		if err != nil {
@@ -69,7 +69,7 @@ func makeFunc(clientName string, pathPrefix string, additionalPathPrefixes []str
 				glog.Errorf("process %v: %v", resp, err)
 			}
-			lastTsNs = resp.TsNs
+			*lastTsNs = resp.TsNs
 		}
 	}
 }
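Switching lastTsNs from int64 to *int64 matters because the subscribe function is re-invoked on reconnect: with a value parameter every retry would resume from the original timestamp and replay events, while a pointer lets the progress written by *lastTsNs = resp.TsNs survive across attempts. A stripped-down illustration (hypothetical names, not SeaweedFS code):

package main

import "fmt"

// subscribe consumes events newer than *sinceNs and records progress
// through the pointer, so a retry resumes where the last attempt stopped.
func subscribe(sinceNs *int64, events []int64) error {
	for _, ts := range events {
		if ts <= *sinceNs {
			continue // already processed in an earlier attempt
		}
		*sinceNs = ts
	}
	return fmt.Errorf("stream closed") // pretend the connection dropped
}

func main() {
	var lastTsNs int64
	batches := [][]int64{{1, 2, 3}, {3, 4, 5}}
	for _, b := range batches { // retry loop
		subscribe(&lastTsNs, b)
	}
	fmt.Println(lastTsNs) // 5, not a replay from 0
}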
@@ -3,17 +3,17 @@ package azure
 import (
 	"context"
 	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"reflect"

 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
 	"github.com/chrislusf/seaweedfs/weed/remote_storage"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"io"
-	"io/ioutil"
-	"net/url"
-	"os"
-	"reflect"
 )

 func init() {
@@ -115,7 +115,7 @@ func (az *azureRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocatio
 	bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
 	defer bodyStream.Close()

-	data, err = ioutil.ReadAll(bodyStream)
+	data, err = io.ReadAll(bodyStream)

 	if err != nil {
 		return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err)
@@ -1,9 +1,13 @@
 package gcs

 import (
-	"cloud.google.com/go/storage"
 	"context"
 	"fmt"
+	"io"
+	"os"
+	"reflect"

+	"cloud.google.com/go/storage"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
@@ -11,10 +15,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
-	"io"
-	"io/ioutil"
-	"os"
-	"reflect"
 )

 func init() {
@@ -110,7 +110,7 @@ func (gcs *gcsRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation
 	if readErr != nil {
 		return nil, readErr
 	}
-	data, err = ioutil.ReadAll(rangeReader)
+	data, err = io.ReadAll(rangeReader)

 	if err != nil {
 		return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err)
@@ -3,7 +3,7 @@ package sub
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"sync"
 	"time"

@@ -119,7 +119,7 @@ type KafkaProgress struct {

 func loadProgress(offsetFile string) *KafkaProgress {
 	progress := &KafkaProgress{}
-	data, err := ioutil.ReadFile(offsetFile)
+	data, err := os.ReadFile(offsetFile)
 	if err != nil {
 		glog.Warningf("failed to read kafka progress file: %s", offsetFile)
 		return nil
@@ -137,7 +137,7 @@ func (progress *KafkaProgress) saveProgress() error {
 	if err != nil {
 		return fmt.Errorf("failed to marshal progress: %v", err)
 	}
-	err = ioutil.WriteFile(progress.offsetFile, data, 0640)
+	err = os.WriteFile(progress.offsetFile, data, 0640)
 	if err != nil {
 		return fmt.Errorf("failed to save progress to %s: %v", progress.offsetFile, err)
 	}
@@ -2,6 +2,10 @@ package s3api

 import (
 	"fmt"
+	"net/http"
+	"os"
+	"strings"
+
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb"
@@ -10,9 +14,6 @@ import (
 	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
-	"io/ioutil"
-	"net/http"
-	"strings"
 )

 type Action string
@@ -37,6 +38,31 @@ type Credential struct {
 	SecretKey string
 }

+func (action Action) isAdmin() bool {
+	return strings.HasPrefix(string(action), s3_constants.ACTION_ADMIN)
+}
+
+func (action Action) isOwner(bucket string) bool {
+	return string(action) == s3_constants.ACTION_ADMIN+":"+bucket
+}
+
+func (action Action) overBucket(bucket string) bool {
+	return strings.HasSuffix(string(action), ":"+bucket) || strings.HasSuffix(string(action), ":*")
+}
+
+func (action Action) getPermission() Permission {
+	switch act := strings.Split(string(action), ":")[0]; act {
+	case s3_constants.ACTION_ADMIN:
+		return Permission("FULL_CONTROL")
+	case s3_constants.ACTION_WRITE:
+		return Permission("WRITE")
+	case s3_constants.ACTION_READ:
+		return Permission("READ")
+	default:
+		return Permission("")
+	}
+}
+
 func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManagement {
 	iam := &IdentityAccessManagement{
 		domain: option.DomainName,
@@ -66,7 +92,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3A
 }

 func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error {
-	content, readErr := ioutil.ReadFile(fileName)
+	content, readErr := os.ReadFile(fileName)
 	if readErr != nil {
 		glog.Warningf("fail to read %s : %v", fileName, readErr)
 		return fmt.Errorf("fail to read %s : %v", fileName, readErr)
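The new Action helpers rely on identity actions being encoded as "<verb>:<bucket>" strings (for example an admin action scoped to one bucket, or a read action scoped to "*"), so both the bucket scope and the S3 permission name fall out of plain string inspection. A standalone sketch of that convention (the literal verb values here are assumptions mirroring the s3_constants names):

package main

import (
	"fmt"
	"strings"
)

// permissionOf maps the "<verb>:<bucket>" convention to an S3 ACL permission.
func permissionOf(action string) string {
	switch strings.Split(action, ":")[0] {
	case "Admin":
		return "FULL_CONTROL"
	case "Write":
		return "WRITE"
	case "Read":
		return "READ"
	}
	return ""
}

// overBucket reports whether the action applies to bucket (or to all buckets).
func overBucket(action, bucket string) bool {
	return strings.HasSuffix(action, ":"+bucket) || strings.HasSuffix(action, ":*")
}

func main() {
	fmt.Println(permissionOf("Admin:images"), overBucket("Admin:images", "images")) // FULL_CONTROL true
	fmt.Println(permissionOf("Read:*"), overBucket("Read:*", "videos"))             // READ true
}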
@@ -8,7 +8,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

-func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) error {
+func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) {

 	processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error {

@@ -32,8 +32,11 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
 		return nil
 	}

-	return util.Retry("followIamChanges", func() error {
-		return pb.WithFilerClientFollowMetadata(s3a, clientName, prefix, lastTsNs, 0, processEventFn, true)
+	util.RetryForever("followIamChanges", func() error {
+		return pb.WithFilerClientFollowMetadata(s3a, clientName, prefix, &lastTsNs, 0, processEventFn, true)
+	}, func(err error) bool {
+		glog.V(0).Infof("iam follow metadata changes: %v", err)
+		return true
 	})

 }
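Replacing util.Retry with util.RetryForever changes the failure semantics: the error callback decides whether to keep going, so the IAM follower can outlive filer restarts instead of giving up after a bounded number of attempts. A generic sketch of the shape such a helper takes (the real one lives in weed/util; this version and its back-off are assumptions):

package main

import (
	"fmt"
	"time"
)

// retryForever re-runs fn until it succeeds or shouldContinue says stop.
func retryForever(name string, fn func() error, shouldContinue func(error) bool) {
	for {
		if err := fn(); err == nil || !shouldContinue(err) {
			return
		}
		time.Sleep(time.Second) // back off before the next attempt
	}
}

func main() {
	attempts := 0
	retryForever("demo", func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("transient failure %d", attempts)
		}
		return nil
	}, func(err error) bool {
		fmt.Println("retrying after:", err)
		return true
	})
}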
@@ -23,8 +23,7 @@ import (
 	"crypto/sha256"
 	"crypto/subtle"
 	"encoding/hex"
-	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"regexp"
@@ -33,6 +32,8 @@ import (
 	"strings"
 	"time"
 	"unicode/utf8"
+
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 )

 func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) {
@@ -135,9 +136,9 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r

 	// Get hashed Payload
 	if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil {
-		buf, _ := ioutil.ReadAll(r.Body)
-		r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
-		b, _ := ioutil.ReadAll(bytes.NewBuffer(buf))
+		buf, _ := io.ReadAll(r.Body)
+		r.Body = io.NopCloser(bytes.NewBuffer(buf))
+		b, _ := io.ReadAll(bytes.NewBuffer(buf))
 		if len(b) != 0 {
 			bodyHash := sha256.Sum256(b)
 			hashedPayload = hex.EncodeToString(bodyHash[:])
@@ -433,7 +434,7 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s
 		}
 	}

-	/// Verify finally if signature is same.
+	// / Verify finally if signature is same.

 	// Get canonical request.
 	presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)
@@ -8,9 +8,7 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"sort"
@@ -19,6 +17,8 @@ import (
 	"testing"
 	"time"
 	"unicode/utf8"
+
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 )

 // TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection.
@@ -86,7 +86,7 @@ func TestIsReqAuthenticated(t *testing.T) {
 	// Validates all testcases.
 	for i, testCase := range testCases {
 		if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error {
-			ioutil.ReadAll(testCase.req.Body)
+			io.ReadAll(testCase.req.Body)
 			t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error)
 		}
 	}
@@ -167,7 +167,7 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek
 	case body == nil:
 		hashedPayload = getSHA256Hash([]byte{})
 	default:
-		payloadBytes, err := ioutil.ReadAll(body)
+		payloadBytes, err := io.ReadAll(body)
 		if err != nil {
 			return nil, err
 		}
@@ -38,6 +38,9 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
 		for k, v := range input.Metadata {
 			entry.Extended[k] = []byte(*v)
 		}
+		if input.ContentType != nil {
+			entry.Attributes.Mime = *input.ContentType
+		}
 	}); err != nil {
 		glog.Errorf("NewMultipartUpload error: %v", err)
 		return nil, s3err.ErrInternalError
@@ -65,7 +68,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa

 	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId

-	entries, _, err := s3a.list(uploadDirectory, "", "", false, 0)
+	entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList)
 	if err != nil || len(entries) == 0 {
 		glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
 		return nil, s3err.ErrNoSuchUpload
@@ -79,9 +82,13 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa

 	var finalParts []*filer_pb.FileChunk
 	var offset int64
+	var mime string

 	for _, entry := range entries {
 		if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
+			if entry.Name == "0001.part" && entry.Attributes.Mime != "" {
+				mime = entry.Attributes.Mime
+			}
 			for _, chunk := range entry.Chunks {
 				p := &filer_pb.FileChunk{
 					FileId: chunk.GetFileIdString(),
@@ -121,6 +128,11 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 				entry.Extended[k] = v
 			}
 		}
+		if pentry.Attributes.Mime != "" {
+			entry.Attributes.Mime = pentry.Attributes.Mime
+		} else if mime != "" {
+			entry.Attributes.Mime = mime
+		}
 	})

 	if err != nil {
@@ -4,7 +4,9 @@ import (
 	"context"
 	"encoding/xml"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"math"
 	"net/http"
 	"time"
@@ -205,3 +207,98 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool {
 	}
 	return true
 }
+
+// GetBucketAclHandler Get Bucket ACL
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html
+func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) {
+	// collect parameters
+	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("GetBucketAclHandler %s", bucket)
+
+	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+		s3err.WriteErrorResponse(w, err, r)
+		return
+	}
+
+	response := AccessControlPolicy{}
+	for _, ident := range s3a.iam.identities {
+		if len(ident.Credentials) == 0 {
+			continue
+		}
+		for _, action := range ident.Actions {
+			if !action.overBucket(bucket) || action.getPermission() == "" {
+				continue
+			}
+			id := ident.Credentials[0].AccessKey
+			if response.Owner.DisplayName == "" && action.isOwner(bucket) && len(ident.Credentials) > 0 {
+				response.Owner.DisplayName = ident.Name
+				response.Owner.ID = id
+			}
+			response.AccessControlList.Grant = append(response.AccessControlList.Grant, Grant{
+				Grantee: Grantee{
+					ID:          id,
+					DisplayName: ident.Name,
+					Type:        "CanonicalUser",
+					XMLXSI:      "CanonicalUser",
+					XMLNS:       "http://www.w3.org/2001/XMLSchema-instance"},
+				Permission: action.getPermission(),
+			})
+		}
+	}
+	writeSuccessResponseXML(w, response)
+}
+
+// GetBucketLifecycleConfigurationHandler Get Bucket Lifecycle configuration
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
+	// collect parameters
+	bucket, _ := getBucketAndObject(r)
+	glog.V(3).Infof("GetBucketAclHandler %s", bucket)
+
+	if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone {
+		s3err.WriteErrorResponse(w, err, r)
+		return
+	}
+	fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil)
+	if err != nil {
+		glog.Errorf("GetBucketLifecycleConfigurationHandler: %s", err)
+		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
+		return
+	}
+	ttls := fc.GetCollectionTtls(bucket)
+	if len(ttls) == 0 {
+		s3err.WriteErrorResponse(w, s3err.ErrNoSuchLifecycleConfiguration, r)
+	}
+	response := Lifecycle{}
+	for prefix, internalTtl := range ttls {
+		ttl, _ := needle.ReadTTL(internalTtl)
+		days := int(ttl.Minutes() / 60 / 24)
+		if days == 0 {
+			continue
+		}
+		response.Rules = append(response.Rules, Rule{
+			Status: Enabled, Filter: Filter{
+				Prefix: Prefix{string: prefix, set: true},
+				set:    true,
+			},
+			Expiration: Expiration{Days: days, set: true},
+		})
+	}
+	writeSuccessResponseXML(w, response)
+}
+
+// PutBucketLifecycleConfigurationHandler Put Bucket Lifecycle configuration
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) {
+
+	s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+
+}
+
+// DeleteBucketMetricsConfiguration Delete Bucket Lifecycle
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {

+	s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
+
+}
@@ -21,6 +21,7 @@ func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) err
 	}, s3a.option.Filer.ToGrpcAddress(), s3a.option.GrpcDialOption)

 }
+
 func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string {
 	return location.Url
 }
@@ -1,20 +1,21 @@
 package s3api

 import (
 	"bytes"
 	"crypto/md5"
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/pquerna/cachecontrol/cacheobject"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"sort"
 	"strings"
 	"time"

+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/pquerna/cachecontrol/cacheobject"
+
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"

 	"github.com/gorilla/mux"
@@ -36,6 +37,16 @@ func init() {
 	}}
 }

+func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser {
+	mimeBuffer := make([]byte, 512)
+	size, _ := dataReader.Read(mimeBuffer)
+	if size > 0 {
+		r.Header.Set("Content-Type", http.DetectContentType(mimeBuffer[:size]))
+		return io.NopCloser(io.MultiReader(bytes.NewReader(mimeBuffer[:size]), dataReader))
+	}
+	return io.NopCloser(dataReader)
+}
+
 func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {

 	// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
@@ -95,6 +106,10 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
 	} else {
 		uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object))

+		if r.Header.Get("Content-Type") == "" {
+			dataReader = mimeDetect(r, dataReader)
+		}
+
 		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)

 		if errCode != s3err.ErrNone {
@@ -198,7 +213,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
 	bucket, _ := getBucketAndObject(r)
 	glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)

-	deleteXMLBytes, err := ioutil.ReadAll(r.Body)
+	deleteXMLBytes, err := io.ReadAll(r.Body)
 	if err != nil {
 		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
 		return
@@ -394,7 +409,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader

 	etag = fmt.Sprintf("%x", hash.Sum(nil))

-	resp_body, ra_err := ioutil.ReadAll(resp.Body)
+	resp_body, ra_err := io.ReadAll(resp.Body)
 	if ra_err != nil {
 		glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
 		return etag, s3err.ErrInternalError
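mimeDetect leans on http.DetectContentType, which sniffs at most the first 512 bytes; since those bytes are consumed from dataReader, the function stitches them back in front of the stream with io.MultiReader so the upload still sees the full body. The buffering trick in isolation (a standalone sketch, not the commit's function):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// sniff peeks up to 512 bytes for content-type detection, then returns
// a reader that still yields the complete original stream.
func sniff(r io.Reader) (string, io.Reader) {
	buf := make([]byte, 512)
	n, _ := r.Read(buf)
	if n == 0 {
		return "", r
	}
	ctype := http.DetectContentType(buf[:n])
	return ctype, io.MultiReader(bytes.NewReader(buf[:n]), r)
}

func main() {
	ctype, body := sniff(strings.NewReader("<html><body>hi</body></html>"))
	rest, _ := io.ReadAll(body)
	fmt.Println(ctype, len(rest)) // text/html; charset=utf-8 28
}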
@@ -5,17 +5,17 @@ import (
 	"encoding/base64"
 	"errors"
 	"fmt"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strings"

 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/s3api/policy"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"github.com/dustin/go-humanize"
 	"github.com/gorilla/mux"
-	"io"
-	"io/ioutil"
-	"mime/multipart"
-	"net/http"
-	"net/url"
-	"strings"
 )

 func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
@@ -152,7 +152,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R

 // Extract form fields and file data from a HTTP POST Policy
 func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
-	/// HTML Form values
+	// / HTML Form values
 	fileName = ""

 	// Canonicalize the form values into http.Header.
@@ -175,7 +175,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
 		b.WriteString(v)
 	}
 	fileSize = int64(b.Len())
-	filePart = ioutil.NopCloser(b)
+	filePart = io.NopCloser(b)
 	return filePart, fileName, fileSize, formValues, nil
 }
@@ -36,6 +36,10 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
 		createMultipartUploadInput.Metadata[k] = aws.String(string(v))
 	}

+	contentType := r.Header.Get("Content-Type")
+	if contentType != "" {
+		createMultipartUploadInput.ContentType = &contentType
+	}
 	response, errCode := s3a.createMultipartUpload(createMultipartUploadInput)

 	glog.V(2).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode)
@@ -213,8 +217,11 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
 	uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
 		s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID, bucket)

-	etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
+	if partID == 1 && r.Header.Get("Content-Type") == "" {
+		dataReader = mimeDetect(r, dataReader)
+	}
+
+	etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
 	if errCode != s3err.ErrNone {
 		s3err.WriteErrorResponse(w, errCode, r)
 		return
@@ -4,6 +4,14 @@ import (
 	"net/http"
 )

+// GetObjectAclHandler Put object ACL
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+func (s3a *S3ApiServer) GetObjectAclHandler(w http.ResponseWriter, r *http.Request) {
+
+	w.WriteHeader(http.StatusNoContent)
+
+}
+
 // PutObjectAclHandler Put object ACL
 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html
 func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Request) {
@@ -3,13 +3,13 @@ package s3api
 import (
 	"encoding/xml"
 	"fmt"
+	"io"
+	"net/http"

 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
 	"github.com/chrislusf/seaweedfs/weed/util"
-	"io"
-	"io/ioutil"
-	"net/http"
 )

 // GetObjectTaggingHandler - GET object tagging
@@ -49,7 +49,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
 	dir, name := target.DirAndName()

 	tagging := &Tagging{}
-	input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
+	input, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
 	if err != nil {
 		glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
 		s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
@@ -90,7 +90,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
 		return
 	}

-	w.WriteHeader(http.StatusNoContent)
+	w.WriteHeader(http.StatusOK)

 }
weed/s3api/s3api_policy.go (new file, 147 lines)
@@ -0,0 +1,147 @@
+package s3api
+
+import (
+	"encoding/xml"
+	"time"
+)
+
+// Status represents lifecycle configuration status
+type ruleStatus string
+
+// Supported status types
+const (
+	Enabled  ruleStatus = "Enabled"
+	Disabled ruleStatus = "Disabled"
+)
+
+// Lifecycle - Configuration for bucket lifecycle.
+type Lifecycle struct {
+	XMLName xml.Name `xml:"LifecycleConfiguration"`
+	Rules   []Rule   `xml:"Rule"`
+}
+
+// Rule - a rule for lifecycle configuration.
+type Rule struct {
+	XMLName    xml.Name   `xml:"Rule"`
+	ID         string     `xml:"ID,omitempty"`
+	Status     ruleStatus `xml:"Status"`
+	Filter     Filter     `xml:"Filter,omitempty"`
+	Prefix     Prefix     `xml:"Prefix,omitempty"`
+	Expiration Expiration `xml:"Expiration,omitempty"`
+	Transition Transition `xml:"Transition,omitempty"`
+}
+
+// Filter - a filter for a lifecycle configuration Rule.
+type Filter struct {
+	XMLName xml.Name `xml:"Filter"`
+	set     bool
+
+	Prefix Prefix
+
+	And    And
+	andSet bool
+
+	Tag    Tag
+	tagSet bool
+}
+
+// Prefix holds the prefix xml tag in <Rule> and <Filter>
+type Prefix struct {
+	string
+	set bool
+}
+
+// MarshalXML - decodes XML data.
+func (p Prefix) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if !p.set {
+		return nil
+	}
+	return e.EncodeElement(p.string, startElement)
+}
+
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if err := e.EncodeToken(start); err != nil {
+		return err
+	}
+	if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
+		return err
+	}
+	return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+// And - a tag to combine a prefix and multiple tags for lifecycle configuration rule.
+type And struct {
+	XMLName xml.Name `xml:"And"`
+	Prefix  Prefix   `xml:"Prefix,omitempty"`
+	Tags    []Tag    `xml:"Tag,omitempty"`
+}
+
+// Expiration - expiration actions for a rule in lifecycle configuration.
+type Expiration struct {
+	XMLName      xml.Name           `xml:"Expiration"`
+	Days         int                `xml:"Days,omitempty"`
+	Date         ExpirationDate     `xml:"Date,omitempty"`
+	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker"`
+
+	set bool
+}
+
+// MarshalXML encodes expiration field into an XML form.
+func (e Expiration) MarshalXML(enc *xml.Encoder, startElement xml.StartElement) error {
+	if !e.set {
+		return nil
+	}
+	type expirationWrapper Expiration
+	return enc.EncodeElement(expirationWrapper(e), startElement)
+}
+
+// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
+type ExpireDeleteMarker struct {
+	val bool
+	set bool
+}
+
+// MarshalXML encodes delete marker boolean into an XML form.
+func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if !b.set {
+		return nil
+	}
+	return e.EncodeElement(b.val, startElement)
+}
+
+// ExpirationDate is a embedded type containing time.Time to unmarshal
+// Date in Expiration
+type ExpirationDate struct {
+	time.Time
+}
+
+// MarshalXML encodes expiration date if it is non-zero and encodes
+// empty string otherwise
+func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if eDate.Time.IsZero() {
+		return nil
+	}
+	return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
+}
+
+// Transition - transition actions for a rule in lifecycle configuration.
+type Transition struct {
+	XMLName      xml.Name  `xml:"Transition"`
+	Days         int       `xml:"Days,omitempty"`
+	Date         time.Time `xml:"Date,omitempty"`
+	StorageClass string    `xml:"StorageClass,omitempty"`
+
+	set bool
+}
+
+// MarshalXML encodes transition field into an XML form.
+func (t Transition) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
+	if !t.set {
+		return nil
+	}
+	type transitionWrapper Transition
+	return enc.EncodeElement(transitionWrapper(t), start)
+}
+
+// TransitionDays is a type alias to unmarshal Days in Transition
+type TransitionDays int
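The recurring unexported set flag plus custom MarshalXML in this new file works around an encoding/xml limitation: `xml:",omitempty"` does not skip empty struct values, so without the guard an unset <Expiration> or <Prefix> would still be emitted. A compact standalone demonstration of the pattern (not the commit's types):

package main

import (
	"encoding/xml"
	"fmt"
)

type days struct {
	Days int `xml:"Days"`
	set  bool
}

// MarshalXML emits nothing at all when the value was never set.
func (d days) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if !d.set {
		return nil
	}
	type wrapper days // new type drops the method, avoiding recursion
	return e.EncodeElement(wrapper(d), start)
}

type rule struct {
	XMLName    xml.Name `xml:"Rule"`
	Expiration days     `xml:"Expiration,omitempty"`
}

func main() {
	unset, _ := xml.Marshal(rule{})
	set, _ := xml.Marshal(rule{Expiration: days{Days: 7, set: true}})
	fmt.Println(string(unset)) // <Rule></Rule>
	fmt.Println(string(set))   // <Rule><Expiration><Days>7</Days></Expiration></Rule>
}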
@@ -115,14 +115,30 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2")
 	// GetObject, but directory listing is not supported
 	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
-	// ListObjectsV1 (Legacy)
-	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))

 	// PostPolicy
 	bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST"))

 	// DeleteMultipleObjects
 	bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "")

+	// GetBucketACL
+	bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.GetBucketAclHandler, ACTION_READ)).Queries("acl", "")
+
+	// GetObjectACL
+	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectAclHandler, ACTION_READ)).Queries("acl", "")
+
+	// GetBucketLifecycleConfiguration
+	bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)).Queries("lifecycle", "")
+
+	// PutBucketLifecycleConfiguration
+	bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)).Queries("lifecycle", "")
+
+	// DeleteBucketLifecycleConfiguration
+	bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)).Queries("lifecycle", "")
+
+	// ListObjectsV1 (Legacy)
+	bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST"))
 	/*

 	// not implemented
@@ -132,8 +148,6 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
 	bucket.Methods("GET").HandlerFunc(s3a.GetBucketPolicyHandler).Queries("policy", "")
-	// GetObjectACL
-	bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectACLHandler).Queries("acl", "")
-	// GetBucketACL
-	bucket.Methods("GET").HandlerFunc(s3a.GetBucketACLHandler).Queries("acl", "")
 	// PutBucketPolicy
 	bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "")
 	// DeleteBucketPolicy
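Route order matters here: gorilla/mux matches routes in registration order, so the query-constrained ?acl and ?lifecycle routes must be registered before the catch-all ListObjectsV1 GET handler, which this hunk moves to the end. A minimal sketch of that precedence rule (hypothetical handlers):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	b := r.PathPrefix("/{bucket}").Subrouter()
	// specific query-constrained route first ...
	b.Methods("GET").HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "acl")
	}).Queries("acl", "")
	// ... catch-all GET last, or it would shadow the route above
	b.Methods("GET").HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "list")
	})

	for _, u := range []string{"/b?acl=", "/b"} {
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, httptest.NewRequest("GET", u, nil))
		fmt.Println(u, "->", rec.Body.String()) // /b?acl= -> acl, /b -> list
	}
}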
@@ -8,12 +8,12 @@ import (
 )

 type AccessControlList struct {
-	Grant []Grant `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grant,omitempty"`
+	Grant []Grant `xml:"Grant,omitempty"`
 }

 type AccessControlPolicy struct {
-	Owner             CanonicalUser     `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner"`
-	AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"`
+	Owner             CanonicalUser     `xml:"Owner"`
+	AccessControlList AccessControlList `xml:"AccessControlList"`
 }

 type AmazonCustomerByEmail struct {
@@ -467,11 +467,17 @@ func (t *GetObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e
 }

 type Grant struct {
-	Grantee    Grantee    `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Grantee"`
-	Permission Permission `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Permission"`
+	Grantee    Grantee    `xml:"Grantee"`
+	Permission Permission `xml:"Permission"`
 }

 type Grantee struct {
+	XMLNS       string `xml:"xmlns:xsi,attr"`
+	XMLXSI      string `xml:"xsi:type,attr"`
+	Type        string `xml:"Type"`
 	ID          string `xml:"ID,omitempty"`
 	DisplayName string `xml:"DisplayName,omitempty"`
 	URI         string `xml:"URI,omitempty"`
 }

 type Group struct {
@@ -51,6 +51,7 @@ const (
 	ErrBucketAlreadyExists
 	ErrBucketAlreadyOwnedByYou
 	ErrNoSuchBucket
+	ErrNoSuchLifecycleConfiguration
 	ErrNoSuchKey
 	ErrNoSuchUpload
 	ErrInvalidBucketName
@@ -163,6 +164,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
 		Description:    "The specified bucket does not exist",
 		HTTPStatusCode: http.StatusNotFound,
 	},
+	ErrNoSuchLifecycleConfiguration: {
+		Code:           "NoSuchLifecycleConfiguration",
+		Description:    "The lifecycle configuration does not exist",
+		HTTPStatusCode: http.StatusNotFound,
+	},
 	ErrNoSuchKey: {
 		Code:           "NoSuchKey",
 		Description:    "The specified key does not exist.",
@@ -196,7 +202,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrInvalidTag: {
-		Code:           "InvalidArgument",
+		Code:           "InvalidTag",
 		Description:    "The Tag value you have provided is invalid",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
@@ -14,8 +14,9 @@ type TagSet struct {
 }

 type Tagging struct {
-	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"`
+	XMLName xml.Name `xml:"Tagging"`
 	TagSet  TagSet   `xml:"TagSet"`
+	Xmlns   string   `xml:"xmlns,attr"`
 }

 func (t *Tagging) ToTags() map[string]string {
@@ -27,7 +28,7 @@ func (t *Tagging) ToTags() map[string]string {
 }

 func FromTags(tags map[string]string) (t *Tagging) {
-	t = &Tagging{}
+	t = &Tagging{Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/"}
 	for k, v := range tags {
 		t.TagSet.Tag = append(t.TagSet.Tag, Tag{
 			Key: k,
@@ -32,6 +32,7 @@ func TestXMLUnmarshall(t *testing.T) {

 func TestXMLMarshall(t *testing.T) {
 	tags := &Tagging{
+		Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
 		TagSet: TagSet{
 			[]Tag{
 				{
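Moving the S3 namespace out of the xml.Name tag and into an explicit Xmlns attribute changes how the document round-trips: a namespace baked into the field tags is also enforced on Unmarshal, which likely trips clients that send unqualified <Tagging> elements, while a plain xmlns attribute keeps the output namespaced but accepts both. A before/after sketch with standalone types (the motivation stated here is an inference, not spelled out in the commit):

package main

import (
	"encoding/xml"
	"fmt"
)

type tagging struct {
	XMLName xml.Name `xml:"Tagging"`
	Xmlns   string   `xml:"xmlns,attr"`
	Key     string   `xml:"TagSet>Tag>Key"`
}

func main() {
	out, _ := xml.Marshal(tagging{
		Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
		Key:   "color",
	})
	// <Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><TagSet><Tag><Key>color</Key></Tag></TagSet></Tagging>
	fmt.Println(string(out))

	// Unqualified input still unmarshals, since the field tags carry no namespace.
	var in tagging
	err := xml.Unmarshal([]byte("<Tagging><TagSet><Tag><Key>color</Key></Tag></TagSet></Tagging>"), &in)
	fmt.Println(in.Key, err) // color <nil>
}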
@@ -4,18 +4,18 @@ import (
 	"context"
 	"crypto/tls"
 	"crypto/x509"
-	"github.com/chrislusf/seaweedfs/weed/util"
-	grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/peer"
-	"google.golang.org/grpc/status"
-	"io/ioutil"
 	"os"
 	"strings"

+	grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/status"

 	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )

 type Authenticator struct {
@@ -37,7 +37,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption
 			err)
 		return nil, nil
 	}
-	caCert, err := ioutil.ReadFile(config.GetString("grpc.ca"))
+	caCert, err := os.ReadFile(config.GetString("grpc.ca"))
 	if err != nil {
 		glog.V(1).Infof("read ca cert file %s error: %v", config.GetString("grpc.ca"), err)
 		return nil, nil
@@ -82,7 +82,7 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption {
 		glog.V(1).Infof("load cert/key error: %v", err)
 		return grpc.WithInsecure()
 	}
-	caCert, err := ioutil.ReadFile(caFileName)
+	caCert, err := os.ReadFile(caFileName)
 	if err != nil {
 		glog.V(1).Infof("read ca cert file error: %v", err)
 		return grpc.WithInsecure()
@@ -9,6 +9,7 @@ import (
 	"io/fs"
 	"mime/multipart"
 	"net/http"
+	"net/url"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -250,7 +251,16 @@ func handleStaticResources2(r *mux.Router) {
 }

 func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, filename string) {
+	responseContentDisposition := r.FormValue("response-content-disposition")
+	if responseContentDisposition != "" {
+		w.Header().Set("Content-Disposition", responseContentDisposition)
+		return
+	}
+	if w.Header().Get("Content-Disposition") != "" {
+		return
+	}
 	if filename != "" {
+		filename = url.QueryEscape(filename)
 		contentDisposition := "inline"
 		if r.FormValue("dl") != "" {
 			if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"path/filepath"
+	"time"

 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -33,7 +34,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
 		return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err)
 	}

-	moveErr := fs.moveEntry(ctx, oldParent, oldEntry, newParent, req.NewName, req.Signatures)
+	moveErr := fs.moveEntry(ctx, nil, oldParent, oldEntry, newParent, req.NewName, req.Signatures)
 	if moveErr != nil {
 		fs.filer.RollbackTransaction(ctx)
 		return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, moveErr)
@@ -47,11 +48,49 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
 	return &filer_pb.AtomicRenameEntryResponse{}, nil
 }

-func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, signatures []int32) error {
+func (fs *FilerServer) StreamRenameEntry(req *filer_pb.StreamRenameEntryRequest, stream filer_pb.SeaweedFiler_StreamRenameEntryServer) (err error) {

-	if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, func() error {
+	glog.V(1).Infof("StreamRenameEntry %v", req)
+
+	oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory))
+	newParent := util.FullPath(filepath.ToSlash(req.NewDirectory))
+
+	if err := fs.filer.CanRename(oldParent, newParent); err != nil {
+		return err
+	}
+
+	ctx := context.Background()
+
+	ctx, err = fs.filer.BeginTransaction(ctx)
+	if err != nil {
+		return err
+	}
+
+	oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName))
+	if err != nil {
+		fs.filer.RollbackTransaction(ctx)
+		return fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err)
+	}
+
+	moveErr := fs.moveEntry(ctx, stream, oldParent, oldEntry, newParent, req.NewName, req.Signatures)
+	if moveErr != nil {
+		fs.filer.RollbackTransaction(ctx)
+		return fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, moveErr)
+	} else {
+		if commitError := fs.filer.CommitTransaction(ctx); commitError != nil {
+			fs.filer.RollbackTransaction(ctx)
+			return fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, commitError)
+		}
+	}
+
+	return nil
+}
+
+func (fs *FilerServer) moveEntry(ctx context.Context, stream filer_pb.SeaweedFiler_StreamRenameEntryServer, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, signatures []int32) error {
+
+	if err := fs.moveSelfEntry(ctx, stream, oldParent, entry, newParent, newName, func() error {
 		if entry.IsDirectory() {
-			if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, signatures); err != nil {
+			if err := fs.moveFolderSubEntries(ctx, stream, oldParent, entry, newParent, newName, signatures); err != nil {
 				return err
 			}
 		}
@@ -63,7 +102,7 @@ func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, e
 	return nil
 }

-func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, signatures []int32) error {
+func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, stream filer_pb.SeaweedFiler_StreamRenameEntryServer, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, signatures []int32) error {

 	currentDirPath := oldParent.Child(entry.Name())
 	newDirPath := newParent.Child(newName)
@@ -84,7 +123,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
 	for _, item := range entries {
 		lastFileName = item.Name()
 		// println("processing", lastFileName)
-		err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), signatures)
+		err := fs.moveEntry(ctx, stream, currentDirPath, item, newDirPath, item.Name(), signatures)
 		if err != nil {
 			return err
 		}
@@ -96,7 +135,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
 	return nil
 }

-func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error, signatures []int32) error {
+func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.SeaweedFiler_StreamRenameEntryServer, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error, signatures []int32) error {

 	oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)

@@ -118,6 +157,24 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
 	if createErr := fs.filer.CreateEntry(ctx, newEntry, false, false, signatures); createErr != nil {
 		return createErr
 	}
+	if stream != nil {
+		if err := stream.Send(&filer_pb.StreamRenameEntryResponse{
+			Directory: string(newParent),
+			EventNotification: &filer_pb.EventNotification{
+				OldEntry: &filer_pb.Entry{
+					Name: entry.Name(),
+				},
+				NewEntry:           newEntry.ToProtoEntry(),
+				DeleteChunks:       false,
+				NewParentPath:      string(newParent),
+				IsFromOtherCluster: false,
+				Signatures:         nil,
+			},
+			TsNs: time.Now().UnixNano(),
+		}); err != nil {
+			return err
+		}
+	}

 	if moveFolderSubEntries != nil {
 		if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
@@ -130,6 +187,24 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
 	if deleteErr != nil {
 		return deleteErr
 	}
+	if stream != nil {
+		if err := stream.Send(&filer_pb.StreamRenameEntryResponse{
+			Directory: string(oldParent),
+			EventNotification: &filer_pb.EventNotification{
+				OldEntry: &filer_pb.Entry{
+					Name: entry.Name(),
+				},
+				NewEntry:           nil,
+				DeleteChunks:       false,
+				NewParentPath:      "",
+				IsFromOtherCluster: false,
+				Signatures:         nil,
+			},
+			TsNs: time.Now().UnixNano(),
+		}); err != nil {
+			return err
+		}
+	}

 	return nil
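Note the shape of the streamed events: moveSelfEntry emits a create-style event under the new parent (OldEntry carrying only the name, NewEntry the full entry) and later a delete-style event under the old parent (NewEntry nil), so a subscriber can process a rename with the same create/delete logic it applies to ordinary metadata events. A trimmed sketch of how a consumer might interpret the pair (stand-in types, not SeaweedFS code):

package main

import "fmt"

// event is a trimmed stand-in for filer_pb.StreamRenameEntryResponse.
type event struct {
	directory string
	name      string
	hasNew    bool // NewEntry present => create half; absent => delete half
}

// apply dispatches each half of the rename pair like any other event.
func apply(events []event) {
	for _, ev := range events {
		if ev.hasNew {
			fmt.Printf("create %s/%s\n", ev.directory, ev.name)
		} else {
			fmt.Printf("delete %s/%s\n", ev.directory, ev.name)
		}
	}
}

func main() {
	apply([]event{
		{directory: "/new/dir", name: "f", hasNew: true},
		{directory: "/old/dir", name: "f"},
	})
}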
@@ -101,6 +101,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq
 		glog.V(0).Infof("read on disk %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime)
 		processedTsNs, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn)
 		if readPersistedLogErr != nil {
+			glog.V(0).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, req.PathPrefix, lastReadTime, readPersistedLogErr)
 			return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr)
 		}
@@ -7,7 +7,6 @@ import (
 	"io"
 	"mime"
 	"net/http"
-	"net/url"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -114,17 +113,15 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	w.Header().Set("Access-Control-Expose-Headers", strings.Join(seaweedHeaders, ","))

 	//set tag count
-	if r.Method == "GET" {
-		tagCount := 0
-		for k := range entry.Extended {
-			if strings.HasPrefix(k, xhttp.AmzObjectTagging+"-") {
-				tagCount++
-			}
-		}
-		if tagCount > 0 {
-			w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
+	tagCount := 0
+	for k := range entry.Extended {
+		if strings.HasPrefix(k, xhttp.AmzObjectTagging+"-") {
+			tagCount++
 		}
 	}
+	if tagCount > 0 {
+		w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount))
+	}

 	if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
 		w.WriteHeader(http.StatusNotModified)
@@ -133,7 +130,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	setEtag(w, etag)

 	filename := entry.Name()
-	filename = url.QueryEscape(filename)
 	adjustHeaderContentDisposition(w, r, filename)

 	totalSize := int64(entry.Size())
@@ -218,7 +218,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 	entry.Extended = SaveAmzMetaData(r, entry.Extended, false)

 	for k, v := range r.Header {
-		if len(v) > 0 && (strings.HasPrefix(k, needle.PairNamePrefix) || k == "Cache-Control" || k == "Expires") {
+		if len(v) > 0 && (strings.HasPrefix(k, needle.PairNamePrefix) || k == "Cache-Control" || k == "Expires" || k == "Content-Disposition") {
 			entry.Extended[k] = []byte(v[0])
 		}
 	}
@@ -327,6 +327,8 @@ func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool
 			tag := strings.Split(v, "=")
 			if len(tag) == 2 {
 				metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
+			} else if len(tag) == 1 {
+				metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = nil
 			}
 		}
 	}
@@ -5,7 +5,6 @@ import (
 	"crypto/md5"
 	"hash"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"sort"
 	"strings"
@@ -31,7 +30,7 @@ var bufPool = sync.Pool{
 func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {

 	md5Hash = md5.New()
-	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
+	var partReader = io.NopCloser(io.TeeReader(reader, md5Hash))

 	var wg sync.WaitGroup
 	var bytesBufferCounter int64
@@ -57,7 +56,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque

 		dataSize, err := bytesBuffer.ReadFrom(limitedReader)

-		// data, err := ioutil.ReadAll(limitedReader)
+		// data, err := io.ReadAll(limitedReader)
 		if err != nil || dataSize == 0 {
 			bufPool.Put(bytesBuffer)
 			atomic.AddInt64(&bytesBufferCounter, -1)
@@ -3,20 +3,19 @@ package weed_server
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/pb"
-	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
-	"io/ioutil"
 	"math"
 	"os"
 	"time"

 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

@@ -68,7 +67,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 	dataBaseFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))
 	indexBaseFileName = storage.VolumeFileName(location.IdxDirectory, volFileInfoResp.Collection, int(req.VolumeId))

-	ioutil.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755)
+	os.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755)

 	defer func() {
 		if err != nil {
@@ -3,10 +3,7 @@ package weed_server
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/pb"
-	"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
 	"io"
-	"io/ioutil"
 	"math"
 	"os"
 	"path"
@@ -14,11 +11,13 @@ import (

 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )

@@ -200,12 +199,12 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
 	existingShardCount := 0

 	for _, location := range vs.store.Locations {
-		fileInfos, err := ioutil.ReadDir(location.Directory)
+		fileInfos, err := os.ReadDir(location.Directory)
 		if err != nil {
 			continue
 		}
 		if location.IdxDirectory != location.Directory {
-			idxFileInfos, err := ioutil.ReadDir(location.IdxDirectory)
+			idxFileInfos, err := os.ReadDir(location.IdxDirectory)
 			if err != nil {
 				continue
 			}
@@ -62,7 +62,7 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
 		return nil
 	}

-	fc, err := readFilerConf(commandEnv)
+	fc, err := filer.ReadFilerConf(commandEnv.option.FilerAddress, commandEnv.option.GrpcDialOption, commandEnv.MasterClient)
 	if err != nil {
 		return err
 	}
@@ -122,20 +122,3 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
 	return nil

 }
-
-func readFilerConf(commandEnv *CommandEnv) (*filer.FilerConf, error) {
-	var buf bytes.Buffer
-	if err := commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-		return filer.ReadEntry(commandEnv.MasterClient, client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, &buf)
-	}); err != nil && err != filer_pb.ErrNotFound {
-		return nil, fmt.Errorf("read %s/%s: %v", filer.DirectoryEtcSeaweedFS, filer.FilerConfName, err)
-	}
-
-	fc := filer.NewFilerConf()
-	if buf.Len() > 0 {
-		if err := fc.LoadFromBytes(buf.Bytes()); err != nil {
-			return nil, fmt.Errorf("parse %s/%s: %v", filer.DirectoryEtcSeaweedFS, filer.FilerConfName, err)
-		}
-	}
-	return fc, nil
-}
@@ -5,10 +5,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/pb"
-	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"io"
-	"io/ioutil"
 	"math"
 	"os"
 	"path/filepath"
@@ -16,9 +13,11 @@ import (

 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -74,7 +73,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
 	c.env = commandEnv

 	// create a temp folder
-	tempFolder, err := ioutil.TempDir("", "sw_fsck")
+	tempFolder, err := os.MkdirTemp("", "sw_fsck")
 	if err != nil {
 		return fmt.Errorf("failed to create temp folder: %v", err)
 	}
@@ -402,7 +401,7 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder stri
 		return
 	}

-	filerFileIdsData, err := ioutil.ReadFile(getFilerFileIdFile(tempFolder, volumeId))
+	filerFileIdsData, err := os.ReadFile(getFilerFileIdFile(tempFolder, volumeId))
 	if err != nil {
 		return
 	}
@ -2,8 +2,6 @@ package storage
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
@ -14,6 +12,7 @@ import (
|
|||
"github.com/chrislusf/seaweedfs/weed/stats"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/needle"
|
||||
"github.com/chrislusf/seaweedfs/weed/storage/types"
|
||||
"github.com/chrislusf/seaweedfs/weed/util"
|
||||
)
|
||||
|
||||
|
@ -85,9 +84,9 @@ func getValidVolumeName(basename string) string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapKind) bool {
|
||||
basename := fileInfo.Name()
|
||||
if fileInfo.IsDir() {
|
||||
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind) bool {
|
||||
basename := dirEntry.Name()
|
||||
if dirEntry.IsDir() {
|
||||
return false
|
||||
}
|
||||
volumeName := getValidVolumeName(basename)
|
||||
|
@ -103,7 +102,7 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
|
|||
// check for incomplete volume
|
||||
noteFile := l.Directory + "/" + volumeName + ".note"
|
||||
if util.FileExists(noteFile) {
|
||||
note, _ := ioutil.ReadFile(noteFile)
|
||||
note, _ := os.ReadFile(noteFile)
|
||||
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
|
||||
removeVolumeFiles(l.Directory + "/" + volumeName)
|
||||
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
|
||||
|
@ -143,18 +142,18 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
|
|||
|
||||
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int) {
|
||||
|
||||
task_queue := make(chan os.FileInfo, 10*concurrency)
|
||||
task_queue := make(chan os.DirEntry, 10*concurrency)
|
||||
go func() {
|
||||
foundVolumeNames := make(map[string]bool)
|
||||
if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil {
|
||||
for _, fi := range fileInfos {
|
||||
volumeName := getValidVolumeName(fi.Name())
|
||||
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
|
||||
for _, entry := range dirEntries {
|
||||
volumeName := getValidVolumeName(entry.Name())
|
||||
if volumeName == "" {
|
||||
continue
|
||||
}
|
||||
if _, found := foundVolumeNames[volumeName]; !found {
|
||||
foundVolumeNames[volumeName] = true
|
||||
task_queue <- fi
|
||||
task_queue <- entry
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -332,12 +331,12 @@ func (l *DiskLocation) Close() {
 	return
 }

-func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) {
-	if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil {
-		for _, fileInfo := range fileInfos {
-			volId, _, err := volumeIdFromFileName(fileInfo.Name())
+func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
+	if dirEntries, err := os.ReadDir(l.Directory); err == nil {
+		for _, entry := range dirEntries {
+			volId, _, err := volumeIdFromFileName(entry.Name())
 			if vid == volId && err == nil {
-				return fileInfo, true
+				return entry, true
 			}
 		}
 	}

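Beyond renaming, the switch from ioutil.ReadDir to os.ReadDir changes the element type from os.FileInfo to os.DirEntry, which is why loadExistingVolume and LocateVolume change signature. A small self-contained sketch of the practical difference (directory path is illustrative): os.ReadDir does not stat every entry up front; a stat happens only if Info() is actually called.

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	entries, err := os.ReadDir(".") // assumption: current directory is readable
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue // skip directories, as loadExistingVolume does
		}
		info, err := entry.Info() // the stat happens lazily, only here
		if err != nil {
			continue
		}
		fmt.Printf("%s: %d bytes\n", entry.Name(), info.Size())
	}
}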
@@ -2,7 +2,6 @@ package storage

 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"regexp"
@@ -118,25 +117,25 @@ func (l *DiskLocation) loadEcShards(shards []string, collection string, vid need

 func (l *DiskLocation) loadAllEcShards() (err error) {

-	fileInfos, err := ioutil.ReadDir(l.Directory)
+	dirEntries, err := os.ReadDir(l.Directory)
 	if err != nil {
 		return fmt.Errorf("load all ec shards in dir %s: %v", l.Directory, err)
 	}
 	if l.IdxDirectory != l.Directory {
-		indexFileInfos, err := ioutil.ReadDir(l.IdxDirectory)
+		indexDirEntries, err := os.ReadDir(l.IdxDirectory)
 		if err != nil {
 			return fmt.Errorf("load all ec shards in dir %s: %v", l.IdxDirectory, err)
 		}
-		fileInfos = append(fileInfos, indexFileInfos...)
+		dirEntries = append(dirEntries, indexDirEntries...)
 	}

-	sort.Slice(fileInfos, func(i, j int) bool {
-		return fileInfos[i].Name() < fileInfos[j].Name()
+	sort.Slice(dirEntries, func(i, j int) bool {
+		return dirEntries[i].Name() < dirEntries[j].Name()
 	})

 	var sameVolumeShards []string
 	var prevVolumeId needle.VolumeId
-	for _, fileInfo := range fileInfos {
+	for _, fileInfo := range dirEntries {
 		if fileInfo.IsDir() {
 			continue
 		}

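For context, loadAllEcShards sorts the merged directory listing so that all shards of one volume become adjacent, then walks the list grouping runs that share a volume id. A toy, self-contained sketch of that grouping idea (shard names and the prefix convention are assumed for illustration, not the real ec file layout):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	names := []string{"3.ec01", "1.ec00", "3.ec00", "1.ec01"} // assumed sample shard names
	sort.Slice(names, func(i, j int) bool { return names[i] < names[j] })

	var group []string
	prev := ""
	for _, n := range names {
		vol := strings.SplitN(n, ".", 2)[0] // volume id prefix of the shard name
		if vol != prev && len(group) > 0 {
			fmt.Println("volume", prev, "shards:", group)
			group = nil
		}
		group = append(group, n)
		prev = vol
	}
	if len(group) > 0 {
		fmt.Println("volume", prev, "shards:", group)
	}
}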
@@ -6,7 +6,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"net/http"
 	"path"
@@ -108,7 +107,7 @@ func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) error {
 	pu.FileName = ""
 	dataSize, err := pu.bytesBuffer.ReadFrom(io.LimitReader(r.Body, sizeLimit+1))
 	if err == io.EOF || dataSize == sizeLimit+1 {
-		io.Copy(ioutil.Discard, r.Body)
+		io.Copy(io.Discard, r.Body)
 	}
 	pu.Data = pu.bytesBuffer.Bytes()
 	r.Body.Close()
@@ -118,7 +117,7 @@ func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) error {
 func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
 	defer func() {
 		if e != nil && r.Body != nil {
-			io.Copy(ioutil.Discard, r.Body)
+			io.Copy(io.Discard, r.Body)
 			r.Body.Close()
 		}
 	}()

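The parsePut logic above reads at most sizeLimit+1 bytes, so reading exactly sizeLimit+1 proves the body is over the limit; the remainder is then drained into io.Discard so the connection can be reused. A hedged sketch of that pattern (function and variable names are illustrative, not the SeaweedFS API):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func readCapped(body io.Reader, sizeLimit int64) ([]byte, bool, error) {
	var buf bytes.Buffer
	n, err := buf.ReadFrom(io.LimitReader(body, sizeLimit+1))
	if err != nil && err != io.EOF {
		return nil, false, err
	}
	if n == sizeLimit+1 {
		io.Copy(io.Discard, body) // drain what is left; the body exceeds the limit
		return buf.Bytes()[:sizeLimit], true, nil
	}
	return buf.Bytes(), false, nil
}

func main() {
	data, overLimit, err := readCapped(strings.NewReader("hello world"), 5)
	fmt.Println(string(data), overLimit, err) // "hello true <nil>"
}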
@@ -1,7 +1,6 @@
 package needle

 import (
-	"io/ioutil"
 	"os"
 	"testing"

@@ -31,7 +30,7 @@ func TestAppend(t *testing.T) {
 		Padding: nil, // Padding []byte `comment:"Aligned to 8 bytes"`
 	}

-	tempFile, err := ioutil.TempFile("", ".dat")
+	tempFile, err := os.CreateTemp("", ".dat")
 	if err != nil {
 		t.Errorf("Fail TempFile. %v", err)
 		return

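os.CreateTemp is the drop-in spelling of ioutil.TempFile. In new test code, t.TempDir() is often a further simplification, because the testing package removes the directory automatically when the test ends. A short illustrative sketch (not from this commit):

package needle_test

import (
	"os"
	"path/filepath"
	"testing"
)

func TestTempFilePattern(t *testing.T) {
	// Option 1: direct replacement; the caller cleans up.
	f, err := os.CreateTemp("", ".dat")
	if err != nil {
		t.Fatalf("CreateTemp: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Option 2: test-scoped directory, cleaned up by the testing package.
	path := filepath.Join(t.TempDir(), "volume.dat")
	if err := os.WriteFile(path, []byte("data"), 0644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}
}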
@@ -1,8 +1,8 @@
 package storage

 import (
-	"io/ioutil"
 	"math/rand"
+	"os"
 	"testing"

 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -11,7 +10,7 @@ import (

 func TestFastLoadingNeedleMapMetrics(t *testing.T) {

-	idxFile, _ := ioutil.TempFile("", "tmp.idx")
+	idxFile, _ := os.CreateTemp("", "tmp.idx")
 	nm := NewCompactNeedleMap(idxFile)

 	for i := 0; i < 10000; i++ {

@@ -194,6 +194,35 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast
 			err = fmt.Errorf("read entry %d: %v", m, err)
 			return
 		}
+		if offset.IsZero() {
+			leftIndex, _, leftNs, leftErr := v.readLeftNs(m)
+			if leftErr != nil {
+				err = leftErr
+				return
+			}
+			rightIndex, rightOffset, rightNs, rightErr := v.readRightNs(m)
+			if rightErr != nil {
+				err = rightErr
+				return
+			}
+			if rightNs <= sinceNs {
+				l = rightIndex
+				if l == entryCount {
+					return Offset{}, true, nil
+				} else {
+					continue
+				}
+			}
+			if sinceNs < leftNs {
+				h = leftIndex + 1
+				continue
+			}
+			return rightOffset, false, nil
+
+		}
+		if offset.IsZero() {
+			return Offset{}, true, nil
+		}

 		mNs, nsReadErr := v.readAppendAtNs(offset)
 		if nsReadErr != nil {
@@ -220,6 +249,38 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast

 }

+func (v *Volume) readRightNs(m int64) (index int64, offset Offset, ts uint64, err error) {
+	index = m
+	for offset.IsZero() {
+		index++
+		offset, err = v.readOffsetFromIndex(index)
+		if err != nil {
+			err = fmt.Errorf("read entry %d: %v", index, err)
+			return
+		}
+	}
+	if !offset.IsZero() {
+		ts, err = v.readAppendAtNs(offset)
+	}
+	return
+}
+
+func (v *Volume) readLeftNs(m int64) (index int64, offset Offset, ts uint64, err error) {
+	index = m
+	for offset.IsZero() {
+		index--
+		offset, err = v.readOffsetFromIndex(index)
+		if err != nil {
+			err = fmt.Errorf("read entry %d: %v", index, err)
+			return
+		}
+	}
+	if !offset.IsZero() {
+		ts, err = v.readAppendAtNs(offset)
+	}
+	return
+}
+
 // bytes is of size NeedleMapEntrySize
 func (v *Volume) readOffsetFromIndex(m int64) (Offset, error) {
 	v.dataFileAccessLock.RLock()

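The new readLeftNs/readRightNs helpers exist because deleted needles leave zero-offset tombstones in the index: when the binary-search probe lands on one, the search scans outward to the nearest live entries and decides from their timestamps which half to keep. A compact, self-contained model of that idea (names and the zero-as-tombstone encoding are illustrative, not the volume code):

package main

import "fmt"

// findFirstGreater returns the index of the first live value > target.
// ts[i] == 0 marks a deleted (tombstone) slot; live values are ascending.
func findFirstGreater(ts []uint64, target uint64) int {
	l, h := 0, len(ts)
	for l < h {
		m := (l + h) / 2
		if ts[m] == 0 { // tombstone: look outward for live neighbors
			left, right := m-1, m+1
			for left >= l && ts[left] == 0 {
				left--
			}
			for right < h && ts[right] == 0 {
				right++
			}
			if right == h || ts[right] > target {
				if left < l || ts[left] <= target {
					return right // everything live on the left is <= target
				}
				h = left + 1 // the answer is in the left half
			} else {
				l = right + 1 // ts[right] <= target: discard the left half
			}
			continue
		}
		if ts[m] <= target {
			l = m + 1
		} else {
			h = m
		}
	}
	return l
}

func main() {
	ts := []uint64{10, 0, 0, 40, 50, 0, 70}
	fmt.Println(findFirstGreater(ts, 45)) // 4: ts[4]=50 is the first live value > 45
}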
@@ -3,15 +3,14 @@ package volume_info
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
-
-	_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
-	"github.com/chrislusf/seaweedfs/weed/util"
+	"os"

 	"github.com/golang/protobuf/jsonpb"

 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
+	_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )

 // MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil
@@ -36,7 +35,7 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn
 	hasVolumeInfoFile = true

 	glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
-	tierData, readErr := ioutil.ReadFile(fileName)
+	tierData, readErr := os.ReadFile(fileName)
 	if readErr != nil {
 		glog.Warningf("fail to read %s : %v", fileName, readErr)
 		err = fmt.Errorf("fail to read %s : %v", fileName, readErr)
@@ -76,7 +75,7 @@ func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) er
 		return fmt.Errorf("marshal to %s: %v", fileName, marshalErr)
 	}

-	writeErr := ioutil.WriteFile(fileName, []byte(text), 0755)
+	writeErr := os.WriteFile(fileName, []byte(text), 0755)
 	if writeErr != nil {
 		return fmt.Errorf("fail to write %s : %v", fileName, writeErr)
 	}

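SaveVolumeInfo and MaybeLoadVolumeInfo are a marshal-write / read-unmarshal round trip over the volume info file. A self-contained sketch of the same shape, with encoding/json standing in for the jsonpb marshaller the real code uses (struct and file name are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

type volumeInfo struct {
	Version int `json:"version"`
}

func save(name string, vi *volumeInfo) error {
	text, err := json.Marshal(vi)
	if err != nil {
		return fmt.Errorf("marshal to %s: %v", name, err)
	}
	return os.WriteFile(name, text, 0644) // was ioutil.WriteFile before Go 1.16
}

func load(name string) (*volumeInfo, error) {
	data, err := os.ReadFile(name) // was ioutil.ReadFile before Go 1.16
	if err != nil {
		return nil, fmt.Errorf("fail to read %s : %v", name, err)
	}
	vi := &volumeInfo{}
	return vi, json.Unmarshal(data, vi)
}

func main() {
	name := "demo.vif" // illustrative path
	defer os.Remove(name)
	if err := save(name, &volumeInfo{Version: 3}); err != nil {
		log.Fatal(err)
	}
	vi, err := load(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vi.Version)
}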
@@ -1,7 +1,6 @@
 package storage

 import (
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"testing"
@@ -45,7 +44,7 @@ preparing test prerequisite easier )
 func TestMakeDiff(t *testing.T) {

 	v := new(Volume)
-	//lastCompactIndexOffset value is the index file size before step 4
+	// lastCompactIndexOffset value is the index file size before step 4
 	v.lastCompactIndexOffset = 96
 	v.SuperBlock.Version = 0x2
 	/*
@@ -63,7 +62,7 @@ func TestMakeDiff(t *testing.T) {
 }

 func TestCompaction(t *testing.T) {
-	dir, err := ioutil.TempDir("", "example")
+	dir, err := os.MkdirTemp("", "example")
 	if err != nil {
 		t.Fatalf("temp dir creation: %v", err)
 	}

@@ -2,16 +2,17 @@ package storage

 import (
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/storage/needle"
-	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 )

 func TestSearchVolumesWithDeletedNeedles(t *testing.T) {
-	dir, err := ioutil.TempDir("", "example")
+	dir, err := os.MkdirTemp("", "example")
 	if err != nil {
 		t.Fatalf("temp dir creation: %v", err)
 	}
@@ -22,9 +23,9 @@ func TestSearchVolumesWithDeletedNeedles(t *testing.T) {
 		t.Fatalf("volume creation: %v", err)
 	}

-	count := 10
+	count := 20

-	for i:=1;i<count;i++{
+	for i := 1; i < count; i++ {
 		n := newRandomNeedle(uint64(i))
 		_, _, _, err := v.writeNeedle2(n, true, false)
 		if err != nil {
@@ -32,9 +33,9 @@ func TestSearchVolumesWithDeletedNeedles(t *testing.T) {
 		}
 	}

-	for i:=1;i<5;i++{
+	for i := 1; i < 15; i++ {
 		n := newEmptyNeedle(uint64(i))
-		_, err := v.doDeleteRequest(n)
+		err := v.nm.Put(n.Id, types.Offset{}, types.TombstoneFileSize)
 		if err != nil {
 			t.Fatalf("delete needle %d: %v", i, err)
 		}
@@ -42,15 +43,12 @@ func TestSearchVolumesWithDeletedNeedles(t *testing.T) {

 	ts1 := time.Now().UnixNano()

-	var ts2 uint64
-
-	for i:=5;i<count;i++{
+	for i := 15; i < count; i++ {
 		n := newEmptyNeedle(uint64(i))
 		_, err := v.doDeleteRequest(n)
 		if err != nil {
 			t.Fatalf("delete needle %d: %v", i, err)
 		}
-		ts2 = n.AppendAtNs
 	}

 	offset, isLast, err := v.BinarySearchByAppendAtNs(uint64(ts1))
@@ -59,11 +57,4 @@ func TestSearchVolumesWithDeletedNeedles(t *testing.T) {
 	}
 	fmt.Printf("offset: %v, isLast: %v\n", offset.ToActualOffset(), isLast)

-	offset, isLast, err = v.BinarySearchByAppendAtNs(uint64(ts2))
-	if err != nil {
-		t.Fatalf("lookup by ts: %v", err)
-	}
-	fmt.Printf("offset: %v, isLast: %v\n", offset.ToActualOffset(), isLast)
-
-
 }

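The test change above stops issuing a full delete request for the first batch and instead writes a tombstone entry (zero offset, sentinel size) straight into the needle map, so the binary search being tested has to cope with dead index slots. A toy model of that bookkeeping (field names and the sentinel value are illustrative, not the real CompactNeedleMap):

package main

import "fmt"

const TombstoneSize = ^uint32(0) // sentinel meaning "deleted"; an assumption for this sketch

type entry struct {
	offset uint32
	size   uint32
}

type needleMap map[uint64]entry

func (m needleMap) put(id uint64, offset, size uint32) { m[id] = entry{offset, size} }

// deleteNeedle mimics nm.Put(n.Id, types.Offset{}, types.TombstoneFileSize):
// the id stays in the index, but with a zero offset and tombstone size.
func (m needleMap) deleteNeedle(id uint64) { m[id] = entry{0, TombstoneSize} }

func (m needleMap) isDeleted(id uint64) bool {
	e, ok := m[id]
	return ok && e.size == TombstoneSize
}

func main() {
	nm := needleMap{}
	nm.put(1, 100, 42)
	nm.deleteNeedle(1)
	fmt.Println(nm.isDeleted(1)) // true
}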
|
@ -3,7 +3,6 @@ package chunk_cache
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
|
@@ -11,7 +10,7 @@ import (

 func TestOnDisk(t *testing.T) {

-	tmpDir, _ := ioutil.TempDir("", "c")
+	tmpDir, _ := os.MkdirTemp("", "c")
 	defer os.RemoveAll(tmpDir)

 	totalDiskSizeInKB := int64(32)

@@ -96,7 +96,7 @@ func (c *OnDiskCacheLayer) getChunkSlice(needleId types.NeedleId, offset, length
 			continue
 		}
 		if err != nil {
-			glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
+			glog.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err)
 			continue
 		}
 		if len(data) != 0 {

@@ -5,7 +5,7 @@ import (
 )

 var (
-	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.71)
+	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.74)
 	VERSION        = sizeLimit + " " + VERSION_NUMBER
 	COMMIT         = ""
 )

@@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
@@ -35,7 +34,7 @@ func Post(url string, values url.Values) ([]byte, error) {
 		return nil, err
 	}
 	defer r.Body.Close()
-	b, err := ioutil.ReadAll(r.Body)
+	b, err := io.ReadAll(r.Body)
 	if r.StatusCode >= 400 {
 		if err != nil {
 			return nil, fmt.Errorf("%s: %d - %s", url, r.StatusCode, string(b))
@@ -71,7 +70,7 @@ func Get(url string) ([]byte, bool, error) {
 		reader = response.Body
 	}

-	b, err := ioutil.ReadAll(reader)
+	b, err := io.ReadAll(reader)
 	if response.StatusCode >= 400 {
 		retryable := response.StatusCode >= 500
 		return nil, retryable, fmt.Errorf("%s: %s", url, response.Status)
@@ -107,7 +106,7 @@ func Delete(url string, jwt string) error {
 		return e
 	}
 	defer resp.Body.Close()
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return err
 	}
@@ -137,7 +136,7 @@ func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err err
 		return
 	}
 	defer resp.Body.Close()
-	body, err = ioutil.ReadAll(resp.Body)
+	body, err = io.ReadAll(resp.Body)
 	if err != nil {
 		return
 	}
@@ -271,7 +270,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
 		}
 	}
 	// drains the response body to avoid memory leak
-	data, _ := ioutil.ReadAll(reader)
+	data, _ := io.ReadAll(reader)
 	if len(data) != 0 {
 		glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data))
 	}
@@ -393,11 +392,11 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
 }

 func CloseResponse(resp *http.Response) {
-	io.Copy(ioutil.Discard, resp.Body)
+	io.Copy(io.Discard, resp.Body)
 	resp.Body.Close()
 }

 func CloseRequest(req *http.Request) {
-	io.Copy(ioutil.Discard, req.Body)
+	io.Copy(io.Discard, req.Body)
 	req.Body.Close()
 }

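CloseResponse and CloseRequest drain the body before closing for a reason worth noting: an HTTP/1.1 connection whose body was not read to EOF cannot return to the keep-alive pool, so skipping the drain forces a new dial on the next request. A brief sketch of the same pattern (the URL is a placeholder):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func closeResponse(resp *http.Response) {
	io.Copy(io.Discard, resp.Body) // drain so the connection can be reused
	resp.Body.Close()
}

func main() {
	resp, err := http.Get("https://example.com") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer closeResponse(resp)
	fmt.Println(resp.Status)
}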
@@ -29,6 +29,7 @@ type LogBuffer struct {
 	pos           int
 	startTime     time.Time
 	stopTime      time.Time
+	lastFlushTime time.Time
 	sizeBuf       []byte
 	flushInterval time.Duration
 	flushFn       func(startTime, stopTime time.Time, buf []byte)
@@ -132,6 +133,8 @@ func (m *LogBuffer) loopFlush() {
 			// glog.V(4).Infof("%s flush [%v, %v] size %d", m.name, d.startTime, d.stopTime, len(d.data.Bytes()))
 			m.flushFn(d.startTime, d.stopTime, d.data.Bytes())
 			d.releaseMemory()
+			// local logbuffer is different from aggregate logbuffer here
+			m.lastFlushTime = d.stopTime
 		}
 	}
 }
@@ -162,6 +165,9 @@ func (m *LogBuffer) copyToFlush() *dataToFlush {
 				data:      copiedBytes(m.buf[:m.pos]),
 			}
 			// glog.V(4).Infof("%s flushing [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime)
+		} else {
+			// glog.V(4).Infof("%s removed from memory [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime)
+			m.lastFlushTime = m.stopTime
 		}
 		m.buf = m.prevBuffers.SealBuffer(m.startTime, m.stopTime, m.buf, m.pos)
 		m.startTime = time.Unix(0, 0)
@@ -203,7 +209,10 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (bufferCopy *bytes.Bu
 	if tsMemory.IsZero() { // case 2.2
 		return nil, nil
 	} else if lastReadTime.Before(tsMemory) { // case 2.3
-		return nil, ResumeFromDiskError
+		if !m.lastFlushTime.IsZero() {
+			glog.V(0).Infof("resume with last flush time: %v", m.lastFlushTime)
+			return nil, ResumeFromDiskError
+		}
 	}

 	// the following is case 2.1

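The ReadFromBuffer change gates ResumeFromDiskError on lastFlushTime: a reader asking for data older than anything in memory should be redirected to disk only if a flush has actually happened; before the first flush, the requested range never existed, and returning the error would send the reader on a futile disk scan. A reduced model of that decision (the field and case names follow the diff; the rest is illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

var ErrResumeFromDisk = errors.New("resume from disk")

type logBuffer struct {
	startTime     time.Time // oldest entry still held in memory
	lastFlushTime time.Time // zero until the first flush happens
}

func (b *logBuffer) read(lastReadTime time.Time) error {
	if lastReadTime.Before(b.startTime) {
		if !b.lastFlushTime.IsZero() {
			return ErrResumeFromDisk // older data was flushed; go read it there
		}
		// nothing was ever flushed: the requested range simply does not exist
	}
	return nil
}

func main() {
	b := &logBuffer{startTime: time.Now()}
	fmt.Println(b.read(time.Now().Add(-time.Hour))) // <nil>: no flush has happened yet
	b.lastFlushTime = time.Now()
	fmt.Println(b.read(time.Now().Add(-time.Hour))) // resume from disk
}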
@@ -2,7 +2,6 @@ package skiplist

 import (
 	"bytes"
-	"fmt"
 	"math/rand"
 	"strconv"
 	"testing"
@@ -230,7 +229,7 @@ func TestFindGreaterOrEqual(t *testing.T) {

 	// Test on empty list.
 	if _, _, ok, _ := listPointer.FindGreaterOrEqual(Element(0)); ok {
-		t.Fail()
+		t.Errorf("found element 0 in an empty list")
 	}

 	list = New(memStore)
@@ -243,23 +242,20 @@ func TestFindGreaterOrEqual(t *testing.T) {
 		key := Element(rand.Intn(maxNumber))
 		if _, v, ok, _ := list.FindGreaterOrEqual(key); ok {
 			// if f is v should be bigger than the element before
-			if v.Prev != nil && bytes.Compare(v.Prev.Key, key) >= 0 {
-				fmt.Printf("PrevV: %s\n key: %s\n\n", string(v.Prev.Key), string(key))
-				t.Fail()
+			if v.Prev != nil && bytes.Compare(key, v.Prev.Key) < 0 {
+				t.Errorf("PrevV: %s\n key: %s\n\n", string(v.Prev.Key), string(key))
 			}
 			// v should be bigger or equal to f
 			// If we compare directly, we get an equal key with a difference on the 10th decimal point, which fails.
 			if bytes.Compare(v.Key, key) < 0 {
-				fmt.Printf("v: %s\n key: %s\n\n", string(v.Key), string(key))
-				t.Fail()
+				t.Errorf("v: %s\n key: %s\n\n", string(v.Key), string(key))
 			}
 		} else {
 			lastNode, _ := list.GetLargestNode()
 			lastV := lastNode.GetValue()
 			// It is OK, to fail, as long as f is bigger than the last element.
 			if bytes.Compare(key, lastV) <= 0 {
-				fmt.Printf("lastV: %s\n key: %s\n\n", string(lastV), string(key))
-				t.Fail()
+				t.Errorf("lastV: %s\n key: %s\n\n", string(lastV), string(key))
 			}
 		}
 	}

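The repeated cleanup in this test file collapses fmt.Printf plus t.Fail() into a single t.Errorf, which records the failure and attaches the message to the test's own output instead of raw stdout. A minimal sketch of the pattern:

package skiplist_test

import "testing"

func check(t *testing.T, got, want int) {
	t.Helper() // report the caller's line, not this helper's
	if got != want {
		// before: fmt.Printf("got: %d want: %d\n", got, want); t.Fail()
		t.Errorf("got: %d want: %d", got, want)
	}
}

func TestCheck(t *testing.T) {
	check(t, 2, 2)
}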