HCFS: 1.5.2

Chris Lu 2020-10-25 14:44:18 -07:00
parent 63b0fb54f7
commit 596d476e3d
11 changed files with 20 additions and 18 deletions

View file

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
-   <version>1.5.1</version>
+   <version>1.5.2</version>
    <parent>
        <groupId>org.sonatype.oss</groupId>

View file

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
-   <version>1.5.1</version>
+   <version>1.5.2</version>
    <parent>
        <groupId>org.sonatype.oss</groupId>

View file

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
-   <version>1.5.1</version>
+   <version>1.5.2</version>
    <parent>
        <groupId>org.sonatype.oss</groupId>

View file

@@ -86,7 +86,7 @@ public class FileChunkManifest {
     }

    public static List<FilerProto.FileChunk> maybeManifestize(
-           final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> inputChunks) throws IOException {
+           final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
        // the return variable
        List<FilerProto.FileChunk> chunks = new ArrayList<>();
@@ -101,7 +101,7 @@ public class FileChunkManifest {
         int remaining = dataChunks.size();
        for (int i = 0; i + mergeFactor < dataChunks.size(); i += mergeFactor) {
-           FilerProto.FileChunk chunk = mergeIntoManifest(filerGrpcClient, dataChunks.subList(i, i + mergeFactor));
+           FilerProto.FileChunk chunk = mergeIntoManifest(filerGrpcClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
            chunks.add(chunk);
            remaining -= mergeFactor;
        }
@@ -113,7 +113,7 @@ public class FileChunkManifest {
         return chunks;
    }

-   private static FilerProto.FileChunk mergeIntoManifest(final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> dataChunks) throws IOException {
+   private static FilerProto.FileChunk mergeIntoManifest(final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
        // create and serialize the manifest
        dataChunks = FilerClient.beforeEntrySerialization(dataChunks);
        FilerProto.FileChunkManifest.Builder m = FilerProto.FileChunkManifest.newBuilder().addAllChunks(dataChunks);
@@ -130,7 +130,7 @@ public class FileChunkManifest {
                 filerGrpcClient.getReplication(),
                filerGrpcClient,
                minOffset,
-               data, 0, data.length);
+               data, 0, data.length, parentDirectory);
        manifestChunk.setIsChunkManifest(true);
        manifestChunk.setSize(maxOffset - minOffset);
        return manifestChunk.build();
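
Note: maybeManifestize now takes the file's parent directory, which it forwards to mergeIntoManifest and the manifest-chunk write. As a minimal sketch only (the class name, helper name, and example path are assumptions, and the seaweedfs.client package layout is assumed from the files in this commit), a caller would thread the new argument through like this:

import java.io.IOException;
import java.util.List;

import seaweedfs.client.FileChunkManifest;
import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;

public class ManifestizeSketch {
    // Sketch: compact an entry's chunk list into manifest chunks, forwarding
    // the file's parent directory so the manifest chunk is assigned to a
    // volume using the same path-based placement as its data chunks.
    static void compact(FilerGrpcClient filerGrpcClient,
                        FilerProto.Entry.Builder entry,
                        String parentDirectory) throws IOException {  // e.g. "/buckets/example" (hypothetical)
        List<FilerProto.FileChunk> chunks =
                FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList(), parentDirectory);
        entry.clearChunks();
        entry.addAllChunks(chunks);
    }
}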

View file

@@ -26,9 +26,10 @@ public class SeaweedWrite {
                                  final FilerGrpcClient filerGrpcClient,
                                 final long offset,
                                 final byte[] bytes,
-                                final long bytesOffset, final long bytesLength) throws IOException {
+                                final long bytesOffset, final long bytesLength,
+                                final String parentPath) throws IOException {
        FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
-               replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength);
+               replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength, parentPath);
        synchronized (entry) {
            entry.addChunks(chunkBuilder);
        }
@@ -39,13 +40,15 @@ public class SeaweedWrite {
                                                      final long offset,
                                                     final byte[] bytes,
                                                     final long bytesOffset,
-                                                    final long bytesLength) throws IOException {
+                                                    final long bytesLength,
+                                                    final String parentPath) throws IOException {
        FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
                FilerProto.AssignVolumeRequest.newBuilder()
                        .setCollection(filerGrpcClient.getCollection())
                        .setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
                        .setDataCenter("")
                        .setTtlSec(0)
+                       .setParentPath(parentPath)
                        .build());
        String fileId = response.getFileId();
        String url = response.getUrl();
@@ -77,7 +80,7 @@ public class SeaweedWrite {
                                    final FilerProto.Entry.Builder entry) throws IOException {
        synchronized (entry) {
-           List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList());
+           List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList(), parentDirectory);
            entry.clearChunks();
            entry.addAllChunks(chunks);
            filerGrpcClient.getBlockingStub().createEntry(
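
Note: with this change, writeData and writeChunk carry a parentPath that ends up in the AssignVolumeRequest via setParentPath, so the filer can pick volumes per directory. A hedged usage sketch follows; the class name, example path, and null replication value are illustrative assumptions, and only the signatures visible in the diff above are relied on:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedWrite;

public class WriteDataSketch {
    // Sketch: append one chunk for a file under a hypothetical directory,
    // passing that directory as the new trailing parentPath argument.
    static void writeOneChunk(FilerGrpcClient filerGrpcClient,
                              FilerProto.Entry.Builder entry) throws IOException {
        byte[] bytes = "hello".getBytes(StandardCharsets.UTF_8);
        String parentPath = "/buckets/example";   // hypothetical parent directory
        SeaweedWrite.writeData(
                entry,                    // entry builder for the file being written
                null,                     // replication: null falls back to the filer default (see setReplication above)
                filerGrpcClient,
                0L,                       // offset of this chunk within the file
                bytes, 0L, bytes.length,  // bytes, bytesOffset, bytesLength
                parentPath);              // new argument added in this commit
    }
}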

View file

@@ -301,7 +301,7 @@
         </snapshotRepository>
    </distributionManagement>
    <properties>
-       <seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+       <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>2.9.2</hadoop.version>
    </properties>
</project>

View file

@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
    <properties>
-       <seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+       <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>2.9.2</hadoop.version>
    </properties>

View file

@@ -185,7 +185,7 @@ public class SeaweedOutputStream extends OutputStream {
         }
        final Future<Void> job = completionService.submit(() -> {
            // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
-           SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit());
+           SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), getParentDirectory(path));
            // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            ByteBufferPool.release(bufferToWrite);
            return null;

View file

@@ -309,7 +309,7 @@
         </snapshotRepository>
    </distributionManagement>
    <properties>
-       <seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+       <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>3.1.1</hadoop.version>
    </properties>
</project>

View file

@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
    <properties>
-       <seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+       <seaweedfs.client.version>1.5.2</seaweedfs.client.version>
        <hadoop.version>3.1.1</hadoop.version>
    </properties>

View file

@@ -18,7 +18,6 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.Locale;
 import java.util.concurrent.*;
@@ -232,7 +231,7 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, StreamCapabilities {
         }
        final Future<Void> job = completionService.submit(() -> {
            // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
-           SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit());
+           SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), getParentDirectory(path));
            // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
            ByteBufferPool.release(bufferToWrite);
            return null;
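
Note: both SeaweedOutputStream variants now derive the parent directory from the stream's path via a getParentDirectory(path) helper whose body is not shown in the hunks above. Purely as an assumption about its behavior, such a helper could look like this:

public class ParentDirectorySketch {
    // Assumed behavior only: strip the final component from the file path so
    // the write path can hand the enclosing directory to SeaweedWrite.writeData.
    static String getParentDirectory(String path) {
        int lastSlash = path.lastIndexOf('/');
        return lastSlash <= 0 ? "/" : path.substring(0, lastSlash);
    }
}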