HDFS: add TLS-secured gRPC

Chris Lu 2019-02-19 11:57:25 -08:00
parent 07af52cb6f
commit 58d4088db4
5 changed files with 142 additions and 86 deletions

pom.xml (seaweedfs-client)

@@ -4,7 +4,7 @@
    <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
-    <version>1.0.5</version>
+    <version>1.0.7</version>
    <parent>
        <groupId>org.sonatype.oss</groupId>

FilerGrpcClient.java (package seaweedfs.client)

@@ -2,7 +2,14 @@ package seaweedfs.client;

import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
+import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts;
+import io.grpc.netty.shaded.io.grpc.netty.NegotiationType;
+import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder;
+import javax.net.ssl.SSLException;
+import java.io.File;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;

@@ -20,6 +27,16 @@ public class FilerGrpcClient {
        this(ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext());
    }

+    public FilerGrpcClient(String host, int grpcPort,
+                           String caFilePath,
+                           String clientCertFilePath,
+                           String clientPrivateKeyFilePath) throws SSLException {
+        this(NettyChannelBuilder.forAddress(host, grpcPort)
+                .negotiationType(NegotiationType.TLS)
+                .sslContext(buildSslContext(caFilePath,clientCertFilePath,clientPrivateKeyFilePath)));
+    }
+
    public FilerGrpcClient(ManagedChannelBuilder<?> channelBuilder) {
        channel = channelBuilder.build();
        blockingStub = SeaweedFilerGrpc.newBlockingStub(channel);

@@ -42,4 +59,18 @@ public class FilerGrpcClient {
    public SeaweedFilerGrpc.SeaweedFilerFutureStub getFutureStub() {
        return futureStub;
    }

+    private static SslContext buildSslContext(String trustCertCollectionFilePath,
+                                              String clientCertChainFilePath,
+                                              String clientPrivateKeyFilePath) throws SSLException {
+        SslContextBuilder builder = GrpcSslContexts.forClient();
+        if (trustCertCollectionFilePath != null) {
+            builder.trustManager(new File(trustCertCollectionFilePath));
+        }
+        if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) {
+            builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath));
+        }
+        return builder.build();
+    }
}
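Note (not part of the patch): the new constructor is the standard grpc-java Netty TLS client wiring, GrpcSslContexts.forClient() feeding a NettyChannelBuilder with NegotiationType.TLS. A minimal usage sketch, assuming the existing plaintext constructor takes (host, grpcPort) as the delegation above suggests; the host, gRPC port, and certificate paths are placeholders:

import seaweedfs.client.FilerGrpcClient;

import javax.net.ssl.SSLException;

public class FilerGrpcClientTlsSketch {
    public static void main(String[] args) throws SSLException {
        // Plaintext, as before; 18888 is a placeholder gRPC port.
        FilerGrpcClient plain = new FilerGrpcClient("localhost", 18888);

        // Mutually-authenticated TLS; the certificate paths are placeholders.
        // buildSslContext() tolerates nulls: a null CA path falls back to the JVM's
        // default trust store, and a null cert/key pair skips client authentication.
        FilerGrpcClient secured = new FilerGrpcClient("localhost", 18888,
                "/path/to/ca.crt", "/path/to/client.crt", "/path/to/client.key");
    }
}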

pom.xml (Hadoop HDFS client module)

@@ -5,7 +5,7 @@
    <modelVersion>4.0.0</modelVersion>

    <properties>
-        <seaweedfs.client.version>1.0.5</seaweedfs.client.version>
+        <seaweedfs.client.version>1.0.7</seaweedfs.client.version>
        <hadoop.version>3.1.1</hadoop.version>
    </properties>

SeaweedFileSystem.java

@@ -34,6 +34,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
    public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
    public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
    public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port";
+    public static final String FS_SEAWEED_GRPC_CA = "fs.seaweed.ca";
+    public static final String FS_SEAWEED_GRPC_CLIENT_KEY = "fs.seaweed.client.key";
+    public static final String FS_SEAWEED_GRPC_CLIENT_CERT = "fs.seaweed.client.cert";
    private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class);
    private static int BUFFER_SIZE = 16 * 1024 * 1024;

@@ -72,7 +75,17 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
        setConf(conf);
        this.uri = uri;

-        seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+        if (conf.get(FS_SEAWEED_GRPC_CA) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CA).length() != 0
+                && conf.get(FS_SEAWEED_GRPC_CLIENT_CERT) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_CERT).length() != 0
+                && conf.get(FS_SEAWEED_GRPC_CLIENT_KEY) != null && conf.getTrimmed(FS_SEAWEED_GRPC_CLIENT_KEY).length() != 0) {
+            seaweedFileSystemStore = new SeaweedFileSystemStore(host, port,
+                    conf.get(FS_SEAWEED_GRPC_CA),
+                    conf.get(FS_SEAWEED_GRPC_CLIENT_CERT),
+                    conf.get(FS_SEAWEED_GRPC_CLIENT_KEY));
+        } else {
+            seaweedFileSystemStore = new SeaweedFileSystemStore(host, port);
+        }
    }

    @Override
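Note (not part of the patch): a minimal sketch of the Hadoop configuration that makes this TLS branch take effect. Only the fs.seaweed.* keys and the initialize() behavior come from the patch; the host, port, URI scheme, certificate paths, and the seaweed.hdfs package name are assumptions:

import org.apache.hadoop.conf.Configuration;

import seaweed.hdfs.SeaweedFileSystem; // package assumed; not shown in this diff

import java.net.URI;

public class SeaweedTlsConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(SeaweedFileSystem.FS_SEAWEED_FILER_HOST, "localhost");
        conf.setInt(SeaweedFileSystem.FS_SEAWEED_FILER_PORT, 8888);

        // All three keys must be present and non-empty, otherwise initialize()
        // falls back to the plaintext SeaweedFileSystemStore.
        conf.set(SeaweedFileSystem.FS_SEAWEED_GRPC_CA, "/path/to/ca.crt");
        conf.set(SeaweedFileSystem.FS_SEAWEED_GRPC_CLIENT_CERT, "/path/to/client.crt");
        conf.set(SeaweedFileSystem.FS_SEAWEED_GRPC_CLIENT_KEY, "/path/to/client.key");

        SeaweedFileSystem fs = new SeaweedFileSystem();
        fs.initialize(URI.create("seaweedfs://localhost:8888/"), conf); // scheme assumed
    }
}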
@@ -206,8 +219,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        return seaweedFileSystemStore.createDirectory(path, currentUser,
                fsPermission == null ? FsPermission.getDirDefault() : fsPermission,
                FsPermission.getUMask(getConf()));
    }

@@ -238,7 +251,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     */
    @Override
    public void setOwner(Path path, final String owner, final String group)
            throws IOException {
        LOG.debug("setOwner path: {}", path);
        path = qualify(path);
@@ -271,54 +284,55 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
    /**
     * Concat existing files together.
     *
     * @param trg the path to the target destination.
     * @param psrcs the paths to the sources to use for the concatenation.
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default).
     */
    @Override
    public void concat(final Path trg, final Path[] psrcs) throws IOException {
        throw new UnsupportedOperationException("Not implemented by the " +
                getClass().getSimpleName() + " FileSystem implementation");
    }

    /**
     * Truncate the file in the indicated path to the indicated size.
     * <ul>
     * <li>Fails if path is a directory.</li>
     * <li>Fails if path does not exist.</li>
     * <li>Fails if path is not closed.</li>
     * <li>Fails if new size is greater than current size.</li>
     * </ul>
     *
     * @param f The path to the file to be truncated
     * @param newLength The size the file is to be truncated to
     * @return <code>true</code> if the file has been truncated to the desired
     * <code>newLength</code> and is immediately available to be reused for
     * write operations such as <code>append</code>, or
     * <code>false</code> if a background process of adjusting the length of
     * the last block has been started, and clients should wait for it to
     * complete before proceeding with further file updates.
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default).
     */
    @Override
    public boolean truncate(Path f, long newLength) throws IOException {
        throw new UnsupportedOperationException("Not implemented by the " +
                getClass().getSimpleName() + " FileSystem implementation");
    }

    @Override
    public void createSymlink(final Path target, final Path link,
            final boolean createParent) throws AccessControlException,
            FileAlreadyExistsException, FileNotFoundException,
            ParentNotDirectoryException, UnsupportedFileSystemException,
            IOException {
        // Supporting filesystems should override this method
        throw new UnsupportedOperationException(
                "Filesystem does not support symlinks!");
    }

    public boolean supportsSymlinks() {
@@ -327,48 +341,51 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
    /**
     * Create a snapshot.
     *
     * @param path The directory where snapshots will be taken.
     * @param snapshotName The name of the snapshot
     * @return the snapshot path.
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     */
    @Override
    public Path createSnapshot(Path path, String snapshotName)
            throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support createSnapshot");
    }

    /**
     * Rename a snapshot.
     *
     * @param path The directory path where the snapshot was taken
     * @param snapshotOldName Old name of the snapshot
     * @param snapshotNewName New name of the snapshot
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void renameSnapshot(Path path, String snapshotOldName,
            String snapshotNewName) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support renameSnapshot");
    }

    /**
     * Delete a snapshot of a directory.
     *
     * @param path The directory that the to-be-deleted snapshot belongs to
     * @param snapshotName The name of the snapshot
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void deleteSnapshot(Path path, String snapshotName)
            throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support deleteSnapshot");
    }

    /**
@@ -377,49 +394,49 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     * ACL entries that are not specified in this call are retained without
     * changes. (Modifications are merged into the current ACL.)
     *
     * @param path Path to modify
     * @param aclSpec List&lt;AclEntry&gt; describing modifications
     * @throws IOException if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
            throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support modifyAclEntries");
    }

    /**
     * Removes ACL entries from files and directories. Other ACL entries are
     * retained.
     *
     * @param path Path to modify
     * @param aclSpec List describing entries to remove
     * @throws IOException if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void removeAclEntries(Path path, List<AclEntry> aclSpec)
            throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support removeAclEntries");
    }

    /**
     * Removes all default ACL entries from files and directories.
     *
     * @param path Path to modify
     * @throws IOException if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void removeDefaultAcl(Path path)
            throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support removeDefaultAcl");
    }

    /**
@@ -428,32 +445,32 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     * bits.
     *
     * @param path Path to modify
     * @throws IOException if an ACL could not be removed
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void removeAcl(Path path)
            throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support removeAcl");
    }

    /**
     * Fully replaces ACL of files and directories, discarding all existing
     * entries.
     *
     * @param path Path to modify
     * @param aclSpec List describing modifications, which must include entries
     * for user, group, and others for compatibility with permission bits.
     * @throws IOException if an ACL could not be modified
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support setAcl");
    }

    /**
@@ -461,14 +478,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     *
     * @param path Path to get
     * @return AclStatus describing the ACL of the file or directory
     * @throws IOException if an ACL could not be read
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public AclStatus getAclStatus(Path path) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support getAclStatus");
    }

    /**

@@ -478,19 +495,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path Path to modify
     * @param name xattr name.
     * @param value xattr value.
     * @param flag xattr set flag
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void setXAttr(Path path, String name, byte[] value,
            EnumSet<XAttrSetFlag> flag) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support setXAttr");
    }

    /**
@@ -503,14 +520,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     * @param path Path to get extended attribute
     * @param name xattr name.
     * @return byte[] xattr value.
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public byte[] getXAttr(Path path, String name) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support getXAttr");
    }

    /**

@@ -522,14 +539,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     *
     * @param path Path to get extended attributes
     * @return Map describing the XAttrs of the file or directory
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support getXAttrs");
    }

    /**
@@ -539,18 +556,18 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     * <p>
     * Refer to the HDFS extended attributes user documentation for details.
     *
     * @param path Path to get extended attributes
     * @param names XAttr names.
     * @return Map describing the XAttrs of the file or directory
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
            throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support getXAttrs");
    }

    /**

@@ -562,14 +579,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     *
     * @param path Path to get extended attributes
     * @return List{@literal <String>} of the XAttr names of the file or directory
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public List<String> listXAttrs(Path path) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support listXAttrs");
    }

    /**
@@ -581,14 +598,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
     *
     * @param path Path to remove extended attribute
     * @param name xattr name
     * @throws IOException IO failure
     * @throws UnsupportedOperationException if the operation is unsupported
     * (default outcome).
     */
    @Override
    public void removeXAttr(Path path, String name) throws IOException {
        throw new UnsupportedOperationException(getClass().getSimpleName()
                + " doesn't support removeXAttr");
    }
}

SeaweedFileSystemStore.java

@@ -12,6 +12,7 @@ import seaweedfs.client.FilerGrpcClient;
import seaweedfs.client.FilerProto;
import seaweedfs.client.SeaweedRead;

+import javax.net.ssl.SSLException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;

@@ -33,6 +34,13 @@ public class SeaweedFileSystemStore {
        filerClient = new FilerClient(filerGrpcClient);
    }

+    public SeaweedFileSystemStore(String host, int port,
+                                  String caFile, String clientCertFile, String clientKeyFile) throws SSLException {
+        int grpcPort = 10000 + port;
+        filerGrpcClient = new FilerGrpcClient(host, grpcPort, caFile, clientCertFile, clientKeyFile);
+        filerClient = new FilerClient(filerGrpcClient);
+    }
+
    public static String getParentDirectory(Path path) {
        return path.isRoot() ? "/" : path.getParent().toUri().getPath();
    }
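Note (not part of the patch): the gRPC port is always derived from the filer's HTTP port (grpcPort = 10000 + port), so a filer serving HTTP on 8888 is dialed over gRPC, and now optionally TLS, on 18888. A construction sketch with a placeholder host and certificate paths; the seaweed.hdfs package name is assumed, as it is not shown in this diff:

import seaweed.hdfs.SeaweedFileSystemStore; // package assumed

import javax.net.ssl.SSLException;

public class SeaweedStoreTlsSketch {
    public static void main(String[] args) throws SSLException {
        // HTTP port 8888 maps to gRPC port 18888; certificate paths are placeholders.
        SeaweedFileSystemStore store = new SeaweedFileSystemStore("filer.example.com", 8888,
                "/path/to/ca.crt", "/path/to/client.crt", "/path/to/client.key");
    }
}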