From 10761f0f0f5ea9d574e7270db23cf26a26cd32c6 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Mon, 13 Sep 2021 02:44:27 -0700
Subject: [PATCH] 1.6.8

---
 Hadoop-Benchmark.md                    |  2 +-
 Hadoop-Compatible-File-System.md       | 24 ++++++++++++------------
 Run-Blob-Storage-on-Public-Internet.md |  2 +-
 Run-Presto-on-SeaweedFS.md             | 10 +++++-----
 run-HBase-on-SeaweedFS.md              |  2 +-
 run-Spark-on-SeaweedFS.md              | 14 +++++++-------
 6 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/Hadoop-Benchmark.md b/Hadoop-Benchmark.md
index e9f8548..3751448 100644
--- a/Hadoop-Benchmark.md
+++ b/Hadoop-Benchmark.md
@@ -26,7 +26,7 @@ Then get the seaweedfs hadoop client jar.
 
 ```
 cd share/hadoop/common/lib/
-wget https://oss.sonatype.org/service/local/repositories/releases/content/com/github/chrislusf/seaweedfs-hadoop2-client/1.6.7/seaweedfs-hadoop2-client-1.6.7.jar
+wget https://oss.sonatype.org/service/local/repositories/releases/content/com/github/chrislusf/seaweedfs-hadoop2-client/1.6.8/seaweedfs-hadoop2-client-1.6.8.jar
 ```
 
 # TestDFSIO Benchmark
diff --git a/Hadoop-Compatible-File-System.md b/Hadoop-Compatible-File-System.md
index 5de1141..7e8308f 100644
--- a/Hadoop-Compatible-File-System.md
+++ b/Hadoop-Compatible-File-System.md
@@ -10,12 +10,12 @@ $ mvn install
 
 # build for hadoop2
 $cd $GOPATH/src/github.com/chrislusf/seaweedfs/other/java/hdfs2
 $ mvn package
-$ ls -al target/seaweedfs-hadoop2-client-1.6.7.jar
+$ ls -al target/seaweedfs-hadoop2-client-1.6.8.jar
 
 # build for hadoop3
 $cd $GOPATH/src/github.com/chrislusf/seaweedfs/other/java/hdfs3
 $ mvn package
-$ ls -al target/seaweedfs-hadoop3-client-1.6.7.jar
+$ ls -al target/seaweedfs-hadoop3-client-1.6.8.jar
 ```
 Maven
@@ -23,7 +23,7 @@
 <dependency>
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-hadoop3-client</artifactId>
-  <version>1.6.7</version>
+  <version>1.6.8</version>
 </dependency>
 
 or
@@ -31,23 +31,23 @@ or
 <dependency>
   <groupId>com.github.chrislusf</groupId>
   <artifactId>seaweedfs-hadoop2-client</artifactId>
-  <version>1.6.7</version>
+  <version>1.6.8</version>
 </dependency>
 ```
 
 Or you can download the latest version from MavenCentral
 * https://mvnrepository.com/artifact/com.github.chrislusf/seaweedfs-hadoop2-client
-  * [seaweedfs-hadoop2-client-1.6.7.jar](https://oss.sonatype.org/service/local/repositories/releases/content/com/github/chrislusf/seaweedfs-hadoop2-client/1.6.7/seaweedfs-hadoop2-client-1.6.7.jar)
+  * [seaweedfs-hadoop2-client-1.6.8.jar](https://oss.sonatype.org/service/local/repositories/releases/content/com/github/chrislusf/seaweedfs-hadoop2-client/1.6.8/seaweedfs-hadoop2-client-1.6.8.jar)
 * https://mvnrepository.com/artifact/com.github.chrislusf/seaweedfs-hadoop3-client
-  * [seaweedfs-hadoop3-client-1.6.7.jar](https://oss.sonatype.org/service/local/repositories/releases/content/com/github/chrislusf/seaweedfs-hadoop3-client/1.6.7/seaweedfs-hadoop3-client-1.6.7.jar)
+  * [seaweedfs-hadoop3-client-1.6.8.jar](https://oss.sonatype.org/service/local/repositories/releases/content/com/github/chrislusf/seaweedfs-hadoop3-client/1.6.8/seaweedfs-hadoop3-client-1.6.8.jar)
 
 # Test SeaweedFS on Hadoop
 
 Suppose you are getting a new Hadoop installation. Here are the minimum steps to get SeaweedFS to run.
 
-You would need to start a weed filer first, build the seaweedfs-hadoop2-client-1.6.7.jar
-or seaweedfs-hadoop3-client-1.6.7.jar, and do the following:
+You would need to start a weed filer first, build the seaweedfs-hadoop2-client-1.6.8.jar
+or seaweedfs-hadoop3-client-1.6.8.jar, and do the following:
 
 ```
 # optionally adjust hadoop memory allocation
@@ -60,12 +60,12 @@ $ echo "" > etc/hadoop/mapred-site.xml
 # on hadoop2
 $ bin/hdfs dfs -Dfs.defaultFS=seaweedfs://localhost:8888 \
     -Dfs.seaweedfs.impl=seaweed.hdfs.SeaweedFileSystem \
-    -libjars ./seaweedfs-hadoop2-client-1.6.7.jar \
+    -libjars ./seaweedfs-hadoop2-client-1.6.8.jar \
     -ls /
 # or on hadoop3
 $ bin/hdfs dfs -Dfs.defaultFS=seaweedfs://localhost:8888 \
     -Dfs.seaweedfs.impl=seaweed.hdfs.SeaweedFileSystem \
-    -libjars ./seaweedfs-hadoop3-client-1.6.7.jar \
+    -libjars ./seaweedfs-hadoop3-client-1.6.8.jar \
     -ls /
 ```
@@ -112,9 +112,9 @@ $ bin/hadoop classpath
 # Copy SeaweedFS HDFS client jar to one of the folders
 $ cd ${HADOOP_HOME}
 # for hadoop2
-$ cp ./seaweedfs-hadoop2-client-1.6.7.jar share/hadoop/common/lib/
+$ cp ./seaweedfs-hadoop2-client-1.6.8.jar share/hadoop/common/lib/
 # or for hadoop3
-$ cp ./seaweedfs-hadoop3-client-1.6.7.jar share/hadoop/common/lib/
+$ cp ./seaweedfs-hadoop3-client-1.6.8.jar share/hadoop/common/lib/
 ```
 
 Now you can do this:
diff --git a/Run-Blob-Storage-on-Public-Internet.md b/Run-Blob-Storage-on-Public-Internet.md
index 248c41a..b4069d0 100644
--- a/Run-Blob-Storage-on-Public-Internet.md
+++ b/Run-Blob-Storage-on-Public-Internet.md
@@ -14,7 +14,7 @@ We will use 2 servers. Server 1 will host master, 2x volumes (2 disks, one volum
 # todo: use 2 step build process, copy over weed binary to fresh container (do not need curl and tar at runtime)
 FROM alpine
 RUN apk update && apk add wget tar
-RUN wget https://github.com/chrislusf/seaweedfs/releases/download/2.66/linux_amd64_large_disk.tar.gz
+RUN wget https://github.com/chrislusf/seaweedfs/releases/download/2.68/linux_amd64_large_disk.tar.gz
 RUN tar -xf linux_amd64_large_disk.tar.gz
 RUN chmod +x weed
 RUN mv weed /usr/bin/
diff --git a/Run-Presto-on-SeaweedFS.md b/Run-Presto-on-SeaweedFS.md
index 3990328..a0307ef 100644
--- a/Run-Presto-on-SeaweedFS.md
+++ b/Run-Presto-on-SeaweedFS.md
@@ -5,10 +5,10 @@ The installation steps are divided into 2 steps:
 * https://cwiki.apache.org/confluence/display/Hive/AdminManual+Metastore+Administration
 
 ### Configure Hive Metastore to support SeaweedFS
-1. Copy the seaweedfs-hadoop2-client-1.6.7.jar to hive lib directory,for example:
+1. Copy the seaweedfs-hadoop2-client-1.6.8.jar to the Hive lib directory, for example:
 ```
-cp seaweedfs-hadoop2-client-1.6.7.jar /opt/hadoop/share/hadoop/common/lib/
-cp seaweedfs-hadoop2-client-1.6.7.jar /opt/hive-metastore/lib/
+cp seaweedfs-hadoop2-client-1.6.8.jar /opt/hadoop/share/hadoop/common/lib/
+cp seaweedfs-hadoop2-client-1.6.8.jar /opt/hive-metastore/lib/
 ```
 2. Modify core-site.xml
 modify core-site.xml to support SeaweedFS, 30888 is the filer port
@@ -50,9 +50,9 @@ metastore.thrift.port is the access port exposed by the Hive Metadata service it
 Follow instructions for installation of Presto:
 * https://prestosql.io/docs/current/installation/deployment.html
 
 ### Configure Presto to support SeaweedFS
-1. Copy the seaweedfs-hadoop2-client-1.6.7.jar to Presto directory,for example:
+1. Copy the seaweedfs-hadoop2-client-1.6.8.jar to the Presto directory, for example:
 ```
-cp seaweedfs-hadoop2-client-1.6.7.jar /opt/presto-server-347/plugin/hive-hadoop2/
+cp seaweedfs-hadoop2-client-1.6.8.jar /opt/presto-server-347/plugin/hive-hadoop2/
 ```
 2. Modify core-site.xml
diff --git a/run-HBase-on-SeaweedFS.md b/run-HBase-on-SeaweedFS.md
index f877996..8989a64 100644
--- a/run-HBase-on-SeaweedFS.md
+++ b/run-HBase-on-SeaweedFS.md
@@ -1,7 +1,7 @@
 # Installation for HBase
 
 Two steps to run HBase on SeaweedFS
-1. Copy the seaweedfs-hadoop2-client-1.6.7.jar to `${HBASE_HOME}/lib`
+1. Copy the seaweedfs-hadoop2-client-1.6.8.jar to `${HBASE_HOME}/lib`
 1. And add the following 2 properties in `${HBASE_HOME}/conf/hbase-site.xml`
 
 ```
diff --git a/run-Spark-on-SeaweedFS.md b/run-Spark-on-SeaweedFS.md
index a705757..0d39ec4 100644
--- a/run-Spark-on-SeaweedFS.md
+++ b/run-Spark-on-SeaweedFS.md
@@ -11,12 +11,12 @@ To make these files visible to Spark, set HADOOP_CONF_DIR in $SPARK_HOME/conf/sp
 
 ## installation not inheriting from Hadoop cluster configuration
 
-Copy the seaweedfs-hadoop2-client-1.6.7.jar to all executor machines.
+Copy the seaweedfs-hadoop2-client-1.6.8.jar to all executor machines.
 
 Add the following to spark/conf/spark-defaults.conf on every node running Spark
 ```
-spark.driver.extraClassPath=/path/to/seaweedfs-hadoop2-client-1.6.7.jar
-spark.executor.extraClassPath=/path/to/seaweedfs-hadoop2-client-1.6.7.jar
+spark.driver.extraClassPath=/path/to/seaweedfs-hadoop2-client-1.6.8.jar
+spark.executor.extraClassPath=/path/to/seaweedfs-hadoop2-client-1.6.8.jar
 ```
 
 And modify the configuration at runtime:
@@ -37,8 +37,8 @@
 1. change the spark-defaults.conf
 ```
-spark.driver.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.7.jar
-spark.executor.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.7.jar
+spark.driver.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.8.jar
+spark.executor.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.8.jar
 spark.hadoop.fs.seaweedfs.impl=seaweed.hdfs.SeaweedFileSystem
 ```
@@ -81,8 +81,8 @@ spark.history.fs.cleaner.enabled=true
 spark.history.fs.logDirectory=seaweedfs://localhost:8888/spark2-history/
 spark.eventLog.dir=seaweedfs://localhost:8888/spark2-history/
 
-spark.driver.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.7.jar
-spark.executor.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.7.jar
+spark.driver.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.8.jar
+spark.executor.extraClassPath=/Users/chris/go/src/github.com/chrislusf/seaweedfs/other/java/hdfs2/target/seaweedfs-hadoop2-client-1.6.8.jar
 spark.hadoop.fs.seaweedfs.impl=seaweed.hdfs.SeaweedFileSystem
 spark.hadoop.fs.defaultFS=seaweedfs://localhost:8888
 ```
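
After applying the patch and swapping in the 1.6.8 jar, a quick way to confirm the upgrade took effect is to check the Hadoop classpath and list the filer through the HDFS shell. This is a minimal smoke-test sketch, not part of the patch itself, assuming HADOOP_HOME is set and a weed filer is running at localhost:8888 as in the examples above:

```
# only the 1.6.8 client jar should appear; a leftover 1.6.7 copy next to it
# would put two versions of the client on the classpath
ls ${HADOOP_HOME}/share/hadoop/common/lib/ | grep seaweedfs

# list the filer root through the HDFS shell, as in the test section above
cd ${HADOOP_HOME}
bin/hdfs dfs -Dfs.defaultFS=seaweedfs://localhost:8888 \
    -Dfs.seaweedfs.impl=seaweed.hdfs.SeaweedFileSystem \
    -ls /
```

If both versions show up, remove the old jar before restarting; the same check applies to the Hive, Presto, HBase, and Spark lib directories touched by this patch.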