diff --git a/Makefile b/Makefile
index cce9d586d..ce20a482b 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,8 @@ clean:
deps:
go get $(GO_FLAGS) -d $(SOURCE_DIR)
+ rm -rf /home/travis/gopath/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace
+ rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace
build: deps
go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml
index dc3f8f2ca..b5c7af29e 100644
--- a/other/java/client/pom.xml
+++ b/other/java/client/pom.xml
@@ -4,7 +4,7 @@
com.github.chrislusf
seaweedfs-client
- 1.1.5
+ 1.1.6
org.sonatype.oss
diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml
index bbec239d5..949616f1c 100644
--- a/other/java/hdfs2/dependency-reduced-pom.xml
+++ b/other/java/hdfs2/dependency-reduced-pom.xml
@@ -123,7 +123,7 @@
- 1.1.5
+ 1.1.6
2.9.2
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml
index 4ad14a7f7..e48bf87b0 100644
--- a/other/java/hdfs2/pom.xml
+++ b/other/java/hdfs2/pom.xml
@@ -5,7 +5,7 @@
4.0.0
- 1.1.5
+ 1.1.6
2.9.2
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index 7cf76e5e8..d471d8440 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -1,14 +1,7 @@
package seaweed.hdfs;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -87,6 +80,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
return new FSDataInputStream(inputStream);
} catch (Exception ex) {
+ LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
@@ -104,10 +98,36 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
return null;
}
}
+ /**
+ * {@inheritDoc}
+ * @throws FileNotFoundException if the parent directory is not present or
+ * is not a directory.
+ */
+ @Override
+ public FSDataOutputStream createNonRecursive(Path path,
+ FsPermission permission,
+ EnumSet<CreateFlag> flags,
+ int bufferSize,
+ short replication,
+ long blockSize,
+ Progressable progress) throws IOException {
+ Path parent = path.getParent();
+ if (parent != null) {
+ // expect this to raise an exception if there is no parent
+ if (!getFileStatus(parent).isDirectory()) {
+ throw new FileAlreadyExistsException("Not a directory: " + parent);
+ }
+ }
+ return create(path, permission,
+ flags.contains(CreateFlag.OVERWRITE), bufferSize,
+ replication, blockSize, progress);
+ }
+
@Override
public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {
@@ -118,6 +138,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
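
Note on the LOG.warn additions in this file: each one passes the caught exception as a final argument after the message arguments. Assuming LOG is an SLF4J logger (the {} placeholder style suggests it), SLF4J treats a trailing Throwable that has no matching placeholder as the exception to log, so the stack trace that used to be silently dropped before `return null` now appears in the warning while the method's return-null contract stays unchanged. A minimal, self-contained sketch of that behavior:

```java
// Minimal sketch, assuming an org.slf4j.Logger (not taken from this diff).
// Since SLF4J 1.6.0, a trailing Throwable with no matching {} placeholder is
// logged with its full stack trace.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TrailingThrowableExample {
    private static final Logger LOG = LoggerFactory.getLogger(TrailingThrowableExample.class);

    public static void main(String[] args) {
        try {
            throw new java.io.IOException("filer unreachable");
        } catch (Exception ex) {
            // two placeholders, three arguments: the last is treated as the Throwable
            LOG.warn("open path: {} bufferSize:{}", "/tmp/demo", 4096, ex);
        }
    }
}
```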
diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 2ddcd41e8..826b74560 100644
--- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -137,7 +137,7 @@ public class SeaweedFileSystemStore {
if (source.isRoot()) {
return;
}
- LOG.warn("rename lookupEntry source: {}", source);
+ LOG.warn("rename source: {} destination:{}", source, destination);
FilerProto.Entry entry = lookupEntry(source);
if (entry == null) {
LOG.warn("rename non-existing source: {}", source);
@@ -171,10 +171,10 @@ public class SeaweedFileSystemStore {
entry = FilerProto.Entry.newBuilder();
entry.mergeFrom(existingEntry);
entry.getAttributesBuilder().setMtime(now);
+ LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
+ writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
+ replication = existingEntry.getAttributes().getReplication();
}
- LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
- writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
- replication = existingEntry.getAttributes().getReplication();
}
if (entry == null) {
entry = FilerProto.Entry.newBuilder()
@@ -266,4 +266,5 @@ public class SeaweedFileSystemStore {
filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());
}
+
}
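
Note on the hunk at line 171: the merged-entry debug log and the writePosition/replication assignments move inside the block that only runs when an existing entry was found. As far as the hunk shows, the old placement dereferenced existingEntry one brace further out, where it can be null (no prior file at that path), which would throw a NullPointerException instead of falling through to the fresh-entry branch below. A reduced, hypothetical sketch of the guarded pattern; ExistingEntry, lookup(), and totalSize() are stand-ins for the FilerProto/SeaweedRead types, not the real API:

```java
// Hypothetical reduction of the createFile merge logic shown in the diff.
import java.util.List;

public class GuardedMergeExample {

    static class ExistingEntry {
        List<Integer> chunks;
        ExistingEntry(List<Integer> chunks) { this.chunks = chunks; }
    }

    // returns null when the file does not exist yet (stand-in for lookupEntry)
    static ExistingEntry lookup(String path) { return null; }

    // stand-in for SeaweedRead.totalSize
    static long totalSize(List<Integer> chunks) {
        return chunks.stream().mapToLong(Integer::longValue).sum();
    }

    public static void main(String[] args) {
        long writePosition = 0;
        ExistingEntry existingEntry = lookup("/buckets/demo/file.txt");
        if (existingEntry != null) {
            // safe: existingEntry is only dereferenced inside the null check,
            // which is what the moved lines in the diff accomplish
            writePosition = totalSize(existingEntry.chunks);
        }
        // before the change, the equivalent of totalSize(existingEntry.chunks) ran
        // here unconditionally and threw a NullPointerException when lookup()
        // returned null
        System.out.println("writePosition = " + writePosition);
    }
}
```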
diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml
index 71c74b0c8..667713e7c 100644
--- a/other/java/hdfs3/dependency-reduced-pom.xml
+++ b/other/java/hdfs3/dependency-reduced-pom.xml
@@ -123,7 +123,7 @@
- 1.1.5
+ 1.1.6
3.1.1
diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml
index f97b0ef5e..078e76757 100644
--- a/other/java/hdfs3/pom.xml
+++ b/other/java/hdfs3/pom.xml
@@ -5,7 +5,7 @@
4.0.0
- 1.1.5
+ 1.1.6
3.1.1
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
index 7cf76e5e8..c12da8261 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java
@@ -1,14 +1,7 @@
package seaweed.hdfs;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -29,7 +22,7 @@ import java.util.Map;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
+public class SeaweedFileSystem extends FileSystem {
public static final int FS_SEAWEED_DEFAULT_PORT = 8888;
public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host";
@@ -87,6 +80,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize);
return new FSDataInputStream(inputStream);
} catch (Exception ex) {
+ LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
@@ -104,10 +98,36 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement);
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex);
return null;
}
}
+ /**
+ * {@inheritDoc}
+ * @throws FileNotFoundException if the parent directory is not present or
+ * is not a directory.
+ */
+ @Override
+ public FSDataOutputStream createNonRecursive(Path path,
+ FsPermission permission,
+ EnumSet<CreateFlag> flags,
+ int bufferSize,
+ short replication,
+ long blockSize,
+ Progressable progress) throws IOException {
+ Path parent = path.getParent();
+ if (parent != null) {
+ // expect this to raise an exception if there is no parent
+ if (!getFileStatus(parent).isDirectory()) {
+ throw new FileAlreadyExistsException("Not a directory: " + parent);
+ }
+ }
+ return create(path, permission,
+ flags.contains(CreateFlag.OVERWRITE), bufferSize,
+ replication, blockSize, progress);
+ }
+
@Override
public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException {
@@ -118,6 +138,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem {
OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, "");
return new FSDataOutputStream(outputStream, statistics);
} catch (Exception ex) {
+ LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex);
return null;
}
}
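
Note on the createNonRecursive override added to both the hdfs2 and hdfs3 clients: it checks that the parent exists and is a directory, then delegates to create(), honoring only the OVERWRITE flag. Callers such as HBase's WAL writer typically use createNonRecursive so that creation fails fast rather than recreating missing parents. The sketch below shows how it would be invoked through the Hadoop FileSystem API; the fs.seaweedfs.impl key, the seaweedfs:// scheme, and the localhost:8888 filer address are assumptions for illustration, not taken from this diff, and the code only runs against a live filer:

```java
// Sketch of a caller exercising the new override via the generic FileSystem API.
import java.net.URI;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateNonRecursiveExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem"); // assumed key
        FileSystem fs = FileSystem.get(new URI("seaweedfs://localhost:8888/"), conf);

        // fails if /existing-dir is missing or is a file, instead of silently
        // creating the whole parent chain the way create() does
        try (FSDataOutputStream out = fs.createNonRecursive(
                new Path("/existing-dir/part-0000"),
                FsPermission.getFileDefault(),
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                4096,      // bufferSize
                (short) 1, // replication
                64L << 20, // blockSize
                null)) {   // progress
            out.writeBytes("hello seaweedfs\n");
        }
    }
}
```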
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
index 2ddcd41e8..826b74560 100644
--- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
@@ -137,7 +137,7 @@ public class SeaweedFileSystemStore {
if (source.isRoot()) {
return;
}
- LOG.warn("rename lookupEntry source: {}", source);
+ LOG.warn("rename source: {} destination:{}", source, destination);
FilerProto.Entry entry = lookupEntry(source);
if (entry == null) {
LOG.warn("rename non-existing source: {}", source);
@@ -171,10 +171,10 @@ public class SeaweedFileSystemStore {
entry = FilerProto.Entry.newBuilder();
entry.mergeFrom(existingEntry);
entry.getAttributesBuilder().setMtime(now);
+ LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
+ writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
+ replication = existingEntry.getAttributes().getReplication();
}
- LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry);
- writePosition = SeaweedRead.totalSize(existingEntry.getChunksList());
- replication = existingEntry.getAttributes().getReplication();
}
if (entry == null) {
entry = FilerProto.Entry.newBuilder()
@@ -266,4 +266,5 @@ public class SeaweedFileSystemStore {
filerClient.updateEntry(getParentDirectory(path), entryBuilder.build());
}
+
}
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index b5d2c8163..556e53fd3 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -73,11 +73,11 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer
return err
}
- println("found", len(entries), "entries under", currentDirPath)
+ // println("found", len(entries), "entries under", currentDirPath)
for _, item := range entries {
lastFileName = item.Name()
- println("processing", lastFileName)
+ // println("processing", lastFileName)
err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events)
if err != nil {
return err
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 36e82a480..830a85057 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -5,5 +5,5 @@ import (
)
var (
- VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 42)
+ VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 43)
)