mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00
commit 437d18705d
@@ -1,4 +1,4 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-version: 1.85
+version: 1.86
@@ -4,7 +4,7 @@ global:
   registry: ""
   repository: ""
   imageName: chrislusf/seaweedfs
-  imageTag: "1.85"
+  imageTag: "1.86"
   imagePullPolicy: IfNotPresent
   imagePullSecrets: imagepullsecret
   restartPolicy: Always
@@ -5,7 +5,7 @@
 
     <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
-    <version>1.3.9</version>
+    <version>1.4.1</version>
 
    <parent>
        <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@
 
     <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
-    <version>1.3.9</version>
+    <version>1.4.1</version>
 
    <parent>
        <groupId>org.sonatype.oss</groupId>
@@ -5,7 +5,7 @@
 
     <groupId>com.github.chrislusf</groupId>
    <artifactId>seaweedfs-client</artifactId>
-    <version>1.3.9</version>
+    <version>1.4.1</version>
 
    <parent>
        <groupId>org.sonatype.oss</groupId>
@@ -18,6 +18,7 @@ public class ByteBufferPool {
         if (bufferSize < MIN_BUFFER_SIZE) {
             bufferSize = MIN_BUFFER_SIZE;
         }
+        LOG.debug("requested new buffer {}", bufferSize);
         if (bufferList.isEmpty()) {
             return ByteBuffer.allocate(bufferSize);
         }
@@ -33,6 +34,7 @@ public class ByteBufferPool {
     }
 
     public static synchronized void release(ByteBuffer obj) {
+        obj.clear();
         bufferList.add(0, obj);
     }
 
@@ -6,6 +6,8 @@ import org.apache.http.client.methods.HttpPost;
 import org.apache.http.entity.mime.HttpMultipartMode;
 import org.apache.http.entity.mime.MultipartEntityBuilder;
 import org.apache.http.util.EntityUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
@@ -15,6 +17,8 @@ import java.util.List;
 
 public class SeaweedWrite {
 
+    private static final Logger LOG = LoggerFactory.getLogger(SeaweedWrite.class);
+
     private static final SecureRandom random = new SecureRandom();
 
     public static void writeData(FilerProto.Entry.Builder entry,
@@ -23,8 +27,10 @@ public class SeaweedWrite {
                                  final long offset,
                                  final byte[] bytes,
                                  final long bytesOffset, final long bytesLength) throws IOException {
+        FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
+                replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength);
         synchronized (entry) {
-            entry.addChunks(writeChunk(replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength));
+            entry.addChunks(chunkBuilder);
         }
     }
 
@@ -58,6 +64,8 @@ public class SeaweedWrite {
         // cache fileId ~ bytes
         SeaweedRead.chunkCache.setChunk(fileId, bytes);
 
+        LOG.debug("write file chunk {} size {}", targetUrl, bytesLength);
+
         return FilerProto.FileChunk.newBuilder()
                 .setFileId(fileId)
                 .setOffset(offset)
@@ -71,10 +79,8 @@ public class SeaweedWrite {
                                  final String parentDirectory,
                                  final FilerProto.Entry.Builder entry) throws IOException {
 
-        int chunkSize = entry.getChunksCount();
-        List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList());
-
         synchronized (entry) {
+            List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList());
             entry.clearChunks();
             entry.addAllChunks(chunks);
             filerGrpcClient.getBlockingStub().createEntry(
@@ -127,7 +127,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.3.9</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.1</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
 
     <properties>
-        <seaweedfs.client.version>1.3.9</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.1</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>
 
@@ -187,7 +187,6 @@ public class SeaweedOutputStream extends OutputStream {
             // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
             SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit());
             // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
-            bufferToWrite.clear();
             ByteBufferPool.release(bufferToWrite);
             return null;
         });
@@ -127,7 +127,7 @@
         </snapshotRepository>
     </distributionManagement>
     <properties>
-        <seaweedfs.client.version>1.3.9</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.1</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 </project>
@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
 
     <properties>
-        <seaweedfs.client.version>1.3.9</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.1</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>
 
@@ -234,7 +234,6 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
             // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
             SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit());
             // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
-            bufferToWrite.clear();
             ByteBufferPool.release(bufferToWrite);
             return null;
         });
unmaintained/diff_volume_servers/diff_volume_servers.go (new file, 194 lines)
package main

import (
    "bytes"
    "context"
    "errors"
    "flag"
    "fmt"
    "io"
    "math"
    "os"
    "strings"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/storage/idx"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
    "github.com/chrislusf/seaweedfs/weed/util"
    "google.golang.org/grpc"
)

var (
    serversStr       = flag.String("volumeServers", "", "comma-delimited list of volume servers to diff the volume against")
    volumeId         = flag.Int("volumeId", -1, "a volume id to diff from servers")
    volumeCollection = flag.String("collection", "", "the volume collection name")
    grpcDialOption   grpc.DialOption
)

/*
    Diff the volume's files across multiple volume servers.
    diff_volume_servers -volumeServers 127.0.0.1:8080,127.0.0.1:8081 -volumeId 5

    Example Output:
    reference 127.0.0.1:8081
    fileId volumeServer message
    5,01617c3f61 127.0.0.1:8080 wrongSize
*/
func main() {
    flag.Parse()

    util.LoadConfiguration("security", false)
    grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")

    vid := uint32(*volumeId)
    servers := strings.Split(*serversStr, ",")
    if len(servers) < 2 {
        glog.Fatalf("You must specify more than 1 server\n")
    }
    var referenceServer string
    var maxOffset int64
    allFiles := map[string]map[types.NeedleId]needleState{}
    for _, addr := range servers {
        files, offset, err := getVolumeFiles(vid, addr)
        if err != nil {
            glog.Fatalf("Failed to copy idx from volume server %s\n", err)
        }
        allFiles[addr] = files
        if offset > maxOffset {
            referenceServer = addr
        }
    }

    same := true
    fmt.Println("reference", referenceServer)
    fmt.Println("fileId volumeServer message")
    for nid, n := range allFiles[referenceServer] {
        for addr, files := range allFiles {
            if addr == referenceServer {
                continue
            }
            var diffMsg string
            n2, ok := files[nid]
            if !ok {
                if n.state == stateDeleted {
                    continue
                }
                diffMsg = "missing"
            } else if n2.state != n.state {
                switch n.state {
                case stateDeleted:
                    diffMsg = "notDeleted"
                case statePresent:
                    diffMsg = "deleted"
                }
            } else if n2.size != n.size {
                diffMsg = "wrongSize"
            } else {
                continue
            }
            same = false

            // fetch the needle details
            var id string
            var err error
            if n.state == statePresent {
                id, err = getNeedleFileId(vid, nid, referenceServer)
            } else {
                id, err = getNeedleFileId(vid, nid, addr)
            }
            if err != nil {
                glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
            }
            fmt.Println(id, addr, diffMsg)
        }
    }
    if !same {
        os.Exit(1)
    }
}

const (
    stateDeleted uint8 = 1
    statePresent uint8 = 2
)

type needleState struct {
    state uint8
    size  uint32
}

func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) {
    var idxFile *bytes.Reader
    err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
        copyFileClient, err := vs.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
            VolumeId:           v,
            Ext:                ".idx",
            CompactionRevision: math.MaxUint32,
            StopOffset:         math.MaxInt64,
            Collection:         *volumeCollection,
        })
        if err != nil {
            return err
        }
        var buf bytes.Buffer
        for {
            resp, err := copyFileClient.Recv()
            if errors.Is(err, io.EOF) {
                break
            }
            if err != nil {
                return err
            }
            buf.Write(resp.FileContent)
        }
        idxFile = bytes.NewReader(buf.Bytes())
        return nil
    })
    if err != nil {
        return nil, 0, err
    }

    var maxOffset int64
    files := map[types.NeedleId]needleState{}
    err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
        if offset.IsZero() || size == types.TombstoneFileSize {
            files[key] = needleState{
                state: stateDeleted,
                size:  size,
            }
        } else {
            files[key] = needleState{
                state: statePresent,
                size:  size,
            }
        }
        if actual := offset.ToAcutalOffset(); actual > maxOffset {
            maxOffset = actual
        }
        return nil
    })
    if err != nil {
        return nil, 0, err
    }
    return files, maxOffset, nil
}

func getNeedleFileId(v uint32, nid types.NeedleId, addr string) (string, error) {
    var id string
    err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
        resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
            VolumeId: v,
            NeedleId: uint64(nid),
        })
        if err != nil {
            return err
        }
        id = needle.NewFileId(needle.VolumeId(v), resp.NeedleId, resp.Cookie).String()
        return nil
    })
    return id, err
}
@@ -86,6 +86,8 @@ service VolumeServer {
     rpc Query (QueryRequest) returns (stream QueriedStripe) {
     }
 
+    rpc VolumeNeedleStatus (VolumeNeedleStatusRequest) returns (VolumeNeedleStatusResponse) {
+    }
 }
 
 //////////////////////////////////////////////////
@@ -463,3 +465,16 @@ message QueryRequest {
 message QueriedStripe {
     bytes records = 1;
 }
+
+message VolumeNeedleStatusRequest {
+    uint32 volume_id = 1;
+    uint64 needle_id = 2;
+}
+message VolumeNeedleStatusResponse {
+    uint64 needle_id = 1;
+    uint32 cookie = 2;
+    uint32 size = 3;
+    uint64 last_modified = 4;
+    uint32 crc = 5;
+    string ttl = 6;
+}
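For orientation, here is a minimal client-side sketch of the new RPC. It mirrors getNeedleFileId from the diff_volume_servers tool above; the server address, volume id, and needle id are placeholder values, and the TLS dial option is loaded the same way the tool loads it.

package main

import (
    "context"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
    // Load grpc client TLS the same way the diff tool does.
    util.LoadConfiguration("security", false)
    grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")

    addr := "127.0.0.1:8080"                // placeholder volume server address
    volumeId, needleId := uint32(5), uint64(1) // placeholder ids

    err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error {
        resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{
            VolumeId: volumeId,
            NeedleId: needleId,
        })
        if err != nil {
            return err
        }
        // NeedleId and Cookie from the response are enough to rebuild the public file id.
        fileId := needle.NewFileId(needle.VolumeId(volumeId), resp.NeedleId, resp.Cookie).String()
        fmt.Println(fileId, "size", resp.Size, "crc", resp.Crc, "ttl", resp.Ttl)
        return nil
    })
    if err != nil {
        fmt.Println("VolumeNeedleStatus failed:", err)
    }
}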
(File diff suppressed because it is too large.)
@@ -10,6 +10,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/stats"
     "github.com/chrislusf/seaweedfs/weed/storage/needle"
     "github.com/chrislusf/seaweedfs/weed/storage/super_block"
+    "github.com/chrislusf/seaweedfs/weed/storage/types"
 )
 
 func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) {
@@ -166,3 +167,44 @@ func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_serv
     return resp, nil
 
 }
+
+func (vs *VolumeServer) VolumeNeedleStatus(ctx context.Context, req *volume_server_pb.VolumeNeedleStatusRequest) (*volume_server_pb.VolumeNeedleStatusResponse, error) {
+
+    resp := &volume_server_pb.VolumeNeedleStatusResponse{}
+
+    volumeId := needle.VolumeId(req.VolumeId)
+
+    n := &needle.Needle{
+        Id: types.NeedleId(req.NeedleId),
+    }
+
+    var count int
+    var err error
+    hasVolume := vs.store.HasVolume(volumeId)
+    if !hasVolume {
+        _, hasEcVolume := vs.store.FindEcVolume(volumeId)
+        if !hasEcVolume {
+            return nil, fmt.Errorf("volume not found %d", req.VolumeId)
+        }
+        count, err = vs.store.ReadEcShardNeedle(volumeId, n)
+    } else {
+        count, err = vs.store.ReadVolumeNeedle(volumeId, n)
+    }
+    if err != nil {
+        return nil, err
+    }
+    if count < 0 {
+        return nil, fmt.Errorf("needle not found %d", n.Id)
+    }
+
+    resp.NeedleId = uint64(n.Id)
+    resp.Cookie = uint32(n.Cookie)
+    resp.Size = n.Size
+    resp.LastModified = n.LastModified
+    resp.Crc = n.Checksum.Value()
+    if n.HasTtl() {
+        resp.Ttl = n.Ttl.String()
+    }
+    return resp, nil
+
+}
@@ -2,7 +2,6 @@ package idx
 
 import (
     "io"
-    "os"
 
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -11,11 +10,11 @@ import (
 
 // walks through the index file, calls fn function with each key, offset, size
 // stops with the error returned by the fn function
-func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
+func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offset, size uint32) error) error {
     var readerOffset int64
     bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead)
     count, e := r.ReadAt(bytes, readerOffset)
-    glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+    glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
     readerOffset += int64(count)
     var (
         key types.NeedleId
@@ -35,7 +34,7 @@ func WalkIndexFile(r *os.File, fn func(key types.NeedleId, offset types.Offset,
             return nil
         }
         count, e = r.ReadAt(bytes, readerOffset)
-        glog.V(3).Infoln("file", r.Name(), "readerOffset", readerOffset, "count", count, "e", e)
+        glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e)
         readerOffset += int64(count)
     }
     return e
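The WalkIndexFile change above widens the reader parameter from *os.File to io.ReaderAt, which is what lets the new diff_volume_servers tool walk an .idx copy it holds in memory as a bytes.Reader. Below is a minimal sketch of the same API against an on-disk index file; the path is a placeholder, and the live/deleted classification copies the one used in getVolumeFiles above.

package main

import (
    "fmt"
    "os"

    "github.com/chrislusf/seaweedfs/weed/storage/idx"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
)

func main() {
    // *os.File implements io.ReaderAt, and so does the bytes.Reader that
    // diff_volume_servers builds from an .idx fetched over gRPC.
    f, err := os.Open("/data/my_volume.idx") // placeholder path
    if err != nil {
        fmt.Println("open:", err)
        return
    }
    defer f.Close()

    // Count live vs. deleted needles using the same tombstone check as the diff tool.
    var live, deleted int
    err = idx.WalkIndexFile(f, func(key types.NeedleId, offset types.Offset, size uint32) error {
        if offset.IsZero() || size == types.TombstoneFileSize {
            deleted++
        } else {
            live++
        }
        return nil
    })
    if err != nil {
        fmt.Println("walk:", err)
        return
    }
    fmt.Println("live", live, "deleted", deleted)
}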
@@ -5,7 +5,7 @@ import (
 )
 
 var (
-    VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 85)
+    VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 86)
     COMMIT = ""
 )
 