Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00

Commit 6999325d36
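In summary (as read from the diff below): the "latest" container build now triggers on version tags instead of every master push and tags the image latest instead of dev; the tidwall gjson/match/pretty modules are upgraded; the VacuumVolumeCompact and VolumeCopy volume-server RPCs become server-streaming so long-running compactions and volume copies can report progress (a new processed_bytes response field, fed by a ProgressFunc callback threaded through Compact2 and doCopyFile); the shell commands print that progress, ec.decode gains a -force flag; and the version is bumped from 2.74 to 2.75.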
.github/workflows/container_latest.yml (vendored)

@@ -2,7 +2,8 @@ name: "docker: build latest container"
 
 on:
   push:
-    branches: [ master ]
+    tags:
+      - '*'
   workflow_dispatch: []
 
 jobs:
@@ -23,7 +24,7 @@ jobs:
           chrislusf/seaweedfs
           ghcr.io/chrislusf/seaweedfs
         tags: |
-          type=raw,value=dev
+          type=raw,value=latest
         labels: |
           org.opencontainers.image.title=seaweedfs
           org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast!
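Net effect: the latest image is no longer rebuilt on every master commit; it is built only when a tag is pushed, and it is tagged latest rather than dev.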
go.mod

@@ -112,9 +112,9 @@ require (
 	github.com/stretchr/testify v1.7.0
 	github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203
 	github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
-	github.com/tidwall/gjson v1.8.1
-	github.com/tidwall/match v1.0.3
-	github.com/tidwall/pretty v1.1.0 // indirect
+	github.com/tidwall/gjson v1.10.2
+	github.com/tidwall/match v1.1.1
+	github.com/tidwall/pretty v1.2.0 // indirect
 	github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365
 	github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43
 	github.com/valyala/bytebufferpool v1.0.0
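The three tidwall modules move together: gjson depends on match and pretty, so upgrading gjson to v1.10.2 pulls in the matching match v1.1.1 and pretty v1.2.0 (the latter still an indirect dependency).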
go.sum

@@ -778,11 +778,17 @@ github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7
 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
 github.com/tidwall/gjson v1.8.1 h1:8j5EE9Hrh3l9Od1OIEDAb7IpezNA20UdRngNAj5N0WU=
 github.com/tidwall/gjson v1.8.1/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk=
+github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo=
+github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
 github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
 github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8=
 github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 h1:6iRwZdrFUzbcVYZwa8dXTIILGIxmmhjyUPJEcwzPGaU=
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.74"
-version: "2.74"
+appVersion: "2.75"
+version: "2.75"
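(This is the Helm chart metadata, judging by the fields; the file name was not captured. Its appVersion/version track the release and match the VERSION_NUMBER bump at the end of this diff.)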
@@ -120,7 +120,7 @@ func runBackup(cmd *Command, args []string) bool {
 	}
 
 	if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
-		if err = v.Compact2(30*1024*1024*1024, 0); err != nil {
+		if err = v.Compact2(30*1024*1024*1024, 0, nil); err != nil {
 			fmt.Printf("Compact Volume before synchronizing %v\n", err)
 			return true
 		}
@@ -50,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool {
 			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	} else {
-		if err = v.Compact2(preallocate, 0); err != nil {
+		if err = v.Compact2(preallocate, 0, nil); err != nil {
 			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	}
@@ -14,7 +14,7 @@ service VolumeServer {
 
     rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) {
     }
-    rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) {
+    rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (stream VacuumVolumeCompactResponse) {
     }
     rpc VacuumVolumeCommit (VacuumVolumeCommitRequest) returns (VacuumVolumeCommitResponse) {
     }
@@ -47,7 +47,7 @@ service VolumeServer {
     }
 
     // copy the .idx .dat files, and mount this volume
-    rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) {
+    rpc VolumeCopy (VolumeCopyRequest) returns (stream VolumeCopyResponse) {
     }
     rpc ReadVolumeFileStatus (ReadVolumeFileStatusRequest) returns (ReadVolumeFileStatusResponse) {
     }
@@ -142,6 +142,7 @@ message VacuumVolumeCompactRequest {
     int64 preallocate = 2;
 }
 message VacuumVolumeCompactResponse {
+    int64 processed_bytes = 1;
 }
 
 message VacuumVolumeCommitRequest {
@@ -251,6 +252,7 @@ message VolumeCopyRequest {
 }
 message VolumeCopyResponse {
     uint64 last_append_at_ns = 1;
+    int64 processed_bytes = 2;
 }
 
 message CopyFileRequest {
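Marking a response type as stream switches these two methods from unary to server-streaming, so the server can push any number of progress messages before closing the stream. For orientation, the regenerated Go interfaces look roughly like this (a sketch assuming standard protoc-gen-go gRPC output; the names follow from the service and method names above):

// Server side: the handler receives the request plus a stream,
// pushes responses via Send, and signals completion by returning.
type VolumeServer_VacuumVolumeCompactServer interface {
	Send(*VacuumVolumeCompactResponse) error
	grpc.ServerStream // from "google.golang.org/grpc"
}

// Client side: callers get a stream and call Recv until io.EOF.
type VolumeServer_VacuumVolumeCompactClient interface {
	Recv() (*VacuumVolumeCompactResponse, error)
	grpc.ClientStream
}

This is why, in the Go diffs below, the handlers lose their ctx parameter and response return value and gain a stream parameter, while every caller switches from a single response to a Recv loop.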
File diff suppressed because it is too large
@@ -22,7 +22,7 @@ import (
 const BufferSizeLimit = 1024 * 1024 * 2
 
 // VolumeCopy copy the .idx .dat .vif files, and mount the volume
-func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) {
+func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stream volume_server_pb.VolumeServer_VolumeCopyServer) error {
 
 	v := vs.store.GetVolume(needle.VolumeId(req.VolumeId))
 	if v != nil {
@@ -31,7 +31,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 
 		err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId))
 		if err != nil {
-			return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err)
+			return fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err)
 		}
 
 		glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId)
@@ -79,22 +79,38 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 		}()
 
 		// println("source:", volFileInfoResp.String())
+		copyResponse := &volume_server_pb.VolumeCopyResponse{}
+		reportInterval := int64(1024*1024*128)
+		nextReportTarget := reportInterval
 		var modifiedTsNs int64
-		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".dat", false, true); err != nil {
+		var sendErr error
+		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".dat", false, true, func(processed int64) bool {
+			if processed > nextReportTarget {
+				copyResponse.ProcessedBytes = processed
+				if sendErr = stream.Send(copyResponse); sendErr != nil {
+					return false
+				}
+				nextReportTarget = processed + reportInterval
+			}
+			return true
+		}); err != nil {
 			return err
 		}
+		if sendErr != nil {
+			return sendErr
+		}
 		if modifiedTsNs > 0 {
 			os.Chtimes(dataBaseFileName+".dat", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
 		}
 
-		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, indexBaseFileName, ".idx", false, false); err != nil {
+		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, indexBaseFileName, ".idx", false, false, nil); err != nil {
 			return err
 		}
 		if modifiedTsNs > 0 {
 			os.Chtimes(indexBaseFileName+".idx", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
 		}
 
-		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".vif", false, true); err != nil {
+		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".vif", false, true, nil); err != nil {
 			return err
 		}
 		if modifiedTsNs > 0 {
@@ -107,10 +123,10 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 	})
 
 	if err != nil {
-		return nil, err
+		return err
 	}
 	if dataBaseFileName == "" {
-		return nil, fmt.Errorf("not found volume %d file", req.VolumeId)
+		return fmt.Errorf("not found volume %d file", req.VolumeId)
 	}
 
 	idxFileName = indexBaseFileName + ".idx"
@@ -125,21 +141,25 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 	}()
 
 	if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16
-		return nil, err
+		return err
 	}
 
 	// mount the volume
 	err = vs.store.MountVolume(needle.VolumeId(req.VolumeId))
 	if err != nil {
-		return nil, fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err)
+		return fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err)
 	}
 
-	return &volume_server_pb.VolumeCopyResponse{
+	if err = stream.Send(&volume_server_pb.VolumeCopyResponse{
 		LastAppendAtNs: volFileInfoResp.DatFileTimestampSeconds * uint64(time.Second),
-	}, err
+	}); err != nil {
+		glog.Errorf("send response: %v", err)
+	}
+
+	return err
 }
 
-func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) (modifiedTsNs int64, err error) {
+func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool, progressFn storage.ProgressFunc) (modifiedTsNs int64, err error) {
 
 	copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
 		VolumeId: vid,
@@ -154,7 +174,7 @@ func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, i
 		return modifiedTsNs, fmt.Errorf("failed to start copying volume %d %s file: %v", vid, ext, err)
 	}
 
-	modifiedTsNs, err = writeToFile(copyFileClient, baseFileName+ext, util.NewWriteThrottler(vs.compactionBytePerSecond), isAppend)
+	modifiedTsNs, err = writeToFile(copyFileClient, baseFileName+ext, util.NewWriteThrottler(vs.compactionBytePerSecond), isAppend, progressFn)
 	if err != nil {
 		return modifiedTsNs, fmt.Errorf("failed to copy %s file: %v", baseFileName+ext, err)
 	}
@@ -188,7 +208,7 @@ func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse
 	return nil
 }
 
-func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool) (modifiedTsNs int64, err error) {
+func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool, progressFn storage.ProgressFunc) (modifiedTsNs int64, err error) {
 	glog.V(4).Infof("writing to %s", fileName)
 	flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
 	if isAppend {
@@ -200,6 +220,7 @@ func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName s
 	}
 	defer dst.Close()
 
+	var progressedBytes int64
 	for {
 		resp, receiveErr := client.Recv()
 		if receiveErr == io.EOF {
@@ -212,6 +233,12 @@ func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName s
 			return modifiedTsNs, fmt.Errorf("receiving %s: %v", fileName, receiveErr)
 		}
 		dst.Write(resp.FileContent)
+		progressedBytes += int64(len(resp.FileContent))
+		if progressFn != nil {
+			if !progressFn(progressedBytes) {
+				return modifiedTsNs, fmt.Errorf("interrupted copy operation")
+			}
+		}
 		wt.MaybeSlowdown(int64(len(resp.FileContent)))
 	}
 	return modifiedTsNs, nil
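Both the copy handler above and the vacuum handler below use the same throttling idea: the per-chunk callback only emits a progress message after another reportInterval (128 MiB) has been processed, and returning false aborts the transfer. Distilled into a standalone helper (an illustrative sketch, not code from this commit; progressReporter is a hypothetical name):

// progressReporter adapts a send function (typically a closure around
// stream.Send) into a callback that reports at most once per `interval`
// bytes of progress. Returning false tells the copy loop to stop early.
func progressReporter(interval int64, send func(processed int64) error) func(processed int64) bool {
	nextTarget := interval
	return func(processed int64) bool {
		if processed > nextTarget {
			if err := send(processed); err != nil {
				return false // receiver is gone; abort the transfer
			}
			nextTarget = processed + interval
		}
		return true
	}
}

Note the sendErr bookkeeping in the real code: a failed Send aborts the copy via the false return, and the handler surfaces sendErr to gRPC after doCopyFile returns.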
@@ -130,7 +130,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
 
 		// copy ec data slices
 		for _, shardId := range req.ShardIds {
-			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil {
+			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, erasure_coding.ToExt(int(shardId)), false, false, nil); err != nil {
 				return err
 			}
 		}
@@ -138,7 +138,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
 		if req.CopyEcxFile {
 
 			// copy ecx file
-			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecx", false, false); err != nil {
+			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecx", false, false, nil); err != nil {
 				return err
 			}
 			return nil
@@ -146,14 +146,14 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv
 
 		if req.CopyEcjFile {
 			// copy ecj file
-			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecj", true, true); err != nil {
+			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, indexBaseFileName, ".ecj", true, true, nil); err != nil {
 				return err
 			}
 		}
 
 		if req.CopyVifFile {
 			// copy vif file
-			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, ".vif", false, true); err != nil {
+			if _, err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, dataBaseFileName, ".vif", false, true, nil); err != nil {
 				return err
 			}
 		}
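The erasure-coding shard copy paths all pass nil for the new progressFn argument, so EC shard copies do not stream progress in this commit; only whole-volume copies and compactions do.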
@@ -24,19 +24,35 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve
 
 }
 
-func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_server_pb.VacuumVolumeCompactRequest) (*volume_server_pb.VacuumVolumeCompactResponse, error) {
+func (vs *VolumeServer) VacuumVolumeCompact(req *volume_server_pb.VacuumVolumeCompactRequest, stream volume_server_pb.VolumeServer_VacuumVolumeCompactServer) error {
 
 	resp := &volume_server_pb.VacuumVolumeCompactResponse{}
+	reportInterval := int64(1024*1024*128)
+	nextReportTarget := reportInterval
 
-	err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond)
+	var sendErr error
+	err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond, func(processed int64) bool {
+		if processed > nextReportTarget {
+			resp.ProcessedBytes = processed
+			if sendErr = stream.Send(resp); sendErr != nil {
+				return false
+			}
+			nextReportTarget = processed + reportInterval
+		}
+		return true
+	})
 
 	if err != nil {
 		glog.Errorf("compact volume %d: %v", req.VolumeId, err)
-	} else {
-		glog.V(1).Infof("compact volume %d", req.VolumeId)
+		return err
+	}
+	if sendErr != nil {
+		glog.Errorf("compact volume %d report progress: %v", req.VolumeId, sendErr)
+		return sendErr
 	}
 
-	return resp, err
+	glog.V(1).Infof("compact volume %d", req.VolumeId)
+	return nil
 
 }
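Beyond visibility, streaming has a practical side effect here: a compaction that runs for a long time no longer looks like a single silent unary call, since periodic progress messages show the connection is alive, and a client that disappears now cancels the compaction through the callback's false return rather than letting it run to completion unobserved.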
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"io"
@@ -40,6 +41,7 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 	encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 	volumeId := encodeCommand.Int("volumeId", 0, "the volume id")
 	collection := encodeCommand.String("collection", "", "the collection name")
+	forceChanges := encodeCommand.Bool("force", false, "force the encoding even if the cluster has less than recommended 4 nodes")
 	if err = encodeCommand.Parse(args); err != nil {
 		return nil
 	}
@@ -56,6 +58,17 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 		return err
 	}
 
+	if !*forceChanges {
+		var nodeCount int
+		eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
+			nodeCount++
+		})
+		if nodeCount < erasure_coding.ParityShardsCount {
+			glog.V(0).Infof("skip erasure coding with %d nodes, less than recommended %d nodes", nodeCount, erasure_coding.ParityShardsCount)
+			return nil
+		}
+	}
+
 	// volumeId is provided
 	if vid != 0 {
 		return doEcDecode(commandEnv, topologyInfo, *collection, vid)
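The new guard counts data nodes in the topology and refuses to proceed when there are fewer than erasure_coding.ParityShardsCount; the -force flag skips the check. A hypothetical weed shell invocation (the flag name comes from the diff, the other arguments are illustrative):

> ec.decode -volumeId=1 -force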
@@ -53,6 +53,6 @@ func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.
 		return fmt.Errorf("source and target volume servers are the same!")
 	}
 
-	_, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, "")
+	_, err = copyVolume(commandEnv.option.GrpcDialOption, writer, volumeId, sourceVolumeServer, targetVolumeServer, "")
 	return
 }
@@ -255,13 +255,27 @@ func (c *commandVolumeFixReplication) fixOneUnderReplicatedVolume(commandEnv *Co
 	}
 
 	err := operation.WithVolumeServerClient(pb.NewServerAddressFromDataNode(dst.dataNode), commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
-		_, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
+		stream, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
 			VolumeId:       replica.info.Id,
 			SourceDataNode: string(pb.NewServerAddressFromDataNode(replica.location.dataNode)),
 		})
 		if replicateErr != nil {
 			return fmt.Errorf("copying from %s => %s : %v", replica.location.dataNode.Id, dst.dataNode.Id, replicateErr)
 		}
+		for {
+			resp, recvErr := stream.Recv()
+			if recvErr != nil {
+				if recvErr == io.EOF {
+					break
+				} else {
+					return recvErr
+				}
+			}
+			if resp.ProcessedBytes > 0 {
+				fmt.Fprintf(writer, "volume %d processed %d bytes\n", replica.info.Id, resp.ProcessedBytes)
+			}
+		}
 
 		return nil
 	})
@@ -78,7 +78,7 @@ func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.
 func LiveMoveVolume(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer pb.ServerAddress, idleTimeout time.Duration, diskType string, skipTailError bool) (err error) {
 
 	log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer)
-	lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, diskType)
+	lastAppendAtNs, err := copyVolume(grpcDialOption, writer, volumeId, sourceVolumeServer, targetVolumeServer, diskType)
 	if err != nil {
 		return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err)
 	}
@@ -101,7 +101,7 @@ func LiveMoveVolume(grpcDialOption grpc.DialOption, writer io.Writer, volumeId n
 	return nil
 }
 
-func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer pb.ServerAddress, diskType string) (lastAppendAtNs uint64, err error) {
+func copyVolume(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer pb.ServerAddress, diskType string) (lastAppendAtNs uint64, err error) {
 
 	// check to see if the volume is already read-only and if its not then we need
 	// to mark it as read-only and then before we return we need to undo what we
@@ -141,15 +141,31 @@ func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, source
 	}
 
 	err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
-		resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
+		stream, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{
 			VolumeId:       uint32(volumeId),
 			SourceDataNode: string(sourceVolumeServer),
 			DiskType:       diskType,
 		})
-		if replicateErr == nil {
-			lastAppendAtNs = resp.LastAppendAtNs
-		}
+		if replicateErr != nil {
 			return replicateErr
+		}
+		for {
+			resp, recvErr := stream.Recv()
+			if recvErr != nil {
+				if recvErr == io.EOF {
+					break
+				} else {
+					return recvErr
+				}
+			}
+			if resp.LastAppendAtNs != 0 {
+				lastAppendAtNs = resp.LastAppendAtNs
+			} else {
+				fmt.Fprintf(writer, "volume %d processed %d bytes\n", volumeId, resp.ProcessedBytes)
+			}
+		}
+
+		return nil
 	})
 
 	return
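Note the implicit protocol in the response stream: intermediate messages carry only ProcessedBytes, while the final message (sent after the volume is mounted) carries LastAppendAtNs, so copyVolume distinguishes the two with resp.LastAppendAtNs != 0.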
@@ -15,13 +15,13 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
 	}
 	return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)
 }
-func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error {
+func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64, progressFn ProgressFunc) error {
 	if v := s.findVolume(vid); v != nil {
 		s := stats.NewDiskStatus(v.dir)
 		if int64(s.Free) < preallocate {
 			return fmt.Errorf("free space: %d bytes, not enough for %d bytes", s.Free, preallocate)
 		}
-		return v.Compact2(preallocate, compactionBytePerSecond)
+		return v.Compact2(preallocate, compactionBytePerSecond, progressFn)
 	}
 	return fmt.Errorf("volume id %d is not found during compact", vid)
 }
@@ -17,6 +17,8 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
+type ProgressFunc func(processed int64) bool
+
 func (v *Volume) garbageLevel() float64 {
 	if v.ContentSize() == 0 {
 		return 0
@@ -62,7 +64,7 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error
 }
 
 // compact a volume based on deletions in .idx files
-func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) error {
+func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64, progressFn ProgressFunc) error {
 
 	if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory
 		return nil
@@ -83,7 +85,7 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) erro
 	if err := v.nm.Sync(); err != nil {
 		glog.V(0).Infof("compact2 fail to sync volume idx %d: %v", v.Id, err)
 	}
-	return copyDataBasedOnIndexFile(v.FileName(".dat"), v.FileName(".idx"), v.FileName(".cpd"), v.FileName(".cpx"), v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond)
+	return copyDataBasedOnIndexFile(v.FileName(".dat"), v.FileName(".idx"), v.FileName(".cpd"), v.FileName(".cpx"), v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond, progressFn)
 }
 
 func (v *Volume) CommitCompact() error {
@@ -382,7 +384,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
 	return
 }
 
-func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64, compactionBytePerSecond int64) (err error) {
+func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate, compactionBytePerSecond int64, progressFn ProgressFunc) (err error) {
 	var (
 		srcDatBackend, dstDatBackend backend.BackendStorageFile
 		dataFile                     *os.File
@@ -421,6 +423,12 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
 			return nil
 		}
 
+		if progressFn != nil {
+			if !progressFn(offset.ToActualOffset()) {
+				return fmt.Errorf("interrupted")
+			}
+		}
+
 		n := new(needle.Needle)
 		err := n.ReadData(srcDatBackend, offset.ToActualOffset(), size, version)
 		if err != nil {
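With the new trailing parameter, callers opt into progress reporting by passing a callback (nil disables it, as the weed backup/compact commands above do). A minimal usage sketch, assuming v is an already-loaded *Volume:

// Compact with no preallocation and no rate limit, printing progress.
// The callback receives the offset of the needle currently being copied
// from the old .dat file, so it tracks scan position rather than bytes
// written; returning false aborts the compaction with an "interrupted" error.
err := v.Compact2(0, 0, func(processed int64) bool {
	fmt.Printf("compacted through offset %d\n", processed)
	return true
})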
@@ -83,7 +83,7 @@ func TestCompaction(t *testing.T) {
 	}
 
 	startTime := time.Now()
-	v.Compact2(0, 0)
+	v.Compact2(0, 0, nil)
 	speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds()
 	t.Logf("compaction speed: %.2f bytes/s", speed)
@@ -3,6 +3,7 @@ package topology
 import (
 	"context"
 	"github.com/chrislusf/seaweedfs/weed/pb"
+	"io"
 	"sync/atomic"
 	"time"
 
@@ -70,11 +71,26 @@ func (t *Topology) batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *
 		go func(index int, url pb.ServerAddress, vid needle.VolumeId) {
 			glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
 			err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
-				_, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
+				stream, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
 					VolumeId:    uint32(vid),
 					Preallocate: preallocate,
 				})
+				if err != nil {
 					return err
+				}
+
+				for {
+					resp, recvErr := stream.Recv()
+					if recvErr != nil {
+						if recvErr == io.EOF {
+							break
+						} else {
+							return recvErr
+						}
+					}
+					glog.V(0).Infof("%d vacuum %d on %s processed %d bytes", index, vid, url, resp.ProcessedBytes)
+				}
+				return nil
 			})
 			if err != nil {
 				glog.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
@@ -5,7 +5,7 @@ import (
 )
 
 var (
-	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.74)
+	VERSION_NUMBER = fmt.Sprintf("%.02f", 2.75)
 	VERSION        = sizeLimit + " " + VERSION_NUMBER
 	COMMIT         = ""
 )