Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2024-01-19 02:48:24 +00:00
go fmt
This commit is contained in:
parent
5160eb08f7
commit
0c8dea9de8
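The changes below are whitespace-only, the kind gofmt produces: single spaces around binary operators, a space before a named return-value list, and column-aligned struct-literal fields. As a minimal standalone sketch of the first rule (not part of this commit), Go's go/format package applies the same canonical formatting programmatically:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// gofmt's canonical style puts single spaces around binary operators,
	// turning "1024*1024" into "1024 * 1024" as in the hunks below.
	src := []byte("var limit int64 = 1024*1024")

	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // prints: var limit int64 = 1024 * 1024
}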
@@ -40,7 +40,7 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva
 	for _, point := range points {
 		if point.isStart {
 			if len(queue) > 0 {
-				lastIndex := len(queue) -1
+				lastIndex := len(queue) - 1
 				lastPoint := queue[lastIndex]
 				if point.x != prevX && lastPoint.ts < point.ts {
 					visibles = addToVisibles(visibles, prevX, lastPoint, point)
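For orientation, readResolvedChunks resolves overlapping chunk writes so that the newest timestamp wins at every offset. A simplified quadratic sketch of that idea (assumed names, not the actual sweep-line implementation touched above):

package main

import (
	"fmt"
	"sort"
)

type write struct{ start, stop, ts int64 }

// resolve returns the visible segments left after overlapping writes,
// letting the newest timestamp win at every offset.
func resolve(writes []write) (visible []write) {
	// Collect every interval boundary.
	var xs []int64
	for _, w := range writes {
		xs = append(xs, w.start, w.stop)
	}
	sort.Slice(xs, func(i, j int) bool { return xs[i] < xs[j] })

	// Between each pair of adjacent boundaries, the covering write
	// with the newest ts is the visible one.
	for i := 0; i+1 < len(xs); i++ {
		lo, hi := xs[i], xs[i+1]
		if lo == hi {
			continue
		}
		best := int64(-1)
		for _, w := range writes {
			if w.start <= lo && hi <= w.stop && w.ts > best {
				best = w.ts
			}
		}
		if best >= 0 {
			visible = append(visible, write{lo, hi, best})
		}
	}
	return
}

func main() {
	fmt.Println(resolve([]write{{0, 100, 1}, {50, 150, 2}}))
	// [{0 50 1} {50 100 2} {100 150 2}]: the ts=2 write shadows the overlap
}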
@@ -52,7 +52,7 @@ func TestReadResolvedChunks(t *testing.T) {
 
 func TestRandomizedReadResolvedChunks(t *testing.T) {
 
-	var limit int64 = 1024*1024
+	var limit int64 = 1024 * 1024
 	array := make([]int64, limit)
 	var chunks []*filer_pb.FileChunk
 	for ts := int64(0); ts < 1024; ts++ {
@@ -75,7 +75,7 @@ func TestRandomizedReadResolvedChunks(t *testing.T) {
 	visibles := readResolvedChunks(chunks)
 
 	for _, visible := range visibles {
-		for i := visible.start; i<visible.stop;i++{
+		for i := visible.start; i < visible.stop; i++ {
 			if array[i] != visible.modifiedTime {
 				t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime)
 			}
@@ -101,12 +101,12 @@ func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.Fil
 
 func TestSequentialReadResolvedChunks(t *testing.T) {
 
-	var chunkSize int64 = 1024*1024*2
+	var chunkSize int64 = 1024 * 1024 * 2
 	var chunks []*filer_pb.FileChunk
 	for ts := int64(0); ts < 13; ts++ {
 		chunks = append(chunks, &filer_pb.FileChunk{
 			FileId: "",
-			Offset: chunkSize*ts,
+			Offset: chunkSize * ts,
 			Size:   uint64(chunkSize),
 			Mtime:  1,
 		})
@@ -70,7 +70,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 	return nil
 }
 
-func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onChunksFn OnChunksFunc, onHardLinkIdsFn OnHardLinkIdsFunc)(err error) {
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onChunksFn OnChunksFunc, onHardLinkIdsFn OnHardLinkIdsFunc) (err error) {
 
 	lastFileName := ""
 	includeLastFile := false
@@ -134,8 +134,8 @@ func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, ful
 	}
 
 	members, err := store.Client.ZRangeByLex(ctx, genDirectoryListKey(string(fullpath)), &redis.ZRangeBy{
-		Min:  "-",
-		Max:  "+",
+		Min: "-",
+		Max: "+",
 	}).Result()
 	if err != nil {
 		return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err)
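In the hunk above, Min "-" and Max "+" are Redis's open lexicographic bounds, selecting every member of the sorted set that backs a directory listing. A minimal standalone sketch of the same go-redis v8 call pattern (hypothetical key name, not SeaweedFS code):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// "-" and "+" are ZRANGEBYLEX's open bounds: all members,
	// in lexicographic order.
	members, err := client.ZRangeByLex(ctx, "dir:children", &redis.ZRangeBy{
		Min: "-",
		Max: "+",
	}).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(members)
}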
@@ -67,7 +67,6 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
 	return nil
 }
 
-
 func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamRenameEntryResponse) error {
 	// comes from filer StreamRenameEntry, can only be create or delete entry
 
@@ -119,7 +118,7 @@ func (dir *Dir) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR
 		}
 		dir.wfs.handlesLock.Unlock()
 
-	}else if resp.EventNotification.OldEntry != nil {
+	} else if resp.EventNotification.OldEntry != nil {
 		// without new entry, only old entry name exists. This is the second step to delete old entry
 		if err := dir.wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, util.NewFullPath(resp.Directory, resp.EventNotification.OldEntry.Name), nil); err != nil {
 			return err
@@ -161,7 +161,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee
 	if err := stream.Send(&filer_pb.StreamRenameEntryResponse{
 		Directory: string(newParent),
 		EventNotification: &filer_pb.EventNotification{
-			OldEntry:  &filer_pb.Entry{
+			OldEntry: &filer_pb.Entry{
 				Name: entry.Name(),
 			},
 			NewEntry: newEntry.ToProtoEntry(),
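Whitespace-only changes like the OldEntry line above come from gofmt's other rule in this commit: values in a run of single-line struct-literal fields are padded to one column, and stray extra padding is removed. A standalone demonstration via go/format (the struct here is made up):

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// gofmt pads the shorter field names so the values line up.
	src := []byte(`package p

var in = input{
	Bucket: "b",
	StorageClass: "STANDARD_IA",
}
`)
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
	// Bucket:       "b",
	// StorageClass: "STANDARD_IA",
}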
@@ -56,7 +56,7 @@ func (cluster *Cluster) RemoveClusterNode(nodeType string, address pb.ServerAddr
 	}
 }
 
-func (cluster *Cluster) ListClusterNode(nodeType string) (nodes []*ClusterNode){
+func (cluster *Cluster) ListClusterNode(nodeType string) (nodes []*ClusterNode) {
 	switch nodeType {
 	case "filer":
 		cluster.filersLock.RLock()
@@ -80,7 +80,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre
 
 	// println("source:", volFileInfoResp.String())
 	copyResponse := &volume_server_pb.VolumeCopyResponse{}
-	reportInterval := int64(1024*1024*128)
+	reportInterval := int64(1024 * 1024 * 128)
 	nextReportTarget := reportInterval
 	var modifiedTsNs int64
 	var sendErr error
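The reportInterval constant above throttles progress messages during a volume copy to one per 128 MiB. A simplified sketch of that pattern (assumed names and a fake byte stream, not the actual VolumeCopy code):

package main

import "fmt"

func main() {
	reportInterval := int64(1024 * 1024 * 128) // report once per 128 MiB
	nextReportTarget := reportInterval

	var processed int64
	for i := 0; i < 40; i++ {
		processed += 16 * 1024 * 1024 // pretend we received a 16 MiB chunk

		// Only emit a progress update when the byte count crosses the
		// next boundary, instead of after every chunk.
		if processed >= nextReportTarget {
			fmt.Printf("copied %d bytes\n", processed)
			nextReportTarget = processed + reportInterval
		}
	}
}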
@@ -27,7 +27,7 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve
 func (vs *VolumeServer) VacuumVolumeCompact(req *volume_server_pb.VacuumVolumeCompactRequest, stream volume_server_pb.VolumeServer_VacuumVolumeCompactServer) error {
 
 	resp := &volume_server_pb.VacuumVolumeCompactResponse{}
-	reportInterval := int64(1024*1024*128)
+	reportInterval := int64(1024 * 1024 * 128)
 	nextReportTarget := reportInterval
 
 	var sendErr error
@@ -49,10 +49,10 @@ func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey
 	// Upload the file to S3.
 	var result *s3manager.UploadOutput
 	result, err = uploader.Upload(&s3manager.UploadInput{
-		Bucket: aws.String(destBucket),
-		Key: aws.String(destKey),
-		Body: fileReader,
-		StorageClass: aws.String("STANDARD_IA"),
+		Bucket:       aws.String(destBucket),
+		Key:          aws.String(destKey),
+		Body:         fileReader,
+		StorageClass: aws.String("STANDARD_IA"),
 	})
 
 	//in case it fails to upload
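The hunk above only realigns the UploadInput fields. For context, a minimal standalone upload with the same aws-sdk-go (v1) s3manager API, using hypothetical bucket, key, and file names:

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess)

	f, err := os.Open("backup.dat") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// STANDARD_IA stores the object in S3's infrequent-access tier.
	result, err := uploader.Upload(&s3manager.UploadInput{
		Bucket:       aws.String("my-bucket"), // hypothetical bucket
		Key:          aws.String("backup.dat"),
		Body:         f,
		StorageClass: aws.String("STANDARD_IA"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded to", result.Location)
}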