Revert "Revert "Merge branch 'master' of https://github.com/seaweedfs/seaweedfs""
This reverts commit 8cb42c39
This commit is contained in:
parent 3d07895518
commit 645ae8c57b
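This reverted revert restores the codebase-wide migration of slices.SortFunc call sites to the updated golang.org/x/exp/slices API: the comparator argument changed from a boolean "less" function to a three-way compare that returns a negative, zero, or positive int, matching the slices package that landed in the Go 1.21 standard library. A minimal sketch of the pattern (a standalone example, not SeaweedFS code):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	names := []string{"volume", "master", "filer"}

	// Old API: slices.SortFunc(names, func(a, b string) bool { return a < b })
	// New API: a three-way comparator. strings.Compare already returns
	// a negative, zero, or positive int, so the "< 0" suffix drops away.
	slices.SortFunc(names, func(a, b string) int {
		return strings.Compare(a, b)
	})

	fmt.Println(names) // [filer master volume]
}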
@@ -288,8 +288,8 @@ func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers strin
 }
 
 func isTheFirstOne(self pb.ServerAddress, peers []pb.ServerAddress) bool {
-	slices.SortFunc(peers, func(a, b pb.ServerAddress) bool {
-		return strings.Compare(string(a), string(b)) < 0
+	slices.SortFunc(peers, func(a, b pb.ServerAddress) int {
+		return strings.Compare(string(a), string(b))
 	})
 	if len(peers) <= 0 {
 		return true
@@ -73,11 +73,11 @@ func TestCompactFileChunksRealCase(t *testing.T) {
 }
 
 func printChunks(name string, chunks []*filer_pb.FileChunk) {
-	slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
+	slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) int {
 		if a.Offset == b.Offset {
-			return a.ModifiedTsNs < b.ModifiedTsNs
+			return int(a.ModifiedTsNs - b.ModifiedTsNs)
 		}
-		return a.Offset < b.Offset
+		return int(a.Offset - b.Offset)
 	})
 	for _, chunk := range chunks {
 		glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
@@ -30,14 +30,20 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOff
 			isStart: false,
 		})
 	}
-	slices.SortFunc(points, func(a, b *Point) bool {
+	slices.SortFunc(points, func(a, b *Point) int {
 		if a.x != b.x {
-			return a.x < b.x
+			return int(a.x - b.x)
 		}
 		if a.ts != b.ts {
-			return a.ts < b.ts
+			return int(a.ts - b.ts)
 		}
-		return !a.isStart
+		if a.isStart {
+			return -1
+		}
+		if b.isStart {
+			return 1
+		}
+		return 0
 	})
 
 	var prevX int64
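The readResolvedChunks hunk above is the one place where the rewrite is more than mechanical: the old closure's final tie-break returned !a.isStart, which ignores b entirely and so was never a valid "less" predicate; the new comparator makes the intended order explicit, with start events sorting ahead of end events. A self-contained sketch of the same three-key comparison, using cmp.Compare from the Go 1.21 standard library instead of the subtraction idiom (Point here is a simplified stand-in for the unexported type in the filer package):

package main

import (
	"cmp"
	"fmt"
	"slices" // standard library, Go 1.21+
)

// Point mirrors the fields used in the hunk above.
type Point struct {
	x       int64
	ts      int64
	isStart bool
}

func comparePoints(a, b *Point) int {
	// cmp.Compare returns an exact -1/0/+1 for int64 keys, with no
	// truncation risk, unlike int(a.x - b.x) on 32-bit builds.
	if c := cmp.Compare(a.x, b.x); c != 0 {
		return c
	}
	if c := cmp.Compare(a.ts, b.ts); c != 0 {
		return c
	}
	switch {
	case a.isStart == b.isStart:
		return 0
	case a.isStart:
		return -1 // start events sort ahead of end events
	default:
		return 1
	}
}

func main() {
	points := []*Point{
		{x: 10, ts: 7, isStart: false},
		{x: 10, ts: 7, isStart: true},
		{x: 3, ts: 9, isStart: false},
	}
	slices.SortFunc(points, comparePoints)
	for _, p := range points {
		fmt.Printf("x=%d ts=%d start=%v\n", p.x, p.ts, p.isStart)
	}
}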
@@ -164,8 +164,8 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP
 	}
 
 	// sort
-	slices.SortFunc(members, func(a, b string) bool {
-		return strings.Compare(a, b) < 0
+	slices.SortFunc(members, func(a, b string) int {
+		return strings.Compare(a, b)
 	})
 
 	// limit
@@ -45,11 +45,11 @@ func isSameChunks(a, b []*filer_pb.FileChunk) bool {
 	if len(a) != len(b) {
 		return false
 	}
-	slices.SortFunc(a, func(i, j *filer_pb.FileChunk) bool {
-		return strings.Compare(i.ETag, j.ETag) < 0
+	slices.SortFunc(a, func(i, j *filer_pb.FileChunk) int {
+		return strings.Compare(i.ETag, j.ETag)
 	})
-	slices.SortFunc(b, func(i, j *filer_pb.FileChunk) bool {
-		return strings.Compare(i.ETag, j.ETag) < 0
+	slices.SortFunc(b, func(i, j *filer_pb.FileChunk) int {
+		return strings.Compare(i.ETag, j.ETag)
 	})
 	for i := 0; i < len(a); i++ {
 		if a[i].ETag != b[i].ETag {
@@ -72,8 +72,8 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 	glog.V(2).Infof("completeMultipartUpload input %v", input)
 
 	completedParts := parts.Parts
-	slices.SortFunc(completedParts, func(a, b CompletedPart) bool {
-		return a.PartNumber < b.PartNumber
+	slices.SortFunc(completedParts, func(a, b CompletedPart) int {
+		return a.PartNumber - b.PartNumber
 	})
 
 	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
@@ -334,8 +334,8 @@ func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerCli
 		for dir := range directoriesWithDeletion {
 			allDirs = append(allDirs, dir)
 		}
-		slices.SortFunc(allDirs, func(a, b string) bool {
-			return len(a) > len(b)
+		slices.SortFunc(allDirs, func(a, b string) int {
+			return len(b) - len(a)
 		})
 		newDirectoriesWithDeletion = make(map[string]int)
 		for _, dir := range allDirs {
@@ -135,8 +135,8 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
 		fs.filer.DeleteChunks(fileChunks)
 		return nil, md5Hash, 0, uploadErr, nil
 	}
-	slices.SortFunc(fileChunks, func(a, b *filer_pb.FileChunk) bool {
-		return a.Offset < b.Offset
+	slices.SortFunc(fileChunks, func(a, b *filer_pb.FileChunk) int {
+		return int(a.Offset - b.Offset)
 	})
 	return fileChunks, md5Hash, chunkOffset, nil, smallContent
 }
@@ -411,8 +411,8 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
 	hasMove := true
 	for hasMove {
 		hasMove = false
-		slices.SortFunc(rackEcNodes, func(a, b *EcNode) bool {
-			return a.freeEcSlot > b.freeEcSlot
+		slices.SortFunc(rackEcNodes, func(a, b *EcNode) int {
+			return b.freeEcSlot - a.freeEcSlot
 		})
 		emptyNode, fullNode := rackEcNodes[0], rackEcNodes[len(rackEcNodes)-1]
 		emptyNodeShardCount, fullNodeShardCount := ecNodeIdToShardCount[emptyNode.info.Id], ecNodeIdToShardCount[fullNode.info.Id]
@@ -492,8 +492,8 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[
 			})
 		}
 	}
-	slices.SortFunc(candidateEcNodes, func(a, b *CandidateEcNode) bool {
-		return a.shardCount > b.shardCount
+	slices.SortFunc(candidateEcNodes, func(a, b *CandidateEcNode) int {
+		return b.shardCount - a.shardCount
 	})
 	for i := 0; i < n; i++ {
 		selectedEcNodeIndex := -1
@@ -119,14 +119,14 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId,
 }
 
 func sortEcNodesByFreeslotsDescending(ecNodes []*EcNode) {
-	slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
-		return a.freeEcSlot > b.freeEcSlot
+	slices.SortFunc(ecNodes, func(a, b *EcNode) int {
+		return b.freeEcSlot - a.freeEcSlot
 	})
 }
 
 func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {
-	slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
-		return a.freeEcSlot < b.freeEcSlot
+	slices.SortFunc(ecNodes, func(a, b *EcNode) int {
+		return a.freeEcSlot - b.freeEcSlot
 	})
 }
 
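The paired helpers above illustrate the direction convention under the new signature: descending order comes from swapping the operands (b.freeEcSlot - a.freeEcSlot) rather than flipping a < into >. A small sketch with plain ints, where subtraction is safe because the values are small and non-negative:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	freeSlots := []int{3, 11, 7}

	// Descending: subtract a from b, so larger values sort first.
	slices.SortFunc(freeSlots, func(a, b int) int { return b - a })
	fmt.Println(freeSlots) // [11 7 3]

	// Ascending: swap the operands back.
	slices.SortFunc(freeSlots, func(a, b int) int { return a - b })
	fmt.Println(freeSlots) // [3 7 11]
}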
@@ -243,8 +243,8 @@ func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool
 }
 
 func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
-	slices.SortFunc(volumes, func(a, b *master_pb.VolumeInformationMessage) bool {
-		return a.Size < b.Size
+	slices.SortFunc(volumes, func(a, b *master_pb.VolumeInformationMessage) int {
+		return int(a.Size - b.Size)
 	})
}
 
@@ -269,8 +269,8 @@ func balanceSelectedVolume(commandEnv *CommandEnv, diskType types.DiskType, volu
 
 	for hasMoved {
 		hasMoved = false
-		slices.SortFunc(nodesWithCapacity, func(a, b *Node) bool {
-			return a.localVolumeRatio(capacityFunc) < b.localVolumeRatio(capacityFunc)
+		slices.SortFunc(nodesWithCapacity, func(a, b *Node) int {
+			return int(a.localVolumeRatio(capacityFunc) - b.localVolumeRatio(capacityFunc))
 		})
 		if len(nodesWithCapacity) == 0 {
 			fmt.Printf("no volume server found with capacity for %s", diskType.ReadableString())
@@ -80,8 +80,8 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
 		if *volumeId > 0 && replicas[0].info.Id != uint32(*volumeId) {
 			continue
 		}
-		slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
-			return fileCount(a) > fileCount(b)
+		slices.SortFunc(replicas, func(a, b *VolumeReplica) int {
+			return int(fileCount(b) - fileCount(a))
 		})
 		for len(replicas) >= 2 {
 			a, b := replicas[0], replicas[1]
@@ -328,8 +328,8 @@ func (c *commandVolumeFixReplication) fixOneUnderReplicatedVolume(commandEnv *Co
 
 func keepDataNodesSorted(dataNodes []location, diskType types.DiskType) {
 	fn := capacityByFreeVolumeCount(diskType)
-	slices.SortFunc(dataNodes, func(a, b location) bool {
-		return fn(a.dataNode) > fn(b.dataNode)
+	slices.SortFunc(dataNodes, func(a, b location) int {
+		return int(fn(b.dataNode) - fn(a.dataNode))
 	})
 }
 
@@ -514,17 +514,17 @@ func countReplicas(replicas []*VolumeReplica) (diffDc, diffRack, diffNode map[st
 }
 
 func pickOneReplicaToDelete(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica {
-	slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
+	slices.SortFunc(replicas, func(a, b *VolumeReplica) int {
 		if a.info.Size != b.info.Size {
-			return a.info.Size < b.info.Size
+			return int(a.info.Size - b.info.Size)
 		}
 		if a.info.ModifiedAtSecond != b.info.ModifiedAtSecond {
-			return a.info.ModifiedAtSecond < b.info.ModifiedAtSecond
+			return int(a.info.ModifiedAtSecond - b.info.ModifiedAtSecond)
 		}
 		if a.info.CompactRevision != b.info.CompactRevision {
-			return a.info.CompactRevision < b.info.CompactRevision
+			return int(a.info.CompactRevision - b.info.CompactRevision)
 		}
-		return false
+		return 0
 	})
 
 	return replicas[0]
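One caveat on the subtraction idiom used in this and several other hunks (int(a.info.Size - b.info.Size), int(a.ModifiedTsNs - b.ModifiedTsNs), and similar): converting a 64-bit difference to int only preserves the sign on 64-bit platforms. On a 32-bit build the truncation can flip the sign and silently misorder elements, which is why cmp.Compare is generally the safer translation. A short demonstration, forcing the 32-bit behavior with an explicit int32 conversion:

package main

import (
	"cmp"
	"fmt"
)

func main() {
	var a, b int64 = 1 << 31, 0 // a > b, and the difference is 2^31

	// What int(a - b) does on a 32-bit platform: the high bits are
	// dropped and the sign flips, so a would sort *before* b.
	fmt.Println(int32(a - b)) // -2147483648

	// cmp.Compare is exact for operands of any magnitude.
	fmt.Println(cmp.Compare(a, b)) // 1
}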
@@ -8,6 +8,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"golang.org/x/exp/slices"
 	"path/filepath"
+	"strings"
 
 	"io"
 )
@@ -81,8 +82,8 @@ func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
 
 func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
 	output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
-	slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) int {
+		return strings.Compare(a.Id, b.Id)
 	})
 	var s statistics
 	for _, dc := range t.DataCenterInfos {
@@ -98,8 +99,8 @@ func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.Top
 func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
 	output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
-	slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) int {
+		return strings.Compare(a.Id, b.Id)
 	})
 	for _, r := range t.RackInfos {
 		if *c.rack != "" && *c.rack != r.Id {
@@ -114,8 +115,8 @@ func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.D
 func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
 	output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
 	var s statistics
-	slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) int {
+		return strings.Compare(a.Id, b.Id)
 	})
 	for _, dn := range t.DataNodeInfos {
 		if *c.dataNode != "" && *c.dataNode != dn.Id {
@@ -159,8 +160,8 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf
 		diskType = "hdd"
 	}
 	output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
-	slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) bool {
-		return a.Id < b.Id
+	slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) int {
+		return int(a.Id - b.Id)
 	})
 	for _, vi := range t.VolumeInfos {
 		if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {
@@ -179,8 +179,8 @@ func (c *commandVolumeServerEvacuate) evacuateEcVolumes(commandEnv *CommandEnv,
 func (c *commandVolumeServerEvacuate) moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEcShardInformationMessage, thisNode *EcNode, otherNodes []*EcNode, applyChange bool) (hasMoved bool, err error) {
 
 	for _, shardId := range erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() {
-		slices.SortFunc(otherNodes, func(a, b *EcNode) bool {
-			return a.localShardIdCount(ecShardInfo.Id) < b.localShardIdCount(ecShardInfo.Id)
+		slices.SortFunc(otherNodes, func(a, b *EcNode) int {
+			return a.localShardIdCount(ecShardInfo.Id) - b.localShardIdCount(ecShardInfo.Id)
 		})
 		for i := 0; i < len(otherNodes); i++ {
 			emptyNode := otherNodes[i]
@@ -214,8 +214,8 @@ func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][
 		})
 	}
 	// most empty one is in the front
-	slices.SortFunc(otherNodes, func(a, b *Node) bool {
-		return a.localVolumeRatio(maxVolumeCountFn) < b.localVolumeRatio(maxVolumeCountFn)
+	slices.SortFunc(otherNodes, func(a, b *Node) int {
+		return int(a.localVolumeRatio(maxVolumeCountFn) - b.localVolumeRatio(maxVolumeCountFn))
 	})
 	for i := 0; i < len(otherNodes); i++ {
 		emptyNode := otherNodes[i]
@@ -26,8 +26,8 @@ var (
 )
 
 func RunShell(options ShellOptions) {
-	slices.SortFunc(Commands, func(a, b command) bool {
-		return strings.Compare(a.Name(), b.Name()) < 0
+	slices.SortFunc(Commands, func(a, b command) int {
+		return strings.Compare(a.Name(), b.Name())
 	})
 	line = liner.NewLiner()
 	defer line.Close()
@@ -7,6 +7,7 @@ import (
 	"path"
 	"regexp"
 	"strconv"
+	"strings"
 
 	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -144,8 +145,8 @@ func (l *DiskLocation) loadAllEcShards() (err error) {
 		}
 		dirEntries = append(dirEntries, indexDirEntries...)
 	}
-	slices.SortFunc(dirEntries, func(a, b os.DirEntry) bool {
-		return a.Name() < b.Name()
+	slices.SortFunc(dirEntries, func(a, b os.DirEntry) int {
+		return strings.Compare(a.Name(), b.Name())
 	})
 	var sameVolumeShards []string
 	var prevVolumeId needle.VolumeId
@@ -84,8 +84,11 @@ func (ev *EcVolume) AddEcVolumeShard(ecVolumeShard *EcVolumeShard) bool {
 		}
 	}
 	ev.Shards = append(ev.Shards, ecVolumeShard)
-	slices.SortFunc(ev.Shards, func(a, b *EcVolumeShard) bool {
-		return a.VolumeId < b.VolumeId || a.VolumeId == b.VolumeId && a.ShardId < b.ShardId
+	slices.SortFunc(ev.Shards, func(a, b *EcVolumeShard) int {
+		if a.VolumeId != b.VolumeId {
+			return int(a.VolumeId - b.VolumeId)
+		}
+		return int(a.ShardId - b.ShardId)
 	})
 	return true
 }
@@ -400,8 +400,8 @@ func (s *Store) EcVolumes() (ecVolumes []*erasure_coding.EcVolume) {
 		}
 		location.ecVolumesLock.RUnlock()
 	}
-	slices.SortFunc(ecVolumes, func(a, b *erasure_coding.EcVolume) bool {
-		return a.VolumeId > b.VolumeId
+	slices.SortFunc(ecVolumes, func(a, b *erasure_coding.EcVolume) int {
+		return int(b.VolumeId - a.VolumeId)
 	})
 	return ecVolumes
 }
@@ -3,6 +3,7 @@ package topology
 import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"golang.org/x/exp/slices"
+	"strings"
 )
 
 type DataCenter struct {
@@ -46,8 +47,8 @@ func (dc *DataCenter) ToInfo() (info DataCenterInfo) {
 		racks = append(racks, rack.ToInfo())
 	}
 
-	slices.SortFunc(racks, func(a, b RackInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(racks, func(a, b RackInfo) int {
+		return strings.Compare(string(a.Id), string(b.Id))
 	})
 	info.Racks = racks
 	return
@@ -5,6 +5,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"golang.org/x/exp/slices"
+	"strings"
 	"time"
 )
 
@@ -69,8 +70,8 @@ func (r *Rack) ToInfo() (info RackInfo) {
 		dns = append(dns, dn.ToInfo())
 	}
 
-	slices.SortFunc(dns, func(a, b DataNodeInfo) bool {
-		return a.Url < b.Url
+	slices.SortFunc(dns, func(a, b DataNodeInfo) int {
+		return strings.Compare(a.Url, b.Url)
 	})
 
 	info.DataNodes = dns
@@ -3,6 +3,7 @@ package topology
 import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"golang.org/x/exp/slices"
+	"strings"
 )
 
 type TopologyInfo struct {
@@ -21,8 +22,8 @@ func (t *Topology) ToInfo() (info TopologyInfo) {
 		dcs = append(dcs, dc.ToInfo())
 	}
 
-	slices.SortFunc(dcs, func(a, b DataCenterInfo) bool {
-		return a.Id < b.Id
+	slices.SortFunc(dcs, func(a, b DataCenterInfo) int {
+		return strings.Compare(string(a.Id), string(b.Id))
 	})
 
 	info.DataCenters = dcs
@@ -32,8 +32,8 @@ func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount in
 	}
 
 	// keep newest cache to the front
-	slices.SortFunc(c.diskCaches, func(a, b *ChunkCacheVolume) bool {
-		return a.lastModTime.After(b.lastModTime)
+	slices.SortFunc(c.diskCaches, func(a, b *ChunkCacheVolume) int {
+		return b.lastModTime.Compare(a.lastModTime)
 	})
 	return c
 }
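The chunk-cache hunk is the only non-integer, non-string comparator among the hunks shown: it orders by time.Time. time.Time.Compare (added in Go 1.20) returns -1, 0, or +1, and calling it on b with a as the argument keeps the newest entry first, replacing the old a.lastModTime.After(b.lastModTime) predicate. A standalone sketch:

package main

import (
	"fmt"
	"time"

	"golang.org/x/exp/slices"
)

func main() {
	now := time.Now()
	caches := []time.Time{
		now.Add(-2 * time.Hour),
		now,
		now.Add(-30 * time.Minute),
	}

	// Newest first: compare b against a, as in
	// "return b.lastModTime.Compare(a.lastModTime)" above.
	slices.SortFunc(caches, func(a, b time.Time) int {
		return b.Compare(a)
	})

	for _, t := range caches {
		fmt.Println(t.Format(time.RFC3339))
	}
}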