chrislu 2022-09-15 03:11:34 -07:00
commit f9e3e7d1c4
14 changed files with 26 additions and 24 deletions

@@ -44,7 +44,7 @@ docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,l
docker buildx stop $BUILDER
```
## Minio debuging
## Minio debugging
```
mc config host add local http://127.0.0.1:9000 some_access_key1 some_secret_key1
mc admin trace --all --verbose local

@@ -1,5 +1,5 @@
[notification.log]
# this is only for debugging perpose and does not work with "weed filer.replicate"
# this is only for debugging purpose and does not work with "weed filer.replicate"
enabled = false

@@ -22,7 +22,7 @@ var cmdScaffold = &Command{
export WEED_MYSQL_PASSWORD=some_password
Environment variable rules:
* Prefix the variable name with "WEED_"
* Upppercase the reset of variable name.
* Uppercase the reset of variable name.
* Replace '.' with '_'
`,
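These three rules are mechanical, so a small sketch can make them concrete. The snippet below is illustrative only (the helper name `toEnvVarName` is not part of the repository); it shows how an option such as `mysql.password` becomes the `WEED_MYSQL_PASSWORD` variable used in the export example above.

```go
package main

import (
	"fmt"
	"strings"
)

// toEnvVarName applies the documented rules: prefix with "WEED_",
// uppercase the rest of the name, and replace '.' with '_'.
func toEnvVarName(option string) string {
	return "WEED_" + strings.ToUpper(strings.ReplaceAll(option, ".", "_"))
}

func main() {
	fmt.Println(toEnvVarName("mysql.password")) // WEED_MYSQL_PASSWORD
}
```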

@@ -314,7 +314,7 @@ dialTimeOut = 10
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuraiton. E.g., location = "/tmp/"
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
# Make sure they are not the same if using the same store type!
# 4. Set enabled to true
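Putting the four steps together, a path-specific store entry in filer.toml might look roughly like the sketch below; it only reuses the values quoted in the comment above (`cassandra.tmp`, `location = "/tmp/"`, `enabled`) and is not a complete configuration.

```toml
# Hypothetical sketch of a path-specific filer store, following steps 1-4 above.
[cassandra.tmp]            # step 1: base store type plus a ".tmp" suffix
enabled = true             # step 4: turn the store on
location = "/tmp/"         # step 2: entries under this path use this store
# step 3: copy every other option from the plain [cassandra] section here and
#         customize it, so two stores of the same type do not collide.
```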

@@ -10,7 +10,7 @@
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging perpose and does not work with "weed filer.replicate"
# this is only for debugging purpose and does not work with "weed filer.replicate"
enabled = false

@@ -94,10 +94,10 @@ func (f *Filer) doDeleteFileIds(fileIds []string) {
}
func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
var fildIdsToDelete []string
var fileIdsToDelete []string
for _, chunk := range chunks {
if !chunk.IsChunkManifest {
fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString())
fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
continue
}
dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
@@ -105,12 +105,12 @@ func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
fildIdsToDelete = append(fildIdsToDelete, dChunk.GetFileIdString())
fileIdsToDelete = append(fileIdsToDelete, dChunk.GetFileIdString())
}
fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString())
fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
}
f.doDeleteFileIds(fildIdsToDelete)
f.doDeleteFileIds(fileIdsToDelete)
}
func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {

@@ -6,7 +6,7 @@ var (
_ = util.Configuration(&cacheConfig{})
)
// implementing util.Configuraion
// implementing util.Configuration
type cacheConfig struct {
dir string
}
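The `_ = util.Configuration(&cacheConfig{})` line above is the usual Go compile-time check that a type satisfies an interface. A minimal standalone sketch of the same pattern follows; the interface and its single method are illustrative, not the repository's actual `util.Configuration`.

```go
package main

// Configuration stands in for util.Configuration; its method set here is
// an assumption for illustration only.
type Configuration interface {
	GetString(key string) string
}

type cacheConfig struct {
	dir string
}

// GetString returns the cache directory for any key in this toy example.
func (c *cacheConfig) GetString(key string) string { return c.dir }

// Compile-time assertion: the build breaks if *cacheConfig ever stops
// implementing Configuration.
var _ = Configuration(&cacheConfig{})

func main() {}
```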

@@ -46,7 +46,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
mc.invalidateFunc(newKey, message.NewEntry)
}
} else if filer_pb.IsCreate(resp) {
// no need to invaalidate
// no need to invalidate
} else if filer_pb.IsDelete(resp) {
oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name)
mc.invalidateFunc(oldKey, message.OldEntry)

@@ -117,7 +117,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
}
}
}
entryName := filepath.Base(*input.Key)
dirName := filepath.Dir(*input.Key)
if dirName == "." {
@@ -147,6 +147,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
} else if mime != "" {
entry.Attributes.Mime = mime
}
entry.Attributes.FileSize = uint64(offset)
})
if err != nil {
@@ -244,6 +245,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
KeyMarker: input.KeyMarker,
MaxUploads: input.MaxUploads,
Prefix: input.Prefix,
IsTruncated: aws.Bool(false),
}
entries, _, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, math.MaxInt32)

@@ -94,13 +94,13 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
grpcConection, err := pb.GrpcDial(ctx, masterAddress.ToGrpcAddress(), false, grpcDialOption)
grpcConnection, err := pb.GrpcDial(ctx, masterAddress.ToGrpcAddress(), false, grpcDialOption)
if err != nil {
return "", fmt.Errorf("fail to dial %s : %v", masterAddress, err)
}
defer grpcConection.Close()
defer grpcConnection.Close()
client := master_pb.NewSeaweedClient(grpcConection)
client := master_pb.NewSeaweedClient(grpcConnection)
stream, err := client.SendHeartbeat(ctx)
if err != nil {
glog.V(0).Infof("SendHeartbeat to %s: %v", masterAddress, err)

@@ -205,7 +205,7 @@ func GetWritableRemoteReplications(s *storage.Store, grpcDialOption grpc.DialOpt
// has one local and has remote replications
copyCount := v.ReplicaPlacement.GetCopyCount()
if len(lookupResult.Locations) < copyCount {
err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]",
err = fmt.Errorf("replicating operations [%d] is less than volume %d replication copy count [%d]",
len(lookupResult.Locations), volumeId, copyCount)
}
}

@@ -54,7 +54,7 @@ func TestVolumesBinaryState(t *testing.T) {
expectResultAfterUpdate []bool
}{
{
name: "mark true when exist copies",
name: "mark true when copies exist",
state: state_exist,
expectResult: []bool{true, true, true, false, true},
update: func() {
@@ -67,7 +67,7 @@ func TestVolumesBinaryState(t *testing.T) {
expectResultAfterUpdate: []bool{true, false, true, false, false},
},
{
name: "mark true when inexist copies",
name: "mark true when no copies exist",
state: state_no,
expectResult: []bool{false, true, true, false, true},
update: func() {
@@ -92,7 +92,7 @@ func TestVolumesBinaryState(t *testing.T) {
}
for index, val := range result {
if val != test.expectResult[index] {
t.Fatalf("result not matched, index %d, got %v, expect %v\n",
t.Fatalf("result not matched, index %d, got %v, expected %v\n",
index, val, test.expectResult[index])
}
}
@@ -107,7 +107,7 @@ func TestVolumesBinaryState(t *testing.T) {
}
for index, val := range updateResult {
if val != test.expectResultAfterUpdate[index] {
t.Fatalf("update result not matched, index %d, got %v, expect %v\n",
t.Fatalf("update result not matched, index %d, got %v, expected %v\n",
index, val, test.expectResultAfterUpdate[index])
}
}

@@ -128,7 +128,7 @@ func (v *ChunkCacheVolume) getNeedleSlice(key types.NeedleId, offset, length uin
}
wanted := min(int(length), int(nv.Size)-int(offset))
if wanted < 0 {
// should never happen, but better than panicing
// should never happen, but better than panicking
return nil, ErrorOutOfBounds
}
data := make([]byte, wanted)
@@ -151,7 +151,7 @@ func (v *ChunkCacheVolume) readNeedleSliceAt(data []byte, key types.NeedleId, of
}
wanted := min(len(data), int(nv.Size)-int(offset))
if wanted < 0 {
// should never happen, but better than panicing
// should never happen, but better than panicking
return 0, ErrorOutOfBounds
}
if n, err = v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); err != nil {

@@ -105,7 +105,7 @@ func (s *unboundedSemaphore) Release() {
s.lock.Lock()
s.counter += 1
if s.counter > 0 {
// Not broadcasting here since it's unlike we can satify all waiting
// Not broadcasting here since it's unlike we can satisfy all waiting
// goroutines. Instead, we will Signal again if there are left over
// quota after Acquire, in case of lost wakeups.
s.cond.Signal()
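The comment above records the Signal-versus-Broadcast trade-off. A hedged sketch of the matching `Acquire` side, reconstructed from the `Release` method shown here rather than taken from this commit, makes the idea of signalling again while quota remains concrete; the field names, types, and constructor are assumptions.

```go
package util

import "sync"

// unboundedSemaphore is sketched from the Release method above; the exact
// fields in the repository may differ.
type unboundedSemaphore struct {
	lock    sync.Mutex
	cond    *sync.Cond
	counter int64
}

func newUnboundedSemaphore(initialCount int64) *unboundedSemaphore {
	s := &unboundedSemaphore{counter: initialCount}
	s.cond = sync.NewCond(&s.lock)
	return s
}

func (s *unboundedSemaphore) Acquire() {
	s.lock.Lock()
	for s.counter < 1 {
		// Wait releases s.lock while blocked and reacquires it on wakeup.
		s.cond.Wait()
	}
	s.counter -= 1
	if s.counter > 0 {
		// Release only Signals once, so pass the wakeup along while quota
		// remains; this guards against the lost wakeups the comment mentions.
		s.cond.Signal()
	}
	s.lock.Unlock()
}
```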