Mirror of https://github.com/seaweedfs/seaweedfs.git

commit 7ce97b59d8 ("go fmt")
parent d1a4e19a3f
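This commit is a repository-wide gofmt pass. The hunks below fall into a few recurring patterns: new `//go:build` constraint lines added next to the legacy `// +build` comments (the Go 1.17 transition), binary-operator spacing tightened in nested expressions, redundant parentheses dropped from single result types, missing spaces restored around braces, and struct fields and composite-literal keys re-aligned into columns.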
@@ -154,7 +154,7 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
 			os.Exit(0)
 		}
 	}
-	if message.NewEntry.Name == remoteStorage.Name + filer.REMOTE_STORAGE_CONF_SUFFIX {
+	if message.NewEntry.Name == remoteStorage.Name+filer.REMOTE_STORAGE_CONF_SUFFIX {
 		conf := &remote_pb.RemoteConf{}
 		if err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {
 			return fmt.Errorf("unmarshal %s/%s: %v", filer.DirectoryEtcRemote, message.NewEntry.Name, err)
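The hunk above is typical of gofmt's spacing heuristic: spaces around a binary operator are kept at the top level of a statement but dropped when the expression is nested more deeply, for example as the operand of another operator or inside a multi-argument call. A minimal sketch of the rule (file and variable names are invented for illustration):

package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	base, suffix := "volume1", ".dat"

	// Top-level binary expression: gofmt keeps the spaces.
	name := base + suffix

	// Operand of ==, one level deeper: gofmt tightens it to base+suffix.
	if name == base+suffix {
		fmt.Println("match")
	}

	// Nested in a multi-argument call: tightened as well,
	// as in the os.Chtimes hunks later in this commit.
	now := time.Now()
	os.Chtimes(base+".dat", now, now)

	// A single-argument call does not raise the depth, so the spaces
	// survive; compare os.Remove(dataBaseFileName + ".note") below,
	// which this commit leaves untouched.
	os.Remove(base + ".note")
}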
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows

 package command
@@ -1,6 +1,5 @@
-// +build !linux
-// +build !darwin
-// +build !freebsd
+//go:build !linux && !darwin && !freebsd
+// +build !linux,!darwin,!freebsd

 package command

@@ -1,3 +1,4 @@
+//go:build linux || darwin || freebsd
 // +build linux darwin freebsd

 package command
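These header hunks are the Go 1.17 build-constraint migration that gofmt performs automatically: it derives a `//go:build` expression from the legacy `// +build` lines and keeps the two forms in sync. In the old syntax a space means OR, a comma means AND, and multiple `// +build` lines are ANDed together, which is why the three lines in the first hunk collapse into one `&&` expression. A hypothetical file header showing the correspondence:

// The two forms below are equivalent; gofmt writes the //go:build line
// from the // +build line(s) and keeps them consistent afterwards.
//
//go:build linux && !arm64
// +build linux,!arm64

// Package buildtags is a placeholder name for this illustration.
package buildtags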
@@ -21,7 +21,7 @@ func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMou
 	return remoteLocation
 }

-func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, remoteLocationPath string)(fp util.FullPath) {
+func MapRemoteStorageLocationPathToFullPath(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, remoteLocationPath string) (fp util.FullPath) {
 	return localMountedDir.Child(remoteLocationPath[len(remoteMountedLocation.Path):])
 }

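The only change in that hunk is the space gofmt inserts between the parameter list and a parenthesized result list: `string)(fp util.FullPath)` becomes `string) (fp util.FullPath)`. The parentheses themselves stay because the result is named. A toy signature showing the rule (names invented):

package main

import "fmt"

// Before gofmt: func split(s string)(head, tail string) {
// After gofmt, a space separates the two lists:
func split(s string) (head, tail string) {
	return s[:1], s[1:]
}

func main() {
	fmt.Println(split("abc"))
}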
@@ -1,3 +1,4 @@
+//go:build linux || darwin || windows
 // +build linux darwin windows

 // limited GOOS due to modernc.org/libc/unistd

@@ -1,3 +1,4 @@
+//go:build !linux && !darwin && !windows && !s390 && !ppc64le && !mips64
 // +build !linux,!darwin,!windows,!s390,!ppc64le,!mips64

 // limited GOOS due to modernc.org/libc/unistd
@@ -127,14 +127,14 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)

 // ---------------- ChunkStreamReader ----------------------------------
 type ChunkStreamReader struct {
 	chunkViews   []*ChunkView
 	totalSize    int64
 	logicOffset  int64
 	buffer       []byte
 	bufferOffset int64
 	bufferLock   sync.Mutex
 	chunk        string
 	lookupFileId wdclient.LookupFileIdFunctionType
 }

 var _ = io.ReadSeeker(&ChunkStreamReader{})
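This hunk, and the `aws.Config` and request-literal hunks further down, change only horizontal whitespace, which the flattened mirror view cannot display: gofmt pads the names in a contiguous run of struct fields (and the keys of a composite literal) so that the types or values start in one column. Adding or renaming one field re-pads the whole block, which is why entire struct bodies appear in a "go fmt" commit. A small sketch with a hypothetical type:

package main

import "sync"

// gofmt aligns the field types of a contiguous block into a column;
// introducing the longer bufferLock name re-pads every neighbor.
type chunkBuffer struct {
	data       []byte
	offset     int64
	bufferLock sync.Mutex
}

func main() { _ = chunkBuffer{} }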
@@ -206,7 +206,7 @@ func (c *ChunkStreamReader) doRead(p []byte) (n int, err error) {
 }

 func (c *ChunkStreamReader) isBufferEmpty() bool {
-	return len(c.buffer) <= int(c.logicOffset - c.bufferOffset)
+	return len(c.buffer) <= int(c.logicOffset-c.bufferOffset)
 }

 func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {

@@ -261,7 +261,7 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) {
 	} else if currentChunkIndex > 0 {
 		if insideChunk(offset, c.chunkViews[currentChunkIndex]) {
 			// good hit
-		} else if insideChunk(offset, c.chunkViews[currentChunkIndex-1]){
+		} else if insideChunk(offset, c.chunkViews[currentChunkIndex-1]) {
 			currentChunkIndex -= 1
 			// fmt.Printf("select -1 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId)
 		} else {
@@ -1,3 +1,4 @@
+//go:build tikv
 // +build tikv

 package tikv

@@ -1,3 +1,4 @@
+//go:build tikv
 // +build tikv

 package tikv
@@ -53,8 +53,8 @@ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.
 }

 type gcsRemoteStorageClient struct {
 	conf   *remote_pb.RemoteConf
 	client *storage.Client
 }

 var _ = remote_storage.RemoteStorageClient(&gcsRemoteStorageClient{})
@@ -169,7 +169,7 @@ func (gcs *gcsRemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStora

 	if len(metadata) > 0 {
 		_, err = gcs.client.Bucket(loc.Bucket).Object(key).Update(context.Background(), storage.ObjectAttrsToUpdate{
 			Metadata: metadata,
 		})
 	} else {
 		// no way to delete the metadata yet
@@ -30,9 +30,9 @@ func (s AliyunRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stora
 	secretKey := util.Nvl(conf.AliyunSecretKey, os.Getenv("ALICLOUD_ACCESS_KEY_SECRET"))

 	config := &aws.Config{
 		Endpoint:                      aws.String(conf.AliyunEndpoint),
 		Region:                        aws.String(conf.AliyunRegion),
 		S3ForcePathStyle:              aws.Bool(false),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if accessKey != "" && secretKey != "" {

@@ -25,9 +25,9 @@ func (s BackBlazeRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_st
 		conf: conf,
 	}
 	config := &aws.Config{
 		Endpoint:                      aws.String(conf.BackblazeEndpoint),
 		Region:                        aws.String("us-west-002"),
 		S3ForcePathStyle:              aws.Bool(true),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if conf.BackblazeKeyId != "" && conf.BackblazeApplicationKey != "" {
@@ -30,9 +30,9 @@ func (s BaiduRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag
 	secretKey := util.Nvl(conf.BaiduSecretKey, os.Getenv("BDCLOUD_SECRET_KEY"))

 	config := &aws.Config{
 		Endpoint:                      aws.String(conf.BaiduEndpoint),
 		Region:                        aws.String(conf.BaiduRegion),
 		S3ForcePathStyle:              aws.Bool(true),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if accessKey != "" && secretKey != "" {

@@ -31,9 +31,9 @@ func (s FilebaseRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_sto
 	secretKey := util.Nvl(conf.FilebaseSecretKey, os.Getenv("AWS_SECRET_ACCESS_KEY"))

 	config := &aws.Config{
 		Endpoint:                      aws.String(conf.FilebaseEndpoint),
 		Region:                        aws.String("us-east-1"),
 		S3ForcePathStyle:              aws.Bool(true),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if accessKey != "" && secretKey != "" {
@@ -30,9 +30,9 @@ func (s StorjRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag
 	secretKey := util.Nvl(conf.StorjSecretKey, os.Getenv("AWS_SECRET_ACCESS_KEY"))

 	config := &aws.Config{
 		Endpoint:                      aws.String(conf.StorjEndpoint),
 		Region:                        aws.String("us-west-2"),
 		S3ForcePathStyle:              aws.Bool(true),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if accessKey != "" && secretKey != "" {

@@ -30,9 +30,9 @@ func (s TencentRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stor
 	secretKey := util.Nvl(conf.TencentSecretKey, os.Getenv("COS_SECRETKEY"))

 	config := &aws.Config{
 		Endpoint:                      aws.String(conf.TencentEndpoint),
 		Region:                        aws.String("us-west-2"),
 		S3ForcePathStyle:              aws.Bool(true),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if accessKey != "" && secretKey != "" {
@@ -45,7 +45,7 @@ func TraverseBfs(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn

 }

-func processOneDirectory(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc, dirQueue *util.Queue, dirQueueWg *sync.WaitGroup) (error) {
+func processOneDirectory(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc, dirQueue *util.Queue, dirQueueWg *sync.WaitGroup) error {

 	return listDirFn(parentPath, func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error {
 		if err := visitFn(dir, name, isDirectory, remoteEntry); err != nil {
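Here gofmt also drops the redundant parentheses around a single unnamed result type, rewriting `) (error) {` to `) error {`; the `checkCopyFiles` hunk further down gets the same treatment. Note the contrast with the earlier `(fp util.FullPath)` hunk, where the parentheses stay because the result is named. Sketch with a hypothetical function:

package main

import "fmt"

// Before gofmt: func ping() (error) { return nil }
// gofmt removes the parentheses around a lone, unnamed result:
func ping() error {
	return nil
}

func main() {
	fmt.Println(ping())
}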
@@ -74,9 +74,9 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc
 	s3sink.endpoint = endpoint

 	config := &aws.Config{
 		Region:                        aws.String(s3sink.region),
 		Endpoint:                      aws.String(s3sink.endpoint),
 		S3ForcePathStyle:              aws.Bool(true),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if awsAccessKeyId != "" && awsSecretAccessKey != "" {

@@ -41,7 +41,7 @@ func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string
 func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, queueName string) (err error) {

 	config := &aws.Config{
 		Region:                        aws.String(region),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if awsAccessKeyId != "" && awsSecretAccessKey != "" {
@@ -114,11 +114,11 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
 		// tell filer to tell volume server to download into needles
 		err = operation.WithVolumeServerClient(assignResult.Url, fs.grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 			_, fetchAndWriteErr := volumeServerClient.FetchAndWriteNeedle(context.Background(), &volume_server_pb.FetchAndWriteNeedleRequest{
 				VolumeId:   uint32(fileId.VolumeId),
 				NeedleId:   uint64(fileId.Key),
 				Cookie:     uint32(fileId.Cookie),
 				Offset:     localOffset,
 				Size:       size,
 				RemoteConf: storageConf,
 				RemoteLocation: &remote_pb.RemoteStorageLocation{
 					Name: remoteStorageMountedLocation.Name,
@@ -203,7 +203,7 @@ func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRe

 	if hasPrefixIn(fullpath, req.PathPrefixes) {
 		// good
-	}else {
+	} else {
 		if !strings.HasPrefix(fullpath, req.PathPrefix) {
 			if eventNotification.NewParentPath != "" {
 				newFullPath := util.Join(eventNotification.NewParentPath, entryName)
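gofmt also restores missing spaces around braces in control flow, as in `}else {` above and `if detectErr != nil{` in a later hunk. A trivial sketch:

package main

import "fmt"

func main() {
	ok := false
	// Before gofmt: "}else {" and "nil{"; gofmt inserts the spaces.
	if ok {
		fmt.Println("yes")
	} else {
		fmt.Println("no")
	}
}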
@@ -1,3 +1,4 @@
+//go:build rocksdb
 // +build rocksdb

 package weed_server
@@ -84,21 +84,21 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
 			return err
 		}
 		if modifiedTsNs > 0 {
-			os.Chtimes(dataBaseFileName + ".dat", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
+			os.Chtimes(dataBaseFileName+".dat", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
 		}

 		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, indexBaseFileName, ".idx", false, false); err != nil {
 			return err
 		}
 		if modifiedTsNs > 0 {
-			os.Chtimes(indexBaseFileName + ".idx", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
+			os.Chtimes(indexBaseFileName+".idx", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
 		}

 		if modifiedTsNs, err = vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, dataBaseFileName, ".vif", false, true); err != nil {
 			return err
 		}
 		if modifiedTsNs > 0 {
-			os.Chtimes(dataBaseFileName + ".vif", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
+			os.Chtimes(dataBaseFileName+".vif", time.Unix(0, modifiedTsNs), time.Unix(0, modifiedTsNs))
 		}

 		os.Remove(dataBaseFileName + ".note")
@@ -167,7 +167,7 @@ func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, i
 only check the the differ of the file size
 todo: maybe should check the received count and deleted count of the volume
 */
-func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) (error) {
+func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) error {
 	stat, err := os.Stat(idxFileName)
 	if err != nil {
 		return fmt.Errorf("stat idx file %s failed: %v", idxFileName, err)
@@ -59,7 +59,7 @@ func (c *commandRemoteCache) Do(args []string, commandEnv *CommandEnv, writer io
 	}

 	mappings, localMountedDir, remoteStorageMountedLocation, remoteStorageConf, detectErr := detectMountInfo(commandEnv, writer, *dir)
-	if detectErr != nil{
+	if detectErr != nil {
 		jsonPrintln(writer, mappings)
 		return detectErr
 	}
@@ -116,7 +116,6 @@ Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.
 `)
 	remoteConfigureCommand.StringVar(&conf.HdfsDataTransferProtection, "hdfs.dataTransferProtection", "", "[authentication|integrity|privacy] Kerberos data transfer protection")
-

 	if err = remoteConfigureCommand.Parse(args); err != nil {
 		return nil
 	}
@@ -1,3 +1,4 @@
+//go:build openbsd || netbsd || plan9 || solaris
 // +build openbsd netbsd plan9 solaris

 package stats

@@ -1,3 +1,4 @@
+//go:build !windows && !openbsd && !netbsd && !plan9 && !solaris
 // +build !windows,!openbsd,!netbsd,!plan9,!solaris

 package stats

@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux

 package stats

@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 package stats

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows

 package memory_map

@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 package memory_map
@@ -34,9 +34,9 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string)
 	}

 	config := &aws.Config{
 		Region:                        aws.String(region),
 		Endpoint:                      aws.String(endpoint),
 		S3ForcePathStyle:              aws.Bool(true),
 		S3DisableContentMD5Validation: aws.Bool(true),
 	}
 	if awsAccessKeyId != "" && awsSecretAccessKey != "" {
@@ -1,3 +1,4 @@
+//go:build !linux && !windows
 // +build !linux,!windows

 package backend

@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 package backend

@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 package backend

@@ -1,3 +1,4 @@
+//go:build 5BytesOffset
 // +build 5BytesOffset

 package needle_map

@@ -1,3 +1,4 @@
+//go:build !5BytesOffset
 // +build !5BytesOffset

 package types

@@ -1,3 +1,4 @@
+//go:build 5BytesOffset
 // +build 5BytesOffset

 package types

@@ -1,3 +1,4 @@
+//go:build !5BytesOffset
 // +build !5BytesOffset

 package util

@@ -1,3 +1,4 @@
+//go:build 5BytesOffset
 // +build 5BytesOffset

 package util

@@ -1,3 +1,4 @@
+//go:build linux || darwin || freebsd || netbsd || openbsd || plan9 || solaris || zos
 // +build linux darwin freebsd netbsd openbsd plan9 solaris zos

 package util

@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 package util

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package grace

@@ -1,3 +1,4 @@
+//go:build plan9
 // +build plan9

 package grace
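A commit like this is produced by running the formatter over the whole tree; given the commit message, presumably the standard invocations:

gofmt -l .     # list files whose formatting differs from gofmt's
gofmt -w .     # rewrite them in place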