refactor: move from io/ioutil to io and os package

The io/ioutil package has been deprecated as of Go 1.16; see
https://golang.org/doc/go1.16#ioutil. This commit replaces the existing
io/ioutil functions with their equivalents in the io and os packages.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
This commit is contained in:
Eng Zer Jun 2021-10-14 12:27:58 +08:00
parent 4cbd390fbe
commit a23bcbb7ec
No known key found for this signature in database
GPG key ID: DAEBBD2E34C111E6
38 changed files with 160 additions and 179 deletions

View file

@ -2,14 +2,15 @@ package basic
import ( import (
"fmt" "fmt"
"io"
"os"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"io/ioutil"
"os"
"strings"
"testing"
) )
var ( var (
@ -108,8 +109,8 @@ func TestListBucket(t *testing.T) {
func TestListObjectV2(t *testing.T) { func TestListObjectV2(t *testing.T) {
listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{ listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(Bucket), Bucket: aws.String(Bucket),
Prefix: aws.String("foo"), Prefix: aws.String("foo"),
Delimiter: aws.String("/"), Delimiter: aws.String("/"),
}) })
if err != nil { if err != nil {
@ -169,7 +170,7 @@ func TestObjectOp(t *testing.T) {
exitErrorf("Unable to get copy object, %v", err) exitErrorf("Unable to get copy object, %v", err)
} }
data, err := ioutil.ReadAll(getObj.Body) data, err := io.ReadAll(getObj.Body)
if err != nil { if err != nil {
exitErrorf("Unable to read object data, %v", err) exitErrorf("Unable to read object data, %v", err)
} }

View file

@ -5,7 +5,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"math/rand" "math/rand"
"mime/multipart" "mime/multipart"
@ -45,7 +44,7 @@ func main() {
defer wg.Done() defer wg.Done()
client := &http.Client{Transport: &http.Transport{ client := &http.Client{Transport: &http.Transport{
MaxIdleConns: 1024, MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024, MaxIdleConnsPerHost: 1024,
}} }}
r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x))) r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x)))
@ -128,7 +127,7 @@ func uploadFileToFiler(client *http.Client, data []byte, filename, destination s
if err != nil { if err != nil {
return 0, fmt.Errorf("read http POST %s response: %v", uri, err) return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
} }
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
} }

View file

@ -5,7 +5,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"math/rand" "math/rand"
"mime/multipart" "mime/multipart"
@ -36,7 +35,7 @@ func main() {
var fileNames []string var fileNames []string
files, err := ioutil.ReadDir(*dir) files, err := os.ReadDir(*dir)
if err != nil { if err != nil {
log.Fatalf("fail to read dir %v: %v", *dir, err) log.Fatalf("fail to read dir %v: %v", *dir, err)
} }
@ -142,7 +141,7 @@ func uploadFileToFiler(client *http.Client, filename, destination string) (size
if err != nil { if err != nil {
return 0, fmt.Errorf("read http POST %s response: %v", uri, err) return 0, fmt.Errorf("read http POST %s response: %v", uri, err)
} }
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
} }

View file

@ -2,17 +2,17 @@ package command
import ( import (
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"google.golang.org/grpc"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"os" "os"
"path" "path"
"strings" "strings"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -82,7 +82,7 @@ func downloadToFile(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpti
} }
defer f.Close() defer f.Close()
if isFileList { if isFileList {
content, err := ioutil.ReadAll(rc.Body) content, err := io.ReadAll(rc.Body)
if err != nil { if err != nil {
return err return err
} }
@ -119,7 +119,7 @@ func fetchContent(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption
return "", nil, e return "", nil, e
} }
defer util.CloseResponse(rc) defer util.CloseResponse(rc)
content, e = ioutil.ReadAll(rc.Body) content, e = io.ReadAll(rc.Body)
return return
} }

View file

@ -3,9 +3,7 @@ package command
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"os" "os"
"path/filepath" "path/filepath"
@ -16,14 +14,14 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/util/grace" "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/chrislusf/seaweedfs/weed/wdclient" "github.com/chrislusf/seaweedfs/weed/wdclient"
) )
@ -212,7 +210,7 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
} }
if mode.IsDir() { if mode.IsDir() {
files, _ := ioutil.ReadDir(fileOrDir) files, _ := os.ReadDir(fileOrDir)
for _, subFileOrDir := range files { for _, subFileOrDir := range files {
cleanedDestDirectory := filepath.Clean(destPath + fi.Name()) cleanedDestDirectory := filepath.Clean(destPath + fi.Name())
if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), cleanedDestDirectory+"/", fileCopyTaskChan); err != nil { if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), cleanedDestDirectory+"/", fileCopyTaskChan); err != nil {
@ -339,7 +337,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 { if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 {
mimeType = detectMimeType(f) mimeType = detectMimeType(f)
data, err := ioutil.ReadAll(f) data, err := io.ReadAll(f)
if err != nil { if err != nil {
return err return err
} }

View file

@ -2,9 +2,10 @@ package command
import ( import (
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/command/scaffold" "os"
"io/ioutil"
"path/filepath" "path/filepath"
"github.com/chrislusf/seaweedfs/weed/command/scaffold"
) )
func init() { func init() {
@ -55,7 +56,7 @@ func runScaffold(cmd *Command, args []string) bool {
} }
if *outputPath != "" { if *outputPath != "" {
ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644) os.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
} else { } else {
fmt.Println(content) fmt.Println(content)
} }

View file

@ -3,7 +3,6 @@ package leveldb
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
@ -14,7 +13,7 @@ import (
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDBStore{} store := &LevelDBStore{}
store.initialize(dir) store.initialize(dir)
@ -68,7 +67,7 @@ func TestCreateAndFind(t *testing.T) {
func TestEmptyRoot(t *testing.T) { func TestEmptyRoot(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDBStore{} store := &LevelDBStore{}
store.initialize(dir) store.initialize(dir)
@ -91,7 +90,7 @@ func TestEmptyRoot(t *testing.T) {
func BenchmarkInsertEntry(b *testing.B) { func BenchmarkInsertEntry(b *testing.B) {
testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") dir, _ := os.MkdirTemp("", "seaweedfs_filer_bench")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDBStore{} store := &LevelDBStore{}
store.initialize(dir) store.initialize(dir)

View file

@ -2,7 +2,6 @@ package leveldb
import ( import (
"context" "context"
"io/ioutil"
"os" "os"
"testing" "testing"
@ -12,7 +11,7 @@ import (
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDB2Store{} store := &LevelDB2Store{}
store.initialize(dir, 2) store.initialize(dir, 2)
@ -66,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {
func TestEmptyRoot(t *testing.T) { func TestEmptyRoot(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDB2Store{} store := &LevelDB2Store{}
store.initialize(dir, 2) store.initialize(dir, 2)

View file

@ -2,7 +2,6 @@ package leveldb
import ( import (
"context" "context"
"io/ioutil"
"os" "os"
"testing" "testing"
@ -12,7 +11,7 @@ import (
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDB3Store{} store := &LevelDB3Store{}
store.initialize(dir) store.initialize(dir)
@ -66,7 +65,7 @@ func TestCreateAndFind(t *testing.T) {
func TestEmptyRoot(t *testing.T) { func TestEmptyRoot(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDB3Store{} store := &LevelDB3Store{}
store.initialize(dir) store.initialize(dir)

View file

@ -1,3 +1,4 @@
//go:build rocksdb
// +build rocksdb // +build rocksdb
package rocksdb package rocksdb
@ -5,7 +6,6 @@ package rocksdb
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
@ -16,7 +16,7 @@ import (
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &RocksDBStore{} store := &RocksDBStore{}
store.initialize(dir) store.initialize(dir)
@ -70,7 +70,7 @@ func TestCreateAndFind(t *testing.T) {
func TestEmptyRoot(t *testing.T) { func TestEmptyRoot(t *testing.T) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") dir, _ := os.MkdirTemp("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &RocksDBStore{} store := &RocksDBStore{}
store.initialize(dir) store.initialize(dir)
@ -93,7 +93,7 @@ func TestEmptyRoot(t *testing.T) {
func BenchmarkInsertEntry(b *testing.B) { func BenchmarkInsertEntry(b *testing.B) {
testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") dir, _ := os.MkdirTemp("", "seaweedfs_filer_bench")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &RocksDBStore{} store := &RocksDBStore{}
store.initialize(dir) store.initialize(dir)

View file

@ -1,7 +1,6 @@
package images package images
import ( import (
"io/ioutil"
"os" "os"
"testing" "testing"
) )
@ -9,11 +8,11 @@ import (
func TestXYZ(t *testing.T) { func TestXYZ(t *testing.T) {
fname := "sample1.jpg" fname := "sample1.jpg"
dat, _ := ioutil.ReadFile(fname) dat, _ := os.ReadFile(fname)
fixed_data := FixJpgOrientation(dat) fixed_data := FixJpgOrientation(dat)
ioutil.WriteFile("fixed1.jpg", fixed_data, 0644) os.WriteFile("fixed1.jpg", fixed_data, 0644)
os.Remove("fixed1.jpg") os.Remove("fixed1.jpg")

View file

@ -2,7 +2,6 @@ package images
import ( import (
"bytes" "bytes"
"io/ioutil"
"os" "os"
"testing" "testing"
) )
@ -10,13 +9,13 @@ import (
func TestResizing(t *testing.T) { func TestResizing(t *testing.T) {
fname := "sample2.webp" fname := "sample2.webp"
dat, _ := ioutil.ReadFile(fname) dat, _ := os.ReadFile(fname)
resized, _, _ := Resized(".webp", bytes.NewReader(dat), 100, 30, "") resized, _, _ := Resized(".webp", bytes.NewReader(dat), 100, 30, "")
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
buf.ReadFrom(resized) buf.ReadFrom(resized)
ioutil.WriteFile("resized1.png", buf.Bytes(), 0644) os.WriteFile("resized1.png", buf.Bytes(), 0644)
os.Remove("resized1.png") os.Remove("resized1.png")

View file

@ -4,9 +4,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"sort" "sort"
"sync" "sync"
@ -14,6 +12,7 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -108,7 +107,7 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (wri
return written, err return written, err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()

View file

@ -5,7 +5,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
@ -91,7 +90,7 @@ func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResul
if ok { if ok {
data = bytesReader.Bytes data = bytesReader.Bytes
} else { } else {
data, err = ioutil.ReadAll(reader) data, err = io.ReadAll(reader)
if err != nil { if err != nil {
err = fmt.Errorf("read input: %v", err) err = fmt.Errorf("read input: %v", err)
return return
@ -278,7 +277,7 @@ func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize
return &ret, nil return &ret, nil
} }
resp_body, ra_err := ioutil.ReadAll(resp.Body) resp_body, ra_err := io.ReadAll(resp.Body)
if ra_err != nil { if ra_err != nil {
return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err) return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err)
} }

View file

@ -3,17 +3,17 @@ package azure
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"net/url"
"os"
"reflect"
"github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-blob-go/azblob"
"github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb" "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
"github.com/chrislusf/seaweedfs/weed/remote_storage" "github.com/chrislusf/seaweedfs/weed/remote_storage"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"io"
"io/ioutil"
"net/url"
"os"
"reflect"
) )
func init() { func init() {
@ -115,7 +115,7 @@ func (az *azureRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocatio
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}) bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
defer bodyStream.Close() defer bodyStream.Close()
data, err = ioutil.ReadAll(bodyStream) data, err = io.ReadAll(bodyStream)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err) return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err)

View file

@ -1,9 +1,13 @@
package gcs package gcs
import ( import (
"cloud.google.com/go/storage"
"context" "context"
"fmt" "fmt"
"io"
"os"
"reflect"
"cloud.google.com/go/storage"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/remote_pb" "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"
@ -11,10 +15,6 @@ import (
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"google.golang.org/api/iterator" "google.golang.org/api/iterator"
"google.golang.org/api/option" "google.golang.org/api/option"
"io"
"io/ioutil"
"os"
"reflect"
) )
func init() { func init() {
@ -110,7 +110,7 @@ func (gcs *gcsRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation
if readErr != nil { if readErr != nil {
return nil, readErr return nil, readErr
} }
data, err = ioutil.ReadAll(rangeReader) data, err = io.ReadAll(rangeReader)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err) return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err)

View file

@ -3,7 +3,7 @@ package sub
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "os"
"sync" "sync"
"time" "time"
@ -119,7 +119,7 @@ type KafkaProgress struct {
func loadProgress(offsetFile string) *KafkaProgress { func loadProgress(offsetFile string) *KafkaProgress {
progress := &KafkaProgress{} progress := &KafkaProgress{}
data, err := ioutil.ReadFile(offsetFile) data, err := os.ReadFile(offsetFile)
if err != nil { if err != nil {
glog.Warningf("failed to read kafka progress file: %s", offsetFile) glog.Warningf("failed to read kafka progress file: %s", offsetFile)
return nil return nil
@ -137,7 +137,7 @@ func (progress *KafkaProgress) saveProgress() error {
if err != nil { if err != nil {
return fmt.Errorf("failed to marshal progress: %v", err) return fmt.Errorf("failed to marshal progress: %v", err)
} }
err = ioutil.WriteFile(progress.offsetFile, data, 0640) err = os.WriteFile(progress.offsetFile, data, 0640)
if err != nil { if err != nil {
return fmt.Errorf("failed to save progress to %s: %v", progress.offsetFile, err) return fmt.Errorf("failed to save progress to %s: %v", progress.offsetFile, err)
} }

View file

@ -2,6 +2,10 @@ package s3api
import ( import (
"fmt" "fmt"
"net/http"
"os"
"strings"
"github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb"
@ -10,9 +14,6 @@ import (
xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io/ioutil"
"net/http"
"strings"
) )
type Action string type Action string
@ -91,7 +92,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3A
} }
func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error { func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error {
content, readErr := ioutil.ReadFile(fileName) content, readErr := os.ReadFile(fileName)
if readErr != nil { if readErr != nil {
glog.Warningf("fail to read %s : %v", fileName, readErr) glog.Warningf("fail to read %s : %v", fileName, readErr)
return fmt.Errorf("fail to read %s : %v", fileName, readErr) return fmt.Errorf("fail to read %s : %v", fileName, readErr)

View file

@ -23,8 +23,7 @@ import (
"crypto/sha256" "crypto/sha256"
"crypto/subtle" "crypto/subtle"
"encoding/hex" "encoding/hex"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"regexp" "regexp"
@ -33,6 +32,8 @@ import (
"strings" "strings"
"time" "time"
"unicode/utf8" "unicode/utf8"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
) )
func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) { func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) {
@ -135,9 +136,9 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r
// Get hashed Payload // Get hashed Payload
if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil { if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil {
buf, _ := ioutil.ReadAll(r.Body) buf, _ := io.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) r.Body = io.NopCloser(bytes.NewBuffer(buf))
b, _ := ioutil.ReadAll(bytes.NewBuffer(buf)) b, _ := io.ReadAll(bytes.NewBuffer(buf))
if len(b) != 0 { if len(b) != 0 {
bodyHash := sha256.Sum256(b) bodyHash := sha256.Sum256(b)
hashedPayload = hex.EncodeToString(bodyHash[:]) hashedPayload = hex.EncodeToString(bodyHash[:])
@ -433,7 +434,7 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s
} }
} }
/// Verify finally if signature is same. // / Verify finally if signature is same.
// Get canonical request. // Get canonical request.
presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)

View file

@ -8,9 +8,7 @@ import (
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"sort" "sort"
@ -19,6 +17,8 @@ import (
"testing" "testing"
"time" "time"
"unicode/utf8" "unicode/utf8"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
) )
// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection. // TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection.
@ -86,7 +86,7 @@ func TestIsReqAuthenticated(t *testing.T) {
// Validates all testcases. // Validates all testcases.
for i, testCase := range testCases { for i, testCase := range testCases {
if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error { if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error {
ioutil.ReadAll(testCase.req.Body) io.ReadAll(testCase.req.Body)
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error) t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error)
} }
} }
@ -167,7 +167,7 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek
case body == nil: case body == nil:
hashedPayload = getSHA256Hash([]byte{}) hashedPayload = getSHA256Hash([]byte{})
default: default:
payloadBytes, err := ioutil.ReadAll(body) payloadBytes, err := io.ReadAll(body)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -5,16 +5,16 @@ import (
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/pquerna/cachecontrol/cacheobject"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"sort" "sort"
"strings" "strings"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/pquerna/cachecontrol/cacheobject"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/gorilla/mux" "github.com/gorilla/mux"
@ -198,7 +198,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
bucket, _ := getBucketAndObject(r) bucket, _ := getBucketAndObject(r)
glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket) glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket)
deleteXMLBytes, err := ioutil.ReadAll(r.Body) deleteXMLBytes, err := io.ReadAll(r.Body)
if err != nil { if err != nil {
s3err.WriteErrorResponse(w, s3err.ErrInternalError, r) s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
return return
@ -394,7 +394,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
etag = fmt.Sprintf("%x", hash.Sum(nil)) etag = fmt.Sprintf("%x", hash.Sum(nil))
resp_body, ra_err := ioutil.ReadAll(resp.Body) resp_body, ra_err := io.ReadAll(resp.Body)
if ra_err != nil { if ra_err != nil {
glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
return etag, s3err.ErrInternalError return etag, s3err.ErrInternalError

View file

@ -5,17 +5,17 @@ import (
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api/policy" "github.com/chrislusf/seaweedfs/weed/s3api/policy"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/dustin/go-humanize" "github.com/dustin/go-humanize"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"strings"
) )
func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) { func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
@ -152,7 +152,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R
// Extract form fields and file data from a HTTP POST Policy // Extract form fields and file data from a HTTP POST Policy
func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
/// HTML Form values // / HTML Form values
fileName = "" fileName = ""
// Canonicalize the form values into http.Header. // Canonicalize the form values into http.Header.
@ -175,7 +175,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
b.WriteString(v) b.WriteString(v)
} }
fileSize = int64(b.Len()) fileSize = int64(b.Len())
filePart = ioutil.NopCloser(b) filePart = io.NopCloser(b)
return filePart, fileName, fileSize, formValues, nil return filePart, fileName, fileSize, formValues, nil
} }

View file

@ -3,13 +3,13 @@ package s3api
import ( import (
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io"
"net/http"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"io"
"io/ioutil"
"net/http"
) )
// GetObjectTaggingHandler - GET object tagging // GetObjectTaggingHandler - GET object tagging
@ -49,7 +49,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R
dir, name := target.DirAndName() dir, name := target.DirAndName()
tagging := &Tagging{} tagging := &Tagging{}
input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) input, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil { if err != nil {
glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err) glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err)
s3err.WriteErrorResponse(w, s3err.ErrInternalError, r) s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)

View file

@ -4,18 +4,18 @@ import (
"context" "context"
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"github.com/chrislusf/seaweedfs/weed/util" "os"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"io/ioutil"
"strings" "strings"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util"
) )
type Authenticator struct { type Authenticator struct {
@ -37,7 +37,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption
err) err)
return nil, nil return nil, nil
} }
caCert, err := ioutil.ReadFile(config.GetString("grpc.ca")) caCert, err := os.ReadFile(config.GetString("grpc.ca"))
if err != nil { if err != nil {
glog.V(1).Infof("read ca cert file %s error: %v", config.GetString("grpc.ca"), err) glog.V(1).Infof("read ca cert file %s error: %v", config.GetString("grpc.ca"), err)
return nil, nil return nil, nil
@ -82,7 +82,7 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption {
glog.V(1).Infof("load cert/key error: %v", err) glog.V(1).Infof("load cert/key error: %v", err)
return grpc.WithInsecure() return grpc.WithInsecure()
} }
caCert, err := ioutil.ReadFile(caFileName) caCert, err := os.ReadFile(caFileName)
if err != nil { if err != nil {
glog.V(1).Infof("read ca cert file error: %v", err) glog.V(1).Infof("read ca cert file error: %v", err)
return grpc.WithInsecure() return grpc.WithInsecure()

View file

@ -5,7 +5,6 @@ import (
"crypto/md5" "crypto/md5"
"hash" "hash"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"sort" "sort"
"strings" "strings"
@ -31,7 +30,7 @@ var bufPool = sync.Pool{
func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) { func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) {
md5Hash = md5.New() md5Hash = md5.New()
var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash)) var partReader = io.NopCloser(io.TeeReader(reader, md5Hash))
var wg sync.WaitGroup var wg sync.WaitGroup
var bytesBufferCounter int64 var bytesBufferCounter int64
@ -57,7 +56,7 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
dataSize, err := bytesBuffer.ReadFrom(limitedReader) dataSize, err := bytesBuffer.ReadFrom(limitedReader)
// data, err := ioutil.ReadAll(limitedReader) // data, err := io.ReadAll(limitedReader)
if err != nil || dataSize == 0 { if err != nil || dataSize == 0 {
bufPool.Put(bytesBuffer) bufPool.Put(bytesBuffer)
atomic.AddInt64(&bytesBufferCounter, -1) atomic.AddInt64(&bytesBufferCounter, -1)

View file

@ -3,20 +3,19 @@ package weed_server
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"io" "io"
"io/ioutil"
"math" "math"
"os" "os"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -68,7 +67,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo
dataBaseFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId)) dataBaseFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId))
indexBaseFileName = storage.VolumeFileName(location.IdxDirectory, volFileInfoResp.Collection, int(req.VolumeId)) indexBaseFileName = storage.VolumeFileName(location.IdxDirectory, volFileInfoResp.Collection, int(req.VolumeId))
ioutil.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755) os.WriteFile(dataBaseFileName+".note", []byte(fmt.Sprintf("copying from %s", req.SourceDataNode)), 0755)
defer func() { defer func() {
if err != nil { if err != nil {

View file

@ -3,10 +3,7 @@ package weed_server
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
"io" "io"
"io/ioutil"
"math" "math"
"os" "os"
"path" "path"
@ -14,11 +11,13 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/storage/volume_info"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -200,12 +199,12 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se
existingShardCount := 0 existingShardCount := 0
for _, location := range vs.store.Locations { for _, location := range vs.store.Locations {
fileInfos, err := ioutil.ReadDir(location.Directory) fileInfos, err := os.ReadDir(location.Directory)
if err != nil { if err != nil {
continue continue
} }
if location.IdxDirectory != location.Directory { if location.IdxDirectory != location.Directory {
idxFileInfos, err := ioutil.ReadDir(location.IdxDirectory) idxFileInfos, err := os.ReadDir(location.IdxDirectory)
if err != nil { if err != nil {
continue continue
} }

View file

@ -5,10 +5,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"io" "io"
"io/ioutil"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@ -16,9 +13,11 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/needle_map"
"github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
@ -74,7 +73,7 @@ func (c *commandVolumeFsck) Do(args []string, commandEnv *CommandEnv, writer io.
c.env = commandEnv c.env = commandEnv
// create a temp folder // create a temp folder
tempFolder, err := ioutil.TempDir("", "sw_fsck") tempFolder, err := os.MkdirTemp("", "sw_fsck")
if err != nil { if err != nil {
return fmt.Errorf("failed to create temp folder: %v", err) return fmt.Errorf("failed to create temp folder: %v", err)
} }
@ -402,7 +401,7 @@ func (c *commandVolumeFsck) oneVolumeFileIdsSubtractFilerFileIds(tempFolder stri
return return
} }
filerFileIdsData, err := ioutil.ReadFile(getFilerFileIdFile(tempFolder, volumeId)) filerFileIdsData, err := os.ReadFile(getFilerFileIdFile(tempFolder, volumeId))
if err != nil { if err != nil {
return return
} }

View file

@ -2,8 +2,6 @@ package storage
import ( import (
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -14,6 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
"github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
@ -85,9 +84,9 @@ func getValidVolumeName(basename string) string {
return "" return ""
} }
func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapKind) bool { func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind) bool {
basename := fileInfo.Name() basename := dirEntry.Name()
if fileInfo.IsDir() { if dirEntry.IsDir() {
return false return false
} }
volumeName := getValidVolumeName(basename) volumeName := getValidVolumeName(basename)
@ -103,7 +102,7 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
// check for incomplete volume // check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note" noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) { if util.FileExists(noteFile) {
note, _ := ioutil.ReadFile(noteFile) note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note)) glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName) removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName) removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
@ -143,18 +142,18 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int) { func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int) {
task_queue := make(chan os.FileInfo, 10*concurrency) task_queue := make(chan os.DirEntry, 10*concurrency)
go func() { go func() {
foundVolumeNames := make(map[string]bool) foundVolumeNames := make(map[string]bool)
if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, fi := range fileInfos { for _, entry := range dirEntries {
volumeName := getValidVolumeName(fi.Name()) volumeName := getValidVolumeName(entry.Name())
if volumeName == "" { if volumeName == "" {
continue continue
} }
if _, found := foundVolumeNames[volumeName]; !found { if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true foundVolumeNames[volumeName] = true
task_queue <- fi task_queue <- entry
} }
} }
} }
@ -332,12 +331,12 @@ func (l *DiskLocation) Close() {
return return
} }
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) { func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, fileInfo := range fileInfos { for _, entry := range dirEntries {
volId, _, err := volumeIdFromFileName(fileInfo.Name()) volId, _, err := volumeIdFromFileName(entry.Name())
if vid == volId && err == nil { if vid == volId && err == nil {
return fileInfo, true return entry, true
} }
} }
} }

View file

@ -2,7 +2,6 @@ package storage
import ( import (
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path" "path"
"regexp" "regexp"
@ -118,25 +117,25 @@ func (l *DiskLocation) loadEcShards(shards []string, collection string, vid need
func (l *DiskLocation) loadAllEcShards() (err error) { func (l *DiskLocation) loadAllEcShards() (err error) {
fileInfos, err := ioutil.ReadDir(l.Directory) dirEntries, err := os.ReadDir(l.Directory)
if err != nil { if err != nil {
return fmt.Errorf("load all ec shards in dir %s: %v", l.Directory, err) return fmt.Errorf("load all ec shards in dir %s: %v", l.Directory, err)
} }
if l.IdxDirectory != l.Directory { if l.IdxDirectory != l.Directory {
indexFileInfos, err := ioutil.ReadDir(l.IdxDirectory) indexDirEntries, err := os.ReadDir(l.IdxDirectory)
if err != nil { if err != nil {
return fmt.Errorf("load all ec shards in dir %s: %v", l.IdxDirectory, err) return fmt.Errorf("load all ec shards in dir %s: %v", l.IdxDirectory, err)
} }
fileInfos = append(fileInfos, indexFileInfos...) dirEntries = append(dirEntries, indexDirEntries...)
} }
sort.Slice(fileInfos, func(i, j int) bool { sort.Slice(dirEntries, func(i, j int) bool {
return fileInfos[i].Name() < fileInfos[j].Name() return dirEntries[i].Name() < dirEntries[j].Name()
}) })
var sameVolumeShards []string var sameVolumeShards []string
var prevVolumeId needle.VolumeId var prevVolumeId needle.VolumeId
for _, fileInfo := range fileInfos { for _, fileInfo := range dirEntries {
if fileInfo.IsDir() { if fileInfo.IsDir() {
continue continue
} }

View file

@ -6,7 +6,6 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"net/http" "net/http"
"path" "path"
@ -108,7 +107,7 @@ func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) error {
pu.FileName = "" pu.FileName = ""
dataSize, err := pu.bytesBuffer.ReadFrom(io.LimitReader(r.Body, sizeLimit+1)) dataSize, err := pu.bytesBuffer.ReadFrom(io.LimitReader(r.Body, sizeLimit+1))
if err == io.EOF || dataSize == sizeLimit+1 { if err == io.EOF || dataSize == sizeLimit+1 {
io.Copy(ioutil.Discard, r.Body) io.Copy(io.Discard, r.Body)
} }
pu.Data = pu.bytesBuffer.Bytes() pu.Data = pu.bytesBuffer.Bytes()
r.Body.Close() r.Body.Close()
@ -118,7 +117,7 @@ func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) error {
func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) {
defer func() { defer func() {
if e != nil && r.Body != nil { if e != nil && r.Body != nil {
io.Copy(ioutil.Discard, r.Body) io.Copy(io.Discard, r.Body)
r.Body.Close() r.Body.Close()
} }
}() }()

View file

@ -1,7 +1,6 @@
package needle package needle
import ( import (
"io/ioutil"
"os" "os"
"testing" "testing"
@ -31,7 +30,7 @@ func TestAppend(t *testing.T) {
Padding: nil, // Padding []byte `comment:"Aligned to 8 bytes"` Padding: nil, // Padding []byte `comment:"Aligned to 8 bytes"`
} }
tempFile, err := ioutil.TempFile("", ".dat") tempFile, err := os.CreateTemp("", ".dat")
if err != nil { if err != nil {
t.Errorf("Fail TempFile. %v", err) t.Errorf("Fail TempFile. %v", err)
return return

View file

@ -1,8 +1,8 @@
package storage package storage
import ( import (
"io/ioutil"
"math/rand" "math/rand"
"os"
"testing" "testing"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -11,7 +11,7 @@ import (
func TestFastLoadingNeedleMapMetrics(t *testing.T) { func TestFastLoadingNeedleMapMetrics(t *testing.T) {
idxFile, _ := ioutil.TempFile("", "tmp.idx") idxFile, _ := os.CreateTemp("", "tmp.idx")
nm := NewCompactNeedleMap(idxFile) nm := NewCompactNeedleMap(idxFile)
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {

View file

@ -3,15 +3,14 @@ package volume_info
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io/ioutil" "os"
_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/jsonpb"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
_ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend"
"github.com/chrislusf/seaweedfs/weed/util"
) )
// MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil // MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil
@ -36,7 +35,7 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn
hasVolumeInfoFile = true hasVolumeInfoFile = true
glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
tierData, readErr := ioutil.ReadFile(fileName) tierData, readErr := os.ReadFile(fileName)
if readErr != nil { if readErr != nil {
glog.Warningf("fail to read %s : %v", fileName, readErr) glog.Warningf("fail to read %s : %v", fileName, readErr)
err = fmt.Errorf("fail to read %s : %v", fileName, readErr) err = fmt.Errorf("fail to read %s : %v", fileName, readErr)
@ -76,7 +75,7 @@ func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) er
return fmt.Errorf("marshal to %s: %v", fileName, marshalErr) return fmt.Errorf("marshal to %s: %v", fileName, marshalErr)
} }
writeErr := ioutil.WriteFile(fileName, []byte(text), 0755) writeErr := os.WriteFile(fileName, []byte(text), 0755)
if writeErr != nil { if writeErr != nil {
return fmt.Errorf("fail to write %s : %v", fileName, writeErr) return fmt.Errorf("fail to write %s : %v", fileName, writeErr)
} }

View file

@ -1,7 +1,6 @@
package storage package storage
import ( import (
"io/ioutil"
"math/rand" "math/rand"
"os" "os"
"testing" "testing"
@ -45,7 +44,7 @@ preparing test prerequisite easier )
func TestMakeDiff(t *testing.T) { func TestMakeDiff(t *testing.T) {
v := new(Volume) v := new(Volume)
//lastCompactIndexOffset value is the index file size before step 4 // lastCompactIndexOffset value is the index file size before step 4
v.lastCompactIndexOffset = 96 v.lastCompactIndexOffset = 96
v.SuperBlock.Version = 0x2 v.SuperBlock.Version = 0x2
/* /*
@ -63,7 +62,7 @@ func TestMakeDiff(t *testing.T) {
} }
func TestCompaction(t *testing.T) { func TestCompaction(t *testing.T) {
dir, err := ioutil.TempDir("", "example") dir, err := os.MkdirTemp("", "example")
if err != nil { if err != nil {
t.Fatalf("temp dir creation: %v", err) t.Fatalf("temp dir creation: %v", err)
} }

View file

@ -2,17 +2,17 @@ package storage
import ( import (
"fmt" "fmt"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/storage/types"
) )
func TestSearchVolumesWithDeletedNeedles(t *testing.T) { func TestSearchVolumesWithDeletedNeedles(t *testing.T) {
dir, err := ioutil.TempDir("", "example") dir, err := os.MkdirTemp("", "example")
if err != nil { if err != nil {
t.Fatalf("temp dir creation: %v", err) t.Fatalf("temp dir creation: %v", err)
} }

View file

@ -3,7 +3,6 @@ package chunk_cache
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io/ioutil"
"math/rand" "math/rand"
"os" "os"
"testing" "testing"
@ -11,7 +10,7 @@ import (
func TestOnDisk(t *testing.T) { func TestOnDisk(t *testing.T) {
tmpDir, _ := ioutil.TempDir("", "c") tmpDir, _ := os.MkdirTemp("", "c")
defer os.RemoveAll(tmpDir) defer os.RemoveAll(tmpDir)
totalDiskSizeInKB := int64(32) totalDiskSizeInKB := int64(32)

View file

@ -6,7 +6,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
@ -35,7 +34,7 @@ func Post(url string, values url.Values) ([]byte, error) {
return nil, err return nil, err
} }
defer r.Body.Close() defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body) b, err := io.ReadAll(r.Body)
if r.StatusCode >= 400 { if r.StatusCode >= 400 {
if err != nil { if err != nil {
return nil, fmt.Errorf("%s: %d - %s", url, r.StatusCode, string(b)) return nil, fmt.Errorf("%s: %d - %s", url, r.StatusCode, string(b))
@ -71,7 +70,7 @@ func Get(url string) ([]byte, bool, error) {
reader = response.Body reader = response.Body
} }
b, err := ioutil.ReadAll(reader) b, err := io.ReadAll(reader)
if response.StatusCode >= 400 { if response.StatusCode >= 400 {
retryable := response.StatusCode >= 500 retryable := response.StatusCode >= 500
return nil, retryable, fmt.Errorf("%s: %s", url, response.Status) return nil, retryable, fmt.Errorf("%s: %s", url, response.Status)
@ -107,7 +106,7 @@ func Delete(url string, jwt string) error {
return e return e
} }
defer resp.Body.Close() defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body) body, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return err return err
} }
@ -137,7 +136,7 @@ func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err err
return return
} }
defer resp.Body.Close() defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body) body, err = io.ReadAll(resp.Body)
if err != nil { if err != nil {
return return
} }
@ -271,7 +270,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC
} }
} }
// drains the response body to avoid memory leak // drains the response body to avoid memory leak
data, _ := ioutil.ReadAll(reader) data, _ := io.ReadAll(reader)
if len(data) != 0 { if len(data) != 0 {
glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data))
} }
@ -393,11 +392,11 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
} }
func CloseResponse(resp *http.Response) { func CloseResponse(resp *http.Response) {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
} }
func CloseRequest(req *http.Request) { func CloseRequest(req *http.Request) {
io.Copy(ioutil.Discard, req.Body) io.Copy(io.Discard, req.Body)
req.Body.Close() req.Body.Close()
} }