mirror of https://github.com/seaweedfs/seaweedfs.git
synced 2024-01-19 02:48:24 +00:00

Merge pull request #3 from chrislusf/master

This commit is contained in:
commit f69356f589
.github/workflows/cleanup.yml (vendored, new file)  22 lines added

@@ -0,0 +1,22 @@
+name: Cleanup
+
+on:
+  push:
+    branches: [ master ]
+
+jobs:
+
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+
+    steps:
+
+      - name: Delete old release assets
+        uses: mknejp/delete-release-assets@v1
+        with:
+          token: ${{ github.token }}
+          tag: dev
+          fail-if-no-assets: false
+          assets: |
+            weed-*
.github/workflows/release.yml (vendored)  76 lines changed

@@ -13,7 +13,7 @@ jobs:
       matrix:
         goos: [linux, windows, darwin, freebsd ]
         goarch: [amd64, arm]
         exclude:
           - goarch: arm
             goos: darwin
           - goarch: arm
@@ -21,46 +21,42 @@ jobs:

     steps:

       - name: Check out code into the Go module directory
         uses: actions/checkout@v2

-      - name: Delete old release assets
-        uses: mknejp/delete-release-assets@v1
+      - name: Wait for the deletion
+        uses: jakejarvis/wait-action@master
         with:
-          token: ${{ github.token }}
-          tag: dev
-          fail-if-no-assets: false
-          assets: |
-            weed-*
+          time: '30s'

       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}

       - name: Go Release Binaries
         uses: wangyoucao577/go-release-action@v1.14
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
           build_flags: -tags 5BytesOffset # optional, default is
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
           project_path: weed
           binary_name: weed-large-disk
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

       - name: Go Release Binaries
         uses: wangyoucao577/go-release-action@v1.14
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
           release_tag: dev
           overwrite: true
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
           project_path: weed
           binary_name: weed-
           asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
@@ -1,5 +1,7 @@
 FROM alpine

+ARG RELEASE=latest # 'latest' or 'dev'
+
 RUN \
   ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
       elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
@@ -13,7 +15,7 @@ RUN \
   # Install SeaweedFS and Supercronic ( for cron job mode )
   apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
   apk add fuse && \
-  wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
+  wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
   tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
   curl -fsSLO "$SUPERCRONIC_URL" && \
   echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \
@@ -30,7 +30,7 @@ services:
   mount_1:
     image: chrislusf/seaweedfs:local
     privileged: true
-    entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1" -volumeServerAccess=filerProxy'
+    entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"'
     depends_on:
       - master
       - volume
@@ -38,7 +38,7 @@ services:
   mount_2:
     image: chrislusf/seaweedfs:local
     privileged: true
-    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1" -volumeServerAcess=publicUrl'
+    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAcess=publicUrl"'
    depends_on:
       - master
       - volume
@@ -52,7 +52,7 @@ type FilerOptions struct {
 func init() {
     cmdFiler.Run = runFiler // break init cycle
     f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
-    f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
+    f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
     f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
     f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
     f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
@@ -83,6 +83,8 @@ func init() {
     filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
     filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
     filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
+    filerWebDavOptions.replication = cmdFiler.Flag.String("webdav.replication", "", "replication to create the files")
+    filerWebDavOptions.disk = cmdFiler.Flag.String("webdav.disk", "", "[hdd|ssd] hard drive or solid state drive")
     filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
     filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
     filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
@@ -121,6 +121,8 @@ func init() {

     webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
     webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
+    webdavOptions.replication = cmdServer.Flag.String("webdav.replication", "", "replication to create the files")
+    webdavOptions.disk = cmdServer.Flag.String("webdav.disk", "", "[hdd|ssd] hard drive or solid state drive")
     webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file")
     webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
     webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
@@ -25,6 +25,8 @@ type WebDavOption struct {
     filer          *string
     port           *int
     collection     *string
+    replication    *string
+    disk           *string
     tlsPrivateKey  *string
     tlsCertificate *string
     cacheDir       *string
@@ -36,6 +38,8 @@ func init() {
     webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address")
     webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port")
     webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
+    webDavStandaloneOptions.replication = cmdWebDav.Flag.String("replication", "", "replication to create the files")
+    webDavStandaloneOptions.disk = cmdWebDav.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
     webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
     webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
     webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
@@ -107,6 +111,8 @@ func (wo *WebDavOption) startWebDav() bool {
         FilerGrpcAddress: filerGrpcAddress,
         GrpcDialOption:   grpcDialOption,
         Collection:       *wo.collection,
+        Replication:      *wo.replication,
+        DiskType:         *wo.disk,
         Uid:              uid,
         Gid:              gid,
         Cipher:           cipher,
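For readers unfamiliar with the command wiring above: every option is a pointer registered on the command's flag set, and the new replication and disk options are wired exactly like the existing collection one. Below is a self-contained sketch of that pattern; the names (webDavOptions, the flag set name, the sample values) are illustrative only and not the actual SeaweedFS plumbing.

package main

import (
    "flag"
    "fmt"
)

// Stand-in for the option struct: every field is a pointer filled in by the FlagSet.
type webDavOptions struct {
    collection  *string
    replication *string
    disk        *string
}

func main() {
    var o webDavOptions
    fs := flag.NewFlagSet("webdav", flag.ExitOnError)
    o.collection = fs.String("collection", "", "collection to create the files")
    o.replication = fs.String("replication", "", "replication to create the files")
    o.disk = fs.String("disk", "", "[hdd|ssd] hard drive or solid state drive")

    // Simulate: weed webdav -replication=001 -disk=ssd
    _ = fs.Parse([]string{"-replication", "001", "-disk", "ssd"})
    fmt.Printf("collection=%q replication=%q disk=%q\n", *o.collection, *o.replication, *o.disk)
}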
@@ -1,11 +0,0 @@
-[elastic7]
-enabled = true
-servers = [
-    "http://localhost:9200",
-]
-username = ""
-password = ""
-sniff_enabled = false
-healthcheck_enabled = false
-# increase the value is recommend, be sure the value in Elastic is greater or equal here
-index.max_result_window = 10000
@@ -46,17 +46,19 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
         return fc.LoadFromBytes(entry.Content)
     }

-    return fc.loadFromChunks(filer, entry.Chunks)
+    return fc.loadFromChunks(filer, entry.Content, entry.Chunks)
 }

-func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
-    data, err := filer.readEntry(chunks)
-    if err != nil {
-        glog.Errorf("read filer conf content: %v", err)
-        return
+func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk) (err error) {
+    if len(content) == 0 {
+        content, err = filer.readEntry(chunks)
+        if err != nil {
+            glog.Errorf("read filer conf content: %v", err)
+            return
+        }
     }

-    return fc.LoadFromBytes(data)
+    return fc.LoadFromBytes(content)
 }

 func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
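The net effect of this hunk: when a filer.conf entry already carries its bytes inline (entry.Content), the chunk read is skipped entirely, and reading the chunks is only a fallback. Below is a minimal, self-contained sketch of that fallback shape, assuming a hypothetical readChunks callback in place of filer.readEntry.

package main

import (
    "errors"
    "fmt"
)

// loadConf prefers the inline content and only falls back to the (possibly
// expensive) chunk read when no inline bytes are present.
func loadConf(content []byte, readChunks func() ([]byte, error)) ([]byte, error) {
    if len(content) == 0 {
        data, err := readChunks()
        if err != nil {
            return nil, fmt.Errorf("read filer conf content: %w", err)
        }
        return data, nil
    }
    return content, nil
}

func main() {
    inline, _ := loadConf([]byte("[location]"), nil) // chunk reader is never called
    fmt.Println(string(inline))

    _, err := loadConf(nil, func() ([]byte, error) { return nil, errors.New("chunk store down") })
    fmt.Println(err)
}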
@@ -40,7 +40,7 @@ func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {

 func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
     fc := NewFilerConf()
-    err := fc.loadFromChunks(f, entry.Chunks)
+    err := fc.loadFromChunks(f, entry.Content, entry.Chunks)
     if err != nil {
         glog.Errorf("read filer conf chunks: %v", err)
         return
|
||||||
grpc.WithDefaultCallOptions(
|
grpc.WithDefaultCallOptions(
|
||||||
grpc.MaxCallSendMsgSize(Max_Message_Size),
|
grpc.MaxCallSendMsgSize(Max_Message_Size),
|
||||||
grpc.MaxCallRecvMsgSize(Max_Message_Size),
|
grpc.MaxCallRecvMsgSize(Max_Message_Size),
|
||||||
grpc.WaitForReady(true),
|
|
||||||
),
|
),
|
||||||
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||||
Time: 30 * time.Second, // client ping server if no activity for this long
|
Time: 30 * time.Second, // client ping server if no activity for this long
|
||||||
|
|
|
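Removing grpc.WaitForReady(true) from the default call options changes client behavior: an RPC issued while the connection is not ready now fails fast with an Unavailable error instead of blocking until the context deadline. The standalone sketch below is not SeaweedFS code; it deliberately dials an address nothing listens on to show the difference, and how a caller could still opt back in per call.

package main

import (
    "context"
    "fmt"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
    healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
    // Nothing listens on this port; grpc.Dial itself is lazy and succeeds.
    conn, err := grpc.Dial("127.0.0.1:1", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    client := healthpb.NewHealthClient(conn)

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    // Default (fail-fast) behavior, as after this change: returns Unavailable
    // as soon as the connection attempt fails.
    _, err = client.Check(ctx, &healthpb.HealthCheckRequest{})
    fmt.Println("fail-fast:", err)

    // Opting back in per call reproduces the old behavior: the call blocks and
    // retries until the context deadline expires.
    _, err = client.Check(ctx, &healthpb.HealthCheckRequest{}, grpc.WaitForReady(true))
    fmt.Println("wait-for-ready:", err)
}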
@@ -77,7 +77,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest

     if !ms.Topo.HasWritableVolume(option) {
         if ms.Topo.AvailableSpaceFor(option) <= 0 {
-            return nil, fmt.Errorf("No free volumes left!")
+            return nil, fmt.Errorf("no free volumes left for "+option.String())
         }
         ms.vgLock.Lock()
         if !ms.Topo.HasWritableVolume(option) {
@@ -113,7 +113,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)

     if !ms.Topo.HasWritableVolume(option) {
         if ms.Topo.AvailableSpaceFor(option) <= 0 {
-            writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left!"})
+            writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left for " + option.String()})
             return
         }
         ms.vgLock.Lock()
@@ -33,6 +33,7 @@ type WebDavOption struct {
     BucketsPath    string
     GrpcDialOption grpc.DialOption
     Collection     string
+    Replication    string
     DiskType       string
     Uid            uint32
     Gid            uint32
@@ -225,7 +226,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f
             Uid:         fs.option.Uid,
             Gid:         fs.option.Gid,
             Collection:  fs.option.Collection,
-            Replication: "000",
+            Replication: fs.option.Replication,
             TtlSec:      0,
         },
     },
@@ -381,7 +382,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64

     request := &filer_pb.AssignVolumeRequest{
         Count:       1,
-        Replication: "",
+        Replication: f.fs.option.Replication,
         Collection:  f.fs.option.Collection,
         DiskType:    f.fs.option.DiskType,
         Path:        name,
@@ -20,11 +20,11 @@ type commandVolumeTierMove struct {
 }

 func (c *commandVolumeTierMove) Name() string {
-    return "volume.tier.upload"
+    return "volume.tier.move"
 }

 func (c *commandVolumeTierMove) Help() string {
-    return `change a volume from one disk type to another
+    return `<WIP> change a volume from one disk type to another

     volume.tier.move -source=hdd -target=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
     volume.tier.move -target=hdd [-collection=""] -volumeId=<volume_id>
@@ -92,7 +92,7 @@ func processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool

 func printGenericHelp() {
     msg :=
-        `Type: "help <command>" for help on <command>
+        `Type: "help <command>" for help on <command>. Most commands support "<command> -h" also for options.
 `
     fmt.Print(msg)

@@ -6,9 +6,9 @@ import (
 )

 type ReplicaPlacement struct {
-    SameRackCount       int
-    DiffRackCount       int
-    DiffDataCenterCount int
+    SameRackCount       int `json:"node,omitempty"`
+    DiffRackCount       int `json:"rack,omitempty"`
+    DiffDataCenterCount int `json:"dc,omitempty"`
 }

 func NewReplicaPlacementFromString(t string) (*ReplicaPlacement, error) {
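The added tags make a replica placement marshal into a compact JSON blob, with zero counts dropped, which is what the JSON-based VolumeGrowOption.String() further down relies on. A quick, runnable illustration follows; the struct is copied locally purely for the example, the real type lives in the super_block package.

package main

import (
    "encoding/json"
    "fmt"
)

// Copy of the tagged struct, for illustration only.
type ReplicaPlacement struct {
    SameRackCount       int `json:"node,omitempty"`
    DiffRackCount       int `json:"rack,omitempty"`
    DiffDataCenterCount int `json:"dc,omitempty"`
}

func main() {
    // Equivalent of the replication string "010": one extra copy on a
    // different rack, nothing else.
    rp := ReplicaPlacement{DiffRackCount: 1}
    blob, _ := json.Marshal(rp)
    fmt.Println(string(blob)) // prints {"rack":1}
}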
@@ -207,7 +207,26 @@ func (dn *DataNode) ToMap() interface{} {
     ret := make(map[string]interface{})
     ret["Url"] = dn.Url()
     ret["PublicUrl"] = dn.PublicUrl
-    ret["Disks"] = dn.diskUsages.ToMap()
+
+    // aggregated volume info
+    var volumeCount, ecShardCount, maxVolumeCount int64
+    var volumeIds string
+    for _, diskUsage := range dn.diskUsages.usages {
+        volumeCount += diskUsage.volumeCount
+        ecShardCount += diskUsage.ecShardCount
+        maxVolumeCount += diskUsage.maxVolumeCount
+    }
+
+    for _, disk := range dn.Children() {
+        d := disk.(*Disk)
+        volumeIds += " " + d.GetVolumeIds()
+    }
+
+    ret["Volumes"] = volumeCount
+    ret["EcShards"] = ecShardCount
+    ret["Max"] = maxVolumeCount
+    ret["VolumeIds"] = volumeIds
+
     return ret
 }

@@ -58,16 +58,6 @@ func (d *DiskUsages) negative() *DiskUsages {
     return t
 }

-func (d *DiskUsages) ToMap() interface{} {
-    d.RLock()
-    defer d.RUnlock()
-    ret := make(map[string]interface{})
-    for diskType, diskUsage := range d.usages {
-        ret[diskType.String()] = diskUsage.ToMap()
-    }
-    return ret
-}
-
 func (d *DiskUsages) ToDiskInfo() map[string]*master_pb.DiskInfo {
     ret := make(map[string]*master_pb.DiskInfo)
     for diskType, diskUsageCounts := range d.usages {
@@ -135,15 +125,6 @@ func (a *DiskUsageCounts) minus(b *DiskUsageCounts) *DiskUsageCounts {
     }
 }

-func (diskUsage *DiskUsageCounts) ToMap() interface{} {
-    ret := make(map[string]interface{})
-    ret["Volumes"] = diskUsage.volumeCount
-    ret["EcShards"] = diskUsage.ecShardCount
-    ret["Max"] = diskUsage.maxVolumeCount
-    ret["Free"] = diskUsage.FreeSpace()
-    return ret
-}
-
 func (du *DiskUsages) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
     du.Lock()
     defer du.Unlock()
@@ -1,6 +1,7 @@
 package topology

 import (
+    "encoding/json"
     "fmt"
     "github.com/chrislusf/seaweedfs/weed/storage/types"
     "math/rand"
@@ -25,15 +26,15 @@ This package is created to resolve these replica placement issues:
 */

 type VolumeGrowOption struct {
-    Collection         string
-    ReplicaPlacement   *super_block.ReplicaPlacement
-    Ttl                *needle.TTL
-    DiskType           types.DiskType
-    Prealloacte        int64
-    DataCenter         string
-    Rack               string
-    DataNode           string
-    MemoryMapMaxSizeMb uint32
+    Collection         string                        `json:"collection,omitempty"`
+    ReplicaPlacement   *super_block.ReplicaPlacement `json:"replication,omitempty"`
+    Ttl                *needle.TTL                   `json:"ttl,omitempty"`
+    DiskType           types.DiskType                `json:"disk,omitempty"`
+    Prealloacte        int64                         `json:"prealloacte,omitempty"`
+    DataCenter         string                        `json:"dataCenter,omitempty"`
+    Rack               string                        `json:"rack,omitempty"`
+    DataNode           string                        `json:"dataNode,omitempty"`
+    MemoryMapMaxSizeMb uint32                        `json:"memoryMapMaxSizeMb,omitempty"`
 }

 type VolumeGrowth struct {
@@ -41,7 +42,8 @@ type VolumeGrowth struct {
 }

 func (o *VolumeGrowOption) String() string {
-    return fmt.Sprintf("Collection:%s, ReplicaPlacement:%v, Ttl:%v, DataCenter:%s, Rack:%s, DataNode:%s", o.Collection, o.ReplicaPlacement, o.Ttl, o.DataCenter, o.Rack, o.DataNode)
+    blob, _ := json.Marshal(o)
+    return string(blob)
 }

 func NewDefaultVolumeGrowth() *VolumeGrowth {
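Together with the JSON tags above, the new String() turns a grow option into a one-line JSON blob, which is what now gets appended to the master's "no free volumes left for ..." errors. A trimmed-down sketch follows, using a hypothetical growOption struct with only a few of the real fields.

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed-down stand-in for VolumeGrowOption, just to show the shape of the
// new String() output; the real struct has more fields.
type growOption struct {
    Collection string `json:"collection,omitempty"`
    DataCenter string `json:"dataCenter,omitempty"`
    Rack       string `json:"rack,omitempty"`
}

func (o *growOption) String() string {
    blob, _ := json.Marshal(o)
    return string(blob)
}

func main() {
    option := &growOption{Collection: "pictures", DataCenter: "dc1"}
    // Roughly the style of message the master now returns from Assign when no
    // writable volume is available (see the hunks above).
    err := fmt.Errorf("no free volumes left for %s", option)
    fmt.Println(err) // no free volumes left for {"collection":"pictures","dataCenter":"dc1"}
}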