// seaweedfs/weed/command/scaffold.go
package command
import (
"io/ioutil"
"path/filepath"
)
// init attaches the Run implementation after both cmdScaffold and runScaffold
// exist; assigning it inside the struct literal would create an init cycle.
func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}
var cmdScaffold = &Command{
UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
2018-08-19 22:36:30 +00:00
Short: "generate basic configuration files",
Long: `Generate filer.toml with all possible configurations for you to customize.
`,
}
var (
outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
2018-08-19 22:36:30 +00:00
)
func runScaffold(cmd *Command, args []string) bool {
content := ""
switch *config {
case "filer":
content = FILER_TOML_EXAMPLE
case "notification":
content = NOTIFICATION_TOML_EXAMPLE
2018-09-17 07:27:56 +00:00
case "replication":
content = REPLICATION_TOML_EXAMPLE
2019-02-10 05:07:12 +00:00
case "security":
content = SECURITY_TOML_EXAMPLE
case "master":
content = MASTER_TOML_EXAMPLE
2018-08-19 22:36:30 +00:00
}
if content == "" {
println("need a valid -config option")
return false
}
if *outputPath != "" {
ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
2018-08-19 22:36:30 +00:00
} else {
println(content)
}
return true
}
// Example configuration templates emitted by "weed scaffold".
// All of them are TOML, so comments inside the templates must use '#'.
const (
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the location, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "."          # directory to store level db files

####################################################
# multiple filers on shared storage, fairly scalable
####################################################

[mysql] # or tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT        COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name      VARCHAR(1000) COMMENT 'directory or file name',
#   directory TEXT          COMMENT 'full path to parent directory',
#   meta      LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""      # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
interpolateParams = false

[postgres] # or cockroachdb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = ""      # create or use an existing database
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name varchar,
#   meta blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace="seaweedfs"
hosts=[
	"localhost:9042",
]

[redis]
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis_cluster]
enabled = false
addresses = [
	"localhost:30001",
	"localhost:30002",
	"localhost:30003",
	"localhost:30004",
	"localhost:30005",
	"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = true
# automatically use the closest Redis server for reads
routeByLatency = true

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[tikv]
enabled = false
pdAddress = "192.168.199.113:2379"
`

	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file to one of the location, with descending priority
#    ./notification.toml
#    $HOME/.seaweedfs/notification.toml
#    /etc/seaweedfs/notification.toml

####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purpose and does not work with "weed filer.replicate"
enabled = false

[notification.kafka]
enabled = false
hosts = [
	"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10

[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = ""     # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name

[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = ""                 # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto created if it does not exist

[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`

	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.replicate"
# Put this file to one of the location, with descending priority
#    ./replication.toml
#    $HOME/.seaweedfs/replication.toml
#    /etc/seaweedfs/replication.toml

[source.filer]
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0

[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = ""     # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/"             # destination directory

[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/"                  # destination directory

[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/"           # destination directory

[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket"       # an existing bucket
directory = "/"           # destination directory
`

	SECURITY_TOML_EXAMPLE = `
# Put this file to one of the location, with descending priority
#    ./security.toml
#    $HOME/.seaweedfs/security.toml
#    /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer

# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds

# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""

[grpc.volume]
cert = ""
key = ""

[grpc.master]
cert = ""
key = ""

[grpc.filer]
cert = ""
key = ""

# use this for any place needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""

# volume server https options
# Note: work in progress!
#   this does not work with other clients, e.g., "weed filer|mount" etc, yet.
[https.client]
enabled = true

[https.volume]
cert = ""
key = ""
`

	MASTER_TOML_EXAMPLE = `
# Put this file to one of the location, with descending priority
#    ./master.toml
#    $HOME/.seaweedfs/master.toml
#    /etc/seaweedfs/master.toml
# this file is read by master

[master.maintenance]
# periodically run these scripts are the same as running them from 'weed shell'
scripts = """
	ec.encode -fullPercent=95 -quietFor=1h
	ec.rebuild -force
	ec.balance -force
	volume.balance -force
"""
sleep_minutes = 17 # sleep minutes between each script execution

[master.filer]
default_filer_url = "http://localhost:8888/"

[master.sequencer]
type = "memory" # Choose [memory|etcd] type for storing the file id sequence
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"

[storage.backend]
	[storage.backend.s3.default]
	enabled = false
	aws_access_key_id = ""     # if empty, loads from the shared credentials file (~/.aws/credentials).
	aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
	region = "us-east-2"
	bucket = "your_bucket_name" # an existing bucket
`
)