package command

import (
	"io/ioutil"
	"path/filepath"
)

func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}
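
// cmdScaffold defines the "weed scaffold" command, which prints example
// TOML configuration files for the other SeaweedFS components.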
var cmdScaffold = &Command{
	UsageLine: "scaffold -config=[filer|notification|replication|security|master|shell]",
	Short:     "generate basic configuration files",
	Long: `Generate filer.toml with all possible configurations for you to customize.

	The options can also be overwritten by environment variables.
	For example, the filer.toml mysql password can be overwritten by the environment variable
		export WEED_MYSQL_PASSWORD=some_password

	Environment variable rules:
		* Prefix the variable name with "WEED_"
		* Uppercase the rest of the variable name.
		* Replace '.' with '_'
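
	For example, following these rules, this filer.toml setting

		[leveldb2]
		enabled = true

	can be overridden with

		export WEED_LEVELDB2_ENABLED=true

	To generate a configuration file and save it to the current directory:

		weed scaffold -config=filer -output=.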
`,
}

var (
	outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
	config     = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master|shell] the configuration file to generate")
)
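
// runScaffold prints the example configuration selected by -config,
// or writes it as <config>.toml into the directory given by -output.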
func runScaffold(cmd *Command, args []string) bool {
	content := ""
	switch *config {
	case "filer":
		content = FILER_TOML_EXAMPLE
	case "notification":
		content = NOTIFICATION_TOML_EXAMPLE
	case "replication":
		content = REPLICATION_TOML_EXAMPLE
	case "security":
		content = SECURITY_TOML_EXAMPLE
	case "master":
		content = MASTER_TOML_EXAMPLE
	case "shell":
		content = SHELL_TOML_EXAMPLE
	}
	if content == "" {
		println("need a valid -config option")
		return false
	}

	if *outputPath != "" {
		if err := ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644); err != nil {
			println("failed to write", *config+".toml:", err.Error())
			return false
		}
	} else {
		println(content)
	}
	return true
}
const (
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, in descending priority:
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
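# For example (illustrative; assumes the filer listens on the default port 8888),
# a recursive delete over HTTP looks like:
#   curl -X DELETE "http://localhost:8888/path/to/dir?recursive=true"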
# directories under this folder will automatically become separate buckets
buckets_folder = "/buckets"

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2"  # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3"  # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it yourself
enabled = false
dir = "./filerrdb"  # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db"  # sqlite db file

[mysql]  # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT        COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name      VARCHAR(1000) BINARY COMMENT 'directory or file name',
#   directory TEXT          COMMENT 'full path to parent directory',
#   meta      LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""  # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or change the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""

[mysql2]  # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
    dirhash   BIGINT,
    name      VARCHAR(1000) BINARY,
    directory TEXT,
    meta      LONGBLOB,
    PRIMARY KEY (dirhash, name)
  ) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""  # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or change the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""

[postgres]  # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres"  # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or change the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres"  # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or change the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name      varchar,
#   meta      blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
  "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
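# Illustrative only: list directories expected to hold a very large number of
# children, e.g. superLargeDirectories = ["/home/big"]  ("/home/big" is a hypothetical path)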
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_cluster2]
enabled = false
addresses = [
  "localhost:30001",
  "localhost:30002",
  "localhost:30003",
  "localhost:30004",
  "localhost:30005",
  "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
2018-08-19 21:58:24 +00:00
2019-08-01 02:16:45 +00:00
[ etcd ]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
  "http://localhost1:9200",
  "http://localhost2:9200",
  "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing this value is recommended; make sure the value in Elasticsearch is greater than or equal to the value here
index.max_result_window = 10000

##########################
##########################
# To add a path-specific filer store:
#
# 1. Add a name following the store type, separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
`

	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS notification
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file in one of these locations, in descending priority:
#    ./notification.toml
#    $HOME/.seaweedfs/notification.toml
#    /etc/seaweedfs/notification.toml

####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false

[notification.kafka]
enabled = false
hosts = [
  "localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10
2018-08-19 21:58:24 +00:00
2018-10-31 08:11:19 +00:00
[ notification . aws_sqs ]
# experimental , let me know if it works
enabled = false
aws_access_key_id = "" # if empty , loads from the shared credentials file ( ~ / . aws / credentials ) .
aws_secret_access_key = "" # if empty , loads from the shared credentials file ( ~ / . aws / credentials ) .
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name

[notification.google_pub_sub]
# read the credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json"  # path to json credential file
project_id = ""                  # an existing project id
topic = "seaweedfs_filer_topic"  # a topic, auto-created if it does not exist

[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue,
# then create a binding myexchange => myqueue.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`

	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.backup"
# Using it with "weed filer.replicate" is deprecated.
# Put this file in one of these locations, in descending priority:
#    ./replication.toml
#    $HOME/.seaweedfs/replication.toml
#    /etc/seaweedfs/replication.toml

[source.filer]  # deprecated; only useful with "weed filer.replicate"
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"

[sink.local]
enabled = false
directory = "/data"
# all replicated files are placed under yyyy-mm-dd directories named by modified time,
# so each date directory contains all new and updated files.
is_incremental = false
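# e.g. with is_incremental = true, a file modified on 2021-05-27 would be
# placed under /data/2021-05-27/ (illustrative date)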

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
is_incremental = false

[sink.s3]
# read the credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = ""      # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""  # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name"  # an existing bucket
directory = "/"              # destination directory
endpoint = ""
is_incremental = false

[sink.google_cloud_storage]
# read the credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json"  # path to json credential file
bucket = "your_bucket_seaweedfs"  # an existing bucket
directory = "/"                   # destination directory
is_incremental = false

[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer"  # an existing container
directory = "/"            # destination directory
is_incremental = false

[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket"  # an existing bucket
directory = "/"      # destination directory
is_incremental = false
`

	SECURITY_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
#    ./security.toml
#    $HOME/.seaweedfs/security.toml
#    /etc/seaweedfs/security.toml

# this file is read by master, volume server, and filer
# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10  # seconds

# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10  # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""
# Set a wildcard domain to enable TLS authentication by common names
allowed_wildcard_domain = ""  # ".mycompany.com"

[grpc.volume]
cert = ""
key = ""
allowed_commonNames = ""  # comma-separated SSL certificate common names

[grpc.master]
cert = ""
key = ""
allowed_commonNames = ""  # comma-separated SSL certificate common names

[grpc.filer]
cert = ""
key = ""
allowed_commonNames = ""  # comma-separated SSL certificate common names

[grpc.msg_broker]
cert = ""
key = ""
allowed_commonNames = ""  # comma-separated SSL certificate common names

# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""

# volume server https options
# Note: work in progress!
# this does not work with other clients, e.g., "weed filer|mount" etc., yet.
[https.client]
enabled = true

[https.volume]
cert = ""
key = ""
`

	MASTER_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
#    ./master.toml
#    $HOME/.seaweedfs/master.toml
#    /etc/seaweedfs/master.toml
# this file is read by master

[master.maintenance]
# periodically running these scripts is the same as running them from 'weed shell'
scripts = """
  lock
  ec.encode -fullPercent=95 -quietFor=1h
  ec.rebuild -force
  ec.balance -force
  volume.balance -force
  volume.fix.replication
  unlock
"""
sleep_minutes = 17  # sleep minutes between each script execution

[master.filer]
default = "localhost:8888"  # used by maintenance scripts if the scripts need to use fs related commands

[master.sequencer]
type = "raft"  # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set the listen client urls of the etcd cluster that stores the file id sequence
# example: http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"

# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
	[storage.backend.s3.default]
	enabled = false
	aws_access_key_id = ""      # if empty, loads from the shared credentials file (~/.aws/credentials).
	aws_secret_access_key = ""  # if empty, loads from the shared credentials file (~/.aws/credentials).
	region = "us-east-2"
	bucket = "your_bucket_name"  # an existing bucket
	endpoint = ""

# create this number of logical volumes if no more writable volumes
# count_x means how many copies of data.
# e.g.:
#   000 has only one copy, copy_1
#   010 and 001 have two copies, copy_2
#   011 has three copies, copy_3
[master.volume_growth]
copy_1 = 7      # create 1 x 7 = 7 actual volumes
copy_2 = 6      # create 2 x 6 = 12 actual volumes
copy_3 = 3      # create 3 x 3 = 9 actual volumes
copy_other = 1  # create n x 1 = n actual volumes

# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`

	SHELL_TOML_EXAMPLE = `
[cluster]
default = "c1"

[cluster.c1]
master = "localhost:9333"  # comma-separated master servers
filer = "localhost:8888"   # filer host and port

[cluster.c2]
master = ""
filer = ""
`
)