Updated weed shell (markdown)

Chris Lu 2022-01-27 00:03:24 -08:00
parent 0c702319a9
commit 9a51270799

@ -4,6 +4,7 @@
$ weed shell
> help
Type: "help <command>" for help on <command>. Most commands support "<command> -h" also for options.
cluster.ps # check current cluster process status
collection.delete # delete specified collection
collection.list # list all collections
ec.balance # balance all ec shards among all racks and volume servers
@ -19,22 +20,34 @@ Type: "help <command>" for help on <command>. Most commands support "<command> -
fs.meta.load # load saved filer meta data to restore the directory and file structure
fs.meta.notify # recursively send directory and file meta data to notification message queue
fs.meta.save # save all directory and file meta data to a local file for metadata backup.
fs.mkdir # create a directory
fs.mv # move or rename a file or a folder
fs.pwd # print out current directory
fs.rm # remove file and directory entries
fs.tree # recursively list all files under a directory
lock # lock in order to exclusively manage the cluster
remote.cache # cache the file content for mounted directories or files
remote.configure # remote storage configuration
remote.meta.sync # synchronize the local file meta data with the remote file metadata
remote.mount # mount remote storage and pull its metadata
remote.mount.buckets # mount all buckets in remote storage and pull its metadata
remote.uncache # keep the metadata but remove the cached file content for mounted directories or files
remote.unmount # unmount remote storage
s3.bucket.create # create a bucket with a given name
s3.bucket.delete # delete a bucket by a given name
s3.bucket.list # list all buckets
s3.bucket.quota # set/remove/enable/disable quota for a bucket
s3.bucket.quota.enforce # check quota for all buckets, make the bucket read only if over the limit
s3.clean.uploads # clean up stale multipart uploads
s3.configure # configure and apply s3 options for each bucket
unlock # unlock the cluster-wide lock
volume.balance # balance all volumes among volume servers
volume.check.disk # check all replicated volumes to find and fix inconsistencies
volume.check.disk # check all replicated volumes to find and fix inconsistencies. It is optional and resource intensive.
volume.configure.replication # change volume replication value
volume.copy # copy a volume from one volume server to another volume server
volume.delete # delete a live volume from one volume server
volume.fix.replication # add replicas to volumes that are missing replicas
volume.deleteEmpty # delete empty volumes from all volume servers
volume.fix.replication # add or remove replicas to volumes that are missing replicas or over-replicated
volume.fsck # check all volumes to find entries not used by the filer
volume.list # list all volumes
volume.mark # mark volume writable or readonly from one volume server
@ -80,4 +93,23 @@ replicating volume 241 001 from localhost:8080 to dataNode 127.0.0.1:7823 ...
$ echo "lock; volume.fix.replication -n ; unlock" | weed shell
no under replicated volumes
```
# One more trick
You can skip the "fs." prefix, for all "fs.*" commands:
```
> fs.ls
dd.dat
topics
> ls
dd.dat
topics
> ls -al topics
drwxr-xr-x 0 chrislu staff 0 /topics/.system
total 1
> fs.du
block: 515 byte:10039099653 /
> du
block: 515 byte:10039099653 /
```