From 58b04cb505dc355228e642126cdd04ad1967e823 Mon Sep 17 00:00:00 2001
From: Konstantin Lebedev <9497591+kmlebedev@users.noreply.github.com>
Date: Mon, 13 Feb 2023 14:24:43 +0500
Subject: [PATCH] Updated weed shell (markdown)

---
 weed-shell.md | 125 +++++++++++++++++++++++++++----------------------
 1 file changed, 68 insertions(+), 57 deletions(-)

diff --git a/weed-shell.md b/weed-shell.md
index 7459372..bb6b50f 100644
--- a/weed-shell.md
+++ b/weed-shell.md
@@ -3,63 +3,74 @@
 ```
 $ weed shell
 > help
-Type: "help <command>" for help on <command>. Most commands support "<command> -h" also for options.
-  cluster.ps # check current cluster process status
-  collection.delete # delete specified collection
-  collection.list # list all collections
-  ec.balance # balance all ec shards among all racks and volume servers
-  ec.decode # decode a erasure coded volume into a normal volume
-  ec.encode # apply erasure coding to a volume
-  ec.rebuild # find and rebuild missing ec shards among volume servers
-  fs.cat # stream the file content on to the screen
-  fs.cd # change directory to a directory /path/to/dir
-  fs.configure # configure and apply storage options for each location
-  fs.du # show disk usage
-  fs.ls # list all files under a directory
-  fs.meta.cat # print out the meta data content for a file or directory
-  fs.meta.load # load saved filer meta data to restore the directory and file structure
-  fs.meta.notify # recursively send directory and file meta data to notifiction message queue
-  fs.meta.save # save all directory and file meta data to a local file for metadata backup.
-  fs.mkdir # create a directory
-  fs.mv # move or rename a file or a folder
-  fs.pwd # print out current directory
-  fs.rm # remove file and directory entries
-  fs.tree # recursively list all files under a directory
-  lock # lock in order to exclusively manage the cluster
-  remote.cache # cache the file content for mounted directories or files
-  remote.configure # remote storage configuration
-  remote.meta.sync # synchronize the local file meta data with the remote file metadata
-  remote.mount # mount remote storage and pull its metadata
-  remote.mount.buckets # mount all buckets in remote storage and pull its metadata
-  remote.uncache # keep the metadata but remote cache the file content for mounted directories or files
-  remote.unmount # unmount remote storage
-  s3.bucket.create # create a bucket with a given name
-  s3.bucket.delete # delete a bucket by a given name
-  s3.bucket.list # list all buckets
-  s3.bucket.quota # set/remove/enable/disable quota for a bucket
-  s3.bucket.quota.enforce # check quota for all buckets, make the bucket read only if over the limit
-  s3.clean.uploads # clean up stale multipart uploads
-  s3.configure # configure and apply s3 options for each bucket
-  unlock # unlock the cluster-wide lock
-  volume.balance # balance all volumes among volume servers
-  volume.check.disk # check all replicated volumes to find and fix inconsistencies. It is optional and resource intensive.
-  volume.configure.replication # change volume replication value
-  volume.copy # copy a volume from one volume server to another volume server
-  volume.delete # delete a live volume from one volume server
-  volume.deleteEmpty # delete empty volumes from all volume servers
-  volume.fix.replication # add or remove replicas to volumes that are missing replicas or over-replicated
-  volume.fsck # check all volumes to find entries not used by the filer
-  volume.list # list all volumes
-  volume.mark # Mark volume writable or readonly from one volume server
-  volume.mount # mount a volume from one volume server
-  volume.move # move a live volume from one volume server to another volume server
-  volume.tier.download # download the dat file of a volume from a remote tier
-  volume.tier.move # change a volume from one disk type to another
-  volume.tier.upload # upload the dat file of a volume to a remote tier
-  volume.unmount # unmount a volume from one volume server
-  volume.vacuum # compact volumes if deleted entries are more than the limit
-  volumeServer.evacuate # move out all data on a volume server
-  volumeServer.leave # stop a volume server from sending heartbeats to the master
+Type: "help <command>" for help on <command>. Most commands support "<command> -h" also for options.
+  cluster.check # check current cluster network connectivity
+  cluster.ps # check current cluster process status
+  cluster.raft.add # add a server to the raft cluster
+  cluster.raft.ps # check current raft cluster status
+  cluster.raft.remove # remove a server from the raft cluster
+  collection.delete # delete specified collection
+  collection.list # list all collections
+  ec.balance # balance all ec shards among all racks and volume servers
+  ec.decode # decode a erasure coded volume into a normal volume
+  ec.encode # apply erasure coding to a volume
+  ec.rebuild # find and rebuild missing ec shards among volume servers
+  fs.cat # stream the file content on to the screen
+  fs.cd # change directory to a directory /path/to/dir
+  fs.configure # configure and apply storage options for each location
+  fs.du # show disk usage
+  fs.ls # list all files under a directory
+  fs.meta.cat # print out the meta data content for a file or directory
+  fs.meta.changeVolumeId # change volume id in existing metadata.
+  fs.meta.load # load saved filer meta data to restore the directory and file structure
+  fs.meta.notify # recursively send directory and file meta data to notification message queue
+  fs.meta.save # save all directory and file meta data to a local file for metadata backup.
+  fs.mkdir # create a directory
+  fs.mv # move or rename a file or a folder
+  fs.pwd # print out current directory
+  fs.rm # remove file and directory entries
+  fs.tree # recursively list all files under a directory
+  fs.verify # recursively verify all files under a directory
+  lock # lock in order to exclusively manage the cluster
+  mount.configure # configure the mount on current server
+  mq.topic.list # print out all topics
+  remote.cache # cache the file content for mounted directories or files
+  remote.configure # remote storage configuration
+  remote.meta.sync # synchronize the local file meta data with the remote file metadata
+  remote.mount # mount remote storage and pull its metadata
+  remote.mount.buckets # mount all buckets in remote storage and pull its metadata
+  remote.uncache # keep the metadata but remote cache the file content for mounted directories or files
+  remote.unmount # unmount remote storage
+  s3.bucket.create # create a bucket with a given name
+  s3.bucket.delete # delete a bucket by a given name
+  s3.bucket.list # list all buckets
+  s3.bucket.quota # set/remove/enable/disable quota for a bucket
+  s3.bucket.quota.enforce # check quota for all buckets, make the bucket read only if over the limit
+  s3.circuitBreaker # configure and apply s3 circuit breaker options for each bucket
+  s3.clean.uploads # clean up stale multipart uploads
+  s3.configure # configure and apply s3 options for each bucket
+  unlock # unlock the cluster-wide lock
+  volume.balance # balance all volumes among volume servers
+  volume.check.disk # check all replicated volumes to find and fix inconsistencies. It is optional and resource intensive.
+  volume.configure.replication # change volume replication value
+  volume.copy # copy a volume from one volume server to another volume server
+  volume.delete # delete a live volume from one volume server
+  volume.deleteEmpty # delete empty volumes from all volume servers
+  volume.fix.replication # add or remove replicas to volumes that are missing replicas or over-replicated
+  volume.fsck # check all volumes to find entries not used by the filer
+  volume.list # list all volumes
+  volume.mark # Mark volume writable or readonly from one volume server
+  volume.mount # mount a volume from one volume server
+  volume.move # move a live volume from one volume server to another volume server
+  volume.tier.download # download the dat file of a volume from a remote tier
+  volume.tier.move # change a volume from one disk type to another
+  volume.tier.upload # upload the dat file of a volume to a remote tier
+  volume.unmount # unmount a volume from one volume server
+  volume.vacuum # compact volumes if deleted entries are more than the limit
+  volume.vacuum.disable # disable vacuuming request from Master, however volume.vacuum still works.
+  volume.vacuum.enable # enable vacuuming request from Master
+  volumeServer.evacuate # move out all data on a volume server
+  volumeServer.leave # stop a volume server from sending heartbeats to the master
 ```
 
 For example:
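
A typical maintenance session built from these commands might look like the following sketch: take the cluster-wide lock, inspect the topology, repair replication, rebalance, and release the lock. The flags and their defaults vary across SeaweedFS versions, so check each command with `-h` before running it against a real cluster:

```
$ weed shell
> lock                        # acquire the exclusive cluster-wide lock
> volume.list                 # inspect volumes per data center, rack, and server
> volume.fix.replication      # add or remove replicas where needed
> volume.balance -force       # apply the balancing plan (without -force it only plans)
> unlock                      # release the lock when done
```

Commands that change cluster state, such as `volume.balance` and `volume.fix.replication`, generally refuse to run until `lock` has been taken; read-only commands like `volume.list` and `cluster.ps` work without it.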