# ceph --verbose fs authorize cephfs client.cephfs / rw
parsed_args: Namespace(admin_socket=None, cephconf=None, client_id=None, client_name=None, cluster=None, cluster_timeout=None, completion=False, help=False, input_file=None, output_file=None, output_format=None, setgroup=None, setuser=None, status=False, verbose=True, version=False, watch=False, watch_channel='cluster', watch_debug=False, watch_error=False, watch_info=False, watch_sec=False, watch_warn=False), childargs: ['fs', 'authorize', 'cephfs', 'client.cephfs', '/', 'rw']
cmd000: pg stat
cmd001: pg getmap
cmd002: pg dump {all|summary|sum|delta|pools|osds|pgs|pgs_brief [all|summary|sum|delta|pools|osds|pgs|pgs_brief...]}
cmd003: pg dump_json {all|summary|sum|pools|osds|pgs [all|summary|sum|pools|osds|pgs...]}
cmd004: pg dump_pools_json
cmd005: pg ls-by-pool { [...]}
cmd006: pg ls-by-primary {} { [...]}
cmd007: pg ls-by-osd {} { [...]}
cmd008: pg ls {} { [...]}
cmd009: pg dump_stuck {inactive|unclean|stale|undersized|degraded [inactive|unclean|stale|undersized|degraded...]} {}
cmd010: pg debug unfound_objects_exist|degraded_pgs_exist
cmd011: pg scrub
cmd012: pg deep-scrub
cmd013: pg repair
cmd014: pg force-recovery [...]
cmd015: pg force-backfill [...]
cmd016: pg cancel-force-recovery [...]
cmd017: pg cancel-force-backfill [...]
cmd018: osd perf
cmd019: osd df {plain|tree}
cmd020: osd blocked-by
cmd021: osd pool stats {}
cmd022: osd reweight-by-utilization {} {} {} {--no-increasing}
cmd023: osd test-reweight-by-utilization {} {} {} {--no-increasing}
cmd024: osd reweight-by-pg {} {} {} { [...]}
cmd025: osd test-reweight-by-pg {} {} {} { [...]}
cmd026: osd safe-to-destroy [...]
cmd027: osd ok-to-stop [...]
cmd028: osd scrub
cmd029: osd deep-scrub
cmd030: osd repair
cmd031: service dump
cmd032: service status
cmd033: config set
cmd034: balancer status
cmd035: balancer mode none|crush-compat|upmap
cmd036: balancer on
cmd037: balancer off
cmd038: balancer eval {
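For reference: in verbose mode the ceph CLI prints the parsed argument namespace and the command signatures it knows about (the cmdNNN lines) before running the request itself. The command's actual purpose is to ask the monitors to create or update the client.cephfs entity with capabilities granting rw access to the filesystem cephfs at path /. A minimal sketch of how one might verify the result and export the keyring for the client host, assuming the authorize call succeeded; the destination path in the second command is an assumed convention, not part of the original output:

# ceph auth get client.cephfs
# ceph auth get client.cephfs -o /etc/ceph/ceph.client.cephfs.keyring

The first command prints the entity's key and caps as stored by the monitors; the second writes them to a keyring file that the client host can use when mounting the filesystem.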