author     Rishabh Dave <ridave@redhat.com>    2023-07-13 09:14:22 +0200
committer  GitHub <noreply@github.com>         2023-07-13 09:14:22 +0200
commit     f0588bd3b3454cd596a01a096b9b56067207a70b (patch)
tree       c2ade9140f7f295c6c48a507f872935de8401400 /qa
parent     Merge PR #51959 into main (diff)
parent     qa/cephfs: use run_ceph_cmd() when cmd output is not needed (diff)
Merge pull request #50569 from rishabh-d-dave/CephManager-in-CephFSTestCase
qa/cephfs: add helper methods in CephFSTestCase

Reviewed-by: Venky Shankar <vshankar@redhat.com>
Diffstat (limited to 'qa')
-rw-r--r--  qa/tasks/cephfs/caps_helper.py         |   5
-rw-r--r--  qa/tasks/cephfs/cephfs_test_case.py    |  87
-rw-r--r--  qa/tasks/cephfs/filesystem.py          | 124
-rw-r--r--  qa/tasks/cephfs/mount.py               |   4
-rw-r--r--  qa/tasks/cephfs/test_admin.py          | 224
-rw-r--r--  qa/tasks/cephfs/test_damage.py         |  27
-rw-r--r--  qa/tasks/cephfs/test_data_scan.py      |   4
-rw-r--r--  qa/tasks/cephfs/test_failover.py       |  17
-rw-r--r--  qa/tasks/cephfs/test_fragment.py       |   5
-rw-r--r--  qa/tasks/cephfs/test_fstop.py          |   8
-rw-r--r--  qa/tasks/cephfs/test_full.py           |  14
-rw-r--r--  qa/tasks/cephfs/test_journal_repair.py |   4
-rw-r--r--  qa/tasks/cephfs/test_mantle.py         |  20
-rw-r--r--  qa/tasks/cephfs/test_mds_metrics.py    |  22
-rw-r--r--  qa/tasks/cephfs/test_mirroring.py      |  97
-rw-r--r--  qa/tasks/cephfs/test_misc.py           |  71
-rw-r--r--  qa/tasks/cephfs/test_multifs_auth.py   |   8
-rw-r--r--  qa/tasks/cephfs/test_multimds_misc.py  |   2
-rw-r--r--  qa/tasks/cephfs/test_nfs.py            |  10
-rw-r--r--  qa/tasks/cephfs/test_pool_perm.py      |  18
-rw-r--r--  qa/tasks/cephfs/test_recovery_fs.py    |   2
-rw-r--r--  qa/tasks/cephfs/test_recovery_pool.py  |   2
-rw-r--r--  qa/tasks/cephfs/test_scrub_checks.py   |   4
-rw-r--r--  qa/tasks/cephfs/test_sessionmap.py     |   2
-rw-r--r--  qa/tasks/cephfs/test_snap_schedules.py |   6
-rw-r--r--  qa/tasks/cephfs/test_snapshots.py      |  16
-rw-r--r--  qa/tasks/cephfs/test_strays.py         |   7
-rw-r--r--  qa/tasks/cephfs/test_volumes.py        |  42
-rw-r--r--  qa/tasks/cephfs/xfstests_dev.py        |   4
29 files changed, 441 insertions, 415 deletions
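
Most of the changes below follow one mechanical pattern: call sites that previously reached through fs.mon_manager or mds_cluster.mon_manager now use helpers that the new RunCephCmd mixin puts directly on the test case and cluster objects. A minimal before/after sketch of that mapping (the commands shown are illustrative, not lifted from a specific hunk):

    # before: every caller went through the CephManager directly
    out  = self.fs.mon_manager.raw_cluster_cmd('fs', 'ls', '--format=json-pretty')
    rc   = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'fail', 'cephfs')
    proc = self.fs.mon_manager.run_cluster_cmd(args='osd blocklist clear')

    # after: thin wrappers provided by the RunCephCmd mixin
    out  = self.get_ceph_cmd_stdout('fs', 'ls', '--format=json-pretty')  # captured stdout
    rc   = self.get_ceph_cmd_result('fs', 'fail', 'cephfs')              # exit status only
    proc = self.run_ceph_cmd('osd blocklist clear')                      # run_cluster_cmd() result object
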
diff --git a/qa/tasks/cephfs/caps_helper.py b/qa/tasks/cephfs/caps_helper.py
index d60e515aab2..bf6e2f9278f 100644
--- a/qa/tasks/cephfs/caps_helper.py
+++ b/qa/tasks/cephfs/caps_helper.py
@@ -156,12 +156,11 @@ class MonCapTester:
fs is any fs object so that ceph commands can be exceuted.
"""
- get_cluster_cmd_op = fs.mon_manager.raw_cluster_cmd
- keyring = get_cluster_cmd_op(args=f'auth get client.{client_id}')
+ keyring = fs.get_ceph_cmd_stdout(args=f'auth get client.{client_id}')
moncap = get_mon_cap_from_keyring(keyring)
keyring_path = fs.admin_remote.mktemp(data=keyring)
- fsls = get_cluster_cmd_op(
+ fsls = fs.get_ceph_cmd_stdout(
args=f'fs ls --id {client_id} -k {keyring_path}')
log.info(f'output of fs ls cmd run by client.{client_id} -\n{fsls}')
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 129b84790e9..968bce4093f 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -2,8 +2,7 @@ import json
import logging
import os
import re
-
-from shlex import split as shlex_split
+from io import StringIO
from tasks.ceph_test_case import CephTestCase
@@ -49,7 +48,32 @@ class MountDetails():
mntobj.hostfs_mntpt = self.hostfs_mntpt
-class CephFSTestCase(CephTestCase):
+class RunCephCmd:
+
+ def run_ceph_cmd(self, *args, **kwargs):
+ if kwargs.get('args') is None and args:
+ if len(args) == 1:
+ args = args[0]
+ kwargs['args'] = args
+ return self.mon_manager.run_cluster_cmd(**kwargs)
+
+ def get_ceph_cmd_result(self, *args, **kwargs):
+ if kwargs.get('args') is None and args:
+ if len(args) == 1:
+ args = args[0]
+ kwargs['args'] = args
+ return self.run_ceph_cmd(**kwargs).exitstatus
+
+ def get_ceph_cmd_stdout(self, *args, **kwargs):
+ if kwargs.get('args') is None and args:
+ if len(args) == 1:
+ args = args[0]
+ kwargs['args'] = args
+ kwargs['stdout'] = kwargs.pop('stdout', StringIO())
+ return self.run_ceph_cmd(**kwargs).stdout.getvalue()
+
+
+class CephFSTestCase(CephTestCase, RunCephCmd):
"""
Test case for Ceph FS, requires caller to populate Filesystem and Mounts,
into the fs, mount_a, mount_b class attributes (setting mount_b is optional)
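
The three helpers added above differ only in what they return; all of them normalize their positional arguments so that a single command string, a pre-split list, or separate words end up as kwargs['args'] for mon_manager.run_cluster_cmd(). A short, hypothetical test sketch of the intended usage (the class and assertions are illustrative, not part of this PR):

    import json
    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestRunCephCmdExample(CephFSTestCase):
        def test_blocklist_is_empty_after_clear(self):
            # equivalent spellings: one string, one list, or separate words
            self.run_ceph_cmd('osd blocklist clear')
            self.run_ceph_cmd(['osd', 'blocklist', 'clear'])
            self.run_ceph_cmd('osd', 'blocklist', 'clear')

            # exit status only
            self.assertEqual(self.get_ceph_cmd_result('osd', 'blocklist', 'clear'), 0)

            # stdout, collected into a StringIO and returned as a string
            dump = self.get_ceph_cmd_stdout('osd', 'dump', '--format=json')
            self.assertEqual(json.loads(dump)['blocklist'], {})
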
@@ -90,25 +114,38 @@ class CephFSTestCase(CephTestCase):
# In case anything is in the OSD blocklist list, clear it out. This is to avoid
# the OSD map changing in the background (due to blocklist expiry) while tests run.
try:
- self.mds_cluster.mon_manager.run_cluster_cmd(args="osd blocklist clear")
+ self.run_ceph_cmd("osd blocklist clear")
except CommandFailedError:
# Fallback for older Ceph cluster
try:
- blocklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
- "dump", "--format=json-pretty"))['blocklist']
+ blocklist = json.loads(self.get_ceph_cmd_stdout("osd",
+ "dump", "--format=json-pretty"))['blocklist']
log.info(f"Removing {len(blocklist)} blocklist entries")
for addr, blocklisted_at in blocklist.items():
- self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blocklist", "rm", addr)
+ self.run_ceph_cmd("osd", "blocklist", "rm", addr)
except KeyError:
# Fallback for more older Ceph clusters, who will use 'blacklist' instead.
- blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
- "dump", "--format=json-pretty"))['blacklist']
+ blacklist = json.loads(self.get_ceph_cmd_stdout("osd",
+ "dump", "--format=json-pretty"))['blacklist']
log.info(f"Removing {len(blacklist)} blacklist entries")
for addr, blocklisted_at in blacklist.items():
- self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
+ self.run_ceph_cmd("osd", "blacklist", "rm", addr)
+
+ def _init_mon_manager(self):
+ # if vstart_runner.py has invoked this code
+ if 'Local' in str(type(self.ceph_cluster)):
+ from tasks.vstart_runner import LocalCephManager
+ self.mon_manager = LocalCephManager(ctx=self.ctx)
+ # else teuthology has invoked this code
+ else:
+ from tasks.ceph_manager import CephManager
+ self.mon_manager = CephManager(self.ceph_cluster.admin_remote,
+ ctx=self.ctx, logger=log.getChild('ceph_manager'))
def setUp(self):
super(CephFSTestCase, self).setUp()
+ self._init_mon_manager()
+ self.admin_remote = self.ceph_cluster.admin_remote
self.config_set('mon', 'mon_allow_pool_delete', True)
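
The new _init_mon_manager() exists because RunCephCmd only supplies the wrappers; it assumes the class mixing it in also provides self.mon_manager. CephFSTestCase builds one per test in setUp() (a LocalCephManager under vstart_runner, a CephManager under teuthology), and the filesystem.py classes further down satisfy the same contract in their constructors. A hedged sketch of that contract with a made-up consumer class:

    import json
    from tasks.cephfs.cephfs_test_case import RunCephCmd

    class FsLister(RunCephCmd):
        """Hypothetical consumer: any object that supplies mon_manager can reuse the helpers."""
        def __init__(self, mon_manager):
            self.mon_manager = mon_manager

        def fs_names(self):
            fsls = json.loads(self.get_ceph_cmd_stdout('fs', 'ls', '--format=json'))
            return [fs['name'] for fs in fsls]
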
@@ -154,7 +191,7 @@ class CephFSTestCase(CephTestCase):
for entry in self.auth_list():
ent_type, ent_id = entry['entity'].split(".")
if ent_type == "client" and ent_id not in client_mount_ids and not (ent_id == "admin" or ent_id[:6] == 'mirror'):
- self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])
+ self.run_ceph_cmd("auth", "del", entry['entity'])
if self.REQUIRE_FILESYSTEM:
self.fs = self.mds_cluster.newfs(create=True)
@@ -165,11 +202,11 @@ class CephFSTestCase(CephTestCase):
'osd', f'allow rw tag cephfs data={self.fs.name}',
'mds', 'allow']
- if self.run_cluster_cmd_result(cmd) == 0:
+ if self.get_ceph_cmd_result(*cmd) == 0:
break
cmd[1] = 'add'
- if self.run_cluster_cmd_result(cmd) != 0:
+ if self.get_ceph_cmd_result(*cmd) != 0:
raise RuntimeError(f'Failed to create new client {cmd[2]}')
# wait for ranks to become active
@@ -182,9 +219,8 @@ class CephFSTestCase(CephTestCase):
if self.REQUIRE_BACKUP_FILESYSTEM:
if not self.REQUIRE_FILESYSTEM:
self.skipTest("backup filesystem requires a primary filesystem as well")
- self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
- 'enable_multiple', 'true',
- '--yes-i-really-mean-it')
+ self.run_ceph_cmd('fs', 'flag', 'set', 'enable_multiple', 'true',
+ '--yes-i-really-mean-it')
self.backup_fs = self.mds_cluster.newfs(name="backup_fs")
self.backup_fs.wait_for_daemons()
@@ -220,9 +256,8 @@ class CephFSTestCase(CephTestCase):
"""
Convenience wrapper on "ceph auth ls"
"""
- return json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd(
- "auth", "ls", "--format=json-pretty"
- ))['auth_dump']
+ return json.loads(self.get_ceph_cmd_stdout("auth", "ls",
+ "--format=json-pretty"))['auth_dump']
def assert_session_count(self, expected, ls_data=None, mds_id=None):
if ls_data is None:
@@ -405,16 +440,6 @@ class CephFSTestCase(CephTestCase):
except contextutil.MaxWhileTries as e:
raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
- def run_cluster_cmd(self, cmd):
- if isinstance(cmd, str):
- cmd = shlex_split(cmd)
- return self.fs.mon_manager.raw_cluster_cmd(*cmd)
-
- def run_cluster_cmd_result(self, cmd):
- if isinstance(cmd, str):
- cmd = shlex_split(cmd)
- return self.fs.mon_manager.raw_cluster_cmd_result(*cmd)
-
def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
if not (moncap or osdcap or mdscap):
if self.fs:
@@ -432,5 +457,5 @@ class CephFSTestCase(CephTestCase):
if mdscap:
cmd += ['mds', mdscap]
- self.run_cluster_cmd(cmd)
- return self.run_cluster_cmd(f'auth get {self.client_name}')
+ self.run_ceph_cmd(*cmd)
+ return self.run_ceph_cmd(f'auth get {self.client_name}')
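
Note that the two deleted helpers, run_cluster_cmd() and run_cluster_cmd_result(), existed mainly to shlex-split a command string and to reach through self.fs.mon_manager; the replacements use the test case's own mon_manager (set up before self.fs exists) and forward the string unsplit, which the call sites above already rely on. One visible side effect is that create_client() now returns the run_cluster_cmd() result object rather than a stdout string, so a caller that wants the keyring text itself would presumably fetch it separately, along these lines:

    # after: the unsplit string (or an already-split list) is forwarded unchanged
    self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
    # hypothetical: read the keyring text via the stdout-capturing helper
    keyring = self.get_ceph_cmd_stdout(f'auth get {self.client_name}')
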
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 777ba8249ec..21804f87770 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -17,8 +17,10 @@ from teuthology import misc
from teuthology.nuke import clear_firewall
from teuthology.parallel import parallel
from teuthology import contextutil
+
from tasks.ceph_manager import write_conf
-from tasks import ceph_manager
+from tasks.ceph_manager import CephManager
+from tasks.cephfs.cephfs_test_case import RunCephCmd
log = logging.getLogger(__name__)
@@ -66,16 +68,16 @@ class FSMissing(Exception):
def __str__(self):
return f"File system {self.ident} does not exist in the map"
-class FSStatus(object):
+class FSStatus(RunCephCmd):
"""
Operations on a snapshot of the FSMap.
"""
def __init__(self, mon_manager, epoch=None):
- self.mon = mon_manager
+ self.mon_manager = mon_manager
cmd = ["fs", "dump", "--format=json"]
if epoch is not None:
cmd.append(str(epoch))
- self.map = json.loads(self.mon.raw_cluster_cmd(*cmd))
+ self.map = json.loads(self.get_ceph_cmd_stdout(*cmd))
def __str__(self):
return json.dumps(self.map, indent = 2, sort_keys = True)
@@ -216,7 +218,7 @@ class FSStatus(object):
#all matching
return False
-class CephCluster(object):
+class CephCluster(RunCephCmd):
@property
def admin_remote(self):
first_mon = misc.get_first_mon(self._ctx, None)
@@ -225,7 +227,8 @@ class CephCluster(object):
def __init__(self, ctx) -> None:
self._ctx = ctx
- self.mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager'))
+ self.mon_manager = CephManager(self.admin_remote, ctx=ctx,
+ logger=log.getChild('ceph_manager'))
def get_config(self, key, service_type=None):
"""
@@ -271,7 +274,7 @@ class CephCluster(object):
return None
def is_addr_blocklisted(self, addr):
- blocklist = json.loads(self.mon_manager.raw_cluster_cmd(
+ blocklist = json.loads(self.get_ceph_cmd_stdout(
"osd", "dump", "--format=json"))['blocklist']
if addr in blocklist:
return True
@@ -350,7 +353,7 @@ class MDSCluster(CephCluster):
Inform MDSMonitor of the death of the daemon process(es). If it held
a rank, that rank will be relinquished.
"""
- self._one_or_all(mds_id, lambda id_: self.mon_manager.raw_cluster_cmd("mds", "fail", id_))
+ self._one_or_all(mds_id, lambda id_: self.get_ceph_cmd_stdout("mds", "fail", id_))
def mds_restart(self, mds_id=None):
self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].restart())
@@ -364,7 +367,7 @@ class MDSCluster(CephCluster):
"""
def _fail_restart(id_):
self.mds_daemons[id_].stop()
- self.mon_manager.raw_cluster_cmd("mds", "fail", id_)
+ self.run_ceph_cmd("mds", "fail", id_)
self.mds_daemons[id_].restart()
self._one_or_all(mds_id, _fail_restart)
@@ -468,7 +471,7 @@ class MDSCluster(CephCluster):
return FSStatus(self.mon_manager).get_mds(mds_id)
def is_pool_full(self, pool_name):
- pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
+ pools = json.loads(self.get_ceph_cmd_stdout("osd", "dump", "--format=json-pretty"))['pools']
for pool in pools:
if pool['pool_name'] == pool_name:
return 'full' in pool['flags_names'].split(",")
@@ -575,21 +578,21 @@ class Filesystem(MDSCluster):
assert(mds_map['in'] == list(range(0, mds_map['max_mds'])))
def reset(self):
- self.mon_manager.raw_cluster_cmd("fs", "reset", str(self.name), '--yes-i-really-mean-it')
+ self.run_ceph_cmd("fs", "reset", str(self.name), '--yes-i-really-mean-it')
def fail(self):
- self.mon_manager.raw_cluster_cmd("fs", "fail", str(self.name))
+ self.run_ceph_cmd("fs", "fail", str(self.name))
def set_flag(self, var, *args):
a = map(lambda x: str(x).lower(), args)
- self.mon_manager.raw_cluster_cmd("fs", "flag", "set", var, *a)
+ self.run_ceph_cmd("fs", "flag", "set", var, *a)
def set_allow_multifs(self, yes=True):
self.set_flag("enable_multiple", yes)
def set_var(self, var, *args):
a = map(lambda x: str(x).lower(), args)
- self.mon_manager.raw_cluster_cmd("fs", "set", self.name, var, *a)
+ self.run_ceph_cmd("fs", "set", self.name, var, *a)
def set_down(self, down=True):
self.set_var("down", str(down).lower())
@@ -617,7 +620,7 @@ class Filesystem(MDSCluster):
def compat(self, *args):
a = map(lambda x: str(x).lower(), args)
- self.mon_manager.raw_cluster_cmd("fs", "compat", self.name, *a)
+ self.run_ceph_cmd("fs", "compat", self.name, *a)
def add_compat(self, *args):
self.compat("add_compat", *args)
@@ -633,7 +636,7 @@ class Filesystem(MDSCluster):
def required_client_features(self, *args, **kwargs):
c = ["fs", "required_client_features", self.name, *args]
- return self.mon_manager.run_cluster_cmd(args=c, **kwargs)
+ return self.run_ceph_cmd(args=c, **kwargs)
# Since v15.1.0 the pg autoscale mode has been enabled as default,
# will let the pg autoscale mode to calculate the pg_num as needed.
@@ -662,24 +665,23 @@ class Filesystem(MDSCluster):
log.debug("Creating filesystem '{0}'".format(self.name))
try:
- self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- self.metadata_pool_name,
- '--pg_num_min', str(self.pg_num_min))
-
- self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- data_pool_name, str(self.pg_num),
- '--pg_num_min', str(self.pg_num_min),
- '--target_size_ratio',
- str(self.target_size_ratio))
+ self.run_ceph_cmd('osd', 'pool', 'create',self.metadata_pool_name,
+ '--pg_num_min', str(self.pg_num_min))
+
+ self.run_ceph_cmd('osd', 'pool', 'create', data_pool_name,
+ str(self.pg_num),
+ '--pg_num_min', str(self.pg_num_min),
+ '--target_size_ratio',
+ str(self.target_size_ratio))
except CommandFailedError as e:
if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
- self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- self.metadata_pool_name,
- str(self.pg_num_min))
+ self.run_ceph_cmd('osd', 'pool', 'create',
+ self.metadata_pool_name,
+ str(self.pg_num_min))
- self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- data_pool_name, str(self.pg_num),
- str(self.pg_num_min))
+ self.run_ceph_cmd('osd', 'pool', 'create',
+ data_pool_name, str(self.pg_num),
+ str(self.pg_num_min))
else:
raise
@@ -688,7 +690,7 @@ class Filesystem(MDSCluster):
args.append('--recover')
if metadata_overlay:
args.append('--allow-dangerous-metadata-overlay')
- self.mon_manager.raw_cluster_cmd(*args)
+ self.run_ceph_cmd(*args)
if not recover:
if self.ec_profile and 'disabled' not in self.ec_profile:
@@ -696,23 +698,22 @@ class Filesystem(MDSCluster):
log.debug("EC profile is %s", self.ec_profile)
cmd = ['osd', 'erasure-code-profile', 'set', ec_data_pool_name]
cmd.extend(self.ec_profile)
- self.mon_manager.raw_cluster_cmd(*cmd)
+ self.run_ceph_cmd(*cmd)
try:
- self.mon_manager.raw_cluster_cmd(
+ self.run_ceph_cmd(
'osd', 'pool', 'create', ec_data_pool_name,
'erasure', ec_data_pool_name,
'--pg_num_min', str(self.pg_num_min),
'--target_size_ratio', str(self.target_size_ratio_ec))
except CommandFailedError as e:
if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
- self.mon_manager.raw_cluster_cmd(
+ self.run_ceph_cmd(
'osd', 'pool', 'create', ec_data_pool_name,
str(self.pg_num_min), 'erasure', ec_data_pool_name)
else:
raise
- self.mon_manager.raw_cluster_cmd(
- 'osd', 'pool', 'set',
- ec_data_pool_name, 'allow_ec_overwrites', 'true')
+ self.run_ceph_cmd('osd', 'pool', 'set', ec_data_pool_name,
+ 'allow_ec_overwrites', 'true')
self.add_data_pool(ec_data_pool_name, create=False)
self.check_pool_application(ec_data_pool_name)
@@ -723,7 +724,8 @@ class Filesystem(MDSCluster):
# Turn off spurious standby count warnings from modifying max_mds in tests.
try:
- self.mon_manager.raw_cluster_cmd('fs', 'set', self.name, 'standby_count_wanted', '0')
+ self.run_ceph_cmd('fs', 'set', self.name, 'standby_count_wanted',
+ '0')
except CommandFailedError as e:
if e.exitstatus == 22:
# standby_count_wanted not available prior to luminous (upgrade tests would fail otherwise)
@@ -758,14 +760,14 @@ class Filesystem(MDSCluster):
for sv in range(0, subvols['create']):
sv_name = f'sv_{sv}'
- self.mon_manager.raw_cluster_cmd(
- 'fs', 'subvolume', 'create', self.name, sv_name,
- self.fs_config.get('subvol_options', ''))
+ self.run_ceph_cmd('fs', 'subvolume', 'create', self.name,
+ sv_name,
+ self.fs_config.get('subvol_options', ''))
if self.name not in self._ctx.created_subvols:
self._ctx.created_subvols[self.name] = []
- subvol_path = self.mon_manager.raw_cluster_cmd(
+ subvol_path = self.get_ceph_cmd_stdout(
'fs', 'subvolume', 'getpath', self.name, sv_name)
subvol_path = subvol_path.strip()
self._ctx.created_subvols[self.name].append(subvol_path)
@@ -858,7 +860,7 @@ class Filesystem(MDSCluster):
"""
Whether a filesystem exists in the mon's filesystem list
"""
- fs_list = json.loads(self.mon_manager.raw_cluster_cmd('fs', 'ls', '--format=json-pretty'))
+ fs_list = json.loads(self.get_ceph_cmd_stdout('fs', 'ls', '--format=json-pretty'))
return self.name in [fs['name'] for fs in fs_list]
def legacy_configured(self):
@@ -867,7 +869,7 @@ class Filesystem(MDSCluster):
the case, the caller should avoid using Filesystem.create
"""
try:
- out_text = self.mon_manager.raw_cluster_cmd('--format=json-pretty', 'osd', 'lspools')
+ out_text = self.get_ceph_cmd_stdout('--format=json-pretty', 'osd', 'lspools')
pools = json.loads(out_text)
metadata_pool_exists = 'metadata' in [p['poolname'] for p in pools]
if metadata_pool_exists:
@@ -883,7 +885,7 @@ class Filesystem(MDSCluster):
return metadata_pool_exists
def _df(self):
- return json.loads(self.mon_manager.raw_cluster_cmd("df", "--format=json-pretty"))
+ return json.loads(self.get_ceph_cmd_stdout("df", "--format=json-pretty"))
# may raise FSMissing
def get_mds_map(self, status=None):
@@ -901,15 +903,15 @@ class Filesystem(MDSCluster):
def add_data_pool(self, name, create=True):
if create:
try:
- self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name,
- '--pg_num_min', str(self.pg_num_min))
+ self.run_ceph_cmd('osd', 'pool', 'create', name,
+ '--pg_num_min', str(self.pg_num_min))
except CommandFailedError as e:
if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
- self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name,
- str(self.pg_num_min))
+ self.run_ceph_cmd('osd', 'pool', 'create', name,
+ str(self.pg_num_min))
else:
raise
- self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
+ self.run_ceph_cmd('fs', 'add_data_pool', self.name, name)
self.get_pool_names(refresh = True)
for poolid, fs_name in self.data_pools.items():
if name == fs_name:
@@ -962,9 +964,9 @@ class Filesystem(MDSCluster):
self.data_pool_name = name
def get_pool_pg_num(self, pool_name):
- pgs = json.loads(self.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
- pool_name, 'pg_num',
- '--format=json-pretty'))
+ pgs = json.loads(self.get_ceph_cmd_stdout('osd', 'pool', 'get',
+ pool_name, 'pg_num',
+ '--format=json-pretty'))
return int(pgs['pg_num'])
def get_namespace_id(self):
@@ -1095,13 +1097,13 @@ class Filesystem(MDSCluster):
self.mds_signal(name, signal)
def rank_freeze(self, yes, rank=0):
- self.mon_manager.raw_cluster_cmd("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower())
+ self.run_ceph_cmd("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower())
def rank_repaired(self, rank):
- self.mon_manager.raw_cluster_cmd("mds", "repaired", "{}:{}".format(self.id, rank))
+ self.run_ceph_cmd("mds", "repaired", "{}:{}".format(self.id, rank))
def rank_fail(self, rank=0):
- self.mon_manager.raw_cluster_cmd("mds", "fail", "{}:{}".format(self.id, rank))
+ self.run_ceph_cmd("mds", "fail", "{}:{}".format(self.id, rank))
def rank_is_running(self, rank=0, status=None):
name = self.get_rank(rank=rank, status=status)['name']
@@ -1240,7 +1242,7 @@ class Filesystem(MDSCluster):
if mds_id is None:
return self.rank_tell(command)
- return json.loads(self.mon_manager.raw_cluster_cmd("tell", f"mds.{mds_id}", *command))
+ return json.loads(self.get_ceph_cmd_stdout("tell", f"mds.{mds_id}", *command))
def rank_asok(self, command, rank=0, status=None, timeout=None):
info = self.get_rank(rank=rank, status=status)
@@ -1248,7 +1250,7 @@ class Filesystem(MDSCluster):
def rank_tell(self, command, rank=0, status=None):
try:
- out = self.mon_manager.raw_cluster_cmd("tell", f"mds.{self.id}:{rank}", *command)
+ out = self.get_ceph_cmd_stdout("tell", f"mds.{self.id}:{rank}", *command)
return json.loads(out)
except json.decoder.JSONDecodeError:
log.error("could not decode: {}".format(out))
@@ -1648,8 +1650,8 @@ class Filesystem(MDSCluster):
caps = tuple(x)
client_name = 'client.' + client_id
- return self.mon_manager.raw_cluster_cmd('fs', 'authorize', self.name,
- client_name, *caps)
+ return self.get_ceph_cmd_stdout('fs', 'authorize', self.name,
+ client_name, *caps)
def grow(self, new_max_mds, status=None):
oldmax = self.get_var('max_mds', status=status)
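
Several pool-creation sites in this file repeat the same compatibility fallback: try the command with --pg_num_min and, if an older (Nautilus-era) monitor rejects the option with EINVAL (22), retry without it. A condensed, hypothetical helper showing that pattern after the refactor, assuming the CommandFailedError import these modules already use:

    from teuthology.exceptions import CommandFailedError

    def create_pool_compat(fs, pool_name):
        # fs is a Filesystem/CephCluster object, which now carries run_ceph_cmd()
        try:
            fs.run_ceph_cmd('osd', 'pool', 'create', pool_name,
                            '--pg_num_min', str(fs.pg_num_min))
        except CommandFailedError as e:
            if e.exitstatus == 22:  # EINVAL: monitor predates --pg_num_min
                fs.run_ceph_cmd('osd', 'pool', 'create', pool_name, str(fs.pg_num_min))
            else:
                raise
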
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index be95e9d3e0e..5556a0ac1a8 100644
--- a/qa/tasks/cephfs/mount.py
+++ b/qa/tasks/cephfs/mount.py
@@ -189,10 +189,10 @@ class CephFSMount(object):
self.fs = Filesystem(self.ctx, name=self.cephfs_name)
try:
- output = self.fs.mon_manager.raw_cluster_cmd(args='osd blocklist ls')
+ output = self.fs.get_ceph_cmd_stdout('osd blocklist ls')
except CommandFailedError:
# Fallback for older Ceph cluster
- output = self.fs.mon_manager.raw_cluster_cmd(args='osd blacklist ls')
+ output = self.fs.get_ceph_cmd_stdout('osd blacklist ls')
return self.addr in output
diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index d8ff752b366..76614fff7f0 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -24,18 +24,18 @@ class TestAdminCommands(CephFSTestCase):
MDSS_REQUIRED = 1
def check_pool_application_metadata_key_value(self, pool, app, key, value):
- output = self.fs.mon_manager.raw_cluster_cmd(
+ output = self.get_ceph_cmd_stdout(
'osd', 'pool', 'application', 'get', pool, app, key)
self.assertEqual(str(output.strip()), value)
def setup_ec_pools(self, n, metadata=True, overwrites=True):
if metadata:
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
+ self.run_ceph_cmd('osd', 'pool', 'create', n+"-meta", "8")
cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
- self.fs.mon_manager.raw_cluster_cmd(*cmd)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+ self.run_ceph_cmd(cmd)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
if overwrites:
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+ self.run_ceph_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
class TestFsStatus(TestAdminCommands):
@@ -48,13 +48,13 @@ class TestFsStatus(TestAdminCommands):
That `ceph fs status` command functions.
"""
- s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
+ s = self.get_ceph_cmd_stdout("fs", "status")
self.assertTrue("active" in s)
- mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
+ mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json-pretty"))["mdsmap"]
self.assertEqual(mdsmap[0]["state"], "active")
- mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
+ mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json"))["mdsmap"]
self.assertEqual(mdsmap[0]["state"], "active")
@@ -76,7 +76,7 @@ class TestAddDataPool(TestAdminCommands):
That the application metadata set on a newly added data pool is as expected.
"""
pool_name = "foo"
- mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+ mon_cmd = self.get_ceph_cmd_stdout
mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
str(self.fs.pg_num_min))
# Check whether https://tracker.ceph.com/issues/43061 is fixed
@@ -120,22 +120,22 @@ class TestAddDataPool(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'first_data_pool' with 'second_fs'
# Expecting EINVAL exit status because 'first_data_pool' is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
+ self.run_ceph_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -150,23 +150,23 @@ class TestAddDataPool(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
# try to add 'second_metadata_pool' with 'first_fs' as a data pool
# Expecting EINVAL exit status because 'second_metadata_pool'
# is already in use with 'second_fs' as a metadata pool
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
+ self.run_ceph_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -183,23 +183,21 @@ class TestFsNew(TestAdminCommands):
metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
badname = n+'badname@#'
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- n+metapoolname)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- n+datapoolname)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+metapoolname)
+ self.run_ceph_cmd('osd', 'pool', 'create', n+datapoolname)
# test that fsname not with "goodchars" fails
args = ['fs', 'new', badname, metapoolname, datapoolname]
- proc = self.fs.mon_manager.run_cluster_cmd(args=args,stderr=StringIO(),
- check_status=False)
+ proc = self.run_ceph_cmd(args=args, stderr=StringIO(),
+ check_status=False)
self.assertIn('invalid chars', proc.stderr.getvalue().lower())
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
- metapoolname,
- '--yes-i-really-really-mean-it-not-faking')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
- datapoolname,
- '--yes-i-really-really-mean-it-not-faking')
+ self.run_ceph_cmd('osd', 'pool', 'rm', metapoolname,
+ metapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
+ self.run_ceph_cmd('osd', 'pool', 'rm', datapoolname,
+ datapoolname,
+ '--yes-i-really-really-mean-it-not-faking')
def test_new_default_ec(self):
"""
@@ -211,7 +209,7 @@ class TestFsNew(TestAdminCommands):
n = "test_new_default_ec"
self.setup_ec_pools(n)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
@@ -229,7 +227,7 @@ class TestFsNew(TestAdminCommands):
self.mds_cluster.delete_all_filesystems()
n = "test_new_default_ec_force"
self.setup_ec_pools(n)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
def test_new_default_ec_no_overwrite(self):
"""
@@ -241,7 +239,7 @@ class TestFsNew(TestAdminCommands):
n = "test_new_default_ec_no_overwrite"
self.setup_ec_pools(n, overwrites=False)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
@@ -251,7 +249,7 @@ class TestFsNew(TestAdminCommands):
raise RuntimeError("expected failure")
# and even with --force !
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+ self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
except CommandFailedError as e:
if e.exitstatus == 22:
pass
@@ -269,7 +267,7 @@ class TestFsNew(TestAdminCommands):
fs_name = "test_fs_new_pool_application"
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
- mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+ mon_cmd = self.get_ceph_cmd_stdout
for p in pool_names:
mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
@@ -287,8 +285,8 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
self.fs.status().get_fsmap(fscid)
for i in range(2):
self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)
@@ -302,9 +300,9 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
self.fs.status().get_fsmap(fscid)
def test_fs_new_with_specific_id_fails_without_force_flag(self):
@@ -316,9 +314,9 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'osd pool create {p}')
try:
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a file system with specifc ID without --force flag")
@@ -335,9 +333,9 @@ class TestFsNew(TestAdminCommands):
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
for p in pool_names:
- self.run_cluster_cmd(f'osd pool create {p}')
+ self.run_ceph_cmd(f'osd pool create {p}')
try:
- self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+ self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a file system with specifc ID that is already in use")
@@ -353,13 +351,13 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
# try to create new fs 'second_fs' with following configuration
# metadata pool -> 'first_metadata_pool'
@@ -367,7 +365,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_metadata_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -382,13 +380,13 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
# try to create new fs 'second_fs' with following configuration
# metadata pool -> 'second_metadata_pool'
@@ -396,7 +394,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_data_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -411,9 +409,9 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
@@ -423,7 +421,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -438,17 +436,17 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
@@ -458,7 +456,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
# is already in use with 'first_fs' and 'second_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -473,9 +471,9 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
second_fs = "second_fs"
@@ -485,7 +483,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
# is already in use with 'first_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -500,17 +498,17 @@ class TestFsNew(TestAdminCommands):
first_fs = "first_fs"
first_metadata_pool = "first_metadata_pool"
first_data_pool = "first_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+ self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
# create second data pool, metadata pool and add with filesystem
second_fs = "second_fs"
second_metadata_pool = "second_metadata_pool"
second_data_pool = "second_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+ self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
third_fs = "third_fs"
@@ -520,7 +518,7 @@ class TestFsNew(TestAdminCommands):
# Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
# is already in use with 'first_fs' and 'second_fs'
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
+ self.run_ceph_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -533,20 +531,20 @@ class TestFsNew(TestAdminCommands):
# create pool and initialise with rbd
new_pool = "new_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
new_fs = "new_fs"
new_data_pool = "new_data_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_data_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_data_pool)
# try to create new fs 'new_fs' with following configuration
# metadata pool -> 'new_pool' (already used by rbd app)
# data pool -> 'new_data_pool'
# Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
+ self.run_ceph_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -559,20 +557,20 @@ class TestFsNew(TestAdminCommands):
# create pool and initialise with rbd
new_pool = "new_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
new_fs = "new_fs"
new_metadata_pool = "new_metadata_pool"
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_metadata_pool)
+ self.run_ceph_cmd('osd', 'pool', 'create', new_metadata_pool)
# try to create new fs 'new_fs' with following configuration
# metadata pool -> 'new_metadata_pool'
# data pool -> 'new_pool' (already used by rbd app)
# Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
+ self.run_ceph_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
@@ -600,7 +598,7 @@ class TestRenameCommand(TestAdminCommands):
new_fs_name = 'new_cephfs'
client_id = 'test_new_cephfs'
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
# authorize a cephx ID access to the renamed file system.
# use the ID to write to the file system.
@@ -621,7 +619,7 @@ class TestRenameCommand(TestAdminCommands):
# cleanup
self.mount_a.umount_wait()
- self.run_cluster_cmd(f'auth rm client.{client_id}')
+ self.run_ceph_cmd(f'auth rm client.{client_id}')
def test_fs_rename_idempotency(self):
"""
@@ -633,8 +631,8 @@ class TestRenameCommand(TestAdminCommands):
orig_fs_name = self.fs.name
new_fs_name = 'new_cephfs'
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
# original file system name does not appear in `fs ls` command
self.assertFalse(self.fs.exists())
@@ -653,10 +651,10 @@ class TestRenameCommand(TestAdminCommands):
new_fs_name = 'new_cephfs'
data_pool = self.fs.get_data_pool_name()
metadata_pool = self.fs.get_metadata_pool_name()
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
try:
- self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
+ self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a new file system with old "
@@ -666,7 +664,7 @@ class TestRenameCommand(TestAdminCommands):
"existing pools to fail.")
try:
- self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
+ self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on creating a new file system with old "
@@ -676,7 +674,7 @@ class TestRenameCommand(TestAdminCommands):
"existing pools, and --force flag to fail.")
try:
- self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
+ self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
"--allow-dangerous-metadata-overlay")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
@@ -691,7 +689,7 @@ class TestRenameCommand(TestAdminCommands):
That renaming a file system without '--yes-i-really-mean-it' flag fails.
"""
try:
- self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
+ self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EPERM,
"invalid error code on renaming a file system without the "
@@ -705,7 +703,7 @@ class TestRenameCommand(TestAdminCommands):
That renaming a non-existent file system fails.
"""
try:
- self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
+ self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
else:
@@ -718,7 +716,7 @@ class TestRenameCommand(TestAdminCommands):
self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
try:
- self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
+ self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EINVAL,
"invalid error code on renaming to a fs name that is already in use")
@@ -732,14 +730,14 @@ class TestRenameCommand(TestAdminCommands):
orig_fs_name = self.fs.name
new_fs_name = 'new_cephfs'
- self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
+ self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}')
try:
- self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+ self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EPERM, "invalid error code on renaming a mirrored file system")
else:
self.fail("expected renaming of a mirrored file system to fail")
- self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
+ self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}')
class TestDump(CephFSTestCase):
@@ -823,13 +821,13 @@ class TestRequiredClientFeatures(CephFSTestCase):
"""
def is_required(index):
- out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
+ out = self.get_ceph_cmd_stdout('fs', 'get', self.fs.name, '--format=json-pretty')
features = json.loads(out)['mdsmap']['required_client_features']
if "feature_{0}".format(index) in features:
return True;
return False;
- features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
+ features = json.loads(self.get_ceph_cmd_stdout('fs', 'feature', 'ls', '--format=json-pretty'))
self.assertGreater(len(features), 0);
for f in features:
@@ -1035,7 +1033,7 @@ class TestConfigCommands(CephFSTestCase):
names = self.fs.get_rank_names()
for n in names:
- s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
+ s = self.get_ceph_cmd_stdout("config", "show", "mds."+n)
self.assertTrue("NAME" in s)
self.assertTrue("mon_host" in s)
@@ -1085,17 +1083,17 @@ class TestMirroringCommands(CephFSTestCase):
MDSS_REQUIRED = 1
def _enable_mirroring(self, fs_name):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", fs_name)
+ self.run_ceph_cmd("fs", "mirror", "enable", fs_name)
def _disable_mirroring(self, fs_name):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", fs_name)
+ self.run_ceph_cmd("fs", "mirror", "disable", fs_name)
def _add_peer(self, fs_name, peer_spec, remote_fs_name):
peer_uuid = str(uuid.uuid4())
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
def _remove_peer(self, fs_name, peer_uuid):
- self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
+ self.run_ceph_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
def _verify_mirroring(self, fs_name, flag_str):
status = self.fs.status()
@@ -1244,10 +1242,10 @@ class TestFsAuthorize(CephFSTestCase):
fs_name = "cephfs-_."
self.fs = self.mds_cluster.newfs(name=fs_name)
self.fs.wait_for_daemons()
- self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
- f'mon "allow r" '
- f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
- f'mds allow')
+ self.run_ceph_cmd(f'auth caps client.{self.mount_a.client_id} '
+ f'mon "allow r" '
+ f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
+ f'mds allow')
self.mount_a.remount(cephfs_name=self.fs.name)
PERM = 'rw'
FS_AUTH_CAPS = (('/', PERM),)
@@ -1291,7 +1289,7 @@ class TestFsAuthorize(CephFSTestCase):
def tearDown(self):
self.mount_a.umount_wait()
- self.run_cluster_cmd(f'auth rm {self.client_name}')
+ self.run_ceph_cmd(f'auth rm {self.client_name}')
super(type(self), self).tearDown()
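
Because the wrappers forward their keyword arguments straight to run_cluster_cmd(), call sites that capture stderr or tolerate a non-zero exit keep working unchanged, as in the bad-name check in TestFsNew above. A minimal fragment of that style (the command and pool names are illustrative):

    from io import StringIO

    proc = self.run_ceph_cmd(args=['fs', 'new', 'bad@#name', 'meta-pool', 'data-pool'],
                             stderr=StringIO(), check_status=False)
    self.assertIn('invalid chars', proc.stderr.getvalue().lower())
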
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index d83187017e3..362942d9850 100644
--- a/qa/tasks/cephfs/test_damage.py
+++ b/qa/tasks/cephfs/test_damage.py
@@ -244,7 +244,7 @@ class TestDamage(CephFSTestCase):
# Reset MDS state
self.mount_a.umount_wait(force=True)
self.fs.fail()
- self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+ self.run_ceph_cmd('mds', 'repaired', '0')
# Reset RADOS pool state
self.fs.radosm(['import', '-'], stdin=BytesIO(serialized))
@@ -355,8 +355,9 @@ class TestDamage(CephFSTestCase):
# EIOs mean something handled by DamageTable: assert that it has
# been populated
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), "damage", "ls", '--format=json-pretty'))
+ self.get_ceph_cmd_stdout(
+ 'tell', f'mds.{self.fs.get_active_names()[0]}',
+ "damage", "ls", '--format=json-pretty'))
if len(damage) == 0:
results[mutation] = EIO_NO_DAMAGE
@@ -416,8 +417,8 @@ class TestDamage(CephFSTestCase):
# The fact that there is damaged should have bee recorded
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+ self.get_ceph_cmd_stdout(
+ 'tell', f'mds.{self.fs.get_active_names()[0]}',
"damage", "ls", '--format=json-pretty'))
self.assertEqual(len(damage), 1)
damage_id = damage[0]['id']
@@ -466,9 +467,9 @@ class TestDamage(CephFSTestCase):
self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
# Clean up the damagetable entry
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
- "damage", "rm", "{did}".format(did=damage_id))
+ self.run_ceph_cmd(
+ 'tell', f'mds.{self.fs.get_active_names()[0]}',
+ "damage", "rm", f"{damage_id}")
# Now I should be able to create a file with the same name as the
# damaged guy if I want.
@@ -520,14 +521,14 @@ class TestDamage(CephFSTestCase):
# Check that an entry is created in the damage table
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "ls", '--format=json-pretty'))
self.assertEqual(len(damage), 1)
self.assertEqual(damage[0]['damage_type'], "backtrace")
self.assertEqual(damage[0]['ino'], file1_ino)
- self.fs.mon_manager.raw_cluster_cmd(
+ self.run_ceph_cmd(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "rm", str(damage[0]['id']))
@@ -545,7 +546,7 @@ class TestDamage(CephFSTestCase):
# Check that an entry is created in the damage table
damage = json.loads(
- self.fs.mon_manager.raw_cluster_cmd(
+ self.get_ceph_cmd_stdout(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "ls", '--format=json-pretty'))
self.assertEqual(len(damage), 2)
@@ -560,7 +561,7 @@ class TestDamage(CephFSTestCase):
self.assertEqual(damage[1]['ino'], file2_ino)
for entry in damage:
- self.fs.mon_manager.raw_cluster_cmd(
+ self.run_ceph_cmd(
'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
"damage", "rm", str(entry['id']))
@@ -643,7 +644,7 @@ class TestDamage(CephFSTestCase):
self.fs.rank_freeze(True, rank=0)
# so now we want to trigger commit but this will crash, so:
c = ['--connect-timeout=60', 'tell', f"mds.{fscid}:0", "flush", "journal"]
- p = self.ceph_cluster.mon_manager.run_cluster_cmd(args=c, wait=False, timeoutcmd=30)
+ p = self.run_ceph_cmd(args=c, wait=False, timeoutcmd=30)
self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(), timeout=self.fs.beacon_timeout)
self.config_rm("mds", "mds_inject_journal_corrupt_dentry_first")
self.fs.rank_freeze(False, rank=0)
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 9a93bd62212..63ac6041547 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -428,7 +428,7 @@ class TestDataScan(CephFSTestCase):
self.fs.data_scan(["scan_links"])
# Mark the MDS repaired
- self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+ self.run_ceph_cmd('mds', 'repaired', '0')
# Start the MDS
self.fs.mds_restart()
@@ -603,7 +603,7 @@ class TestDataScan(CephFSTestCase):
file_path = "mydir/myfile_{0}".format(i)
ino = self.mount_a.path_to_ino(file_path)
obj = "{0:x}.{1:08x}".format(ino, 0)
- pgid = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+ pgid = json.loads(self.get_ceph_cmd_stdout(
"osd", "map", self.fs.get_data_pool_name(), obj,
"--format=json-pretty"
))['pgid']
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index ddcc58cccc5..4948e77d0ff 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -414,7 +414,7 @@ class TestFailover(CephFSTestCase):
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
+ self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
# Kill a standby and check for warning
victim = standbys.pop()
@@ -432,11 +432,11 @@ class TestFailover(CephFSTestCase):
# Set it one greater than standbys ever seen
standbys = self.mds_cluster.get_standby_daemons()
self.assertGreaterEqual(len(standbys), 1)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
+ self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
# Set it to 0
- self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
+ self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
self.wait_for_health_clear(timeout=30)
def test_discontinuous_mdsmap(self):
@@ -685,9 +685,8 @@ class TestMultiFilesystems(CephFSTestCase):
def setUp(self):
super(TestMultiFilesystems, self).setUp()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
- "enable_multiple", "true",
- "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
def _setup_two(self):
fs_a = self.mds_cluster.newfs(name="alpha")
@@ -701,7 +700,7 @@ class TestMultiFilesystems(CephFSTestCase):
# Reconfigure client auth caps
for mount in self.mounts:
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(mount.client_id),
'mds', 'allow',
'mon', 'allow r',
@@ -769,7 +768,7 @@ class TestMultiFilesystems(CephFSTestCase):
# Kill fs_a's active MDS, see a standby take over
self.mds_cluster.mds_stop(original_a)
- self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
+ self.run_ceph_cmd("mds", "fail", original_a)
self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
reject_fn=lambda v: v > 1)
# Assert that it's a *different* daemon that has now appeared in the map for fs_a
@@ -777,7 +776,7 @@ class TestMultiFilesystems(CephFSTestCase):
# Kill fs_b's active MDS, see a standby take over
self.mds_cluster.mds_stop(original_b)
- self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
+ self.run_ceph_cmd("mds", "fail", original_b)
self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
reject_fn=lambda v: v > 1)
# Assert that it's a *different* daemon that has now appeared in the map for fs_a
diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py
index 7d35ec0dfd5..902a53e79f7 100644
--- a/qa/tasks/cephfs/test_fragment.py
+++ b/qa/tasks/cephfs/test_fragment.py
@@ -160,14 +160,13 @@ class TestFragmentation(CephFSTestCase):
target_files = branch_factor**depth * int(split_size * 1.5)
create_files = target_files - files_written
- self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
+ self.run_ceph_cmd("log",
"{0} Writing {1} files (depth={2})".format(
self.__class__.__name__, create_files, depth
))
self.mount_a.create_n_files("splitdir/file_{0}".format(depth),
create_files)
- self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
- "{0} Done".format(self.__class__.__name__))
+ self.run_ceph_cmd("log","{0} Done".format(self.__class__.__name__))
files_written += create_files
log.info("Now have {0} files".format(files_written))
diff --git a/qa/tasks/cephfs/test_fstop.py b/qa/tasks/cephfs/test_fstop.py
index ed76eaac2cf..b625c24bfe8 100644
--- a/qa/tasks/cephfs/test_fstop.py
+++ b/qa/tasks/cephfs/test_fstop.py
@@ -20,10 +20,10 @@ class TestFSTop(CephFSTestCase):
super(TestFSTop, self).tearDown()
def _enable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "enable", "stats")
def _disable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "disable", "stats")
def _fstop_dump(self, *args):
return self.mount_a.run_shell(['cephfs-top',
@@ -93,8 +93,8 @@ class TestFSTop(CephFSTestCase):
# umount mount_b, mount another filesystem on it and use --dumpfs filter
self.mount_b.umount_wait()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set", "enable_multiple", "true",
- "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple", "true",
+ "--yes-i-really-mean-it")
# create a new filesystem
fs_b = self.mds_cluster.newfs(name=newfs_name)
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index 2b3a7d5f95c..90a65f069ef 100644
--- a/qa/tasks/cephfs/test_full.py
+++ b/qa/tasks/cephfs/test_full.py
@@ -61,10 +61,10 @@ class FullnessTestCase(CephFSTestCase):
self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch)
# Set and unset a flag to cause OSD epoch to increment
- self.fs.mon_manager.raw_cluster_cmd("osd", "set", "pause")
- self.fs.mon_manager.raw_cluster_cmd("osd", "unset", "pause")
+ self.run_ceph_cmd("osd", "set", "pause")
+ self.run_ceph_cmd("osd", "unset", "pause")
- out = self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json").strip()
+ out = self.get_ceph_cmd_stdout("osd", "dump", "--format=json").strip()
new_epoch = json.loads(out)['epoch']
self.assertNotEqual(self.initial_osd_epoch, new_epoch)
@@ -138,7 +138,7 @@ class FullnessTestCase(CephFSTestCase):
# Wait for the MDS to see the latest OSD map so that it will reliably
# be applying the policy of rejecting non-deletion metadata operations
# while in the full state.
- osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+ osd_epoch = json.loads(self.get_ceph_cmd_stdout("osd", "dump", "--format=json-pretty"))['epoch']
self.wait_until_true(
lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
timeout=10)
@@ -167,7 +167,7 @@ class FullnessTestCase(CephFSTestCase):
# Wait for the MDS to see the latest OSD map so that it will reliably
# be applying the free space policy
- osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+ osd_epoch = json.loads(self.get_ceph_cmd_stdout("osd", "dump", "--format=json-pretty"))['epoch']
self.wait_until_true(
lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
timeout=10)
@@ -376,8 +376,8 @@ class TestQuotaFull(FullnessTestCase):
super(TestQuotaFull, self).setUp()
pool_name = self.fs.get_data_pool_name()
- self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", pool_name,
- "max_bytes", "{0}".format(self.pool_capacity))
+ self.run_ceph_cmd("osd", "pool", "set-quota", pool_name,
+ "max_bytes", f"{self.pool_capacity}")
class TestClusterFull(FullnessTestCase):
diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py
index c5769784d51..365140fd9f6 100644
--- a/qa/tasks/cephfs/test_journal_repair.py
+++ b/qa/tasks/cephfs/test_journal_repair.py
@@ -233,8 +233,8 @@ class TestJournalRepair(CephFSTestCase):
self.fs.table_tool(["0", "reset", "session"])
self.fs.journal_tool(["journal", "reset"], 0)
self.fs.erase_mds_objects(1)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
- '--yes-i-really-mean-it')
+ self.run_ceph_cmd('fs', 'reset', self.fs.name,
+ '--yes-i-really-mean-it')
# Bring an MDS back online, mount a client, and see that we can walk the full
# filesystem tree again
diff --git a/qa/tasks/cephfs/test_mantle.py b/qa/tasks/cephfs/test_mantle.py
index 746c2ffe371..92583b502d4 100644
--- a/qa/tasks/cephfs/test_mantle.py
+++ b/qa/tasks/cephfs/test_mantle.py
@@ -22,7 +22,7 @@ class TestMantle(CephFSTestCase):
self.fs.mds_asok(['config', 'set', 'debug_mds_balancer', '5'], mds_id=m)
def push_balancer(self, obj, lua_code, expect):
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
self.fs.radosm(["put", obj, "-"], stdin=StringIO(lua_code))
with self.assert_cluster_log(failure + obj + " " + expect):
log.info("run a " + obj + " balancer that expects=" + expect)
@@ -31,16 +31,16 @@ class TestMantle(CephFSTestCase):
self.start_mantle()
expect = " : (2) No such file or directory"
- ret = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer')
+ ret = self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer')
assert(ret == 22) # EINVAL
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
with self.assert_cluster_log(failure + " " + expect): pass
def test_version_not_in_rados(self):
self.start_mantle()
expect = failure + "ghost.lua : (2) No such file or directory"
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
with self.assert_cluster_log(expect): pass
def test_balancer_invalid(self):
@@ -59,7 +59,7 @@ class TestMantle(CephFSTestCase):
def test_balancer_valid(self):
self.start_mantle()
lua_code = "BAL_LOG(0, \"test\")\nreturn {3, 4}"
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
self.fs.radosm(["put", "valid.lua", "-"], stdin=StringIO(lua_code))
with self.assert_cluster_log(success + "valid.lua"):
log.info("run a valid.lua balancer")
@@ -94,13 +94,13 @@ class TestMantle(CephFSTestCase):
expect = " : (110) Connection timed out"
# kill the OSDs so that the balancer pull from RADOS times out
- osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
+ osd_map = json.loads(self.get_ceph_cmd_stdout('osd', 'dump', '--format=json-pretty'))
for i in range(0, len(osd_map['osds'])):
- self.fs.mon_manager.raw_cluster_cmd_result('osd', 'down', str(i))
- self.fs.mon_manager.raw_cluster_cmd_result('osd', 'out', str(i))
+ self.get_ceph_cmd_result('osd', 'down', str(i))
+ self.get_ceph_cmd_result('osd', 'out', str(i))
# trigger a pull from RADOS
- self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+ self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
# make the timeout a little longer since dead OSDs spam ceph -w
with self.assert_cluster_log(failure + "valid.lua" + expect, timeout=30):
@@ -108,4 +108,4 @@ class TestMantle(CephFSTestCase):
# cleanup
for i in range(0, len(osd_map['osds'])):
- self.fs.mon_manager.raw_cluster_cmd_result('osd', 'in', str(i))
+ self.get_ceph_cmd_result('osd', 'in', str(i))
diff --git a/qa/tasks/cephfs/test_mds_metrics.py b/qa/tasks/cephfs/test_mds_metrics.py
index ad877f62280..0e824d3d278 100644
--- a/qa/tasks/cephfs/test_mds_metrics.py
+++ b/qa/tasks/cephfs/test_mds_metrics.py
@@ -57,13 +57,13 @@ class TestMDSMetrics(CephFSTestCase):
return verify_metrics_cbk
def _fs_perf_stats(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", *args)
+ return self.get_ceph_cmd_stdout("fs", "perf", "stats", *args)
def _enable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "enable", "stats")
def _disable_mgr_stats_plugin(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+ return self.get_ceph_cmd_stdout("mgr", "module", "disable", "stats")
def _spread_directory_on_all_ranks(self, fscid):
fs_status = self.fs.status()
@@ -115,7 +115,7 @@ class TestMDSMetrics(CephFSTestCase):
# Reconfigure client auth caps
for mount in self.mounts:
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', f"client.{mount.client_id}",
'mds', 'allow',
'mon', 'allow r',
@@ -404,7 +404,7 @@ class TestMDSMetrics(CephFSTestCase):
invalid_mds_rank = "1,"
# try, 'fs perf stat' command with invalid mds_rank
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
+ self.run_ceph_cmd("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
@@ -415,7 +415,7 @@ class TestMDSMetrics(CephFSTestCase):
invalid_client_id = "abcd"
# try the 'fs perf stats' command with an invalid client_id
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--client_id", invalid_client_id)
+ self.run_ceph_cmd("fs", "perf", "stats", "--client_id", invalid_client_id)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
@@ -426,7 +426,7 @@ class TestMDSMetrics(CephFSTestCase):
invalid_client_ip = "1.2.3"
# try the 'fs perf stats' command with an invalid client_ip
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--client_ip", invalid_client_ip)
+ self.run_ceph_cmd("fs", "perf", "stats", "--client_ip", invalid_client_ip)
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise
@@ -501,8 +501,8 @@ class TestMDSMetrics(CephFSTestCase):
self.mount_b.umount_wait()
self.fs.delete_all_filesystems()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
- "enable_multiple", "true", "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
# creating filesystem
fs_a = self._setup_fs(fs_name="fs1")
@@ -569,8 +569,8 @@ class TestMDSMetrics(CephFSTestCase):
self.mount_a.umount_wait()
self.mount_b.umount_wait()
- self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
- "enable_multiple", "true", "--yes-i-really-mean-it")
+ self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+ "true", "--yes-i-really-mean-it")
# creating filesystem
fs_b = self._setup_fs(fs_name="fs2")
diff --git a/qa/tasks/cephfs/test_mirroring.py b/qa/tasks/cephfs/test_mirroring.py
index c1a940e3f75..b7f436774dd 100644
--- a/qa/tasks/cephfs/test_mirroring.py
+++ b/qa/tasks/cephfs/test_mirroring.py
@@ -34,13 +34,13 @@ class TestMirroring(CephFSTestCase):
super(TestMirroring, self).tearDown()
def enable_mirroring_module(self):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", TestMirroring.MODULE_NAME)
+ self.run_ceph_cmd("mgr", "module", "enable", TestMirroring.MODULE_NAME)
def disable_mirroring_module(self):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", TestMirroring.MODULE_NAME)
+ self.run_ceph_cmd("mgr", "module", "disable", TestMirroring.MODULE_NAME)
def enable_mirroring(self, fs_name, fs_id):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "enable", fs_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "enable", fs_name)
time.sleep(10)
# verify via asok
res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -49,7 +49,7 @@ class TestMirroring(CephFSTestCase):
self.assertTrue(res['snap_dirs']['dir_count'] == 0)
def disable_mirroring(self, fs_name, fs_id):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "disable", fs_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "disable", fs_name)
time.sleep(10)
# verify via asok
try:
@@ -76,15 +76,15 @@ class TestMirroring(CephFSTestCase):
def peer_add(self, fs_name, fs_id, peer_spec, remote_fs_name=None):
if remote_fs_name:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
else:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
time.sleep(10)
self.verify_peer_added(fs_name, fs_id, peer_spec, remote_fs_name)
def peer_remove(self, fs_name, fs_id, peer_spec):
peer_uuid = self.get_peer_uuid(peer_spec)
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
time.sleep(10)
# verify via asok
res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -92,13 +92,14 @@ class TestMirroring(CephFSTestCase):
self.assertTrue(res['peers'] == {} and res['snap_dirs']['dir_count'] == 0)
def bootstrap_peer(self, fs_name, client_name, site_name):
- outj = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
- "fs", "snapshot", "mirror", "peer_bootstrap", "create", fs_name, client_name, site_name))
+ outj = json.loads(self.get_ceph_cmd_stdout(
+ "fs", "snapshot", "mirror", "peer_bootstrap", "create", fs_name,
+ client_name, site_name))
return outj['token']
def import_peer(self, fs_name, token):
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_bootstrap", "import",
- fs_name, token)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_bootstrap",
+ "import", fs_name, token)
def add_directory(self, fs_name, fs_id, dir_name):
# get initial dir count
@@ -107,7 +108,7 @@ class TestMirroring(CephFSTestCase):
dir_count = res['snap_dirs']['dir_count']
log.debug(f'initial dir_count={dir_count}')
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", fs_name, dir_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "add", fs_name, dir_name)
time.sleep(10)
# verify via asok
@@ -124,7 +125,7 @@ class TestMirroring(CephFSTestCase):
dir_count = res['snap_dirs']['dir_count']
log.debug(f'initial dir_count={dir_count}')
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
time.sleep(10)
# verify via asok
@@ -234,7 +235,7 @@ class TestMirroring(CephFSTestCase):
return json.loads(res)
def get_mirror_daemon_status(self):
- daemon_status = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "daemon", "status"))
+ daemon_status = json.loads(self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "daemon", "status"))
log.debug(f'daemon_status: {daemon_status}')
# running a single mirror daemon is supported
status = daemon_status[0]
@@ -321,7 +322,7 @@ class TestMirroring(CephFSTestCase):
# try removing peer
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
except CommandFailedError as ce:
if ce.exitstatus != errno.EINVAL:
raise RuntimeError(-errno.EINVAL, 'incorrect error code when removing a peer')
@@ -466,12 +467,13 @@ class TestMirroring(CephFSTestCase):
def test_cephfs_mirror_stats(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
@@ -532,12 +534,13 @@ class TestMirroring(CephFSTestCase):
def test_cephfs_mirror_cancel_sync(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
@@ -568,12 +571,13 @@ class TestMirroring(CephFSTestCase):
def test_cephfs_mirror_restart_sync_on_blocklist(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
@@ -698,7 +702,7 @@ class TestMirroring(CephFSTestCase):
# enable mirroring through mon interface -- this should result in the mirror daemon
# failing to enable mirroring due to absence of `cephfs_mirror` index object.
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
with safe_while(sleep=5, tries=10, action='wait for failed state') as proceed:
while proceed():
@@ -713,7 +717,7 @@ class TestMirroring(CephFSTestCase):
except:
pass
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
time.sleep(10)
# verify via asok
try:
@@ -735,7 +739,7 @@ class TestMirroring(CephFSTestCase):
# enable mirroring through mon interface -- this should result in the mirror daemon
# failing to enable mirroring due to absence of `cephfs_mirror` index object.
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
# need safe_while since non-failed status pops up as mirroring is restarted
# internally in mirror daemon.
with safe_while(sleep=5, tries=20, action='wait for failed state') as proceed:
@@ -766,7 +770,7 @@ class TestMirroring(CephFSTestCase):
self.assertTrue(res['peers'] == {})
self.assertTrue(res['snap_dirs']['dir_count'] == 0)
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", self.primary_fs_name)
+ self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
time.sleep(10)
# verify via asok
try:
@@ -792,7 +796,7 @@ class TestMirroring(CephFSTestCase):
# verify via peer_list interface
peer_uuid = self.get_peer_uuid("client.mirror_peer_bootstrap@site-remote")
- res = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_list", self.primary_fs_name))
+ res = json.loads(self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_list", self.primary_fs_name))
self.assertTrue(peer_uuid in res)
self.assertTrue('mon_host' in res[peer_uuid] and res[peer_uuid]['mon_host'] != '')
@@ -803,12 +807,13 @@ class TestMirroring(CephFSTestCase):
def test_cephfs_mirror_symlink_sync(self):
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
@@ -893,20 +898,20 @@ class TestMirroring(CephFSTestCase):
dir_path_p = "/d0/d1"
dir_path = "/d0/d1/d2"
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
time.sleep(10)
# this uses an undocumented interface to get dirpath map state
- res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+ res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
res = json.loads(res_json)
# there are no mirror daemons
self.assertTrue(res['state'], 'stalled')
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
time.sleep(10)
try:
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
except CommandFailedError as ce:
if ce.exitstatus != errno.ENOENT:
raise RuntimeError('invalid errno when checking dirmap status for non-existent directory')
@@ -914,11 +919,11 @@ class TestMirroring(CephFSTestCase):
raise RuntimeError('incorrect errno when checking dirmap state for non-existent directory')
# adding a parent directory should be allowed
- self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
+ self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
time.sleep(10)
# however, this directory path should get stalled too
- res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
+ res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
res = json.loads(res_json)
# there are no mirror daemons
self.assertTrue(res['state'], 'stalled')
@@ -930,7 +935,7 @@ class TestMirroring(CephFSTestCase):
# wait for restart mirror on blocklist
time.sleep(60)
- res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
+ res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
res = json.loads(res_json)
# the mirror daemon has restarted, so the directory should be mapped again
self.assertTrue(res['state'], 'mapped')
@@ -940,12 +945,13 @@ class TestMirroring(CephFSTestCase):
def test_cephfs_mirror_incremental_sync(self):
""" Test incremental snapshot synchronization (based on mtime differences)."""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1018,12 +1024,13 @@ class TestMirroring(CephFSTestCase):
file_z | sym dir reg sym
"""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1089,12 +1096,13 @@ class TestMirroring(CephFSTestCase):
"""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1169,12 +1177,13 @@ class TestMirroring(CephFSTestCase):
that all replayer threads (3 by default) in the mirror daemon are busy.
"""
log.debug('reconfigure client auth caps')
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
'mds', 'allow rw',
'mon', 'allow r',
'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
- self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+ self.backup_fs.get_data_pool_name(),
+ self.backup_fs.get_data_pool_name()))
log.debug(f'mounting filesystem {self.secondary_fs_name}')
self.mount_b.umount_wait()
@@ -1266,7 +1275,7 @@ class TestMirroring(CephFSTestCase):
log.debug('reconfigure client auth caps')
cid = self.mount_b.client_id
data_pool = self.backup_fs.get_data_pool_name()
- self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', f"client.{cid}",
'mds', 'allow rw',
'mon', 'allow r',
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index 1e9678322ab..58c4e379095 100644
--- a/qa/tasks/cephfs/test_misc.py
+++ b/qa/tasks/cephfs/test_misc.py
@@ -96,16 +96,15 @@ class TestMisc(CephFSTestCase):
self.fs.fail()
- self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
- '--yes-i-really-mean-it')
+ self.run_ceph_cmd('fs', 'rm', self.fs.name, '--yes-i-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
- self.fs.metadata_pool_name,
- self.fs.metadata_pool_name,
- '--yes-i-really-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- self.fs.metadata_pool_name,
- '--pg_num_min', str(self.fs.pg_num_min))
+ self.run_ceph_cmd('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.run_ceph_cmd('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ '--pg_num_min', str(self.fs.pg_num_min))
# insert a garbage object
self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
@@ -119,34 +118,34 @@ class TestMisc(CephFSTestCase):
self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
try:
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name)
+ self.run_ceph_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name)
except CommandFailedError as e:
self.assertEqual(e.exitstatus, errno.EINVAL)
else:
raise AssertionError("Expected EINVAL")
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name, "--force")
+ self.run_ceph_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name, "--force")
- self.fs.mon_manager.raw_cluster_cmd('fs', 'fail', self.fs.name)
+ self.run_ceph_cmd('fs', 'fail', self.fs.name)
- self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
- '--yes-i-really-mean-it')
+ self.run_ceph_cmd('fs', 'rm', self.fs.name,
+ '--yes-i-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
- self.fs.metadata_pool_name,
- self.fs.metadata_pool_name,
- '--yes-i-really-really-mean-it')
- self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
- self.fs.metadata_pool_name,
- '--pg_num_min', str(self.fs.pg_num_min))
- self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
- self.fs.metadata_pool_name,
- data_pool_name,
- '--allow_dangerous_metadata_overlay')
+ self.run_ceph_cmd('osd', 'pool', 'delete',
+ self.fs.metadata_pool_name,
+ self.fs.metadata_pool_name,
+ '--yes-i-really-really-mean-it')
+ self.run_ceph_cmd('osd', 'pool', 'create',
+ self.fs.metadata_pool_name,
+ '--pg_num_min', str(self.fs.pg_num_min))
+ self.run_ceph_cmd('fs', 'new', self.fs.name,
+ self.fs.metadata_pool_name,
+ data_pool_name,
+ '--allow_dangerous_metadata_overlay')
def test_cap_revoke_nonresponder(self):
"""
@@ -199,9 +198,8 @@ class TestMisc(CephFSTestCase):
pool_name = self.fs.get_data_pool_name()
raw_df = self.fs.get_pool_df(pool_name)
raw_avail = float(raw_df["max_avail"])
- out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
- pool_name, 'size',
- '-f', 'json-pretty')
+ out = self.get_ceph_cmd_stdout('osd', 'pool', 'get', pool_name,
+ 'size', '-f', 'json-pretty')
_ = json.loads(out)
proc = self.mount_a.run_shell(['df', '.'])
@@ -253,9 +251,8 @@ class TestMisc(CephFSTestCase):
self.fs.set_allow_new_snaps(False)
self.fs.set_allow_standby_replay(True)
- lsflags = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'lsflags',
- self.fs.name,
- "--format=json-pretty"))
+ lsflags = json.loads(self.get_ceph_cmd_stdout(
+ 'fs', 'lsflags', self.fs.name, "--format=json-pretty"))
self.assertEqual(lsflags["joinable"], False)
self.assertEqual(lsflags["allow_snaps"], False)
self.assertEqual(lsflags["allow_multimds_snaps"], True)
@@ -425,7 +422,7 @@ class TestMisc(CephFSTestCase):
self.fs.mds_asok(['config', 'set', 'debug_mds', '1/10'])
self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
try:
- mons = json.loads(self.fs.mon_manager.raw_cluster_cmd('mon', 'dump', '-f', 'json'))['mons']
+ mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
except:
self.assertTrue(False, "Error fetching monitors")
@@ -468,7 +465,7 @@ class TestMisc(CephFSTestCase):
self.fs.mds_asok(['config', 'set', 'mds_heartbeat_grace', '1'])
self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
try:
- mons = json.loads(self.fs.mon_manager.raw_cluster_cmd('mon', 'dump', '-f', 'json'))['mons']
+ mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
except:
self.assertTrue(False, "Error fetching monitors")
diff --git a/qa/tasks/cephfs/test_multifs_auth.py b/qa/tasks/cephfs/test_multifs_auth.py
index 7e50c4e1d76..acf434294d1 100644
--- a/qa/tasks/cephfs/test_multifs_auth.py
+++ b/qa/tasks/cephfs/test_multifs_auth.py
@@ -26,15 +26,15 @@ class TestMultiFS(CephFSTestCase):
# we might have it - the client - if the same cluster was used for a
# different vstart_runner.py run.
- self.run_cluster_cmd(f'auth rm {self.client_name}')
+ self.run_ceph_cmd(f'auth rm {self.client_name}')
self.fs1 = self.fs
self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
# we'll reassign caps to client.1 so that it can operate with cephfs2
- self.run_cluster_cmd(f'auth caps client.{self.mount_b.client_id} mon '
- f'"allow r" osd "allow rw '
- f'pool={self.fs2.get_data_pool_name()}" mds allow')
+ self.run_ceph_cmd(f'auth caps client.{self.mount_b.client_id} mon '
+ f'"allow r" osd "allow rw '
+ f'pool={self.fs2.get_data_pool_name()}" mds allow')
self.mount_b.remount(cephfs_name=self.fs2.name)
diff --git a/qa/tasks/cephfs/test_multimds_misc.py b/qa/tasks/cephfs/test_multimds_misc.py
index 2bb6257c7eb..e0e46fb24c0 100644
--- a/qa/tasks/cephfs/test_multimds_misc.py
+++ b/qa/tasks/cephfs/test_multimds_misc.py
@@ -116,7 +116,7 @@ class TestScrub2(CephFSTestCase):
def expect_exdev(cmd, mds):
try:
- self.fs.mon_manager.raw_cluster_cmd('tell', 'mds.{0}'.format(mds), *cmd)
+ self.run_ceph_cmd('tell', 'mds.{0}'.format(mds), *cmd)
except CommandFailedError as e:
if e.exitstatus == errno.EXDEV:
pass
diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py
index 0a10709e6c5..5fe71054ad9 100644
--- a/qa/tasks/cephfs/test_nfs.py
+++ b/qa/tasks/cephfs/test_nfs.py
@@ -16,16 +16,14 @@ NFS_POOL_NAME = '.nfs' # should match mgr_module.py
# TODO Add test for cluster update when ganesha can be deployed on multiple ports.
class TestNFS(MgrTestCase):
def _cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+ return self.get_ceph_cmd_stdout(args)
def _nfs_cmd(self, *args):
return self._cmd("nfs", *args)
def _nfs_complete_cmd(self, cmd):
- return self.mgr_cluster.mon_manager.run_cluster_cmd(args=f"nfs {cmd}",
- stdout=StringIO(),
- stderr=StringIO(),
- check_status=False)
+ return self.run_ceph_cmd(args=f"nfs {cmd}", stdout=StringIO(),
+ stderr=StringIO(), check_status=False)
def _orch_cmd(self, *args):
return self._cmd("orch", *args)
@@ -142,7 +140,7 @@ class TestNFS(MgrTestCase):
:param cmd_args: nfs command arguments to be run
'''
cmd_func()
- ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd_args)
+ ret = self.get_ceph_cmd_result(*cmd_args)
if ret != 0:
self.fail("Idempotency test failed")
diff --git a/qa/tasks/cephfs/test_pool_perm.py b/qa/tasks/cephfs/test_pool_perm.py
index 9912debedee..b55052b826e 100644
--- a/qa/tasks/cephfs/test_pool_perm.py
+++ b/qa/tasks/cephfs/test_pool_perm.py
@@ -30,9 +30,9 @@ class TestPoolPerm(CephFSTestCase):
client_name = "client.{0}".format(self.mount_a.client_id)
# set data pool read only
- self.fs.mon_manager.raw_cluster_cmd_result(
- 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
- 'allow r pool={0}'.format(self.fs.get_data_pool_name()))
+ self.get_ceph_cmd_result(
+ 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+ 'osd', 'allow r pool={0}'.format(self.fs.get_data_pool_name()))
self.mount_a.umount_wait()
self.mount_a.mount_wait()
@@ -41,9 +41,9 @@ class TestPoolPerm(CephFSTestCase):
self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
# set data pool write only
- self.fs.mon_manager.raw_cluster_cmd_result(
- 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
- 'allow w pool={0}'.format(self.fs.get_data_pool_name()))
+ self.get_ceph_cmd_result(
+ 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+ 'osd', 'allow w pool={0}'.format(self.fs.get_data_pool_name()))
self.mount_a.umount_wait()
self.mount_a.mount_wait()
@@ -66,7 +66,7 @@ class TestPoolPerm(CephFSTestCase):
self.mount_a.run_shell(["mkdir", "layoutdir"])
# Set MDS 'rw' perms: missing 'p' means no setting pool layouts
- self.fs.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', client_name, 'mds', 'allow rw', 'mon', 'allow r',
'osd',
'allow rw pool={0},allow rw pool={1}'.format(
@@ -86,7 +86,7 @@ class TestPoolPerm(CephFSTestCase):
self.mount_a.umount_wait()
# Set MDS 'rwp' perms: should now be able to set layouts
- self.fs.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', client_name, 'mds', 'allow rwp', 'mon', 'allow r',
'osd',
'allow rw pool={0},allow rw pool={1}'.format(
@@ -101,7 +101,7 @@ class TestPoolPerm(CephFSTestCase):
self.mount_a.umount_wait()
def tearDown(self):
- self.fs.mon_manager.raw_cluster_cmd_result(
+ self.get_ceph_cmd_result(
'auth', 'caps', "client.{0}".format(self.mount_a.client_id),
'mds', 'allow', 'mon', 'allow r', 'osd',
'allow rw pool={0}'.format(self.fs.get_data_pool_names()[0]))
diff --git a/qa/tasks/cephfs/test_recovery_fs.py b/qa/tasks/cephfs/test_recovery_fs.py
index bbcdf976972..17669c0f2a8 100644
--- a/qa/tasks/cephfs/test_recovery_fs.py
+++ b/qa/tasks/cephfs/test_recovery_fs.py
@@ -27,7 +27,7 @@ class TestFSRecovery(CephFSTestCase):
# recovered/intact
self.fs.rm()
# Recreate file system with pool and previous fscid
- self.fs.mon_manager.raw_cluster_cmd(
+ self.run_ceph_cmd(
'fs', 'new', self.fs.name, metadata_pool, data_pool,
'--recover', '--force', '--fscid', f'{self.fs.id}')
self.fs.set_joinable()
diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py
index 8c4e1967d35..7aef2822985 100644
--- a/qa/tasks/cephfs/test_recovery_pool.py
+++ b/qa/tasks/cephfs/test_recovery_pool.py
@@ -119,7 +119,7 @@ class TestRecoveryPool(CephFSTestCase):
recovery_fs.create(recover=True, metadata_overlay=True)
recovery_pool = recovery_fs.get_metadata_pool_name()
- recovery_fs.mon_manager.raw_cluster_cmd('-s')
+ self.run_ceph_cmd('-s')
# Reset the MDS map in case multiple ranks were in play: recovery procedure
# only understands how to rebuild metadata under rank 0
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index f88929ad265..0521e60b409 100644
--- a/qa/tasks/cephfs/test_scrub_checks.py
+++ b/qa/tasks/cephfs/test_scrub_checks.py
@@ -281,8 +281,8 @@ class TestScrubChecks(CephFSTestCase):
all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
for d in damage:
- self.fs.mon_manager.raw_cluster_cmd(
- 'tell', 'mds.{0}'.format(self.fs.get_active_names()[mds_rank]),
+ self.run_ceph_cmd(
+ 'tell', f'mds.{self.fs.get_active_names()[mds_rank]}',
"damage", "rm", str(d['id']))
return len(damage) > 0
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index ad6fd1d609c..b3b88af7246 100644
--- a/qa/tasks/cephfs/test_sessionmap.py
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -158,7 +158,7 @@ class TestSessionMap(CephFSTestCase):
if mon_caps is None:
mon_caps = "allow r"
- out = self.fs.mon_manager.raw_cluster_cmd(
+ out = self.get_ceph_cmd_stdout(
"auth", "get-or-create", "client.{name}".format(name=id_name),
"mds", mds_caps,
"osd", osd_caps,
diff --git a/qa/tasks/cephfs/test_snap_schedules.py b/qa/tasks/cephfs/test_snap_schedules.py
index ce5351a3072..7bbb274f85b 100644
--- a/qa/tasks/cephfs/test_snap_schedules.py
+++ b/qa/tasks/cephfs/test_snap_schedules.py
@@ -36,7 +36,7 @@ class TestSnapSchedulesHelper(CephFSTestCase):
self.assertTrue((delta <= timo + 5) and (delta >= timo - 5))
def _fs_cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
+ return self.get_ceph_cmd_stdout("fs", *args)
def fs_snap_schedule_cmd(self, *args, **kwargs):
fs = kwargs.pop('fs', self.volname)
@@ -60,10 +60,10 @@ class TestSnapSchedulesHelper(CephFSTestCase):
self.volname = result[0]['name']
def _enable_snap_schedule(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "snap_schedule")
+ return self.get_ceph_cmd_stdout("mgr", "module", "enable", "snap_schedule")
def _disable_snap_schedule(self):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "snap_schedule")
+ return self.get_ceph_cmd_stdout("mgr", "module", "disable", "snap_schedule")
def _allow_minute_granularity_snapshots(self):
self.config_set('mgr', 'mgr/snap_schedule/allow_m_granularity', True)
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index 608dcc81f15..29dc06066ad 100644
--- a/qa/tasks/cephfs/test_snapshots.py
+++ b/qa/tasks/cephfs/test_snapshots.py
@@ -566,40 +566,40 @@ class TestMonSnapsAndFsPools(CephFSTestCase):
"""
test_pool_name = 'snap-test-pool'
base_cmd = f'osd pool create {test_pool_name}'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, 0)
self.fs.rados(["mksnap", "snap3"], pool=test_pool_name)
base_cmd = f'fs add_data_pool {self.fs.name} {test_pool_name}'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, errno.EOPNOTSUPP)
# cleanup
self.fs.rados(["rmsnap", "snap3"], pool=test_pool_name)
base_cmd = f'osd pool delete {test_pool_name}'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
def test_using_pool_with_snap_fails_fs_creation(self):
"""
Test that using a pool with snaps for fs creation fails
"""
base_cmd = 'osd pool create test_data_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, 0)
base_cmd = 'osd pool create test_metadata_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, 0)
self.fs.rados(["mksnap", "snap4"], pool='test_data_pool')
base_cmd = 'fs new testfs test_metadata_pool test_data_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
self.assertEqual(ret, errno.EOPNOTSUPP)
# cleanup
self.fs.rados(["rmsnap", "snap4"], pool='test_data_pool')
base_cmd = 'osd pool delete test_data_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
base_cmd = 'osd pool delete test_metadata_pool'
- ret = self.run_cluster_cmd_result(base_cmd)
+ ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 8bdc126e2b6..11701dc2836 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -651,9 +651,8 @@ class TestStrays(CephFSTestCase):
self.assertFalse(self._is_stopped(1))
# Permit the daemon to start purging again
- self.fs.mon_manager.raw_cluster_cmd('tell', 'mds.{0}'.format(rank_1_id),
- 'injectargs',
- "--mds_max_purge_files 100")
+ self.run_ceph_cmd('tell', 'mds.{0}'.format(rank_1_id),
+ 'injectargs', "--mds_max_purge_files 100")
# It should now proceed through shutdown
self.fs.wait_for_daemons(timeout=120)
@@ -816,7 +815,7 @@ touch pin/placeholder
:param pool_name: Which pool (must exist)
"""
- out = self.fs.mon_manager.raw_cluster_cmd("df", "--format=json-pretty")
+ out = self.get_ceph_cmd_stdout("df", "--format=json-pretty")
for p in json.loads(out)['pools']:
if p['name'] == pool_name:
return p['stats']
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 379c175e39b..f5f5c15fba1 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -35,10 +35,10 @@ class TestVolumesHelper(CephFSTestCase):
DEFAULT_NUMBER_OF_FILES = 1024
def _fs_cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
+ return self.get_ceph_cmd_stdout("fs", *args)
def _raw_cmd(self, *args):
- return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+ return self.get_ceph_cmd_stdout(args)
def __check_clone_state(self, state, clone, clone_group=None, timo=120):
check = 0
@@ -1024,7 +1024,7 @@ class TestSubvolumeGroups(TestVolumesHelper):
# Create auth_id
authid = "client.guest1"
- user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+ user = json.loads(self.get_ceph_cmd_stdout(
"auth", "get-or-create", authid,
"mds", "allow rw path=/volumes",
"mgr", "allow rw",
@@ -1110,7 +1110,7 @@ class TestSubvolumeGroups(TestVolumesHelper):
# Create auth_id
authid = "client.guest1"
- user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+ user = json.loads(self.get_ceph_cmd_stdout(
"auth", "get-or-create", authid,
"mds", f"allow rw path={mount_path}",
"mgr", "allow rw",
@@ -2741,7 +2741,7 @@ class TestSubvolumes(TestVolumesHelper):
group = self._generate_random_group_name()
# Create auth_id
- self.fs.mon_manager.raw_cluster_cmd(
+ self.run_ceph_cmd(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
@@ -2770,7 +2770,7 @@ class TestSubvolumes(TestVolumesHelper):
self.fail("expected the 'fs subvolume authorize' command to fail")
# clean up
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -2785,7 +2785,7 @@ class TestSubvolumes(TestVolumesHelper):
group = self._generate_random_group_name()
# Create auth_id
- self.fs.mon_manager.raw_cluster_cmd(
+ self.run_ceph_cmd(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
@@ -2813,7 +2813,7 @@ class TestSubvolumes(TestVolumesHelper):
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
"--group_name", group)
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -2847,7 +2847,7 @@ class TestSubvolumes(TestVolumesHelper):
"--group_name", group).rstrip()
# Update caps for guestclient_1 out of band
- out = self.fs.mon_manager.raw_cluster_cmd(
+ out = self.get_ceph_cmd_stdout(
"auth", "caps", "client.guest1",
"mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
"osd", "allow rw pool=cephfs_data",
@@ -2860,7 +2860,7 @@ class TestSubvolumes(TestVolumesHelper):
# Validate the caps of guestclient_1 after deauthorize. It should not have deleted
# guestclient_1. The mgr and mds caps should be present which was updated out of band.
- out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
+ out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.guest1", "--format=json-pretty"))
self.assertEqual("client.guest1", out[0]["entity"])
self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
@@ -2868,7 +2868,7 @@ class TestSubvolumes(TestVolumesHelper):
self.assertNotIn("osd", out[0]["caps"])
# clean up
- out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ out = self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -2920,7 +2920,7 @@ class TestSubvolumes(TestVolumesHelper):
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -2976,7 +2976,7 @@ class TestSubvolumes(TestVolumesHelper):
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3051,7 +3051,7 @@ class TestSubvolumes(TestVolumesHelper):
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3123,7 +3123,7 @@ class TestSubvolumes(TestVolumesHelper):
# clean up
self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
guest_mount.umount_wait()
- self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+ self.run_ceph_cmd("auth", "rm", "client.guest1")
self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -5505,9 +5505,9 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
# try to get metadata after removing snapshot.
# Expecting error ENOENT with error message of snapshot does not exist
- cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
- args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
- check_status=False, stdout=StringIO(), stderr=StringIO())
+ cmd_ret = self.run_ceph_cmd(
+ args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
+ check_status=False, stdout=StringIO(), stderr=StringIO())
self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
f"Expecting message: snapshot '{snapshot}' does not exist ")
@@ -7019,8 +7019,8 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
new_pool = "new_pool"
self.fs.add_data_pool(new_pool)
- self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
- "max_bytes", "{0}".format(pool_capacity // 4))
+ self.run_ceph_cmd("osd", "pool", "set-quota", new_pool,
+ "max_bytes", f"{pool_capacity // 4}")
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
@@ -7794,7 +7794,7 @@ class TestMisc(TestVolumesHelper):
self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)
# Validate that the mds path added is of subvol1 and not of subvol2
- out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
+ out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.alice", "--format=json-pretty"))
self.assertEqual("client.alice", out[0]["entity"])
self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])
diff --git a/qa/tasks/cephfs/xfstests_dev.py b/qa/tasks/cephfs/xfstests_dev.py
index 0c1ad4ce843..09cef974a55 100644
--- a/qa/tasks/cephfs/xfstests_dev.py
+++ b/qa/tasks/cephfs/xfstests_dev.py
@@ -156,8 +156,8 @@ class XFSTestsDev(CephFSTestCase):
import configparser
cp = configparser.ConfigParser()
- cp.read_string(self.fs.mon_manager.raw_cluster_cmd(
- 'auth', 'get-or-create', 'client.admin'))
+ cp.read_string(self.get_ceph_cmd_stdout('auth', 'get-or-create',
+ 'client.admin'))
return cp['client.admin']['key']
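
Taken together, the hunks above replace direct mon_manager.raw_cluster_cmd* calls with the run_ceph_cmd(), get_ceph_cmd_stdout() and get_ceph_cmd_result() helpers added to CephFSTestCase. The sketch below only illustrates the calling patterns these tests rely on; the wrapper function, pool name and commands are assumptions for illustration, not part of the change:

import json

def example_helper_usage(test):
    # 'test' is assumed to be a CephFSTestCase-like object that exposes the
    # helpers used throughout the hunks above; commands are illustrative only.

    # positional form when the command output is not needed
    test.run_ceph_cmd('fs', 'set', 'cephfs', 'standby_count_wanted', '0')

    # single-string form via the args= keyword when only the exit status matters
    ret = test.get_ceph_cmd_result(args='osd pool create example-pool',
                                   check_status=False)
    assert ret == 0

    # capture stdout when the output must be parsed, e.g. as JSON
    out = test.get_ceph_cmd_stdout('osd', 'dump', '--format=json')
    return json.loads(out)['epoch']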