| author | Rishabh Dave <ridave@redhat.com> | 2023-03-16 10:41:08 +0100 |
|---|---|---|
| committer | Rishabh Dave <ridave@redhat.com> | 2023-06-28 14:08:19 +0200 |
| commit | f8f2154e54e202996b24904162da6022081e9d93 (patch) | |
| tree | fe74f4eb3c5c7a36b657985d8711a0a03358f03d /qa | |
| parent | qa/cephfs: add and use get_ceph_cmd_result() (diff) | |
qa/cephfs: add and use run_ceph_cmd()
Instead of writing something as long as
"self.mds_cluster.mon_manager.run_cluster_cmd()" every time a command
needs to be executed, add a helper method to class CephFSTestCase and
use that. With this, running a command becomes as simple as
"self.run_ceph_cmd()".
Signed-off-by: Rishabh Dave <ridave@redhat.com>
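
For quick reference, the helper pair this patch introduces in qa/tasks/cephfs/cephfs_test_case.py is sketched below (copied from the hunk further down, with explanatory comments added). It assumes the class it is mixed into exposes a mon_manager whose run_cluster_cmd() accepts an args keyword, as the teuthology test classes here do.

    class RunCephCmd:

        def run_ceph_cmd(self, *args, **kwargs):
            # Accept both run_ceph_cmd('osd blocklist clear') and
            # run_ceph_cmd('osd', 'blocklist', 'clear'); either form is
            # forwarded as the 'args' keyword that run_cluster_cmd() expects.
            if kwargs.get('args') is None and args:
                if len(args) == 1:
                    args = args[0]
                kwargs['args'] = args
            return self.mon_manager.run_cluster_cmd(**kwargs)

        def get_ceph_cmd_result(self, *args, **kwargs):
            # Same calling convention, but only the exit status is returned.
            if kwargs.get('args') is None and args:
                if len(args) == 1:
                    args = args[0]
                kwargs['args'] = args
            return self.run_ceph_cmd(**kwargs).exitstatus

    # Before: self.mds_cluster.mon_manager.run_cluster_cmd(args="osd blocklist clear")
    # After:  self.run_ceph_cmd("osd blocklist clear")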
Diffstat (limited to 'qa')
-rw-r--r-- | qa/tasks/cephfs/cephfs_test_case.py  | 33
-rw-r--r-- | qa/tasks/cephfs/test_admin.py        | 60
-rw-r--r-- | qa/tasks/cephfs/test_damage.py       |  2
-rw-r--r-- | qa/tasks/cephfs/test_multifs_auth.py |  8
-rw-r--r-- | qa/tasks/cephfs/test_nfs.py          |  6
-rw-r--r-- | qa/tasks/cephfs/test_volumes.py      |  6
6 files changed, 56 insertions(+), 59 deletions(-)
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index e8ff8c43ae6..bb538d82014 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -3,8 +3,6 @@
 import logging
 import os
 import re
 
-from shlex import split as shlex_split
-
 from tasks.ceph_test_case import CephTestCase
 from teuthology import contextutil
@@ -51,12 +49,19 @@ class MountDetails():
 
 
 class RunCephCmd:
+    def run_ceph_cmd(self, *args, **kwargs):
+        if kwargs.get('args') is None and args:
+            if len(args) == 1:
+                args = args[0]
+            kwargs['args'] = args
+        return self.mon_manager.run_cluster_cmd(**kwargs)
+
     def get_ceph_cmd_result(self, *args, **kwargs):
         if kwargs.get('args') is None and args:
             if len(args) == 1:
                 args = args[0]
             kwargs['args'] = args
-        return self.mon_manager.run_cluster_cmd(**kwargs).exitstatus
+        return self.run_ceph_cmd(**kwargs).exitstatus
 
 
 class CephFSTestCase(CephTestCase, RunCephCmd):
@@ -100,7 +105,7 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         # In case anything is in the OSD blocklist list, clear it out. This is to avoid
         # the OSD map changing in the background (due to blocklist expiry) while tests run.
         try:
-            self.mds_cluster.mon_manager.run_cluster_cmd(args="osd blocklist clear")
+            self.run_ceph_cmd("osd blocklist clear")
         except CommandFailedError:
             # Fallback for older Ceph cluster
             try:
@@ -108,14 +113,14 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
                     "dump", "--format=json-pretty"))['blocklist']
                 log.info(f"Removing {len(blocklist)} blocklist entries")
                 for addr, blocklisted_at in blocklist.items():
-                    self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blocklist", "rm", addr)
+                    self.run_ceph_cmd("osd", "blocklist", "rm", addr)
             except KeyError:
                 # Fallback for more older Ceph clusters, who will use 'blacklist' instead.
                 blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
                     "dump", "--format=json-pretty"))['blacklist']
                 log.info(f"Removing {len(blacklist)} blacklist entries")
                 for addr, blocklisted_at in blacklist.items():
-                    self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
+                    self.run_ceph_cmd("osd", "blacklist", "rm", addr)
 
     def _init_mon_manager(self):
         # if vstart_runner.py has invoked this code
@@ -177,7 +182,7 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         for entry in self.auth_list():
             ent_type, ent_id = entry['entity'].split(".")
             if ent_type == "client" and ent_id not in client_mount_ids and not (ent_id == "admin" or ent_id[:6] == 'mirror'):
-                self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])
+                self.run_ceph_cmd("auth", "del", entry['entity'])
 
         if self.REQUIRE_FILESYSTEM:
             self.fs = self.mds_cluster.newfs(create=True)
@@ -205,9 +210,8 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
 
         if self.REQUIRE_BACKUP_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
                 self.skipTest("backup filesystem requires a primary filesystem as well")
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
-                                                'enable_multiple', 'true',
-                                                '--yes-i-really-mean-it')
+            self.run_ceph_cmd('fs', 'flag', 'set', 'enable_multiple', 'true',
+                              '--yes-i-really-mean-it')
             self.backup_fs = self.mds_cluster.newfs(name="backup_fs")
             self.backup_fs.wait_for_daemons()
@@ -428,11 +432,6 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         except contextutil.MaxWhileTries as e:
             raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
 
-    def run_cluster_cmd(self, cmd):
-        if isinstance(cmd, str):
-            cmd = shlex_split(cmd)
-        return self.fs.mon_manager.raw_cluster_cmd(*cmd)
-
     def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
         if not (moncap or osdcap or mdscap):
             if self.fs:
@@ -450,5 +449,5 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         if mdscap:
             cmd += ['mds', mdscap]
 
-        self.run_cluster_cmd(cmd)
-        return self.run_cluster_cmd(f'auth get {self.client_name}')
+        self.run_ceph_cmd(*cmd)
+        return self.run_ceph_cmd(f'auth get {self.client_name}')
diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index d8ff752b366..2eeb20a2aad 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -190,8 +190,8 @@ class TestFsNew(TestAdminCommands):
 
         # test that fsname not with "goodchars" fails
         args = ['fs', 'new', badname, metapoolname, datapoolname]
-        proc = self.fs.mon_manager.run_cluster_cmd(args=args,stderr=StringIO(),
-                                                   check_status=False)
+        proc = self.run_ceph_cmd(args=args, stderr=StringIO(),
+                                 check_status=False)
         self.assertIn('invalid chars', proc.stderr.getvalue().lower())
 
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
@@ -287,8 +287,8 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+            self.run_ceph_cmd(f'osd pool create {p}')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
         self.fs.status().get_fsmap(fscid)
         for i in range(2):
             self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)
@@ -302,9 +302,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+            self.run_ceph_cmd(f'osd pool create {p}')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
         self.fs.status().get_fsmap(fscid)
@@ -316,9 +316,9 @@ class TestFsNew(TestAdminCommands):
     def test_fs_new_with_specific_id_fails_without_force_flag(self):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
+            self.run_ceph_cmd(f'osd pool create {p}')
         try:
-            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
+            self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid}')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a file system with specifc ID without --force flag")
@@ -335,9 +335,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
+            self.run_ceph_cmd(f'osd pool create {p}')
         try:
-            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
+            self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid {fscid} --force')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a file system with specifc ID that is already in use")
@@ -600,7 +600,7 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         client_id = 'test_new_cephfs'
 
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         # authorize a cephx ID access to the renamed file system.
         # use the ID to write to the file system.
@@ -621,7 +621,7 @@ class TestRenameCommand(TestAdminCommands):
 
         # cleanup
         self.mount_a.umount_wait()
-        self.run_cluster_cmd(f'auth rm client.{client_id}')
+        self.run_ceph_cmd(f'auth rm client.{client_id}')
 
     def test_fs_rename_idempotency(self):
         """
@@ -633,8 +633,8 @@ class TestRenameCommand(TestAdminCommands):
         orig_fs_name = self.fs.name
         new_fs_name = 'new_cephfs'
 
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         # original file system name does not appear in `fs ls` command
         self.assertFalse(self.fs.exists())
@@ -653,10 +653,10 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         data_pool = self.fs.get_data_pool_name()
         metadata_pool = self.fs.get_metadata_pool_name()
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a new file system with old "
@@ -666,7 +666,7 @@ class TestRenameCommand(TestAdminCommands):
                 "existing pools to fail.")
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a new file system with old "
@@ -676,7 +676,7 @@ class TestRenameCommand(TestAdminCommands):
                 "existing pools, and --force flag to fail.")
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
                                  "--allow-dangerous-metadata-overlay")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
@@ -691,7 +691,7 @@ class TestRenameCommand(TestAdminCommands):
        That renaming a file system without '--yes-i-really-mean-it' flag fails.
         """
         try:
-            self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
+            self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EPERM,
                 "invalid error code on renaming a file system without the "
@@ -705,7 +705,7 @@ class TestRenameCommand(TestAdminCommands):
        That renaming a non-existent file system fails.
         """
         try:
-            self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
+            self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
         else:
@@ -718,7 +718,7 @@ class TestRenameCommand(TestAdminCommands):
         self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
 
         try:
-            self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
+            self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on renaming to a fs name that is already in use")
@@ -732,14 +732,14 @@ class TestRenameCommand(TestAdminCommands):
         orig_fs_name = self.fs.name
         new_fs_name = 'new_cephfs'
-        self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}')
         try:
-            self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+            self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EPERM,
                 "invalid error code on renaming a mirrored file system")
         else:
             self.fail("expected renaming of a mirrored file system to fail")
-        self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}')
 
 
 class TestDump(CephFSTestCase):
@@ -1244,10 +1244,10 @@ class TestFsAuthorize(CephFSTestCase):
         fs_name = "cephfs-_."
         self.fs = self.mds_cluster.newfs(name=fs_name)
         self.fs.wait_for_daemons()
-        self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
-                             f'mon "allow r" '
-                             f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
-                             f'mds allow')
+        self.run_ceph_cmd(f'auth caps client.{self.mount_a.client_id} '
+                          f'mon "allow r" '
+                          f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
+                          f'mds allow')
         self.mount_a.remount(cephfs_name=self.fs.name)
         PERM = 'rw'
         FS_AUTH_CAPS = (('/', PERM),)
@@ -1291,7 +1291,7 @@ class TestFsAuthorize(CephFSTestCase):
 
     def tearDown(self):
         self.mount_a.umount_wait()
-        self.run_cluster_cmd(f'auth rm {self.client_name}')
+        self.run_ceph_cmd(f'auth rm {self.client_name}')
 
         super(type(self), self).tearDown()
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index d83187017e3..9b613d727cc 100644
--- a/qa/tasks/cephfs/test_damage.py
+++ b/qa/tasks/cephfs/test_damage.py
@@ -643,7 +643,7 @@ class TestDamage(CephFSTestCase):
         self.fs.rank_freeze(True, rank=0)
         # so now we want to trigger commit but this will crash, so:
         c = ['--connect-timeout=60', 'tell', f"mds.{fscid}:0", "flush", "journal"]
-        p = self.ceph_cluster.mon_manager.run_cluster_cmd(args=c, wait=False, timeoutcmd=30)
+        p = self.run_ceph_cmd(args=c, wait=False, timeoutcmd=30)
         self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(), timeout=self.fs.beacon_timeout)
         self.config_rm("mds", "mds_inject_journal_corrupt_dentry_first")
         self.fs.rank_freeze(False, rank=0)
diff --git a/qa/tasks/cephfs/test_multifs_auth.py b/qa/tasks/cephfs/test_multifs_auth.py
index 7e50c4e1d76..acf434294d1 100644
--- a/qa/tasks/cephfs/test_multifs_auth.py
+++ b/qa/tasks/cephfs/test_multifs_auth.py
@@ -26,15 +26,15 @@ class TestMultiFS(CephFSTestCase):
         # we might have it - the client - if the same cluster was used for a
         # different vstart_runner.py run.
-        self.run_cluster_cmd(f'auth rm {self.client_name}')
+        self.run_ceph_cmd(f'auth rm {self.client_name}')
 
         self.fs1 = self.fs
         self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
 
         # we'll reassign caps to client.1 so that it can operate with cephfs2
-        self.run_cluster_cmd(f'auth caps client.{self.mount_b.client_id} mon '
-                             f'"allow r" osd "allow rw '
-                             f'pool={self.fs2.get_data_pool_name()}" mds allow')
+        self.run_ceph_cmd(f'auth caps client.{self.mount_b.client_id} mon '
+                          f'"allow r" osd "allow rw '
+                          f'pool={self.fs2.get_data_pool_name()}" mds allow')
         self.mount_b.remount(cephfs_name=self.fs2.name)
diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py
index eba175650cc..7619f011550 100644
--- a/qa/tasks/cephfs/test_nfs.py
+++ b/qa/tasks/cephfs/test_nfs.py
@@ -22,10 +22,8 @@ class TestNFS(MgrTestCase):
         return self._cmd("nfs", *args)
 
     def _nfs_complete_cmd(self, cmd):
-        return self.mgr_cluster.mon_manager.run_cluster_cmd(args=f"nfs {cmd}",
-                                                            stdout=StringIO(),
-                                                            stderr=StringIO(),
-                                                            check_status=False)
+        return self.run_ceph_cmd(args=f"nfs {cmd}", stdout=StringIO(),
+                                 stderr=StringIO(), check_status=False)
 
     def _orch_cmd(self, *args):
         return self._cmd("orch", *args)
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 379c175e39b..66a97ed86d5 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -5505,9 +5505,9 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
 
         # try to get metadata after removing snapshot.
         # Expecting error ENOENT with error message of snapshot does not exist
-        cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
-            args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
-            check_status=False, stdout=StringIO(), stderr=StringIO())
+        cmd_ret = self.run_ceph_cmd(
+            args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group], check_status=False, stdout=StringIO(),
+            stderr=StringIO())
         self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
         self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
                 f"Expecting message: snapshot '{snapshot}' does not exist ")