author     Rishabh Dave <ridave@redhat.com>    2024-07-19 16:00:39 +0200
committer  Rishabh Dave <ridave@redhat.com>    2024-09-26 17:35:48 +0200
commit     9e3449995f8c21fd3bde6308517aebcd79478988 (patch)
tree       d81db9a500617ce00e5ef70cc1e87544ae9f5334 /qa
parent     Merge pull request #59891 from rhcs-dashboard/fix-68162-main (diff)
qa/cephfs: test clone progress reporter after subvol is deleted but...

snapshot is retained despite deletion (using the --retain-snapshots option of
the "subvolume rm" command).

Signed-off-by: Rishabh Dave <ridave@redhat.com>
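In CLI terms, the scenario being tested boils down to the sequence below. This
is only an illustrative sketch of the commands the new test issues through
run_ceph_cmd(); it assumes a running Ceph cluster with an existing CephFS
volume, and the volume/subvolume/snapshot/clone names are the placeholders
used by the test.

    # Illustrative sketch of the tested scenario; assumes a running Ceph
    # cluster with an existing CephFS volume. Names are placeholders.
    import subprocess

    def ceph(cmd):
        subprocess.run(['ceph'] + cmd.split(), check=True)

    v, sv, ss, c = 'cephfs', 'sv1', 'ss1', 'ss1clone1'

    ceph(f'fs subvolume create {v} {sv} --mode=777')
    ceph(f'fs subvolume snapshot create {v} {sv} {ss}')
    # remove the subvolume but retain its snapshot
    ceph(f'fs subvolume rm {v} {sv} --retain-snapshots')
    # cloning from the retained snapshot is still possible; the clone
    # progress reporter is expected to show a progress bar for it
    ceph(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')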
Diffstat (limited to 'qa')
-rw-r--r--   qa/tasks/cephfs/test_volumes.py   69
1 file changed, 68 insertions(+), 1 deletion(-)
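The heart of the new test is the assertion that, while the clone is in
progress, the "progress_events" dict in "ceph status" output contains exactly
one entry for it. The following is a rough standalone illustration of that
check; it is an assumption about the JSON layout based on the test's assertion
messages, not the qa helper get_pevs_from_ceph_status used below.

    # Rough sketch; assumes "ceph status --format json" exposes a
    # "progress_events" dict whose entries carry a human-readable "message",
    # as the assertions in the test below imply.
    import json
    import subprocess

    out = subprocess.check_output(['ceph', 'status', '--format', 'json'])
    pevs = json.loads(out).get('progress_events', {})
    clone_pevs = {k: v for k, v in pevs.items()
                  if 'clone' in v.get('message', '')}
    print(f'{len(clone_pevs)} clone progress bar(s) reported by "ceph status"')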
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 2baefd72c3f..6a4e2594825 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -7876,7 +7876,22 @@ class TestCloneProgressReporter(TestVolumesHelper):
self.run_ceph_cmd('fs subvolume snapshot rm --force '
f'--format json {v} {sv} {ss}')
- self.run_ceph_cmd(f'fs subvolume rm {v} {sv}')
+ try:
+ self.run_ceph_cmd(f'fs subvolume rm {v} {sv}')
+ except CommandFailedError as e:
+ if e.exitstatus == errno.ENOENT:
+ log.info(
+ 'ignoring this error; the subvolume was probably deleted '
+ 'during the test and the snapshot deleted above was a '
+ 'retained snapshot (that is, a snapshot kept despite '
+ 'deletion of its subvolume). when a retained snapshot is '
+ 'deleted, the subvolume directory is deleted along with it. '
+ 'and before the retained snapshot was deleted, the subvolume '
+ 'was still reported by the "subvolume ls" command, which is '
+ 'probably what caused the confusion here')
+ pass
+ else:
+ raise
# verify trash dir is clean
self._wait_for_trash_empty()
@@ -8090,6 +8105,58 @@ class TestCloneProgressReporter(TestVolumesHelper):
# and not cancelling these clones doesn't affect this test case.
self.cancel_clones_and_ignore_if_finished(c)
+ def test_clone_after_subvol_is_removed(self):
+ '''
+ Initiate a clone after the source subvolume has been deleted with its
+ snapshot retained, and then test that, while this clone is in progress,
+ exactly one progress bar showing the clone's progress is printed in the
+ output of the "ceph status" command.
+ '''
+ v = self.volname
+ sv = 'sv1'
+ ss = 'ss1'
+ # XXX: "clone" must be part of clone name for sake of tearDown()
+ c = 'ss1clone1'
+
+ # XXX: without setting mds_snap_rstat to true, rstats are not updated on
+ # a subvolume snapshot and therefore the clone progress bar will not
+ # show any progress.
+ self.config_set('mds', 'mds_snap_rstat', 'true')
+
+ self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+ size = self._do_subvolume_io(sv, None, None, 10, 1024)
+
+ self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+ self.wait_till_rbytes_is_right(v, sv, size)
+
+ self.run_ceph_cmd(f'fs subvolume rm {v} {sv} --retain-snapshots')
+ self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+
+ with safe_while(tries=15, sleep=10) as proceed:
+ while proceed():
+ pev = self.get_pevs_from_ceph_status(c)
+
+ if len(pev) < 1:
+ continue
+ elif len(pev) > 1:
+ raise RuntimeError('For 1 clone, "ceph status" output has more '
+ 'than 1 progress bar; it should have only '
+ f'1 progress bar.\npev -\n{pev}')
+
+ # ensure that exactly 1 progress bar for cloning is present in
+ # "ceph status" output
+ msg = ('"progress_events" dict in "ceph status" output must have '
+ f'exactly one entry.\nprogress_event dict -\n{pev}')
+ self.assertEqual(len(pev), 1, msg)
+
+ pev_msg = tuple(pev.values())[0]['message']
+ self.assertIn('1 ongoing clones', pev_msg)
+ break
+
+ # allowing clone jobs to finish would consume too much time and space,
+ # and not cancelling these clones doesn't affect this test case.
+ self.cancel_clones_and_ignore_if_finished(c)
+
def test_clones_equal_to_cloner_threads(self):
'''
Test that one progress bar is printed in the output of "ceph status"