Diffstat (limited to 'qa/tasks/cephfs/test_volumes.py')
-rw-r--r--  qa/tasks/cephfs/test_volumes.py  75
1 file changed, 71 insertions(+), 4 deletions(-)
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 2baefd72c3f..2ee3b6ac052 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -2388,7 +2388,7 @@ class TestSubvolumes(TestVolumesHelper):
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# set earmark
- earmark = "smb.test"
+ earmark = "smb"
self._fs_cmd("subvolume", "earmark", "set", self.volname, subvolume, "--earmark", earmark)
# get earmark
@@ -2401,7 +2401,7 @@ class TestSubvolumes(TestVolumesHelper):
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# set earmark
- earmark = "smb.test"
+ earmark = "smb"
self._fs_cmd("subvolume", "earmark", "set", self.volname, subvolume, "--earmark", earmark)
# remove earmark
@@ -2559,7 +2559,7 @@ class TestSubvolumes(TestVolumesHelper):
self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
# set earmark
- earmark = "smb.test"
+ earmark = "smb"
self._fs_cmd("subvolume", "earmark", "set", self.volname, subvolume, "--earmark", earmark)
subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
@@ -7876,7 +7876,22 @@ class TestCloneProgressReporter(TestVolumesHelper):
self.run_ceph_cmd('fs subvolume snapshot rm --force '
f'--format json {v} {sv} {ss}')
- self.run_ceph_cmd(f'fs subvolume rm {v} {sv}')
+ try:
+ self.run_ceph_cmd(f'fs subvolume rm {v} {sv}')
+ except CommandFailedError as e:
+ if e.exitstatus == errno.ENOENT:
+ log.info(
+ 'ignoring this error; perhaps the subvolume was deleted '
+ 'during the test and the snapshot deleted above was a '
+ 'retained snapshot (that is, a snapshot retained despite '
+ 'the deletion of its subvolume). when a retained snapshot '
+ 'is deleted, the subvolume directory is deleted along '
+ 'with it. and before the retained snapshot is deleted, '
+ 'the subvolume is still reported by the "subvolume ls" '
+ 'command, which is probably what caused the confusion here')
+ pass
+ else:
+ raise
# verify trash dir is clean
self._wait_for_trash_empty()
@@ -8090,6 +8105,58 @@ class TestCloneProgressReporter(TestVolumesHelper):
# and cancelling these clones doesn't affect this test case.
self.cancel_clones_and_ignore_if_finished(c)
+ def test_clone_after_subvol_is_removed(self):
+ '''
+ Initiate a clone after the source subvolume has been deleted with its
+ snapshots retained, and then test that, while this clone is in
+ progress, one progress bar is printed in the output of the command
+ "ceph status" showing the progress of this clone.
+ '''
+ v = self.volname
+ sv = 'sv1'
+ ss = 'ss1'
+ # XXX: "clone" must be part of the clone name for the sake of tearDown()
+ c = 'ss1clone1'
+
+ # XXX: without setting mds_snap_rstat to true, rstats are not updated
+ # on a subvolume snapshot and therefore the clone progress bar will
+ # not show any progress.
+ self.config_set('mds', 'mds_snap_rstat', 'true')
+
+ self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+ size = self._do_subvolume_io(sv, None, None, 10, 1024)
+
+ self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+ self.wait_till_rbytes_is_right(v, sv, size)
+
+ self.run_ceph_cmd(f'fs subvolume rm {v} {sv} --retain-snapshots')
+ self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+
+ with safe_while(tries=15, sleep=10) as proceed:
+ while proceed():
+ pev = self.get_pevs_from_ceph_status(c)
+
+ if len(pev) < 1:
+ continue
+ elif len(pev) > 1:
+ raise RuntimeError('For 1 clone, "ceph status" output has more '
+ 'than 1 progress bar; it should have exactly '
+ f'1 progress bar.\npev -\n{pev}')
+
+ # ensure that exactly 1 progress bar for cloning is present in
+ # "ceph status" output
+ msg = ('"progress_events" dict in "ceph status" output must have '
+ f'exactly one entry.\nprogress_event dict -\n{pev}')
+ self.assertEqual(len(pev), 1, msg)
+
+ pev_msg = tuple(pev.values())[0]['message']
+ self.assertIn('1 ongoing clones', pev_msg)
+ break
+
+ # allowing clone jobs to finish will consume too much time and space,
+ # and cancelling these clones doesn't affect this test case.
+ self.cancel_clones_and_ignore_if_finished(c)
+
def test_clones_equal_to_cloner_threads(self):
'''
Test that one progress bar is printed in the output of "ceph status"