diff options
author | Xiubo Li <xiubli@redhat.com> | 2024-05-14 06:54:15 +0200 |
---|---|---|
committer | Xiubo Li <xiubli@redhat.com> | 2024-06-07 04:19:38 +0200 |
commit | d2645fd157ba8b4faa22b31204d5ff7e86921b2a (patch) | |
tree | f4b0d56b6ab92d9869bf71818327c77915400c28 /qa | |
parent | mds: cleanup the debug logs to make it to be more readable (diff) | |
download | ceph-d2645fd157ba8b4faa22b31204d5ff7e86921b2a.tar.xz ceph-d2645fd157ba8b4faa22b31204d5ff7e86921b2a.zip |
qa/cephfs: add test_session_evict_non_blocklisted test case
When evicting clients or sessions while the
mds_session_blocklist_on_evict option is disabled, the clients should
successfully reconnect to the MDS later, after new IOs are sent.
URL: https://tracker.ceph.com/issues/65647
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Diffstat (limited to 'qa')
-rw-r--r-- | qa/tasks/cephfs/test_sessionmap.py | 43 |
1 file changed, 43 insertions, 0 deletions
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py index b3b88af7246..dbae3295738 100644 --- a/qa/tasks/cephfs/test_sessionmap.py +++ b/qa/tasks/cephfs/test_sessionmap.py @@ -230,3 +230,46 @@ class TestSessionMap(CephFSTestCase): self.mount_a.kill_cleanup() self.mount_a.mount_wait() + + def test_session_evict_non_blocklisted(self): + """ + Check that mds evicts without blocklisting client + """ + + self.config_set('mds', 'mds_session_blocklist_on_evict', False) + self.fs.set_max_mds(2) + status = self.fs.wait_for_daemons() + + self.fs.set_ceph_conf('client', 'client reconnect stale', True) + self.mount_a.remount() + self.mount_b.remount() + + self.mount_a.run_shell_payload("mkdir {d0,d1} && touch {d0,d1}/file") + self.mount_a.setfattr("d0", "ceph.dir.pin", "0") + self.mount_a.setfattr("d1", "ceph.dir.pin", "1") + self._wait_subtrees([('/d0', 0), ('/d1', 1)], status=status) + + self.mount_a.run_shell(["touch", "d0/f0"]) + self.mount_a.run_shell(["touch", "d1/f0"]) + self.mount_b.run_shell(["touch", "d0/f1"]) + self.mount_b.run_shell(["touch", "d1/f1"]) + + self.assert_session_count(2, mds_id=self.fs.get_rank(rank=0, status=status)['name']) + self.assert_session_count(2, mds_id=self.fs.get_rank(rank=1, status=status)['name']) + + mount_a_client_id = self.mount_a.get_global_id() + self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id], + mds_id=self.fs.get_rank(rank=0, status=status)['name']) + + self.mount_a.run_shell(["touch", "d0/f00"]) + self.mount_a.run_shell(["touch", "d1/f00"]) + self.mount_b.run_shell(["touch", "d0/f10"]) + self.mount_b.run_shell(["touch", "d1/f10"]) + + # 10 seconds should be enough for reconnecting the sessions + time.sleep(10) + self.assert_session_count(2, mds_id=self.fs.get_rank(rank=0, status=status)['name']) + self.assert_session_count(2, mds_id=self.fs.get_rank(rank=1, status=status)['name']) + + self.mount_a.kill_cleanup() + self.mount_a.mount_wait() |