author     Adam King <47704447+adk3798@users.noreply.github.com>  2024-09-17 15:08:01 +0200
committer  GitHub <noreply@github.com>  2024-09-17 15:08:01 +0200
commit     4adee01313d2104a80db803ac11d5b2d46dbea6e (patch)
tree       e5f2985903ae2049e15512e6b61498eb6d9533ea
parent     Merge pull request #58728 from ifed01/wip-ifed-ret-error-kv-stats (diff)
parent     doc: nit fixes for nfs doc (diff)
Merge pull request #54277 from rhcs-dashboard/nfs-export-apply-fix
mgr/nfs: generate user_id & access_key for apply_export(CephFS)

Reviewed-by: Adam King <adking@redhat.com>
Reviewed-by: Dhairya Parmar <dparmar@redhat.com>
Reviewed-by: John Mulligan <jmulligan@redhat.com>
-rw-r--r--  PendingReleaseNotes                   |   4
-rw-r--r--  doc/mgr/nfs.rst                       |  19
-rw-r--r--  qa/tasks/cephfs/test_nfs.py           | 110
-rw-r--r--  src/pybind/mgr/nfs/export.py          | 199
-rw-r--r--  src/pybind/mgr/nfs/ganesha_conf.py    |  20
-rw-r--r--  src/pybind/mgr/nfs/module.py          |   2
-rw-r--r--  src/pybind/mgr/nfs/tests/test_nfs.py  | 174
7 files changed, 392 insertions(+), 136 deletions(-)
diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index ad8ac0ed81a..59692fe83a2 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -279,6 +279,10 @@ CephFS: Disallow delegating preallocated inode ranges to clients. Config
* RGW: in bucket notifications, the `principalId` inside `ownerIdentity` now contains
complete user id, prefixed with tenant id
+* NFS: The export create/apply of CephFS-based exports now accepts an additional parameter, `cmount_path`, under the FSAL block,
+  which specifies the path within the CephFS file system at which this export is mounted. If this and the other
+  `EXPORT { FSAL {} }` options are identical across multiple exports, those exports will share a single CephFS client. If not specified, the default is `/`.
+
>=18.0.0
* The RGW policy parser now rejects unknown principals by default. If you are
diff --git a/doc/mgr/nfs.rst b/doc/mgr/nfs.rst
index 746ab4247f3..3077ea40aa6 100644
--- a/doc/mgr/nfs.rst
+++ b/doc/mgr/nfs.rst
@@ -283,7 +283,7 @@ Create CephFS Export
.. code:: bash
- $ ceph nfs export create cephfs --cluster-id <cluster_id> --pseudo-path <pseudo_path> --fsname <fsname> [--readonly] [--path=/path/in/cephfs] [--client_addr <value>...] [--squash <value>] [--sectype <value>...]
+ $ ceph nfs export create cephfs --cluster-id <cluster_id> --pseudo-path <pseudo_path> --fsname <fsname> [--readonly] [--path=/path/in/cephfs] [--client_addr <value>...] [--squash <value>] [--sectype <value>...] [--cmount_path <value>]
This creates export RADOS objects containing the export block, where
@@ -318,6 +318,12 @@ values may be separated by a comma (example: ``--sectype krb5p,krb5i``). The
server will negotiate a supported security type with the client, preferring
the supplied methods left-to-right.
+``<cmount_path>`` specifies the path within the CephFS file system at which this export is mounted. It may be
+any complete path hierarchy between ``/`` and the ``EXPORT {path}`` (i.e. if the ``EXPORT { Path }`` parameter is ``/foo/bar``, then ``cmount_path`` may be ``/``, ``/foo`` or ``/foo/bar``).
+
+.. note:: If this and the other ``EXPORT { FSAL {} }`` options are identical across multiple exports, those exports will share a single CephFS client.
+   If not specified, the default is ``/``.
+
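For example, with an ``EXPORT { path }`` of ``/foo/bar``, the rule above amounts to a simple containment check. A minimal sketch mirroring the ``_validate_cmount_path`` helper added to ``src/pybind/mgr/nfs/export.py`` later in this diff (the function name here is illustrative)::

    def cmount_path_is_valid(cmount_path: str, path: str) -> bool:
        # "/" is always accepted (it is the default); otherwise cmount_path
        # must be contained in the export path.
        return cmount_path == "/" or cmount_path in path

    assert cmount_path_is_valid("/", "/foo/bar")
    assert cmount_path_is_valid("/foo", "/foo/bar")
    assert cmount_path_is_valid("/foo/bar", "/foo/bar")
    assert not cmount_path_is_valid("/baz", "/foo/bar")   # rejected with "Invalid cmount_path"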
.. note:: Specifying values for sectype that require Kerberos will only function on servers
that are configured to support Kerberos. Setting up NFS-Ganesha to support Kerberos
is outside the scope of this document.
@@ -477,9 +483,9 @@ For example,::
],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.mynfs.1",
"fs_name": "a",
- "sec_label_xattr": ""
+ "sec_label_xattr": "",
+ "cmount_path": "/"
},
"clients": []
}
@@ -494,6 +500,9 @@ as when creating a new export), with the exception of the
authentication credentials, which will be carried over from the
previous state of the export where possible.
+.. note:: The ``user_id`` in the ``fsal`` block should not be set or modified in the JSON file;
+   it is auto-generated for CephFS exports in the format ``nfs.<cluster_id>.<fs_name>.<hash_id>``.
+
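For reference, the auto-generated credential is derived from the cluster id, file system name and ``cmount_path`` (see ``get_user_id`` in ``src/pybind/mgr/nfs/export.py`` later in this diff). A minimal standalone sketch, with an illustrative helper name::

    import hashlib

    def nfs_user_id(cluster_id: str, fs_name: str, cmount_path: str = "/") -> str:
        # SHA-1 of "cluster:fs:cmount_path", truncated to 8 hex characters,
        # mirroring get_user_id() added in this PR.
        digest = hashlib.sha1(f"{cluster_id}:{fs_name}:{cmount_path}".encode("utf-8")).hexdigest()
        return f"nfs.{cluster_id}.{fs_name}.{digest[:8]}"

    # The qa tests in this PR expect "nfs.test.nfs-cephfs.3746f603" for
    # cluster "test", fs "nfs-cephfs" and the default cmount_path "/".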
::
$ ceph nfs export apply mynfs -i update_cephfs_export.json
@@ -514,9 +523,9 @@ previous state of the export where possible.
],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.mynfs.1",
"fs_name": "a",
- "sec_label_xattr": ""
+ "sec_label_xattr": "",
+ "cmount_path": "/"
},
"clients": []
}
diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py
index 6d1c65dfb7d..932d504d47f 100644
--- a/qa/tasks/cephfs/test_nfs.py
+++ b/qa/tasks/cephfs/test_nfs.py
@@ -14,6 +14,8 @@ log = logging.getLogger(__name__)
NFS_POOL_NAME = '.nfs' # should match mgr_module.py
# TODO Add test for cluster update when ganesha can be deployed on multiple ports.
+
+
class TestNFS(MgrTestCase):
def _cmd(self, *args):
return self.get_ceph_cmd_stdout(args)
@@ -59,8 +61,9 @@ class TestNFS(MgrTestCase):
],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.1",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": self.fs_name,
+ "cmount_path": "/",
},
"clients": []
}
@@ -118,7 +121,7 @@ class TestNFS(MgrTestCase):
return
self.fail(fail_msg)
- def _check_auth_ls(self, export_id=1, check_in=False):
+ def _check_auth_ls(self, fs_name, check_in=False, user_id=None):
'''
Tests export user id creation or deletion.
        :param fs_name: Denotes the CephFS file system name whose export user is checked
@@ -126,10 +129,12 @@ class TestNFS(MgrTestCase):
'''
output = self._cmd('auth', 'ls')
client_id = f'client.nfs.{self.cluster_id}'
+ search_id = f'client.{user_id}' if user_id else f'{client_id}.{fs_name}'
+
if check_in:
- self.assertIn(f'{client_id}.{export_id}', output)
+ self.assertIn(search_id, output)
else:
- self.assertNotIn(f'{client_id}.{export_id}', output)
+ self.assertNotIn(search_id, output)
def _test_idempotency(self, cmd_func, cmd_args):
'''
@@ -216,7 +221,7 @@ class TestNFS(MgrTestCase):
# Runs the nfs export create command
self._cmd(*export_cmd)
# Check if user id for export is created
- self._check_auth_ls(export_id, check_in=True)
+ self._check_auth_ls(self.fs_name, check_in=True)
res = self._sys_cmd(['rados', '-p', NFS_POOL_NAME, '-N', self.cluster_id, 'get',
f'export-{export_id}', '-'])
# Check if export object is created
@@ -230,12 +235,12 @@ class TestNFS(MgrTestCase):
self._test_create_cluster()
self._create_export(export_id='1', create_fs=True)
- def _delete_export(self):
+    def _delete_export(self, pseudo_path=None, check_in=False, user_id=None):
'''
Delete an export.
'''
- self._nfs_cmd('export', 'rm', self.cluster_id, self.pseudo_path)
- self._check_auth_ls()
+        self._nfs_cmd('export', 'rm', self.cluster_id, pseudo_path if pseudo_path else self.pseudo_path)
+ self._check_auth_ls(self.fs_name, check_in, user_id)
def _test_list_export(self):
'''
@@ -256,26 +261,27 @@ class TestNFS(MgrTestCase):
self.sample_export['export_id'] = 2
self.sample_export['pseudo'] = self.pseudo_path + '1'
self.sample_export['access_type'] = 'RO'
- self.sample_export['fsal']['user_id'] = f'{self.expected_name}.2'
+ self.sample_export['fsal']['user_id'] = f'{self.expected_name}.{self.fs_name}.3746f603'
self.assertDictEqual(self.sample_export, nfs_output[1])
# Export-3 for subvolume with r only
self.sample_export['export_id'] = 3
self.sample_export['path'] = sub_vol_path
self.sample_export['pseudo'] = self.pseudo_path + '2'
- self.sample_export['fsal']['user_id'] = f'{self.expected_name}.3'
+ self.sample_export['fsal']['user_id'] = f'{self.expected_name}.{self.fs_name}.3746f603'
self.assertDictEqual(self.sample_export, nfs_output[2])
# Export-4 for subvolume
self.sample_export['export_id'] = 4
self.sample_export['pseudo'] = self.pseudo_path + '3'
self.sample_export['access_type'] = 'RW'
- self.sample_export['fsal']['user_id'] = f'{self.expected_name}.4'
+ self.sample_export['fsal']['user_id'] = f'{self.expected_name}.{self.fs_name}.3746f603'
self.assertDictEqual(self.sample_export, nfs_output[3])
- def _get_export(self):
+ def _get_export(self, pseudo_path=None):
'''
Returns export block in json format
'''
- return json.loads(self._nfs_cmd('export', 'info', self.cluster_id, self.pseudo_path))
+ return json.loads(self._nfs_cmd('export', 'info', self.cluster_id,
+ pseudo_path if pseudo_path else self.pseudo_path))
def _test_get_export(self):
'''
@@ -506,7 +512,7 @@ class TestNFS(MgrTestCase):
self._test_delete_cluster()
# Check if rados ganesha conf object is deleted
self._check_export_obj_deleted(conf_obj=True)
- self._check_auth_ls()
+ self._check_auth_ls(self.fs_name)
def test_exports_on_mgr_restart(self):
'''
@@ -935,7 +941,7 @@ class TestNFS(MgrTestCase):
"protocols": [4],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.1",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": self.fs_name
}
},
@@ -948,7 +954,7 @@ class TestNFS(MgrTestCase):
"protocols": [4],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.2",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": "invalid_fs_name" # invalid fs
}
},
@@ -961,7 +967,7 @@ class TestNFS(MgrTestCase):
"protocols": [4],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.3",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": self.fs_name
}
}
@@ -1008,7 +1014,7 @@ class TestNFS(MgrTestCase):
"protocols": [4],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.1",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": "invalid_fs_name" # invalid fs
}
}
@@ -1048,7 +1054,7 @@ class TestNFS(MgrTestCase):
"protocols": [4],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.1",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": self.fs_name
}
},
@@ -1061,7 +1067,7 @@ class TestNFS(MgrTestCase):
"protocols": [4],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.2",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": self.fs_name
}
},
@@ -1075,7 +1081,7 @@ class TestNFS(MgrTestCase):
"protocols": [4],
"fsal": {
"name": "CEPH",
- "user_id": "nfs.test.3",
+ "user_id": "nfs.test.nfs-cephfs.3746f603",
"fs_name": "invalid_fs_name"
}
}
@@ -1211,3 +1217,65 @@ class TestNFS(MgrTestCase):
finally:
self.ctx.cluster.run(args=['rm', '-rf', f'{mnt_pt}/*'])
self._delete_cluster_with_fs(self.fs_name, mnt_pt, preserve_mode)
+
+ def test_nfs_export_creation_without_cmount_path(self):
+ """
+        Test that cmount_path is present in the FSAL block even when it is not specified at creation
+ """
+ self._create_cluster_with_fs(self.fs_name)
+
+ pseudo_path = '/test_without_cmount'
+ self._create_export(export_id='1',
+ extra_cmd=['--pseudo-path', pseudo_path])
+ nfs_output = self._get_export(pseudo_path)
+ self.assertIn('cmount_path', nfs_output['fsal'])
+
+ self._delete_export(pseudo_path)
+
+ def test_nfs_exports_with_same_and_diff_user_id(self):
+ """
+        Test that exports with the same FSAL block share the same user_id, while a different cmount_path yields a different one
+ """
+ self._create_cluster_with_fs(self.fs_name)
+
+ pseudo_path_1 = '/test1'
+ pseudo_path_2 = '/test2'
+ pseudo_path_3 = '/test3'
+
+ # Create subvolumes
+ self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol_1')
+ self._cmd('fs', 'subvolume', 'create', self.fs_name, 'sub_vol_2')
+
+ fs_path_1 = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol_1').strip()
+ fs_path_2 = self._cmd('fs', 'subvolume', 'getpath', self.fs_name, 'sub_vol_2').strip()
+        # Both exports should have the same user_id (since cmount_path is "/" and fs_name is the same)
+ self._create_export(export_id='1',
+ extra_cmd=['--pseudo-path', pseudo_path_1,
+ '--path', fs_path_1])
+ self._create_export(export_id='2',
+ extra_cmd=['--pseudo-path', pseudo_path_2,
+ '--path', fs_path_2])
+
+ nfs_output_1 = self._get_export(pseudo_path_1)
+ nfs_output_2 = self._get_export(pseudo_path_2)
+ # Check if both exports have same user_id
+ self.assertEqual(nfs_output_2['fsal']['user_id'], nfs_output_1['fsal']['user_id'])
+ self.assertEqual(nfs_output_1['fsal']['user_id'], 'nfs.test.nfs-cephfs.3746f603')
+
+ cmount_path = '/volumes'
+ self._create_export(export_id='3',
+ extra_cmd=['--pseudo-path', pseudo_path_3,
+ '--path', fs_path_1,
+ '--cmount-path', cmount_path])
+
+ nfs_output_3 = self._get_export(pseudo_path_3)
+ self.assertNotEqual(nfs_output_3['fsal']['user_id'], nfs_output_1['fsal']['user_id'])
+ self.assertEqual(nfs_output_3['fsal']['user_id'], 'nfs.test.nfs-cephfs.32cd8545')
+
+        # Deleting export 1 should not delete the user_id, since export 2 still shares it
+        self._delete_export(pseudo_path_1, True, nfs_output_1['fsal']['user_id'])
+        # Deleting export 2 should delete the user_id, since it is the only export left with that user_id
+        self._delete_export(pseudo_path_2, False, nfs_output_2['fsal']['user_id'])
+
+        # Deleting export 3 should delete its user_id, since it is the only export with that user_id
+        self._delete_export(pseudo_path_3, False, nfs_output_3['fsal']['user_id'])
diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py
index 35a0fd88f6e..3ba75e60b5c 100644
--- a/src/pybind/mgr/nfs/export.py
+++ b/src/pybind/mgr/nfs/export.py
@@ -1,4 +1,5 @@
import errno
+import hashlib
import json
import logging
from typing import (
@@ -76,6 +77,15 @@ def validate_cephfs_path(mgr: 'Module', fs_name: str, path: str) -> None:
raise NFSException(e.args[1], -e.args[0])
+def _validate_cmount_path(cmount_path: str, path: str) -> None:
+ if cmount_path not in path:
+ raise ValueError(
+ f"Invalid cmount_path: '{cmount_path}'. The path '{path}' is not within the mount path. "
+ f"Please ensure that the cmount_path includes the specified path '{path}'. "
+ "It is allowed to be any complete path hierarchy between / and the EXPORT {path}."
+ )
+
+
class NFSRados:
def __init__(self, rados: 'Rados', namespace: str) -> None:
self.rados = rados
@@ -267,42 +277,43 @@ class ExportMgr:
# do nothing; we're using the bucket owner creds.
pass
- def _create_export_user(self, export: Export) -> None:
- if isinstance(export.fsal, CephFSFSAL):
- fsal = cast(CephFSFSAL, export.fsal)
- assert fsal.fs_name
- fsal.user_id = f"nfs.{export.cluster_id}.{export.export_id}"
- fsal.cephx_key = self._create_user_key(
- export.cluster_id, fsal.user_id, export.path, fsal.fs_name
+ def _create_rgw_export_user(self, export: Export) -> None:
+ rgwfsal = cast(RGWFSAL, export.fsal)
+ if not rgwfsal.user_id:
+ assert export.path
+ ret, out, err = self.mgr.tool_exec(
+ ['radosgw-admin', 'bucket', 'stats', '--bucket', export.path]
)
- log.debug("Successfully created user %s for cephfs path %s", fsal.user_id, export.path)
-
- elif isinstance(export.fsal, RGWFSAL):
- rgwfsal = cast(RGWFSAL, export.fsal)
- if not rgwfsal.user_id:
- assert export.path
- ret, out, err = self.mgr.tool_exec(
- ['radosgw-admin', 'bucket', 'stats', '--bucket', export.path]
- )
- if ret:
- raise NFSException(f'Failed to fetch owner for bucket {export.path}')
- j = json.loads(out)
- owner = j.get('owner', '')
- rgwfsal.user_id = owner
- assert rgwfsal.user_id
- ret, out, err = self.mgr.tool_exec([
- 'radosgw-admin', 'user', 'info', '--uid', rgwfsal.user_id
- ])
if ret:
- raise NFSException(
- f'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
- )
+ raise NFSException(f'Failed to fetch owner for bucket {export.path}')
j = json.loads(out)
+ owner = j.get('owner', '')
+ rgwfsal.user_id = owner
+ assert rgwfsal.user_id
+ ret, out, err = self.mgr.tool_exec([
+ 'radosgw-admin', 'user', 'info', '--uid', rgwfsal.user_id
+ ])
+ if ret:
+ raise NFSException(
+ f'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
+ )
+ j = json.loads(out)
+
+            # FIXME: make this more tolerant of unexpected output?
+ rgwfsal.access_key_id = j['keys'][0]['access_key']
+ rgwfsal.secret_access_key = j['keys'][0]['secret_key']
+ log.debug("Successfully fetched user %s for RGW path %s", rgwfsal.user_id, export.path)
- # FIXME: make this more tolerate of unexpected output?
- rgwfsal.access_key_id = j['keys'][0]['access_key']
- rgwfsal.secret_access_key = j['keys'][0]['secret_key']
- log.debug("Successfully fetched user %s for RGW path %s", rgwfsal.user_id, export.path)
+ def _ensure_cephfs_export_user(self, export: Export) -> None:
+ fsal = cast(CephFSFSAL, export.fsal)
+ assert fsal.fs_name
+ assert fsal.cmount_path
+
+ fsal.user_id = f"nfs.{get_user_id(export.cluster_id, fsal.fs_name, fsal.cmount_path)}"
+ fsal.cephx_key = self._create_user_key(
+ export.cluster_id, fsal.user_id, fsal.cmount_path, fsal.fs_name
+ )
+ log.debug(f"Established user {fsal.user_id} for cephfs {fsal.fs_name}")
def _gen_export_id(self, cluster_id: str) -> int:
exports = sorted([ex.export_id for ex in self.exports[cluster_id]])
@@ -350,11 +361,18 @@ class ExportMgr:
export = self._fetch_export(cluster_id, pseudo_path)
if export:
+ exports_count = 0
+ if export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
+ exports_count = self.get_export_count_with_same_fsal(export.fsal.cmount_path, # type: ignore
+ cluster_id, export.fsal.fs_name) # type: ignore
+ if exports_count == 1:
+ self._delete_export_user(export)
if pseudo_path:
self._rados(cluster_id).remove_obj(
export_obj_name(export.export_id), conf_obj_name(cluster_id))
self.exports[cluster_id].remove(export)
- self._delete_export_user(export)
+ if export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
+ self._delete_export_user(export)
if not self.exports[cluster_id]:
del self.exports[cluster_id]
log.debug("Deleted all exports for cluster %s", cluster_id)
@@ -598,31 +616,24 @@ class ExportMgr:
log.info("Export user updated %s", user_id)
- def _create_user_key(
- self,
- cluster_id: str,
- entity: str,
- path: str,
- fs_name: str,
- ) -> str:
- osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
- self.rados_pool, cluster_id, fs_name)
+ def _create_user_key(self, cluster_id: str, entity: str, path: str, fs_name: str) -> str:
+ osd_cap = f'allow rw pool={self.rados_pool} namespace={cluster_id}, allow rw tag cephfs data={fs_name}'
nfs_caps = [
'mon', 'allow r',
'osd', osd_cap,
- 'mds', 'allow rw path={}'.format(path)
+ 'mds', f'allow rw path={path}'
]
ret, out, err = self.mgr.mon_command({
'prefix': 'auth get-or-create',
- 'entity': 'client.{}'.format(entity),
+ 'entity': f'client.{entity}',
'caps': nfs_caps,
'format': 'json',
})
if ret == -errno.EINVAL and 'does not match' in err:
ret, out, err = self.mgr.mon_command({
'prefix': 'auth caps',
- 'entity': 'client.{}'.format(entity),
+ 'entity': f'client.{entity}',
'caps': nfs_caps,
'format': 'json',
})
@@ -630,14 +641,14 @@ class ExportMgr:
raise NFSException(f'Failed to update caps for {entity}: {err}')
ret, out, err = self.mgr.mon_command({
'prefix': 'auth get',
- 'entity': 'client.{}'.format(entity),
+ 'entity': f'client.{entity}',
'format': 'json',
})
if err:
raise NFSException(f'Failed to fetch caps for {entity}: {err}')
json_res = json.loads(out)
- log.info("Export user created is %s", json_res[0]['entity'])
+ log.info(f"Export user created is {json_res[0]['entity']}")
return json_res[0]['key']
def create_export_from_dict(self,
@@ -666,8 +677,10 @@ class ExportMgr:
raise FSNotFound(fs_name)
validate_cephfs_path(self.mgr, fs_name, path)
+ if fsal["cmount_path"] != "/":
+ _validate_cmount_path(fsal["cmount_path"], path) # type: ignore
- user_id = f"nfs.{cluster_id}.{ex_id}"
+ user_id = f"nfs.{get_user_id(cluster_id, fs_name, fsal['cmount_path'])}"
if "user_id" in fsal and fsal["user_id"] != user_id:
raise NFSInvalidOperation(f"export FSAL user_id must be '{user_id}'")
else:
@@ -677,6 +690,8 @@ class ExportMgr:
ex_dict["fsal"] = fsal
ex_dict["cluster_id"] = cluster_id
export = Export.from_dict(ex_id, ex_dict)
+ if export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
+ self._ensure_cephfs_export_user(export)
export.validate(self.mgr)
log.debug("Successfully created %s export-%s from dict for cluster %s",
fsal_type, ex_id, cluster_id)
@@ -691,9 +706,12 @@ class ExportMgr:
squash: str,
access_type: str,
clients: list = [],
- sectype: Optional[List[str]] = None) -> Dict[str, Any]:
+ sectype: Optional[List[str]] = None,
+ cmount_path: Optional[str] = "/") -> Dict[str, Any]:
validate_cephfs_path(self.mgr, fs_name, path)
+ if cmount_path != "/":
+ _validate_cmount_path(cmount_path, path) # type: ignore
pseudo_path = normalize_path(pseudo_path)
@@ -708,6 +726,7 @@ class ExportMgr:
"squash": squash,
"fsal": {
"name": NFS_GANESHA_SUPPORTED_FSALS[0],
+ "cmount_path": cmount_path,
"fs_name": fs_name,
},
"clients": clients,
@@ -715,7 +734,7 @@ class ExportMgr:
}
)
log.debug("creating cephfs export %s", export)
- self._create_export_user(export)
+ self._ensure_cephfs_export_user(export)
self._save_export(cluster_id, export)
result = {
"bind": export.pseudo,
@@ -760,7 +779,7 @@ class ExportMgr:
}
)
log.debug("creating rgw export %s", export)
- self._create_export_user(export)
+ self._create_rgw_export_user(export)
self._save_export(cluster_id, export)
result = {
"bind": export.pseudo,
@@ -803,6 +822,15 @@ class ExportMgr:
log.debug("export %s pseudo %s -> %s",
old_export.export_id, old_export.pseudo, new_export_dict['pseudo'])
+ fsal_dict = new_export_dict.get('fsal')
+ if fsal_dict and fsal_dict['name'] == NFS_GANESHA_SUPPORTED_FSALS[0]:
+ # Ensure cmount_path is present in CephFS FSAL block
+ if not fsal_dict.get('cmount_path'):
+ if old_export:
+ new_export_dict['fsal']['cmount_path'] = old_export.fsal.cmount_path
+ else:
+ new_export_dict['fsal']['cmount_path'] = '/'
+
new_export = self.create_export_from_dict(
cluster_id,
new_export_dict.get('export_id', self._gen_export_id(cluster_id)),
@@ -810,7 +838,8 @@ class ExportMgr:
)
if not old_export:
- self._create_export_user(new_export)
+ if new_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]: # only for RGW
+ self._create_rgw_export_user(new_export)
self._save_export(cluster_id, new_export)
return {"pseudo": new_export.pseudo, "state": "added"}
@@ -824,48 +853,18 @@ class ExportMgr:
if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
old_fsal = cast(CephFSFSAL, old_export.fsal)
new_fsal = cast(CephFSFSAL, new_export.fsal)
- if old_fsal.user_id != new_fsal.user_id:
- self._delete_export_user(old_export)
- self._create_export_user(new_export)
- elif (
- old_export.path != new_export.path
- or old_fsal.fs_name != new_fsal.fs_name
- ):
- self._update_user_id(
- cluster_id,
- new_export.path,
- cast(str, new_fsal.fs_name),
- cast(str, new_fsal.user_id)
- )
- new_fsal.cephx_key = old_fsal.cephx_key
- else:
- expected_mds_caps = 'allow rw path={}'.format(new_export.path)
- entity = new_fsal.user_id
- ret, out, err = self.mgr.mon_command({
- 'prefix': 'auth get',
- 'entity': 'client.{}'.format(entity),
- 'format': 'json',
- })
- if ret:
- raise NFSException(f'Failed to fetch caps for {entity}: {err}')
- actual_mds_caps = json.loads(out)[0]['caps'].get('mds')
- if actual_mds_caps != expected_mds_caps:
- self._update_user_id(
- cluster_id,
- new_export.path,
- cast(str, new_fsal.fs_name),
- cast(str, new_fsal.user_id)
- )
- elif old_export.pseudo == new_export.pseudo:
- need_nfs_service_restart = False
- new_fsal.cephx_key = old_fsal.cephx_key
+ self._ensure_cephfs_export_user(new_export)
+ need_nfs_service_restart = not (old_fsal.user_id == new_fsal.user_id
+ and old_fsal.fs_name == new_fsal.fs_name
+ and old_export.path == new_export.path
+ and old_export.pseudo == new_export.pseudo)
if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
if old_rgw_fsal.user_id != new_rgw_fsal.user_id:
self._delete_export_user(old_export)
- self._create_export_user(new_export)
+ self._create_rgw_export_user(new_export)
elif old_rgw_fsal.access_key_id != new_rgw_fsal.access_key_id:
raise NFSInvalidOperation('access_key_id change is not allowed')
elif old_rgw_fsal.secret_access_key != new_rgw_fsal.secret_access_key:
@@ -880,3 +879,27 @@ class ExportMgr:
def _rados(self, cluster_id: str) -> NFSRados:
"""Return a new NFSRados object for the given cluster id."""
return NFSRados(self.mgr.rados, cluster_id)
+
+ def get_export_count_with_same_fsal(self, cmount_path: str, cluster_id: str, fs_name: str) -> int:
+ exports = self.list_exports(cluster_id, detailed=True)
+ exports_count = 0
+ for export in exports:
+ if export['fsal']['name'] == 'CEPH' and export['fsal']['cmount_path'] == cmount_path and export['fsal']['fs_name'] == fs_name:
+ exports_count += 1
+ return exports_count
+
+
+def get_user_id(cluster_id: str, fs_name: str, cmount_path: str) -> str:
+ """
+ Generates a unique ID based on the input parameters using SHA-1.
+
+ :param cluster_id: String representing the cluster ID.
+ :param fs_name: String representing the file system name.
+    :param cmount_path: String representing the CephFS client mount path (cmount_path) for the export.
+ :return: A unique ID in the format 'cluster_id.fs_name.<hash>'.
+ """
+ input_string = f"{cluster_id}:{fs_name}:{cmount_path}"
+ hash_hex = hashlib.sha1(input_string.encode('utf-8')).hexdigest()
+ unique_id = f"{cluster_id}.{fs_name}.{hash_hex[:8]}" # Use the first 8 characters of the hash
+
+ return unique_id
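The practical effect of ``get_user_id`` above is that exports sharing the same cluster, file system and ``cmount_path`` reuse one cephx credential, and therefore one CephFS client. A quick illustration using the values asserted in the qa test earlier in this diff (the import path is an assumption)::

    from nfs.export import get_user_id

    # Same cluster_id, fs_name and cmount_path -> same credential, one shared client.
    assert get_user_id("test", "nfs-cephfs", "/") == get_user_id("test", "nfs-cephfs", "/")

    # Per qa/tasks/cephfs/test_nfs.py, the resulting user ids (with the "nfs."
    # prefix added by the caller) are expected to be:
    #   cmount_path "/"        -> nfs.test.nfs-cephfs.3746f603
    #   cmount_path "/volumes" -> nfs.test.nfs-cephfs.32cd8545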
diff --git a/src/pybind/mgr/nfs/ganesha_conf.py b/src/pybind/mgr/nfs/ganesha_conf.py
index 31aaa4ea11c..56c56b434bb 100644
--- a/src/pybind/mgr/nfs/ganesha_conf.py
+++ b/src/pybind/mgr/nfs/ganesha_conf.py
@@ -179,8 +179,12 @@ class GaneshaConfParser:
class FSAL(object):
- def __init__(self, name: str) -> None:
+ def __init__(self, name: str, cmount_path: Optional[str] = "/") -> None:
+ # By default, cmount_path is set to "/", allowing the export to mount at the root level.
+ # This ensures that the export path can be any complete path hierarchy within the Ceph filesystem.
+ # If multiple exports share the same cmount_path and FSAL options, they will share a single CephFS client.
self.name = name
+ self.cmount_path = cmount_path
@classmethod
def from_dict(cls, fsal_dict: Dict[str, Any]) -> 'FSAL':
@@ -211,9 +215,11 @@ class CephFSFSAL(FSAL):
user_id: Optional[str] = None,
fs_name: Optional[str] = None,
sec_label_xattr: Optional[str] = None,
- cephx_key: Optional[str] = None) -> None:
+ cephx_key: Optional[str] = None,
+ cmount_path: Optional[str] = "/") -> None:
super().__init__(name)
assert name == 'CEPH'
+ self.cmount_path = cmount_path
self.fs_name = fs_name
self.user_id = user_id
self.sec_label_xattr = sec_label_xattr
@@ -225,7 +231,8 @@ class CephFSFSAL(FSAL):
fsal_block.values.get('user_id'),
fsal_block.values.get('filesystem'),
fsal_block.values.get('sec_label_xattr'),
- fsal_block.values.get('secret_access_key'))
+ fsal_block.values.get('secret_access_key'),
+ cmount_path=fsal_block.values.get('cmount_path'))
def to_fsal_block(self) -> RawBlock:
result = RawBlock('FSAL', values={'name': self.name})
@@ -238,6 +245,8 @@ class CephFSFSAL(FSAL):
result.values['sec_label_xattr'] = self.sec_label_xattr
if self.cephx_key:
result.values['secret_access_key'] = self.cephx_key
+ if self.cmount_path:
+ result.values['cmount_path'] = self.cmount_path
return result
@classmethod
@@ -246,7 +255,8 @@ class CephFSFSAL(FSAL):
fsal_dict.get('user_id'),
fsal_dict.get('fs_name'),
fsal_dict.get('sec_label_xattr'),
- fsal_dict.get('cephx_key'))
+ fsal_dict.get('cephx_key'),
+ fsal_dict.get('cmount_path'))
def to_dict(self) -> Dict[str, str]:
r = {'name': self.name}
@@ -256,6 +266,8 @@ class CephFSFSAL(FSAL):
r['fs_name'] = self.fs_name
if self.sec_label_xattr:
r['sec_label_xattr'] = self.sec_label_xattr
+ if self.cmount_path:
+ r['cmount_path'] = self.cmount_path
return r
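Since ``cmount_path`` now travels through both the Ganesha EXPORT block and the dict form of the FSAL, a small sanity-check sketch (assuming the in-tree import path ``nfs.ganesha_conf``)::

    from nfs.ganesha_conf import CephFSFSAL

    fsal = CephFSFSAL('CEPH', fs_name='a', cmount_path='/')
    assert fsal.to_dict()['cmount_path'] == '/'   # surfaced by `nfs export info`
    # from_dict() and from_fsal_block() were extended the same way, so the value
    # round-trips through `nfs export apply` and the generated EXPORT block.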
diff --git a/src/pybind/mgr/nfs/module.py b/src/pybind/mgr/nfs/module.py
index a984500eebf..be43112f396 100644
--- a/src/pybind/mgr/nfs/module.py
+++ b/src/pybind/mgr/nfs/module.py
@@ -38,6 +38,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
client_addr: Optional[List[str]] = None,
squash: str = 'none',
sectype: Optional[List[str]] = None,
+ cmount_path: Optional[str] = "/"
) -> Dict[str, Any]:
"""Create a CephFS export"""
return self.export_mgr.create_export(
@@ -50,6 +51,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
squash=squash,
addr=client_addr,
sectype=sectype,
+ cmount_path=cmount_path
)
@CLICommand('nfs export create rgw', perm='rw')
diff --git a/src/pybind/mgr/nfs/tests/test_nfs.py b/src/pybind/mgr/nfs/tests/test_nfs.py
index 5b4d5fe7e12..edf8bab37a1 100644
--- a/src/pybind/mgr/nfs/tests/test_nfs.py
+++ b/src/pybind/mgr/nfs/tests/test_nfs.py
@@ -76,9 +76,8 @@ EXPORT
EXPORT {
FSAL {
name = "CEPH";
- user_id = "nfs.foo.1";
filesystem = "a";
- secret_access_key = "AQCjU+hgjyReLBAAddJa0Dza/ZHqjX5+JiePMA==";
+ cmount_path = "/";
}
export_id = 1;
path = "/";
@@ -95,9 +94,8 @@ EXPORT {
EXPORT {
FSAL {
name = "CEPH";
- user_id = "nfs.foo.1";
filesystem = "a";
- secret_access_key = "AQCjU+hgjyReLBAAddJa0Dza/ZHqjX5+JiePMA==";
+ cmount_path = "/";
}
export_id = 1;
path = "/secure/me";
@@ -111,6 +109,25 @@ EXPORT {
transports = "TCP";
}
"""
+ export_5 = """
+EXPORT {
+ Export_ID=3;
+ Protocols = 4;
+ Path = /;
+ Pseudo = /cephfs_b/;
+ Access_Type = RW;
+ Protocols = 4;
+ Attr_Expiration_Time = 0;
+
+ FSAL {
+ Name = CEPH;
+ Filesystem = "b";
+ User_Id = "nfs.foo.b.lgudhr";
+ Secret_Access_Key = "YOUR SECRET KEY HERE";
+ cmount_path = "/";
+ }
+}
+"""
conf_nfs_foo = f'''
%url "rados://{NFS_POOL_NAME}/{cluster_id}/export-1"
@@ -159,6 +176,7 @@ EXPORT {
'foo': {
'export-1': TestNFS.RObject("export-1", self.export_1),
'export-2': TestNFS.RObject("export-2", self.export_2),
+ 'export-3': TestNFS.RObject("export-3", self.export_5),
'conf-nfs.foo': TestNFS.RObject("conf-nfs.foo", self.conf_nfs_foo)
}
}
@@ -382,6 +400,29 @@ NFS_CORE_PARAM {
export = Export.from_export_block(blocks[0], self.cluster_id)
self._validate_export_2(export)
+ def _validate_export_3(self, export: Export):
+ assert export.export_id == 3
+ assert export.path == "/"
+ assert export.pseudo == "/cephfs_b/"
+ assert export.access_type == "RW"
+ assert export.squash == "no_root_squash"
+ assert export.protocols == [4]
+ assert export.fsal.name == "CEPH"
+ assert export.fsal.user_id == "nfs.foo.b.lgudhr"
+ assert export.fsal.fs_name == "b"
+        assert export.fsal.sec_label_xattr is None
+ assert export.fsal.cmount_path == "/"
+ assert export.cluster_id == 'foo'
+ assert export.attr_expiration_time == 0
+        assert export.security_label is True
+
+ def test_export_parser_3(self) -> None:
+ blocks = GaneshaConfParser(self.export_5).parse()
+ assert isinstance(blocks, list)
+ assert len(blocks) == 1
+ export = Export.from_export_block(blocks[0], self.cluster_id)
+ self._validate_export_3(export)
+
def test_daemon_conf_parser(self) -> None:
blocks = GaneshaConfParser(self.conf_nfs_foo).parse()
assert isinstance(blocks, list)
@@ -404,10 +445,11 @@ NFS_CORE_PARAM {
ganesha_conf = ExportMgr(nfs_mod)
exports = ganesha_conf.exports[self.cluster_id]
- assert len(exports) == 2
+ assert len(exports) == 3
self._validate_export_1([e for e in exports if e.export_id == 1][0])
self._validate_export_2([e for e in exports if e.export_id == 2][0])
+ self._validate_export_3([e for e in exports if e.export_id == 3][0])
def test_config_dict(self) -> None:
self._do_mock_test(self._do_test_config_dict)
@@ -811,6 +853,9 @@ NFS_CORE_PARAM {
def test_update_export_with_list(self):
self._do_mock_test(self._do_test_update_export_with_list)
+
+ def test_update_export_cephfs(self):
+ self._do_mock_test(self._do_test_update_export_cephfs)
def _do_test_update_export_with_list(self):
nfs_mod = Module('nfs', '', '')
@@ -865,7 +910,7 @@ NFS_CORE_PARAM {
assert len(r.changes) == 2
export = conf._fetch_export('foo', '/rgw/bucket')
- assert export.export_id == 3
+ assert export.export_id == 4
assert export.path == "bucket"
assert export.pseudo == "/rgw/bucket"
assert export.access_type == "RW"
@@ -881,7 +926,7 @@ NFS_CORE_PARAM {
assert export.cluster_id == self.cluster_id
export = conf._fetch_export('foo', '/rgw/bucket2')
- assert export.export_id == 4
+ assert export.export_id == 5
assert export.path == "bucket2"
assert export.pseudo == "/rgw/bucket2"
assert export.access_type == "RO"
@@ -896,17 +941,50 @@ NFS_CORE_PARAM {
assert export.clients[0].access_type is None
assert export.cluster_id == self.cluster_id
+ def _do_test_update_export_cephfs(self):
+ nfs_mod = Module('nfs', '', '')
+ conf = ExportMgr(nfs_mod)
+ r = conf.apply_export(self.cluster_id, json.dumps({
+ 'export_id': 3,
+ 'path': '/',
+ 'cluster_id': self.cluster_id,
+ 'pseudo': '/cephfs_c',
+ 'access_type': 'RW',
+ 'squash': 'root_squash',
+ 'security_label': True,
+ 'protocols': [4],
+ 'transports': ['TCP', 'UDP'],
+ 'fsal': {
+ 'name': 'CEPH',
+ 'fs_name': 'c',
+ }
+ }))
+ assert len(r.changes) == 1
+
+ export = conf._fetch_export('foo', '/cephfs_c')
+ assert export.export_id == 3
+ assert export.path == "/"
+ assert export.pseudo == "/cephfs_c"
+ assert export.access_type == "RW"
+ assert export.squash == "root_squash"
+ assert export.protocols == [4]
+ assert export.transports == ["TCP", "UDP"]
+ assert export.fsal.name == "CEPH"
+ assert export.fsal.cmount_path == "/"
+ assert export.fsal.user_id == "nfs.foo.c.02de2980"
+ assert export.cluster_id == self.cluster_id
+
def test_remove_export(self) -> None:
self._do_mock_test(self._do_test_remove_export)
def _do_test_remove_export(self) -> None:
nfs_mod = Module('nfs', '', '')
conf = ExportMgr(nfs_mod)
- assert len(conf.exports[self.cluster_id]) == 2
+ assert len(conf.exports[self.cluster_id]) == 3
conf.delete_export(cluster_id=self.cluster_id,
pseudo_path="/rgw")
exports = conf.exports[self.cluster_id]
- assert len(exports) == 1
+ assert len(exports) == 2
assert exports[0].export_id == 1
def test_create_export_rgw_bucket(self):
@@ -917,7 +995,7 @@ NFS_CORE_PARAM {
conf = ExportMgr(nfs_mod)
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 2
+ assert len(ls) == 3
r = conf.create_export(
fsal_type='rgw',
@@ -931,7 +1009,7 @@ NFS_CORE_PARAM {
assert r["bind"] == "/mybucket"
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 3
+ assert len(ls) == 4
export = conf._fetch_export('foo', '/mybucket')
assert export.export_id
@@ -959,7 +1037,7 @@ NFS_CORE_PARAM {
conf = ExportMgr(nfs_mod)
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 2
+ assert len(ls) == 3
r = conf.create_export(
fsal_type='rgw',
@@ -974,7 +1052,7 @@ NFS_CORE_PARAM {
assert r["bind"] == "/mybucket"
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 3
+ assert len(ls) == 4
export = conf._fetch_export('foo', '/mybucket')
assert export.export_id
@@ -1002,7 +1080,7 @@ NFS_CORE_PARAM {
conf = ExportMgr(nfs_mod)
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 2
+ assert len(ls) == 3
r = conf.create_export(
fsal_type='rgw',
@@ -1016,7 +1094,7 @@ NFS_CORE_PARAM {
assert r["bind"] == "/mybucket"
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 3
+ assert len(ls) == 4
export = conf._fetch_export('foo', '/mybucket')
assert export.export_id
@@ -1038,13 +1116,19 @@ NFS_CORE_PARAM {
def test_create_export_cephfs(self):
self._do_mock_test(self._do_test_create_export_cephfs)
+
+ def test_create_export_cephfs_with_cmount_path(self):
+ self._do_mock_test(self._do_test_create_export_cephfs_with_cmount_path)
+
+ def test_create_export_cephfs_with_invalid_cmount_path(self):
+ self._do_mock_test(self._do_test_create_export_cephfs_with_invalid_cmount_path)
def _do_test_create_export_cephfs(self):
nfs_mod = Module('nfs', '', '')
conf = ExportMgr(nfs_mod)
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 2
+ assert len(ls) == 3
r = conf.create_export(
fsal_type='cephfs',
@@ -1059,7 +1143,7 @@ NFS_CORE_PARAM {
assert r["bind"] == "/cephfs2"
ls = conf.list_exports(cluster_id=self.cluster_id)
- assert len(ls) == 3
+ assert len(ls) == 4
export = conf._fetch_export('foo', '/cephfs2')
assert export.export_id
@@ -1070,13 +1154,67 @@ NFS_CORE_PARAM {
assert export.protocols == [4]
assert export.transports == ["TCP"]
assert export.fsal.name == "CEPH"
- assert export.fsal.user_id == "nfs.foo.3"
+ assert export.fsal.user_id == "nfs.foo.myfs.86ca58ef"
assert export.fsal.cephx_key == "thekeyforclientabc"
assert len(export.clients) == 1
assert export.clients[0].squash == 'root'
assert export.clients[0].access_type == 'rw'
assert export.clients[0].addresses == ["192.168.1.0/8"]
assert export.cluster_id == self.cluster_id
+
+ def _do_test_create_export_cephfs_with_cmount_path(self):
+ nfs_mod = Module('nfs', '', '')
+ conf = ExportMgr(nfs_mod)
+
+ ls = conf.list_exports(cluster_id=self.cluster_id)
+ assert len(ls) == 3
+
+ r = conf.create_export(
+ fsal_type='cephfs',
+ cluster_id=self.cluster_id,
+ fs_name='myfs',
+ path='/',
+ pseudo_path='/cephfs3',
+ read_only=False,
+ squash='root',
+ cmount_path='/',
+ )
+ assert r["bind"] == "/cephfs3"
+
+ ls = conf.list_exports(cluster_id=self.cluster_id)
+ assert len(ls) == 4
+
+ export = conf._fetch_export('foo', '/cephfs3')
+ assert export.export_id
+ assert export.path == "/"
+ assert export.pseudo == "/cephfs3"
+ assert export.access_type == "RW"
+ assert export.squash == "root"
+ assert export.protocols == [4]
+ assert export.fsal.name == "CEPH"
+ assert export.fsal.user_id == "nfs.foo.myfs.86ca58ef"
+ assert export.fsal.cephx_key == "thekeyforclientabc"
+ assert export.fsal.cmount_path == "/"
+ assert export.cluster_id == self.cluster_id
+
+ def _do_test_create_export_cephfs_with_invalid_cmount_path(self):
+ import object_format
+
+ nfs_mod = Module('nfs', '', '')
+ conf = ExportMgr(nfs_mod)
+
+ with pytest.raises(object_format.ErrorResponse) as e:
+ conf.create_export(
+ fsal_type='cephfs',
+ cluster_id=self.cluster_id,
+ fs_name='myfs',
+ path='/',
+ pseudo_path='/cephfs4',
+ read_only=False,
+ squash='root',
+ cmount_path='/invalid',
+ )
+ assert "Invalid cmount_path: '/invalid'" in str(e.value)
def _do_test_cluster_ls(self):
nfs_mod = Module('nfs', '', '')