summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn Mulligan <jmulligan@redhat.com>2022-05-05 22:56:08 +0200
committerJohn Mulligan <jmulligan@redhat.com>2023-01-12 19:44:11 +0100
commit59b15dd89e272a770c10a1aa33653b65f8f3a3d0 (patch)
tree7d9489f483734bc400253ed29fc9aa8c6e74372d
parentmgr/nfs: convert _cmd_nfs_cluster_rm to use EmptyResponder decorator (diff)
downloadceph-59b15dd89e272a770c10a1aa33653b65f8f3a3d0.tar.xz
ceph-59b15dd89e272a770c10a1aa33653b65f8f3a3d0.zip
mgr/nfs: convert _cmd_nfs_export_apply to use Responder decorator
The "export apply" functionality is unusual in that it allows either one or multiple nested requests to change or create an export. The previous implementation would concatenate the results of multiple change operations into a single string. It would also continue making changes if one case failed, adding the error to the string and setting a non-zero error code. The updated version keeps the general behavior but returns structured JSON (or other formatted data) with one entry per change request. In order to accomplish this and match the old behavior as closely as possible, we add an intermediate type (AppliedExportResults) that can both return the response data (the `to_simplified` method) and track whether there was a failure mixed in with the various updates (the `mgr_return_value` method). Signed-off-by: John Mulligan <jmulligan@redhat.com>
-rw-r--r--src/pybind/mgr/nfs/export.py113
-rw-r--r--src/pybind/mgr/nfs/module.py5
-rw-r--r--src/pybind/mgr/nfs/tests/test_nfs.py19
3 files changed, 87 insertions, 50 deletions
diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py
index 652171af8a3..b2a13f39aa0 100644
--- a/src/pybind/mgr/nfs/export.py
+++ b/src/pybind/mgr/nfs/export.py
@@ -187,6 +187,27 @@ def nfs_rados_configs(rados: 'Rados', nfs_pool: str = POOL_NAME) -> Set[str]:
return ns
+class AppliedExportResults:
+ """Gathers the results of multiple changed exports.
+ Returned by apply_export.
+ """
+
+ def __init__(self) -> None:
+ self.changes: List[Dict[str, str]] = []
+ self.has_error = False
+
+ def append(self, value: Dict[str, str]) -> None:
+ if value.get("state", "") == "error":
+ self.has_error = True
+ self.changes.append(value)
+
+ def to_simplified(self) -> List[Dict[str, str]]:
+ return self.changes
+
+ def mgr_return_value(self) -> int:
+ return -errno.EIO if self.has_error else 0
+
+
class ExportMgr:
def __init__(
self,
@@ -497,46 +518,58 @@ class ExportMgr:
export = self._fetch_export(cluster_id, pseudo_path)
return export.to_dict() if export else None
- def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
+ def apply_export(self, cluster_id: str, export_config: str) -> AppliedExportResults:
try:
- if not export_config:
- raise NFSInvalidOperation("Empty Config!!")
+ exports = self._read_export_config(cluster_id, export_config)
+ except Exception as e:
+ log.exception(f'Failed to update export: {e}')
+ raise ErrorResponse.wrap(e)
+
+ aeresults = AppliedExportResults()
+ for export in exports:
+ aeresults.append(self._change_export(cluster_id, export))
+ return aeresults
+
+ def _read_export_config(self, cluster_id: str, export_config: str) -> List[Dict]:
+ if not export_config:
+ raise NFSInvalidOperation("Empty Config!!")
+ try:
+ j = json.loads(export_config)
+ except ValueError:
+ # okay, not JSON. is it an EXPORT block?
try:
- j = json.loads(export_config)
- except ValueError:
- # okay, not JSON. is it an EXPORT block?
- try:
- blocks = GaneshaConfParser(export_config).parse()
- exports = [
- Export.from_export_block(block, cluster_id)
- for block in blocks
- ]
- j = [export.to_dict() for export in exports]
- except Exception as ex:
- raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")
-
- # check export type
- if isinstance(j, list):
- ret, out, err = (0, '', '')
- for export in j:
- try:
- r, o, e = self._apply_export(cluster_id, export)
- except Exception as ex:
- r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
- if r:
- ret = r
- if o:
- out += o + '\n'
- if e:
- err += e + '\n'
- return ret, out, err
- else:
- r, o, e = self._apply_export(cluster_id, j)
- return r, o, e
+ blocks = GaneshaConfParser(export_config).parse()
+ exports = [
+ Export.from_export_block(block, cluster_id)
+ for block in blocks
+ ]
+ j = [export.to_dict() for export in exports]
+ except Exception as ex:
+ raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")
+ # check export type - always return a list
+ if isinstance(j, list):
+ return j # j is already a list object
+ return [j] # return a single object list, with j as the only item
+
+ def _change_export(self, cluster_id: str, export: Dict) -> Dict[str, str]:
+ try:
+ return self._apply_export(cluster_id, export)
except NotImplementedError:
- return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
- except Exception as e:
- return exception_handler(e, f'Failed to update export: {e}')
+ # in theory, the NotImplementedError here may be raised by a hook back to
+ # an orchestration module. If the orchestration module supports it the NFS
+ # servers may be restarted. If not supported the expectation is that an
+ # (unfortunately generic) NotImplementedError will be raised. We then
+ # indicate to the user that manual intervention may be needed now that the
+ # configuration changes have been applied.
+ return {
+ "pseudo": export['pseudo'],
+ "state": "warning",
+ "msg": "changes applied (Manual restart of NFS Pods required)",
+ }
+ except Exception as ex:
+ msg = f'Failed to apply export: {ex}'
+ log.exception(msg)
+ return {"state": "error", "msg": msg}
def _update_user_id(
self,
@@ -733,7 +766,7 @@ class ExportMgr:
self,
cluster_id: str,
new_export_dict: Dict,
- ) -> Tuple[int, str, str]:
+ ) -> Dict[str, str]:
for k in ['path', 'pseudo']:
if k not in new_export_dict:
raise NFSInvalidOperation(f'Export missing required field {k}')
@@ -769,7 +802,7 @@ class ExportMgr:
if not old_export:
self._create_export_user(new_export)
self._save_export(cluster_id, new_export)
- return 0, f'Added export {new_export.pseudo}', ''
+ return {"pseudo": new_export.pseudo, "state": "added"}
need_nfs_service_restart = True
if old_export.fsal.name != new_export.fsal.name:
@@ -832,7 +865,7 @@ class ExportMgr:
self._update_export(cluster_id, new_export, need_nfs_service_restart)
- return 0, f"Updated export {new_export.pseudo}", ""
+ return {"pseudo": new_export.pseudo, "state": "updated"}
def _rados(self, cluster_id: str) -> NFSRados:
"""Return a new NFSRados object for the given cluster id."""
diff --git a/src/pybind/mgr/nfs/module.py b/src/pybind/mgr/nfs/module.py
index ec0cd83dd93..c5d54773eed 100644
--- a/src/pybind/mgr/nfs/module.py
+++ b/src/pybind/mgr/nfs/module.py
@@ -6,7 +6,7 @@ from mgr_module import MgrModule, CLICommand, Option, CLICheckNonemptyFileInput
import object_format
import orchestrator
-from .export import ExportMgr
+from .export import ExportMgr, AppliedExportResults
from .cluster import NFSCluster
from .utils import available_clusters
@@ -109,7 +109,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
@CLICommand('nfs export apply', perm='rw')
@CLICheckNonemptyFileInput(desc='Export JSON or Ganesha EXPORT specification')
- def _cmd_nfs_export_apply(self, cluster_id: str, inbuf: str) -> Tuple[int, str, str]:
+ @object_format.Responder()
+ def _cmd_nfs_export_apply(self, cluster_id: str, inbuf: str) -> AppliedExportResults:
"""Create or update an export by `-i <json_or_ganesha_export_file>`"""
return self.export_mgr.apply_export(cluster_id, export_config=inbuf)
diff --git a/src/pybind/mgr/nfs/tests/test_nfs.py b/src/pybind/mgr/nfs/tests/test_nfs.py
index 50f864d3ba1..64072591cfb 100644
--- a/src/pybind/mgr/nfs/tests/test_nfs.py
+++ b/src/pybind/mgr/nfs/tests/test_nfs.py
@@ -610,7 +610,7 @@ NFS_CORE_PARAM {
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
export = conf._fetch_export('foo', '/rgw/bucket')
assert export.export_id == 2
@@ -651,7 +651,7 @@ NFS_CORE_PARAM {
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
export = conf._fetch_export('foo', '/rgw/bucket')
assert export.export_id == 2
@@ -691,7 +691,7 @@ NFS_CORE_PARAM {
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
export = conf._fetch_export(self.cluster_id, '/rgw/bucket')
assert export.export_id == 2
@@ -737,7 +737,7 @@ NFS_CORE_PARAM {
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
# no sectype was given, key not present
info = conf._get_export_dict(self.cluster_id, "/rgw/bucket")
@@ -768,7 +768,7 @@ NFS_CORE_PARAM {
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
# assert sectype matches new value(s)
info = conf._get_export_dict(self.cluster_id, "/rgw/bucket")
@@ -783,7 +783,7 @@ NFS_CORE_PARAM {
nfs_mod = Module('nfs', '', '')
conf = ExportMgr(nfs_mod)
r = conf.apply_export(self.cluster_id, self.export_3)
- assert r[0] == 0
+ assert len(r.changes) == 1
def test_update_export_with_ganesha_conf_sectype(self):
self._do_mock_test(
@@ -800,7 +800,7 @@ NFS_CORE_PARAM {
nfs_mod = Module('nfs', '', '')
conf = ExportMgr(nfs_mod)
r = conf.apply_export(self.cluster_id, export_conf)
- assert r[0] == 0
+ assert len(r.changes) == 1
# assert sectype matches new value(s)
info = conf._get_export_dict(self.cluster_id, "/secure1")
@@ -858,7 +858,10 @@ NFS_CORE_PARAM {
}
},
]))
- assert r[0] == 0
+ # The input object above contains TWO items (two different pseudo paths)
+ # therefore we expect the result to report that two changes have been
+ # applied, rather than the typical 1 change.
+ assert len(r.changes) == 2
export = conf._fetch_export('foo', '/rgw/bucket')
assert export.export_id == 3