Diffstat (limited to 'src/pybind/mgr/cephadm/services')
-rw-r--r--  src/pybind/mgr/cephadm/services/cephadmservice.py  | 41
-rw-r--r--  src/pybind/mgr/cephadm/services/ingress.py         | 11
-rw-r--r--  src/pybind/mgr/cephadm/services/mgmt_gateway.py    | 66
-rw-r--r--  src/pybind/mgr/cephadm/services/monitoring.py      | 71
-rw-r--r--  src/pybind/mgr/cephadm/services/nvmeof.py          | 29
5 files changed, 157 insertions(+), 61 deletions(-)
diff --git a/src/pybind/mgr/cephadm/services/cephadmservice.py b/src/pybind/mgr/cephadm/services/cephadmservice.py
index 9043577bc5a..4f83d7bb0fb 100644
--- a/src/pybind/mgr/cephadm/services/cephadmservice.py
+++ b/src/pybind/mgr/cephadm/services/cephadmservice.py
@@ -1015,12 +1015,6 @@ class RgwService(CephService):
# set rgw_realm rgw_zonegroup and rgw_zone, if present
self.set_realm_zg_zone(spec)
- if spec.generate_cert and not spec.rgw_frontend_ssl_certificate:
- # generate a self-signed cert for the rgw service
- cert, key = self.mgr.cert_mgr.ssl_certs.generate_root_cert(custom_san_list=spec.zonegroup_hostnames)
- spec.rgw_frontend_ssl_certificate = ''.join([key, cert])
- self.mgr.spec_store.save(spec)
-
if spec.rgw_frontend_ssl_certificate:
if isinstance(spec.rgw_frontend_ssl_certificate, list):
cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate)
@@ -1068,6 +1062,19 @@ class RgwService(CephService):
# and it matches the spec.
port = spec.get_port()
+ if spec.generate_cert:
+ cert, key = self.mgr.cert_mgr.generate_cert(
+ daemon_spec.host,
+ self.mgr.inventory.get_addr(daemon_spec.host),
+ custom_san_list=spec.zonegroup_hostnames
+ )
+ pem = ''.join([key, cert])
+ ret, out, err = self.mgr.check_mon_command({
+ 'prefix': 'config-key set',
+ 'key': f'rgw/cert/{daemon_spec.name()}',
+ 'val': pem,
+ })
+
# configure frontend
args = []
ftype = spec.rgw_frontend_type or "beast"
@@ -1078,7 +1085,10 @@ class RgwService(CephService):
f"ssl_endpoint={build_url(host=daemon_spec.ip, port=port).lstrip('/')}")
else:
args.append(f"ssl_port={port}")
- args.append(f"ssl_certificate=config://rgw/cert/{spec.service_name()}")
+ if spec.generate_cert:
+ args.append(f"ssl_certificate=config://rgw/cert/{daemon_spec.name()}")
+ else:
+ args.append(f"ssl_certificate=config://rgw/cert/{spec.service_name()}")
else:
if daemon_spec.ip:
args.append(f"endpoint={build_url(host=daemon_spec.ip, port=port).lstrip('/')}")
@@ -1091,7 +1101,10 @@ class RgwService(CephService):
args.append(f"port={build_url(host=daemon_spec.ip, port=port).lstrip('/')}s")
else:
args.append(f"port={port}s") # note the 's' suffix on port
- args.append(f"ssl_certificate=config://rgw/cert/{spec.service_name()}")
+ if spec.generate_cert:
+ args.append(f"ssl_certificate=config://rgw/cert/{daemon_spec.name()}")
+ else:
+ args.append(f"ssl_certificate=config://rgw/cert/{spec.service_name()}")
else:
if daemon_spec.ip:
args.append(f"port={build_url(host=daemon_spec.ip, port=port).lstrip('/')}")
@@ -1144,6 +1157,14 @@ class RgwService(CephService):
'value': str(spec.rgw_bucket_counters_cache_size),
})
+ if getattr(spec, 'disable_multisite_sync_traffic', None) is not None:
+ ret, out, err = self.mgr.check_mon_command({
+ 'prefix': 'config set',
+ 'who': daemon_name,
+ 'name': 'rgw_run_sync_thread',
+ 'value': 'false' if spec.disable_multisite_sync_traffic else 'true',
+ })
+
daemon_spec.keyring = keyring
daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
@@ -1180,6 +1201,10 @@ class RgwService(CephService):
'who': utils.name_to_config_section(daemon.name()),
'name': 'rgw_frontends',
})
+ self.mgr.check_mon_command({
+ 'prefix': 'config-key rm',
+ 'key': f'rgw/cert/{daemon.name()}',
+ })
def ok_to_stop(
self,
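
Taken together, the RgwService changes do three things: when generate_cert is set, a per-daemon self-signed certificate (with SANs covering the host, its address, and any zonegroup hostnames) is generated at deploy time and stored under rgw/cert/<daemon-name> in the mon config-key store; the frontend is pointed at that per-daemon key instead of the per-service one; and the key is deleted again when the daemon is removed. The new disable_multisite_sync_traffic flag simply maps to rgw_run_sync_thread. A minimal sketch of the frontend-argument selection; build_frontend_ssl_args is a hypothetical helper and plain host:port strings stand in for cephadm's build_url:

    def build_frontend_ssl_args(ftype: str, port: int, service_name: str,
                                daemon_name: str, generate_cert: bool,
                                ip: str = '') -> list:
        """Assemble the SSL-related rgw_frontends arguments (simplified sketch)."""
        args = []
        if ftype == 'beast':
            args.append(f"ssl_endpoint={ip}:{port}" if ip else f"ssl_port={port}")
        else:  # civetweb marks SSL with an 's' suffix on the port
            args.append(f"port={ip}:{port}s" if ip else f"port={port}s")
        # per-daemon cert when cephadm generated it, per-service cert otherwise
        cert_key = daemon_name if generate_cert else service_name
        args.append(f"ssl_certificate=config://rgw/cert/{cert_key}")
        return args

    # build_frontend_ssl_args('beast', 443, 'rgw.zone1', 'rgw.zone1.host1.abcdef', True)
    # -> ['ssl_port=443', 'ssl_certificate=config://rgw/cert/rgw.zone1.host1.abcdef']
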
diff --git a/src/pybind/mgr/cephadm/services/ingress.py b/src/pybind/mgr/cephadm/services/ingress.py
index a17000cd632..7381ef67d7e 100644
--- a/src/pybind/mgr/cephadm/services/ingress.py
+++ b/src/pybind/mgr/cephadm/services/ingress.py
@@ -241,7 +241,12 @@ class IngressService(CephService):
if spec.keepalived_password:
password = spec.keepalived_password
- daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())
+ if spec.keepalive_only:
+ # with keepalive_only (no haproxy), we have to monitor the backend service daemons
+ if spec.backend_service is not None:
+ daemons = self.mgr.cache.get_daemons_by_service(spec.backend_service)
+ else:
+ daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())
if not daemons and not spec.keepalive_only:
raise OrchestratorError(
@@ -297,6 +302,10 @@ class IngressService(CephService):
port = d.ports[1] # monitoring port
host_ip = d.ip or self.mgr.inventory.get_addr(d.hostname)
script = f'/usr/bin/curl {build_url(scheme="http", host=host_ip, port=port)}/health'
+ elif d.daemon_type == 'mgmt-gateway':
+ mgmt_gw_port = d.ports[0] if d.ports else None
+ host_ip = d.ip or self.mgr.inventory.get_addr(d.hostname)
+ script = f'/usr/bin/curl -k {build_url(scheme="https", host=host_ip, port=mgmt_gw_port)}/health'
assert script
states = []
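
In ingress, a keepalive-only deployment now watches the daemons of its backend_service (there is no local haproxy to track), and mgmt-gateway backends get an HTTPS health check with -k since the gateway may serve a self-signed certificate. A rough sketch of the per-daemon check-script selection, with DaemonInfo as a hypothetical stand-in for DaemonDescription:

    from dataclasses import dataclass, field
    from typing import List, Optional

    @dataclass
    class DaemonInfo:                     # hypothetical stand-in for DaemonDescription
        daemon_type: str
        ip: str
        ports: List[int] = field(default_factory=list)

    def health_check_script(d: DaemonInfo) -> Optional[str]:
        """Pick the curl command keepalived runs to probe a backend daemon."""
        if d.daemon_type == 'haproxy' and len(d.ports) > 1:
            return f'/usr/bin/curl http://{d.ip}:{d.ports[1]}/health'   # monitoring port
        if d.daemon_type == 'mgmt-gateway' and d.ports:
            # -k because the gateway commonly serves a self-signed certificate
            return f'/usr/bin/curl -k https://{d.ip}:{d.ports[0]}/health'
        return None

    # health_check_script(DaemonInfo('mgmt-gateway', '10.0.0.5', [8443]))
    # -> '/usr/bin/curl -k https://10.0.0.5:8443/health'
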
diff --git a/src/pybind/mgr/cephadm/services/mgmt_gateway.py b/src/pybind/mgr/cephadm/services/mgmt_gateway.py
index 1943264025e..0897ce99ff7 100644
--- a/src/pybind/mgr/cephadm/services/mgmt_gateway.py
+++ b/src/pybind/mgr/cephadm/services/mgmt_gateway.py
@@ -1,10 +1,12 @@
import logging
-from typing import List, Any, Tuple, Dict, cast, Optional
+from typing import List, Any, Tuple, Dict, cast, TYPE_CHECKING
from orchestrator import DaemonDescription
from ceph.deployment.service_spec import MgmtGatewaySpec, GrafanaSpec
from cephadm.services.cephadmservice import CephadmService, CephadmDaemonDeploySpec, get_dashboard_endpoints
+if TYPE_CHECKING:
+ from ..module import CephadmOrchestrator
logger = logging.getLogger(__name__)
@@ -36,10 +38,11 @@ class MgmtGatewayService(CephadmService):
# if empty list provided, return empty Daemon Desc
return DaemonDescription()
- def get_oauth2_service_url(self) -> Optional[str]:
- # TODO(redo): check how can we create several servers for HA
- oauth2_servers = self.get_service_endpoints('oauth2-proxy')
- return f'https://{oauth2_servers[0]}' if oauth2_servers else None
+ def get_mgmt_gw_ips(self, svc_spec: MgmtGatewaySpec, daemon_spec: CephadmDaemonDeploySpec) -> List[str]:
+ mgmt_gw_ips = [self.mgr.inventory.get_addr(daemon_spec.host)]
+ if svc_spec.virtual_ip is not None:
+ mgmt_gw_ips.append(svc_spec.virtual_ip)
+ return mgmt_gw_ips
def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None:
# we adjust the standby behaviour so rev-proxy can pick correctly the active instance
@@ -56,9 +59,9 @@ class MgmtGatewayService(CephadmService):
key = svc_spec.ssl_certificate_key
else:
# not provided on the spec, let's generate self-signed certificates
- addr = self.mgr.inventory.get_addr(daemon_spec.host)
+ ips = self.get_mgmt_gw_ips(svc_spec, daemon_spec)
host_fqdn = self.mgr.get_fqdn(daemon_spec.host)
- cert, key = self.mgr.cert_mgr.generate_cert(host_fqdn, addr)
+ cert, key = self.mgr.cert_mgr.generate_cert(host_fqdn, ips)
# save certificates
if cert and key:
self.mgr.cert_key_store.save_cert('mgmt_gw_cert', cert)
@@ -67,23 +70,33 @@ class MgmtGatewayService(CephadmService):
logger.error("Failed to obtain certificate and key from mgmt-gateway.")
return cert, key
- def get_internal_certificates(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[str, str]:
- node_ip = self.mgr.inventory.get_addr(daemon_spec.host)
+ def get_internal_certificates(self, svc_spec: MgmtGatewaySpec, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[str, str]:
+ ips = self.get_mgmt_gw_ips(svc_spec, daemon_spec)
host_fqdn = self.mgr.get_fqdn(daemon_spec.host)
- return self.mgr.cert_mgr.generate_cert(host_fqdn, node_ip)
+ return self.mgr.cert_mgr.generate_cert(host_fqdn, ips)
- def get_mgmt_gateway_deps(self) -> List[str]:
- # url_prefix for the following services depends on the presence of mgmt-gateway
- deps: List[str] = []
- deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('prometheus')]
- deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('alertmanager')]
- deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('grafana')]
- deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('oauth2-proxy')]
+ def get_service_discovery_endpoints(self) -> List[str]:
+ sd_endpoints = []
for dd in self.mgr.cache.get_daemons_by_service('mgr'):
- # we consider mgr a dep even if the dashboard is disabled
- # in order to be consistent with _calc_daemon_deps().
- deps.append(dd.name())
+ assert dd.hostname is not None
+ addr = dd.ip if dd.ip else self.mgr.inventory.get_addr(dd.hostname)
+ sd_endpoints.append(f"{addr}:{self.mgr.service_discovery_port}")
+ return sd_endpoints
+ @staticmethod
+ def get_dependencies(mgr: "CephadmOrchestrator") -> List[str]:
+ # url_prefix for the following services depends on the presence of mgmt-gateway
+ deps = [
+ f'{d.name()}:{d.ports[0]}' if d.ports else d.name()
+ for service in ['prometheus', 'alertmanager', 'grafana', 'oauth2-proxy']
+ for d in mgr.cache.get_daemons_by_service(service)
+ ]
+ # dashboard and service discovery urls depend on the mgr daemons
+ deps += [
+ f'{d.name()}'
+ for service in ['mgr']
+ for d in mgr.cache.get_daemons_by_service(service)
+ ]
return deps
def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
@@ -94,6 +107,8 @@ class MgmtGatewayService(CephadmService):
prometheus_endpoints = self.get_service_endpoints('prometheus')
alertmanager_endpoints = self.get_service_endpoints('alertmanager')
grafana_endpoints = self.get_service_endpoints('grafana')
+ oauth2_proxy_endpoints = self.get_service_endpoints('oauth2-proxy')
+ service_discovery_endpoints = self.get_service_discovery_endpoints()
try:
grafana_spec = cast(GrafanaSpec, self.mgr.spec_store['grafana'].spec)
grafana_protocol = grafana_spec.protocol
@@ -104,7 +119,9 @@ class MgmtGatewayService(CephadmService):
'dashboard_endpoints': dashboard_endpoints,
'prometheus_endpoints': prometheus_endpoints,
'alertmanager_endpoints': alertmanager_endpoints,
- 'grafana_endpoints': grafana_endpoints
+ 'grafana_endpoints': grafana_endpoints,
+ 'oauth2_proxy_endpoints': oauth2_proxy_endpoints,
+ 'service_discovery_endpoints': service_discovery_endpoints
}
server_context = {
'spec': svc_spec,
@@ -117,11 +134,12 @@ class MgmtGatewayService(CephadmService):
'prometheus_endpoints': prometheus_endpoints,
'alertmanager_endpoints': alertmanager_endpoints,
'grafana_endpoints': grafana_endpoints,
- 'oauth2_proxy_url': self.get_oauth2_service_url(),
+ 'service_discovery_endpoints': service_discovery_endpoints,
+ 'enable_oauth2_proxy': bool(oauth2_proxy_endpoints),
}
cert, key = self.get_external_certificates(svc_spec, daemon_spec)
- internal_cert, internal_pkey = self.get_internal_certificates(daemon_spec)
+ internal_cert, internal_pkey = self.get_internal_certificates(svc_spec, daemon_spec)
daemon_config = {
"files": {
"nginx.conf": self.mgr.template.render(self.SVC_TEMPLATE_PATH, main_context),
@@ -136,7 +154,7 @@ class MgmtGatewayService(CephadmService):
daemon_config["files"]["nginx.crt"] = cert
daemon_config["files"]["nginx.key"] = key
- return daemon_config, sorted(self.get_mgmt_gateway_deps())
+ return daemon_config, sorted(MgmtGatewayService.get_dependencies(self.mgr))
def pre_remove(self, daemon: DaemonDescription) -> None:
"""
diff --git a/src/pybind/mgr/cephadm/services/monitoring.py b/src/pybind/mgr/cephadm/services/monitoring.py
index 6a57e3b31ef..9c5b5a112f3 100644
--- a/src/pybind/mgr/cephadm/services/monitoring.py
+++ b/src/pybind/mgr/cephadm/services/monitoring.py
@@ -3,15 +3,17 @@ import logging
import os
import socket
from typing import List, Any, Tuple, Dict, Optional, cast
+import ipaddress
from mgr_module import HandleCommandResult
from orchestrator import DaemonDescription
from ceph.deployment.service_spec import AlertManagerSpec, GrafanaSpec, ServiceSpec, \
- SNMPGatewaySpec, PrometheusSpec
+ SNMPGatewaySpec, PrometheusSpec, MgmtGatewaySpec
from cephadm.services.cephadmservice import CephadmService, CephadmDaemonDeploySpec, get_dashboard_urls
from mgr_util import verify_tls, ServerConfigException, build_url, get_cert_issuer_info, password_hash
from ceph.deployment.utils import wrap_ipv6
+from .. import utils
logger = logging.getLogger(__name__)
@@ -56,16 +58,20 @@ class GrafanaService(CephadmService):
if ip_to_bind_to:
daemon_spec.port_ips = {str(grafana_port): ip_to_bind_to}
grafana_ip = ip_to_bind_to
+ if ipaddress.ip_network(grafana_ip).version == 6:
+ grafana_ip = f"[{grafana_ip}]"
- mgmt_gw_ip = None
domain = self.mgr.get_fqdn(daemon_spec.host)
+ mgmt_gw_ips = []
if mgmt_gw_enabled:
mgmt_gw_daemons = self.mgr.cache.get_daemons_by_service('mgmt-gateway')
if mgmt_gw_daemons:
dd = mgmt_gw_daemons[0]
assert dd.hostname
- domain = self.mgr.get_fqdn(dd.hostname)
- mgmt_gw_ip = self.mgr.inventory.get_addr(dd.hostname)
+ mgmt_gw_spec = cast(MgmtGatewaySpec, self.mgr.spec_store['mgmt-gateway'].spec)
+ # TODO(redo): should we resolve the virtual_ip to a name if possible?
+ domain = mgmt_gw_spec.virtual_ip or self.mgr.get_fqdn(dd.hostname)  # give priority to the VIP if configured
+ mgmt_gw_ips = [self.mgr.inventory.get_addr(dd.hostname) for dd in mgmt_gw_daemons] # type: ignore
return self.mgr.template.render('services/grafana/grafana.ini.j2', {
'anonymous_access': spec.anonymous_access,
@@ -76,7 +82,7 @@ class GrafanaService(CephadmService):
'domain': domain,
'mgmt_gw_enabled': mgmt_gw_enabled,
'oauth2_enabled': oauth2_enabled,
- 'mgmt_gw_ip': mgmt_gw_ip,
+ 'mgmt_gw_ips': ','.join(mgmt_gw_ips),
})
def calculate_grafana_deps(self, security_enabled: bool) -> List[str]:
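
Two details in the grafana.ini generation: an IPv6 bind address has to be bracketed before it is embedded in http_addr, and the template now receives every mgmt-gateway address as a comma-separated mgmt_gw_ips value, with the virtual IP preferred as the domain when configured. A sketch of the bracket wrapping, assuming the input is a bare IP literal (or empty):

    import ipaddress

    def wrap_if_ipv6(ip: str) -> str:
        """Bracket an IPv6 literal so it is safe inside host:port strings."""
        if ip and ipaddress.ip_address(ip).version == 6:
            return f"[{ip}]"
        return ip

    assert wrap_if_ipv6("2001:db8::10") == "[2001:db8::10]"
    assert wrap_if_ipv6("192.168.1.10") == "192.168.1.10"
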
@@ -87,7 +93,7 @@ class GrafanaService(CephadmService):
# in case security is enabled we have to reconfig when prom user/pass changes
prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
if security_enabled and prometheus_user and prometheus_password:
- deps.append(f'{hash(prometheus_user + prometheus_password)}')
+ deps.append(f'{utils.md5_hash(prometheus_user + prometheus_password)}')
# adding a dependency for mgmt-gateway because the usage of url_prefix relies on its presence.
# another dependency is added for oauth-proxy as Grafana login is delegated to this service when enabled.
@@ -311,17 +317,18 @@ class AlertmanagerService(CephadmService):
# add a dependency since enabling basic-auth (or not) depends on the existence of 'oauth2-proxy'
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('oauth2-proxy')]
- # scan all mgrs to generate deps and to get standbys too.
- for dd in self.mgr.cache.get_daemons_by_service('mgr'):
- # we consider mgr a dep even if the dashboard is disabled
- # in order to be consistent with _calc_daemon_deps().
- deps.append(dd.name())
-
security_enabled, mgmt_gw_enabled, oauth2_enabled = self.mgr._get_security_config()
if mgmt_gw_enabled:
dashboard_urls = [f'{self.mgr.get_mgmt_gw_internal_endpoint()}/dashboard']
else:
dashboard_urls = get_dashboard_urls(self)
+ # scan all mgrs to generate deps and to get standbys too.
+ for dd in self.mgr.cache.get_daemons_by_service('mgr'):
+ # we consider mgr a dep even if the dashboard is disabled
+ # in order to be consistent with _calc_daemon_deps().
+ # when mgmt_gw is enabled there's no need for mgr dep as
+ # mgmt-gw will route to the active mgr automatically
+ deps.append(dd.name())
snmp_gateway_urls: List[str] = []
for dd in self.mgr.cache.get_daemons_by_service('snmp-gateway'):
@@ -350,11 +357,18 @@ class AlertmanagerService(CephadmService):
addr = self.mgr.get_fqdn(dd.hostname)
peers.append(build_url(host=addr, port=port).lstrip('/'))
+ ip_to_bind_to = ''
+ if spec.only_bind_port_on_networks and spec.networks:
+ assert daemon_spec.host is not None
+ ip_to_bind_to = self.mgr.get_first_matching_network_ip(daemon_spec.host, spec) or ''
+ if ip_to_bind_to:
+ daemon_spec.port_ips = {str(port): ip_to_bind_to}
+
deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}')
if security_enabled:
alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials()
if alertmanager_user and alertmanager_password:
- deps.append(f'{hash(alertmanager_user + alertmanager_password)}')
+ deps.append(f'{utils.md5_hash(alertmanager_user + alertmanager_password)}')
cert, key = self.get_alertmanager_certificates(daemon_spec)
context = {
'enable_mtls': mgmt_gw_enabled,
@@ -372,7 +386,8 @@ class AlertmanagerService(CephadmService):
},
'peers': peers,
'web_config': '/etc/alertmanager/web.yml',
- 'use_url_prefix': mgmt_gw_enabled
+ 'use_url_prefix': mgmt_gw_enabled,
+ 'ip_to_bind_to': ip_to_bind_to
}, sorted(deps)
else:
return {
@@ -380,7 +395,8 @@ class AlertmanagerService(CephadmService):
"alertmanager.yml": yml
},
"peers": peers,
- 'use_url_prefix': mgmt_gw_enabled
+ 'use_url_prefix': mgmt_gw_enabled,
+ 'ip_to_bind_to': ip_to_bind_to
}, sorted(deps)
def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
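
Alertmanager picks up the only_bind_port_on_networks behaviour Prometheus already has: when the spec restricts networks, the first host IP inside one of them becomes the bind address and is recorded in port_ips. get_first_matching_network_ip is cephadm's own helper; the function below is a hypothetical equivalent using only the ipaddress module:

    import ipaddress
    from typing import List, Optional

    def first_matching_ip(host_ips: List[str], networks: List[str]) -> Optional[str]:
        """Return the first host IP contained in any of the allowed networks."""
        for net in networks:
            network = ipaddress.ip_network(net)
            for ip in host_ips:
                if ipaddress.ip_address(ip) in network:
                    return ip
        return None

    # first_matching_ip(['10.0.0.12', '192.168.5.3'], ['192.168.5.0/24']) -> '192.168.5.3'
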
@@ -489,8 +505,14 @@ class PrometheusService(CephadmService):
security_enabled, mgmt_gw_enabled, oauth2_enabled = self.mgr._get_security_config()
port = self.mgr.service_discovery_port
mgr_addr = wrap_ipv6(self.mgr.get_mgr_ip())
+
protocol = 'https' if security_enabled else 'http'
- srv_end_point = f'{protocol}://{mgr_addr}:{port}/sd/prometheus/sd-config?'
+ self.mgr.get_mgmt_gw_internal_endpoint()
+ if mgmt_gw_enabled:
+ service_discovery_url_prefix = f'{self.mgr.get_mgmt_gw_internal_endpoint()}'
+ else:
+ service_discovery_url_prefix = f'{protocol}://{mgr_addr}:{port}'
+ srv_end_point = f'{service_discovery_url_prefix}/sd/prometheus/sd-config?'
node_exporter_cnt = len(self.mgr.cache.get_daemons_by_service('node-exporter'))
alertmgr_cnt = len(self.mgr.cache.get_daemons_by_service('alertmanager'))
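
With a mgmt-gateway present, Prometheus fetches its HTTP service-discovery config through the gateway's internal endpoint, which proxies to whichever mgr is currently active, instead of addressing the active mgr directly. A small sketch of the prefix selection; the endpoint string and port below are placeholders, not cephadm's exact values:

    def sd_config_url(mgmt_gw_internal: str, mgr_addr: str, sd_port: int,
                      security_enabled: bool, mgmt_gw_enabled: bool) -> str:
        """Build the sd-config URL, preferring the mgmt-gateway when deployed."""
        if mgmt_gw_enabled:
            prefix = mgmt_gw_internal                  # e.g. 'https://gw.example:29443/internal'
        else:
            scheme = 'https' if security_enabled else 'http'
            prefix = f'{scheme}://{mgr_addr}:{sd_port}'
        return f'{prefix}/sd/prometheus/sd-config?'
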
@@ -617,18 +639,23 @@ class PrometheusService(CephadmService):
port = cast(int, self.mgr.get_module_option_ex('prometheus', 'server_port', self.DEFAULT_MGR_PROMETHEUS_PORT))
deps.append(str(port))
deps.append(str(self.mgr.service_discovery_port))
- # add an explicit dependency on the active manager. This will force to
- # re-deploy prometheus if the mgr has changed (due to a fail-over i.e).
- deps.append(self.mgr.get_active_mgr().name())
deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}')
- security_enabled, _, _ = self.mgr._get_security_config()
+ security_enabled, mgmt_gw_enabled, _ = self.mgr._get_security_config()
+
+ if not mgmt_gw_enabled:
+ # add an explicit dependency on the active manager. This will force to
+ # re-deploy prometheus if the mgr has changed (e.g. due to a fail-over).
+ # when mgmt_gw is enabled there's no need for such a dep, as mgmt-gw will
+ # route to the active mgr automatically
+ deps.append(self.mgr.get_active_mgr().name())
+
if security_enabled:
alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials()
prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
if prometheus_user and prometheus_password:
- deps.append(f'{hash(prometheus_user + prometheus_password)}')
+ deps.append(f'{utils.md5_hash(prometheus_user + prometheus_password)}')
if alertmanager_user and alertmanager_password:
- deps.append(f'{hash(alertmanager_user + alertmanager_password)}')
+ deps.append(f'{utils.md5_hash(alertmanager_user + alertmanager_password)}')
# add a dependency since url_prefix depends on the existence of mgmt-gateway
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('mgmt-gateway')]
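
Throughout monitoring.py the credential-derived dependency strings move from the builtin hash() to utils.md5_hash: hash() is salted per interpreter process (PYTHONHASHSEED), so the old values changed on every mgr restart and forced pointless redeploys. Any stable digest solves this; a plausible implementation, assuming cephadm's helper is essentially a hashlib wrapper:

    import hashlib

    def md5_hash(value: str) -> str:
        """Deterministic digest, identical across interpreter runs (unlike hash())."""
        return hashlib.md5(value.encode()).hexdigest()

    # A dep string like md5_hash(prometheus_user + prometheus_password) now only
    # changes when the credentials themselves change.
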
diff --git a/src/pybind/mgr/cephadm/services/nvmeof.py b/src/pybind/mgr/cephadm/services/nvmeof.py
index 162815da24c..b3fd526815e 100644
--- a/src/pybind/mgr/cephadm/services/nvmeof.py
+++ b/src/pybind/mgr/cephadm/services/nvmeof.py
@@ -38,6 +38,8 @@ class NvmeofService(CephService):
spec = cast(NvmeofServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
nvmeof_gw_id = daemon_spec.daemon_id
host_ip = self.mgr.inventory.get_addr(daemon_spec.host)
+ map_addr = spec.addr_map.get(daemon_spec.host) if spec.addr_map else None
+ map_discovery_addr = spec.discovery_addr_map.get(daemon_spec.host) if spec.discovery_addr_map else None
keyring = self.get_keyring_with_caps(self.get_auth_entity(nvmeof_gw_id),
['mon', 'profile rbd',
@@ -47,15 +49,21 @@ class NvmeofService(CephService):
transport_tcp_options = json.dumps(spec.transport_tcp_options) if spec.transport_tcp_options else None
name = '{}.{}'.format(utils.name_to_config_section('nvmeof'), nvmeof_gw_id)
rados_id = name[len('client.'):] if name.startswith('client.') else name
- addr = spec.addr or host_ip
- discovery_addr = spec.discovery_addr or host_ip
+
+ # The address is first searched in the per node address map,
+ # then in the spec address configuration.
+ # If neither is defined, the host IP is used as a fallback.
+ addr = map_addr or spec.addr or host_ip
+ self.mgr.log.info(f"gateway address: {addr} from {map_addr=} {spec.addr=} {host_ip=}")
+ discovery_addr = map_discovery_addr or spec.discovery_addr or host_ip
+ self.mgr.log.info(f"discovery address: {discovery_addr} from {map_discovery_addr=} {spec.discovery_addr=} {host_ip=}")
context = {
'spec': spec,
'name': name,
'addr': addr,
'discovery_addr': discovery_addr,
'port': spec.port,
- 'spdk_log_level': 'WARNING',
+ 'spdk_log_level': '',
'rpc_socket_dir': '/var/tmp/',
'rpc_socket_name': 'spdk.sock',
'transport_tcp_options': transport_tcp_options,
@@ -66,6 +74,10 @@ class NvmeofService(CephService):
daemon_spec.keyring = keyring
daemon_spec.extra_files = {'ceph-nvmeof.conf': gw_conf}
+ # Indicate to the daemon whether to utilize huge pages
+ if spec.spdk_mem_size:
+ daemon_spec.extra_files['spdk_mem_size'] = str(spec.spdk_mem_size)
+
if spec.enable_auth:
if (
not spec.client_cert
@@ -87,6 +99,9 @@ class NvmeofService(CephService):
daemon_spec.extra_files['client_key'] = spec.client_key
daemon_spec.extra_files['root_ca_cert'] = spec.root_ca_cert
+ if spec.encryption_key:
+ daemon_spec.extra_files['encryption_key'] = spec.encryption_key
+
daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
daemon_spec.deps = []
return daemon_spec
@@ -185,19 +200,21 @@ class NvmeofService(CephService):
# to clean the keyring up
super().post_remove(daemon, is_failed_deploy=is_failed_deploy)
service_name = daemon.service_name()
+ daemon_name = daemon.name()
# remove config for dashboard nvmeof gateways if any
- ret, out, err = self.mgr.mon_command({
+ ret, _, err = self.mgr.mon_command({
'prefix': 'dashboard nvmeof-gateway-rm',
'name': service_name,
+ 'daemon_name': daemon_name
})
if not ret:
- logger.info(f'{daemon.hostname} removed from nvmeof gateways dashboard config')
+ logger.info(f'{daemon_name} removed from nvmeof gateways dashboard config')
spec = cast(NvmeofServiceSpec,
self.mgr.spec_store.all_specs.get(daemon.service_name(), None))
if not spec:
- self.mgr.log.error(f'Failed to find spec for {daemon.name()}')
+ self.mgr.log.error(f'Failed to find spec for {daemon_name}')
return
pool = spec.pool
group = spec.group
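
Finally, the nvmeof gateway and discovery addresses can come from per-host maps in the spec; the precedence is the per-host entry, then the spec-wide address, then the host IP, and the chosen values are logged. A one-liner illustrating the fallback chain with hypothetical inputs:

    from typing import Dict, Optional

    def resolve_addr(host: str, addr_map: Optional[Dict[str, str]],
                     spec_addr: Optional[str], host_ip: str) -> str:
        """Per-host map entry wins, then the spec-wide address, then the host IP."""
        return (addr_map or {}).get(host) or spec_addr or host_ip

    # resolve_addr('node2', {'node1': '10.0.1.1'}, None, '10.0.2.7') -> '10.0.2.7'
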