author     Adam King <47704447+adk3798@users.noreply.github.com>  2024-08-20 14:35:44 +0200
committer  GitHub <noreply@github.com>  2024-08-20 14:35:44 +0200
commit     a61eabead346db16cdadea7f7c502f8d1ad1b3b9 (patch)
tree       68ed072523c2a13fc61eddade6349979126beca6
parent     Merge pull request #58860 from adk3798/cephadm-nvmeof-require-group (diff)
parent     mgr/cephadm: adding oauth2-proxy cephadm service (diff)
download   ceph-a61eabead346db16cdadea7f7c502f8d1ad1b3b9.tar.xz
           ceph-a61eabead346db16cdadea7f7c502f8d1ad1b3b9.zip
Merge pull request #58460 from rkachach/fix_issue_oauth2_support
adding support for SSO based on oauth2-proxy

Reviewed-by: Adam King <adking@redhat.com>
-rw-r--r--  doc/cephadm/services/index.rst                                                  |   1
-rw-r--r--  doc/cephadm/services/oauth2-proxy.rst                                           | 139
-rwxr-xr-x  src/cephadm/cephadm.py                                                          |  11
-rw-r--r--  src/cephadm/cephadmlib/constants.py                                             |   1
-rw-r--r--  src/cephadm/cephadmlib/daemons/__init__.py                                      |   2
-rw-r--r--  src/cephadm/cephadmlib/daemons/oauth2_proxy.py                                  | 165
-rw-r--r--  src/pybind/mgr/cephadm/http_server.py                                           |   4
-rw-r--r--  src/pybind/mgr/cephadm/inventory.py                                             |  11
-rw-r--r--  src/pybind/mgr/cephadm/module.py                                                |  89
-rw-r--r--  src/pybind/mgr/cephadm/services/cephadmservice.py                               |   2
-rw-r--r--  src/pybind/mgr/cephadm/services/mgmt_gateway.py                                 |  41
-rw-r--r--  src/pybind/mgr/cephadm/services/monitoring.py                                   | 190
-rw-r--r--  src/pybind/mgr/cephadm/services/oauth2_proxy.py                                 |  86
-rw-r--r--  src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2               |  16
-rw-r--r--  src/pybind/mgr/cephadm/templates/services/mgmt-gateway/external_server.conf.j2 | 101
-rw-r--r--  src/pybind/mgr/cephadm/templates/services/mgmt-gateway/internal_server.conf.j2 |   5
-rw-r--r--  src/pybind/mgr/cephadm/templates/services/mgmt-gateway/nginx.conf.j2           |   9
-rw-r--r--  src/pybind/mgr/cephadm/templates/services/oauth2-proxy/oauth2-proxy.conf.j2    |  37
-rw-r--r--  src/pybind/mgr/cephadm/tests/fixtures.py                                        |   1
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_cephadm.py                                    |   4
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_services.py                                   | 452
-rw-r--r--  src/pybind/mgr/orchestrator/_interface.py                                       |   8
-rw-r--r--  src/pybind/mgr/orchestrator/module.py                                           |  24
-rw-r--r--  src/python-common/ceph/deployment/service_spec.py                               | 124
24 files changed, 1376 insertions, 147 deletions
diff --git a/doc/cephadm/services/index.rst b/doc/cephadm/services/index.rst
index 021162af81b..86a3fad8ab3 100644
--- a/doc/cephadm/services/index.rst
+++ b/doc/cephadm/services/index.rst
@@ -21,6 +21,7 @@ for details on individual services:
tracing
smb
mgmt-gateway
+ oauth2-proxy
Service Status
==============
diff --git a/doc/cephadm/services/oauth2-proxy.rst b/doc/cephadm/services/oauth2-proxy.rst
new file mode 100644
index 00000000000..d8e644a7fe7
--- /dev/null
+++ b/doc/cephadm/services/oauth2-proxy.rst
@@ -0,0 +1,139 @@
+.. _deploy-cephadm-oauth2-proxy:
+
+==================
+OAuth2 Proxy
+==================
+
+Deploying oauth2-proxy
+======================
+
+Starting with the Squid release, the `oauth2-proxy` service provides an advanced method
+for managing authentication and access control for Ceph applications. This service integrates
+with external Identity Providers (IDPs) to provide secure, flexible authentication via the
+OIDC (OpenID Connect) protocol. `oauth2-proxy` acts as an authentication gateway, ensuring that
+access to Ceph applications, including the Ceph Dashboard and monitoring stack, is tightly controlled.
+
+To deploy the `oauth2-proxy` service, use the following command:
+
+.. prompt:: bash #
+
+ ceph orch apply oauth2-proxy [--placement ...] ...
+
+Once applied, `cephadm` will re-configure the necessary components to use `oauth2-proxy` for authentication,
+thereby securing access to all Ceph applications. The service will handle login flows, redirect users
+to the appropriate IDP for authentication, and manage session tokens to facilitate seamless user access.
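+
+To verify that the service has been scheduled and that its daemon(s) are running, you can use, for example:
+
+.. prompt:: bash #
+
+ ceph orch ls oauth2-proxy
+ ceph orch ps --daemon-type oauth2-proxy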
+
+
+Benefits of the oauth2-proxy service
+====================================
+* ``Enhanced Security``: Provides robust authentication through integration with external IDPs using the OIDC protocol.
+* ``Seamless SSO``: Enables seamless single sign-on (SSO) across all Ceph applications, improving user access control.
+* ``Centralized Authentication``: Centralizes authentication management, reducing complexity and improving control over access.
+
+
+Security enhancements
+=====================
+
+The `oauth2-proxy` service ensures that all access to Ceph applications is authenticated, preventing unauthorized users from
+accessing sensitive information. Since it makes use of the `oauth2-proxy` open source project, this service integrates
+easily with a variety of `external IDPs <https://oauth2-proxy.github.io/oauth2-proxy/configuration/providers/>`_ to provide
+a secure and flexible authentication mechanism.
+
+
+High availability
+==============================
+`oauth2-proxy` is designed to integrate with an external IDP, so login high availability is not the responsibility of this
+service. In the Squid release, high availability for the `oauth2-proxy` service itself is not yet supported.
+
+
+Accessing services with oauth2-proxy
+====================================
+
+After deploying `oauth2-proxy`, access to Ceph applications will require authentication through the configured IDP. Users will
+be redirected to the IDP for login and then returned to the requested application. This setup ensures secure access and integrates
+seamlessly with the Ceph management stack.
+
+
+Service Specification
+=====================
+
+Before deploying the `oauth2-proxy` service, remember to first deploy the `mgmt-gateway` service with the `--enable_auth` flag turned on, i.e.:
+
+.. prompt:: bash #
+
+ ceph orch apply mgmt-gateway --enable_auth=true
+
+An `oauth2-proxy` service can be applied using a specification. An example in YAML follows:
+
+.. code-block:: yaml
+
+ service_type: oauth2-proxy
+ service_id: auth-proxy
+ placement:
+ hosts:
+ - ceph0
+ spec:
+ https_address: "0.0.0.0:4180"
+ provider_display_name: "My OIDC Provider"
+ client_id: "your-client-id"
+ oidc_issuer_url: "http://192.168.100.1:5556/dex"
+ client_secret: "your-client-secret"
+ cookie_secret: "your-cookie-secret"
+ ssl_certificate: |
+ -----BEGIN CERTIFICATE-----
+ MIIDtTCCAp2gAwIBAgIYMC4xNzc1NDQxNjEzMzc2MjMyXzxvQ7EcMA0GCSqGSIb3
+ DQEBCwUAMG0xCzAJBgNVBAYTAlVTMQ0wCwYDVQQIDARVdGFoMRcwFQYDVQQHDA5T
+ [...]
+ -----END CERTIFICATE-----
+ ssl_certificate_key: |
+ -----BEGIN PRIVATE KEY-----
+ MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5jdYbjtNTAKW4
+ /CwQr/7wOiLGzVxChn3mmCIF3DwbL/qvTFTX2d8bDf6LjGwLYloXHscRfxszX/4h
+ [...]
+ -----END PRIVATE KEY-----
+
+Fields specific to the ``spec`` section of the `oauth2-proxy` service are described below. A more detailed
+description of the fields can be found in the `oauth2-proxy <https://oauth2-proxy.github.io/oauth2-proxy/>`_
+project documentation.
+
+
+.. py:currentmodule:: ceph.deployment.service_spec
+
+.. autoclass:: OAuth2ProxySpec
+ :members:
+
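+The ``cookie_secret`` is used by `oauth2-proxy` to encrypt session cookies; if it is left unset in the spec, cephadm
+generates a random one automatically. If you prefer to supply your own value (for example, to keep it in an external
+secret store), the snippet below is a minimal sketch of one way to produce a suitable value, mirroring what cephadm
+generates internally:
+
+.. code-block:: python
+
+ import base64
+ import os
+
+ # 32 random bytes, URL-safe base64 encoded without padding
+ print(base64.urlsafe_b64encode(os.urandom(32)).rstrip(b'=').decode('utf-8'))
+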
+The specification can then be applied by running the following command. Once the service becomes available, cephadm will automatically redeploy
+the `mgmt-gateway` service, adapting its configuration to redirect authentication to the newly deployed `oauth2-proxy` service.
+
+.. prompt:: bash #
+
+ ceph orch apply -i oauth2-proxy.yaml
+
+
+Limitations
+===========
+
+A non-exhaustive list of important limitations for the `oauth2-proxy` service follows:
+
+* High-availability configurations for `oauth2-proxy` itself are not supported.
+* Proper configuration of the IDP and OAuth2 parameters is crucial to avoid authentication failures. Misconfigurations can lead to access issues.
+
+
+Default images
+~~~~~~~~~~~~~~
+
+The `oauth2-proxy` service typically uses the default container image:
+
+::
+
+ DEFAULT_OAUTH2_PROXY = 'quay.io/oauth2-proxy/oauth2-proxy:v7.6.0'
+
+Admins can specify the image to be used by changing the `container_image_oauth2_proxy` cephadm module option. If any daemons are
+already running, they must be redeployed for the new image to take effect.
+
+For example:
+
+.. code-block:: bash
+
+ ceph config set mgr mgr/cephadm/container_image_oauth2_proxy <new-oauth2-proxy-image>
+ ceph orch redeploy oauth2-proxy
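+
+To check which image is currently configured, you can query the module option, e.g.:
+
+.. code-block:: bash
+
+ ceph config get mgr mgr/cephadm/container_image_oauth2_proxy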
diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py
index 7450d0945ed..e71addf7bfa 100755
--- a/src/cephadm/cephadm.py
+++ b/src/cephadm/cephadm.py
@@ -178,6 +178,7 @@ from cephadmlib.daemons import (
SMB,
SNMPGateway,
MgmtGateway,
+ OAuth2Proxy,
Tracing,
NodeProxy,
)
@@ -230,6 +231,7 @@ def get_supported_daemons():
supported_daemons.append(CephadmAgent.daemon_type)
supported_daemons.append(SNMPGateway.daemon_type)
supported_daemons.append(MgmtGateway.daemon_type)
+ supported_daemons.append(OAuth2Proxy.daemon_type)
supported_daemons.extend(Tracing.components)
supported_daemons.append(NodeProxy.daemon_type)
supported_daemons.append(SMB.daemon_type)
@@ -468,6 +470,8 @@ def update_default_image(ctx: CephadmContext) -> None:
ctx.image = SNMPGateway.default_image
if type_ == MgmtGateway.daemon_type:
ctx.image = MgmtGateway.default_image
+ if type_ == OAuth2Proxy.daemon_type:
+ ctx.image = OAuth2Proxy.default_image
if type_ == CephNvmeof.daemon_type:
ctx.image = CephNvmeof.default_image
if type_ in Tracing.components:
@@ -864,6 +868,10 @@ def create_daemon_dirs(
cg = MgmtGateway.init(ctx, fsid, ident.daemon_id)
cg.create_daemon_dirs(data_dir, uid, gid)
+ elif daemon_type == OAuth2Proxy.daemon_type:
+ co = OAuth2Proxy.init(ctx, fsid, ident.daemon_id)
+ co.create_daemon_dirs(data_dir, uid, gid)
+
elif daemon_type == NodeProxy.daemon_type:
node_proxy = NodeProxy.init(ctx, fsid, ident.daemon_id)
node_proxy.create_daemon_dirs(data_dir, uid, gid)
@@ -3605,6 +3613,9 @@ def list_daemons(
elif daemon_type == MgmtGateway.daemon_type:
version = MgmtGateway.get_version(ctx, container_id)
seen_versions[image_id] = version
+ elif daemon_type == OAuth2Proxy.daemon_type:
+ version = OAuth2Proxy.get_version(ctx, container_id)
+ seen_versions[image_id] = version
else:
logger.warning('version for unknown daemon type %s' % daemon_type)
else:
diff --git a/src/cephadm/cephadmlib/constants.py b/src/cephadm/cephadmlib/constants.py
index 06163d06489..b104a98bc56 100644
--- a/src/cephadm/cephadmlib/constants.py
+++ b/src/cephadm/cephadmlib/constants.py
@@ -20,6 +20,7 @@ DEFAULT_JAEGER_AGENT_IMAGE = 'quay.io/jaegertracing/jaeger-agent:1.29'
DEFAULT_JAEGER_QUERY_IMAGE = 'quay.io/jaegertracing/jaeger-query:1.29'
DEFAULT_SMB_IMAGE = 'quay.io/samba.org/samba-server:devbuilds-centos-amd64'
DEFAULT_NGINX_IMAGE = 'quay.io/ceph/nginx:1.26.1'
+DEFAULT_OAUTH2_PROXY_IMAGE = 'quay.io/oauth2-proxy/oauth2-proxy:v7.6.0'
DEFAULT_REGISTRY = 'docker.io' # normalize unqualified digests to this
# ------------------------------------------------------------------------------
diff --git a/src/cephadm/cephadmlib/daemons/__init__.py b/src/cephadm/cephadmlib/daemons/__init__.py
index 279f6f1a898..bdf2c532e02 100644
--- a/src/cephadm/cephadmlib/daemons/__init__.py
+++ b/src/cephadm/cephadmlib/daemons/__init__.py
@@ -10,6 +10,7 @@ from .snmp import SNMPGateway
from .tracing import Tracing
from .node_proxy import NodeProxy
from .mgmt_gateway import MgmtGateway
+from .oauth2_proxy import OAuth2Proxy
__all__ = [
'Ceph',
@@ -27,4 +28,5 @@ __all__ = [
'Tracing',
'NodeProxy',
'MgmtGateway',
+ 'OAuth2Proxy',
]
diff --git a/src/cephadm/cephadmlib/daemons/oauth2_proxy.py b/src/cephadm/cephadmlib/daemons/oauth2_proxy.py
new file mode 100644
index 00000000000..2b61df9d2e7
--- /dev/null
+++ b/src/cephadm/cephadmlib/daemons/oauth2_proxy.py
@@ -0,0 +1,165 @@
+import logging
+import os
+from typing import Dict, List, Tuple, Optional
+import re
+
+from ..call_wrappers import call, CallVerbosity
+from ..container_daemon_form import ContainerDaemonForm, daemon_to_container
+from ..container_types import CephContainer
+from ..context import CephadmContext
+from ..context_getters import fetch_configs
+from ..daemon_form import register as register_daemon_form
+from ..daemon_identity import DaemonIdentity
+from ..deployment_utils import to_deployment_container
+from ..constants import DEFAULT_OAUTH2_PROXY_IMAGE, UID_NOBODY, GID_NOGROUP
+from ..data_utils import dict_get, is_fsid
+from ..file_utils import populate_files, makedirs, recursive_chown
+from ..exceptions import Error
+
+
+logger = logging.getLogger()
+
+
+@register_daemon_form
+class OAuth2Proxy(ContainerDaemonForm):
+ """Define the configs for the oauth2-proxy container"""
+
+ default_image = DEFAULT_OAUTH2_PROXY_IMAGE
+ daemon_type = 'oauth2-proxy'
+ required_files = [
+ 'oauth2-proxy.conf',
+ 'oauth2-proxy.crt',
+ 'oauth2-proxy.key',
+ ]
+
+ @classmethod
+ def for_daemon_type(cls, daemon_type: str) -> bool:
+ return cls.daemon_type == daemon_type
+
+ def __init__(
+ self,
+ ctx: CephadmContext,
+ fsid: str,
+ daemon_id: str,
+ config_json: Dict,
+ image: str = DEFAULT_OAUTH2_PROXY_IMAGE,
+ ):
+ self.ctx = ctx
+ self.fsid = fsid
+ self.daemon_id = daemon_id
+ self.image = image
+ self.files = dict_get(config_json, 'files', {})
+ self.validate()
+
+ @classmethod
+ def init(
+ cls, ctx: CephadmContext, fsid: str, daemon_id: str
+ ) -> 'OAuth2Proxy':
+ return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image)
+
+ @classmethod
+ def create(
+ cls, ctx: CephadmContext, ident: DaemonIdentity
+ ) -> 'OAuth2Proxy':
+ return cls.init(ctx, ident.fsid, ident.daemon_id)
+
+ @property
+ def identity(self) -> DaemonIdentity:
+ return DaemonIdentity(self.fsid, self.daemon_type, self.daemon_id)
+
+ def container(self, ctx: CephadmContext) -> CephContainer:
+ ctr = daemon_to_container(ctx, self)
+ return to_deployment_container(ctx, ctr)
+
+ def uid_gid(self, ctx: CephadmContext) -> Tuple[int, int]:
+ return UID_NOBODY, GID_NOGROUP
+
+ def get_daemon_args(self) -> List[str]:
+ return [
+ '--config=/etc/oauth2-proxy.conf',
+ '--tls-cert-file=/etc/oauth2-proxy.crt',
+ '--tls-key-file=/etc/oauth2-proxy.key',
+ ]
+
+ def default_entrypoint(self) -> str:
+ return ''
+
+ def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None:
+ """Create files under the container data dir"""
+ if not os.path.isdir(data_dir):
+ raise OSError('data_dir is not a directory: %s' % (data_dir))
+ logger.info('Writing oauth2-proxy config...')
+ config_dir = os.path.join(data_dir, 'etc/')
+ makedirs(config_dir, uid, gid, 0o755)
+ recursive_chown(config_dir, uid, gid)
+ populate_files(config_dir, self.files, uid, gid)
+
+ def validate(self) -> None:
+ if not is_fsid(self.fsid):
+ raise Error(f'not an fsid: {self.fsid}')
+ if not self.daemon_id:
+ raise Error(f'invalid daemon_id: {self.daemon_id}')
+ if not self.image:
+ raise Error(f'invalid image: {self.image}')
+
+ # check for the required files
+ if self.required_files:
+ for fname in self.required_files:
+ if fname not in self.files:
+ raise Error(
+ 'required file missing from config-json: %s' % fname
+ )
+
+ @staticmethod
+ def get_version(ctx: CephadmContext, container_id: str) -> Optional[str]:
+ """Return the version of the oauth2-proxy container"""
+ version = None
+ out, err, code = call(
+ ctx,
+ [
+ ctx.container_engine.path,
+ 'exec',
+ container_id,
+ 'oauth2-proxy',
+ '--version',
+ ],
+ verbosity=CallVerbosity.QUIET,
+ )
+ if code == 0:
+ match = re.search(r'oauth2-proxy (v\d+\.\d+\.\d+)', out)
+ if match:
+ version = match.group(1)
+ return version
+
+ def customize_container_mounts(
+ self, ctx: CephadmContext, mounts: Dict[str, str]
+ ) -> None:
+ data_dir = self.identity.data_dir(ctx.data_dir)
+ mounts.update(
+ {
+ os.path.join(
+ data_dir, 'etc/oauth2-proxy.conf'
+ ): '/etc/oauth2-proxy.conf:Z',
+ os.path.join(
+ data_dir, 'etc/oauth2-proxy.crt'
+ ): '/etc/oauth2-proxy.crt:Z',
+ os.path.join(
+ data_dir, 'etc/oauth2-proxy.key'
+ ): '/etc/oauth2-proxy.key:Z',
+ }
+ )
+
+ def customize_container_args(
+ self, ctx: CephadmContext, args: List[str]
+ ) -> None:
+ uid, _ = self.uid_gid(ctx)
+ other_args = [
+ '--user',
+ str(uid),
+ ]
+ args.extend(other_args)
+
+ def customize_process_args(
+ self, ctx: CephadmContext, args: List[str]
+ ) -> None:
+ args.extend(self.get_daemon_args())
diff --git a/src/pybind/mgr/cephadm/http_server.py b/src/pybind/mgr/cephadm/http_server.py
index 7ddce2e8be2..efeb54e8a24 100644
--- a/src/pybind/mgr/cephadm/http_server.py
+++ b/src/pybind/mgr/cephadm/http_server.py
@@ -31,7 +31,7 @@ class CephadmHttpServer(threading.Thread):
self.service_discovery = ServiceDiscovery(mgr)
self.cherrypy_shutdown_event = threading.Event()
self._service_discovery_port = self.mgr.service_discovery_port
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, _, _ = self.mgr._get_security_config()
self.security_enabled = security_enabled
super().__init__(target=self.run)
@@ -50,7 +50,7 @@ class CephadmHttpServer(threading.Thread):
def config_update(self) -> None:
self.service_discovery_port = self.mgr.service_discovery_port
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, _, _ = self.mgr._get_security_config()
if self.security_enabled != security_enabled:
self.security_enabled = security_enabled
self.restart()
diff --git a/src/pybind/mgr/cephadm/inventory.py b/src/pybind/mgr/cephadm/inventory.py
index f909fe375e1..8319d9f762a 100644
--- a/src/pybind/mgr/cephadm/inventory.py
+++ b/src/pybind/mgr/cephadm/inventory.py
@@ -144,6 +144,15 @@ class Inventory:
return stored_name
return host
+ def get_fqdn(self, hname: str) -> Optional[str]:
+ if hname in self._inventory:
+ if hname in self._all_known_names:
+ all_names = self._all_known_names[hname] # [hostname, shortname, fqdn]
+ if all_names:
+ return all_names[2]
+ return hname # names info is not yet available!
+ return None
+
def update_known_hostnames(self, hostname: str, shortname: str, fqdn: str) -> None:
for hname in [hostname, shortname, fqdn]:
# if we know the host by any of the names, store the full set of names
@@ -1953,6 +1962,7 @@ class CertKeyStore():
'nvmeof_client_cert': {}, # service-name -> cert
'nvmeof_root_ca_cert': {}, # service-name -> cert
'mgmt_gw_cert': Cert(), # cert
+ 'oauth2_proxy_cert': Cert(), # cert
'cephadm_root_ca_cert': Cert(), # cert
'grafana_cert': {}, # host -> cert
}
@@ -1961,6 +1971,7 @@ class CertKeyStore():
# so there is no need to store a separate key
self.known_keys = {
'mgmt_gw_key': PrivKey(), # cert
+ 'oauth2_proxy_key': PrivKey(), # cert
'cephadm_root_ca_key': PrivKey(), # cert
'grafana_key': {}, # host -> key
'iscsi_ssl_key': {}, # service-name -> key
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 392c62ae9ac..75fc7c4f04e 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -6,7 +6,6 @@ import ipaddress
import logging
import re
import shlex
-import socket
from collections import defaultdict
from configparser import ConfigParser
from contextlib import contextmanager
@@ -35,7 +34,8 @@ from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.service_spec import \
ServiceSpec, PlacementSpec, \
HostPlacementSpec, IngressSpec, \
- TunedProfileSpec, IscsiServiceSpec
+ TunedProfileSpec, IscsiServiceSpec, \
+ MgmtGatewaySpec
from ceph.utils import str_to_datetime, datetime_to_str, datetime_now
from cephadm.serve import CephadmServe
from cephadm.services.cephadmservice import CephadmDaemonDeploySpec
@@ -71,6 +71,7 @@ from .services.container import CustomContainerService
from .services.iscsi import IscsiService
from .services.nvmeof import NvmeofService
from .services.mgmt_gateway import MgmtGatewayService
+from .services.oauth2_proxy import OAuth2ProxyService
from .services.nfs import NFSService
from .services.osd import OSDRemovalQueue, OSDService, OSD, NotFoundError
from .services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
@@ -144,6 +145,7 @@ DEFAULT_ELASTICSEARCH_IMAGE = 'quay.io/omrizeneva/elasticsearch:6.8.23'
DEFAULT_JAEGER_COLLECTOR_IMAGE = 'quay.io/jaegertracing/jaeger-collector:1.29'
DEFAULT_JAEGER_AGENT_IMAGE = 'quay.io/jaegertracing/jaeger-agent:1.29'
DEFAULT_NGINX_IMAGE = 'quay.io/ceph/nginx:1.26.1'
+DEFAULT_OAUTH2_PROXY = 'quay.io/oauth2-proxy/oauth2-proxy:v7.6.0'
DEFAULT_JAEGER_QUERY_IMAGE = 'quay.io/jaegertracing/jaeger-query:1.29'
DEFAULT_SAMBA_IMAGE = 'quay.io/samba.org/samba-server:devbuilds-centos-amd64'
# ------------------------------------------------------------------------------
@@ -288,6 +290,11 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
desc='Nginx container image',
),
Option(
+ 'container_image_oauth2_proxy',
+ default=DEFAULT_OAUTH2_PROXY,
+ desc='oauth2-proxy container image',
+ ),
+ Option(
'container_image_elasticsearch',
default=DEFAULT_ELASTICSEARCH_IMAGE,
desc='elasticsearch container image',
@@ -572,6 +579,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
self.container_image_keepalived = ''
self.container_image_snmp_gateway = ''
self.container_image_nginx = ''
+ self.container_image_oauth2_proxy = ''
self.container_image_elasticsearch = ''
self.container_image_jaeger_agent = ''
self.container_image_jaeger_collector = ''
@@ -721,6 +729,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
SMBService,
SNMPGatewayService,
MgmtGatewayService,
+ OAuth2ProxyService,
]
# https://github.com/python/mypy/issues/8993
@@ -778,16 +787,38 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
If the FQDN can't be resolved, the address from the inventory will
be returned instead.
"""
- # TODO(redo): get fqdn from the inventory
- addr = self.inventory.get_addr(hostname)
- return socket.getfqdn(addr)
+ return self.inventory.get_fqdn(hostname) or self.inventory.get_addr(hostname)
- def _get_security_config(self) -> Tuple[bool, bool]:
- # TODO(redo): enable when oauth2-proxy code is active
- # oauth2_proxy_enabled = len(self.mgr.cache.get_daemons_by_service('oauth2-proxy')) > 0
+ def _get_security_config(self) -> Tuple[bool, bool, bool]:
+ oauth2_proxy_enabled = len(self.cache.get_daemons_by_service('oauth2-proxy')) > 0
mgmt_gw_enabled = len(self.cache.get_daemons_by_service('mgmt-gateway')) > 0
security_enabled = self.secure_monitoring_stack or mgmt_gw_enabled
- return security_enabled, mgmt_gw_enabled
+ return security_enabled, mgmt_gw_enabled, oauth2_proxy_enabled
+
+ def get_mgmt_gw_internal_endpoint(self) -> Optional[str]:
+ mgmt_gw_daemons = self.cache.get_daemons_by_service('mgmt-gateway')
+ if not mgmt_gw_daemons:
+ return None
+
+ dd = mgmt_gw_daemons[0]
+ assert dd.hostname is not None
+ mgmt_gw_addr = self.get_fqdn(dd.hostname)
+ mgmt_gw_internal_endpoint = build_url(scheme='https', host=mgmt_gw_addr, port=MgmtGatewayService.INTERNAL_SERVICE_PORT)
+ return f'{mgmt_gw_internal_endpoint}/internal'
+
+ def get_mgmt_gw_external_endpoint(self) -> Optional[str]:
+ mgmt_gw_daemons = self.cache.get_daemons_by_service('mgmt-gateway')
+ if not mgmt_gw_daemons:
+ return None
+
+ dd = mgmt_gw_daemons[0]
+ assert dd.hostname is not None
+ mgmt_gw_port = dd.ports[0] if dd.ports else None
+ mgmt_gw_addr = self.get_fqdn(dd.hostname)
+ mgmt_gw_spec = cast(MgmtGatewaySpec, self.spec_store['mgmt-gateway'].spec)
+ protocol = 'http' if mgmt_gw_spec.disable_https else 'https'
+ mgmt_gw_external_endpoint = build_url(scheme=protocol, host=mgmt_gw_addr, port=mgmt_gw_port)
+ return mgmt_gw_external_endpoint
def _get_cephadm_binary_path(self) -> str:
import hashlib
@@ -948,7 +979,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
'mon', 'crash', 'ceph-exporter', 'node-proxy',
'prometheus', 'node-exporter', 'grafana', 'alertmanager',
'container', 'agent', 'snmp-gateway', 'loki', 'promtail',
- 'elasticsearch', 'jaeger-collector', 'jaeger-agent', 'jaeger-query', 'mgmt-gateway'
+ 'elasticsearch', 'jaeger-collector', 'jaeger-agent', 'jaeger-query', 'mgmt-gateway', 'oauth2-proxy'
]
if forcename:
if len([d for d in existing if d.daemon_id == forcename]):
@@ -1681,6 +1712,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
'promtail': self.container_image_promtail,
'snmp-gateway': self.container_image_snmp_gateway,
'mgmt-gateway': self.container_image_nginx,
+ 'oauth2-proxy': self.container_image_oauth2_proxy,
# The image can't be resolved here, the necessary information
# is only available when a container is deployed (given
# via spec).
@@ -2958,20 +2990,21 @@ Then run the following:
# add dependency on ceph-exporter daemons
deps += [d.name() for d in self.cache.get_daemons_by_service('ceph-exporter')]
deps += [d.name() for d in self.cache.get_daemons_by_service('mgmt-gateway')]
- security_enabled, _ = self._get_security_config()
+ deps += [d.name() for d in self.cache.get_daemons_by_service('oauth2-proxy')]
+ security_enabled, _, _ = self._get_security_config()
if security_enabled:
if prometheus_user and prometheus_password:
deps.append(f'{hash(prometheus_user + prometheus_password)}')
if alertmanager_user and alertmanager_password:
deps.append(f'{hash(alertmanager_user + alertmanager_password)}')
elif daemon_type == 'grafana':
- deps += get_daemon_names(['prometheus', 'loki', 'mgmt-gateway'])
- security_enabled, _ = self._get_security_config()
+ deps += get_daemon_names(['prometheus', 'loki', 'mgmt-gateway', 'oauth2-proxy'])
+ security_enabled, _, _ = self._get_security_config()
if security_enabled and prometheus_user and prometheus_password:
deps.append(f'{hash(prometheus_user + prometheus_password)}')
elif daemon_type == 'alertmanager':
- deps += get_daemon_names(['mgr', 'alertmanager', 'snmp-gateway', 'mgmt-gateway'])
- security_enabled, _ = self._get_security_config()
+ deps += get_daemon_names(['mgr', 'alertmanager', 'snmp-gateway', 'mgmt-gateway', 'oauth2-proxy'])
+ security_enabled, _, _ = self._get_security_config()
if security_enabled and alertmanager_user and alertmanager_password:
deps.append(f'{hash(alertmanager_user + alertmanager_password)}')
elif daemon_type == 'promtail':
@@ -2987,7 +3020,7 @@ Then run the following:
elif daemon_type == 'mgmt-gateway':
# url_prefix for monitoring daemons depends on the presence of mgmt-gateway
# while dashboard urls depend on the mgr daemons
- deps += get_daemon_names(['mgr', 'grafana', 'prometheus', 'alertmanager'])
+ deps += get_daemon_names(['mgr', 'grafana', 'prometheus', 'alertmanager', 'oauth2-proxy'])
else:
# this daemon type doesn't need deps mgmt
pass
@@ -3109,17 +3142,19 @@ Then run the following:
@handle_orch_error
def generate_certificates(self, module_name: str) -> Optional[Dict[str, str]]:
- import socket
supported_moduels = ['dashboard', 'prometheus']
if module_name not in supported_moduels:
raise OrchestratorError(f'Unsupported modlue {module_name}. Supported moduels are: {supported_moduels}')
- host_fqdns = [socket.getfqdn(self.get_hostname())]
- node_ip = self.get_mgr_ip()
+ host_fqdns = []
+ fqdn = self.inventory.get_fqdn(self.get_hostname())
+ if fqdn:
+ host_fqdns.append(fqdn)
+
if module_name == 'dashboard':
host_fqdns.append('dashboard_servers')
- cert, key = self.cert_mgr.generate_cert(host_fqdns, node_ip)
+ cert, key = self.cert_mgr.generate_cert(host_fqdns, self.get_mgr_ip())
return {'cert': cert, 'key': key}
@handle_orch_error
@@ -3176,7 +3211,7 @@ Then run the following:
@handle_orch_error
def get_prometheus_access_info(self) -> Dict[str, str]:
- security_enabled, _ = self._get_security_config()
+ security_enabled, _, _ = self._get_security_config()
if not security_enabled:
return {}
user, password = self._get_prometheus_credentials()
@@ -3186,7 +3221,7 @@ Then run the following:
@handle_orch_error
def get_alertmanager_access_info(self) -> Dict[str, str]:
- security_enabled, _ = self._get_security_config()
+ security_enabled, _, _ = self._get_security_config()
if not security_enabled:
return {}
user, password = self._get_alertmanager_credentials()
@@ -3421,6 +3456,7 @@ Then run the following:
'container': PlacementSpec(count=1),
'snmp-gateway': PlacementSpec(count=1),
'mgmt-gateway': PlacementSpec(count=1),
+ 'oauth2-proxy': PlacementSpec(count=1),
'elasticsearch': PlacementSpec(count=1),
'jaeger-agent': PlacementSpec(host_pattern='*'),
'jaeger-collector': PlacementSpec(count=1),
@@ -3437,6 +3473,11 @@ Then run the following:
host_count = len(self.inventory.keys())
max_count = self.max_count_per_host
+ if spec.service_type == 'oauth2-proxy':
+ mgmt_gw_daemons = self.cache.get_daemons_by_service('mgmt-gateway')
+ if not mgmt_gw_daemons:
+ raise OrchestratorError("The 'oauth2-proxy' service depends on the 'mgmt-gateway' service, but it is not configured.")
+
if spec.placement.count is not None:
if spec.service_type in ['mon', 'mgr']:
if spec.placement.count > max(5, host_count):
@@ -3564,6 +3605,10 @@ Then run the following:
return self._apply(spec)
@handle_orch_error
+ def apply_oauth2_proxy(self, spec: ServiceSpec) -> str:
+ return self._apply(spec)
+
+ @handle_orch_error
def set_unmanaged(self, service_name: str, value: bool) -> str:
return self.spec_store.set_unmanaged(service_name, value)
diff --git a/src/pybind/mgr/cephadm/services/cephadmservice.py b/src/pybind/mgr/cephadm/services/cephadmservice.py
index d4b9ea262bb..8a41d3a54c2 100644
--- a/src/pybind/mgr/cephadm/services/cephadmservice.py
+++ b/src/pybind/mgr/cephadm/services/cephadmservice.py
@@ -1273,7 +1273,7 @@ class CephExporterService(CephService):
if spec.stats_period:
exporter_config.update({'stats-period': f'{spec.stats_period}'})
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, _, _ = self.mgr._get_security_config()
if security_enabled:
exporter_config.update({'https_enabled': True})
crt, key = self.get_certificates(daemon_spec)
diff --git a/src/pybind/mgr/cephadm/services/mgmt_gateway.py b/src/pybind/mgr/cephadm/services/mgmt_gateway.py
index 2470b7de4cb..1943264025e 100644
--- a/src/pybind/mgr/cephadm/services/mgmt_gateway.py
+++ b/src/pybind/mgr/cephadm/services/mgmt_gateway.py
@@ -1,44 +1,14 @@
import logging
-from typing import TYPE_CHECKING, List, Any, Tuple, Dict, cast, Optional
+from typing import List, Any, Tuple, Dict, cast, Optional
from orchestrator import DaemonDescription
from ceph.deployment.service_spec import MgmtGatewaySpec, GrafanaSpec
from cephadm.services.cephadmservice import CephadmService, CephadmDaemonDeploySpec, get_dashboard_endpoints
-from mgr_util import build_url
-if TYPE_CHECKING:
- from cephadm.module import CephadmOrchestrator
logger = logging.getLogger(__name__)
-def get_mgmt_gw_internal_endpoint(mgr: "CephadmOrchestrator") -> Optional[str]:
- mgmt_gw_daemons = mgr.cache.get_daemons_by_service('mgmt-gateway')
- if not mgmt_gw_daemons:
- return None
-
- dd = mgmt_gw_daemons[0]
- assert dd.hostname is not None
- mgmt_gw_addr = mgr.get_fqdn(dd.hostname)
- mgmt_gw_internal_endpoint = build_url(scheme='https', host=mgmt_gw_addr, port=MgmtGatewayService.INTERNAL_SERVICE_PORT)
- return f'{mgmt_gw_internal_endpoint}/internal'
-
-
-def get_mgmt_gw_external_endpoint(mgr: "CephadmOrchestrator") -> Optional[str]:
- mgmt_gw_daemons = mgr.cache.get_daemons_by_service('mgmt-gateway')
- if not mgmt_gw_daemons:
- return None
-
- dd = mgmt_gw_daemons[0]
- assert dd.hostname is not None
- mgmt_gw_port = dd.ports[0] if dd.ports else None
- mgmt_gw_addr = mgr.get_fqdn(dd.hostname)
- mgmt_gw_spec = cast(MgmtGatewaySpec, mgr.spec_store['mgmt-gateway'].spec)
- protocol = 'http' if mgmt_gw_spec.disable_https else 'https'
- mgmt_gw_external_endpoint = build_url(scheme=protocol, host=mgmt_gw_addr, port=mgmt_gw_port)
- return mgmt_gw_external_endpoint
-
-
class MgmtGatewayService(CephadmService):
TYPE = 'mgmt-gateway'
SVC_TEMPLATE_PATH = 'services/mgmt-gateway/nginx.conf.j2'
@@ -66,6 +36,11 @@ class MgmtGatewayService(CephadmService):
# if empty list provided, return empty Daemon Desc
return DaemonDescription()
+ def get_oauth2_service_url(self) -> Optional[str]:
+ # TODO(redo): check how we can create several servers for HA
+ oauth2_servers = self.get_service_endpoints('oauth2-proxy')
+ return f'https://{oauth2_servers[0]}' if oauth2_servers else None
+
def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None:
# we adjust the standby behaviour so rev-proxy can pick correctly the active instance
self.mgr.set_module_option_ex('dashboard', 'standby_error_status_code', '503')
@@ -103,6 +78,7 @@ class MgmtGatewayService(CephadmService):
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('prometheus')]
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('alertmanager')]
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('grafana')]
+ deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('oauth2-proxy')]
for dd in self.mgr.cache.get_daemons_by_service('mgr'):
# we consider mgr a dep even if the dashboard is disabled
# in order to be consistent with _calc_daemon_deps().
@@ -140,7 +116,8 @@ class MgmtGatewayService(CephadmService):
'alertmanager_scheme': scheme,
'prometheus_endpoints': prometheus_endpoints,
'alertmanager_endpoints': alertmanager_endpoints,
- 'grafana_endpoints': grafana_endpoints
+ 'grafana_endpoints': grafana_endpoints,
+ 'oauth2_proxy_url': self.get_oauth2_service_url(),
}
cert, key = self.get_external_certificates(svc_spec, daemon_spec)
diff --git a/src/pybind/mgr/cephadm/services/monitoring.py b/src/pybind/mgr/cephadm/services/monitoring.py
index 2cb02f4e219..f407985fcee 100644
--- a/src/pybind/mgr/cephadm/services/monitoring.py
+++ b/src/pybind/mgr/cephadm/services/monitoring.py
@@ -10,7 +10,6 @@ from orchestrator import DaemonDescription
from ceph.deployment.service_spec import AlertManagerSpec, GrafanaSpec, ServiceSpec, \
SNMPGatewaySpec, PrometheusSpec
from cephadm.services.cephadmservice import CephadmService, CephadmDaemonDeploySpec, get_dashboard_urls
-from cephadm.services.mgmt_gateway import get_mgmt_gw_internal_endpoint, get_mgmt_gw_external_endpoint
from mgr_util import verify_tls, ServerConfigException, build_url, get_cert_issuer_info, password_hash
from ceph.deployment.utils import wrap_ipv6
@@ -26,17 +25,83 @@ class GrafanaService(CephadmService):
daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
return daemon_spec
- def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
- assert self.TYPE == daemon_spec.daemon_type
+ def generate_data_sources(self, security_enabled: bool, mgmt_gw_enabled: bool, cert: str, pkey: str) -> str:
+ prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
+ root_cert = self.mgr.cert_mgr.get_root_ca()
+ oneline_root_cert = '\\n'.join([line.strip() for line in root_cert.splitlines()])
+ oneline_cert = '\\n'.join([line.strip() for line in cert.splitlines()])
+ oneline_key = '\\n'.join([line.strip() for line in pkey.splitlines()])
+ prom_services = self.generate_prom_services(security_enabled, mgmt_gw_enabled)
+ return self.mgr.template.render('services/grafana/ceph-dashboard.yml.j2',
+ {'hosts': prom_services,
+ 'prometheus_user': prometheus_user,
+ 'prometheus_password': prometheus_password,
+ 'cephadm_root_ca': oneline_root_cert,
+ 'cert': oneline_cert,
+ 'key': oneline_key,
+ 'security_enabled': security_enabled,
+ 'loki_host': self.get_loki_host()})
+
+ def generate_grafana_ini(self,
+ daemon_spec: CephadmDaemonDeploySpec,
+ mgmt_gw_enabled: bool,
+ oauth2_enabled: bool) -> str:
+
+ spec: GrafanaSpec = cast(GrafanaSpec, self.mgr.spec_store.active_specs[daemon_spec.service_name])
+ grafana_port = daemon_spec.ports[0] if daemon_spec.ports else self.DEFAULT_SERVICE_PORT
+ grafana_ip = daemon_spec.ip if daemon_spec.ip else ''
+ if spec.only_bind_port_on_networks and spec.networks:
+ assert daemon_spec.host is not None
+ ip_to_bind_to = self.mgr.get_first_matching_network_ip(daemon_spec.host, spec)
+ if ip_to_bind_to:
+ daemon_spec.port_ips = {str(grafana_port): ip_to_bind_to}
+ grafana_ip = ip_to_bind_to
+
+ mgmt_gw_ip = None
+ domain = self.mgr.get_fqdn(daemon_spec.host)
+ if mgmt_gw_enabled:
+ mgmt_gw_daemons = self.mgr.cache.get_daemons_by_service('mgmt-gateway')
+ if mgmt_gw_daemons:
+ dd = mgmt_gw_daemons[0]
+ assert dd.hostname
+ domain = self.mgr.get_fqdn(dd.hostname)
+ mgmt_gw_ip = self.mgr.inventory.get_addr(dd.hostname)
+
+ return self.mgr.template.render('services/grafana/grafana.ini.j2', {
+ 'anonymous_access': spec.anonymous_access,
+ 'initial_admin_password': spec.initial_admin_password,
+ 'protocol': spec.protocol,
+ 'http_port': grafana_port,
+ 'http_addr': grafana_ip,
+ 'domain': domain,
+ 'mgmt_gw_enabled': mgmt_gw_enabled,
+ 'oauth2_enabled': oauth2_enabled,
+ 'mgmt_gw_ip': mgmt_gw_ip,
+ })
+
+ def calculate_grafana_deps(self, security_enabled: bool) -> List[str]:
+
deps = [] # type: List[str]
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}')
+
+ # in case security is enabled we have to reconfig when prom user/pass changes
prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
if security_enabled and prometheus_user and prometheus_password:
deps.append(f'{hash(prometheus_user + prometheus_password)}')
- # add a dependency since url_prefix depends on the existence of mgmt-gateway
- deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('mgmt-gateway')]
+ # add a dependency for mgmt-gateway because the usage of url_prefix relies on its presence.
+ # another dependency is added for oauth2-proxy, as Grafana login is delegated to this service when enabled.
+ for service in ['prometheus', 'loki', 'mgmt-gateway', 'oauth2-proxy']:
+ deps += [d.name() for d in self.mgr.cache.get_daemons_by_service(service)]
+
+ return deps
+
+ def generate_prom_services(self, security_enabled: bool, mgmt_gw_enabled: bool) -> List[str]:
+
+ # in case mgmt-gw is enabled we only use one url pointing to the internal
+ # mgmt gw for dashboard which will take care of HA in this case
+ if mgmt_gw_enabled:
+ return [f'{self.mgr.get_mgmt_gw_internal_endpoint()}/prometheus']
prom_services = [] # type: List[str]
for dd in self.mgr.cache.get_daemons_by_service('prometheus'):
@@ -45,73 +110,29 @@ class GrafanaService(CephadmService):
port = dd.ports[0] if dd.ports else 9095
protocol = 'https' if security_enabled else 'http'
prom_services.append(build_url(scheme=protocol, host=addr, port=port))
- deps.append(dd.name())
- # in case mgmt-gw is enabeld we only use one url pointing to the internal
- # mgmt gw for dashboard which will take care of HA in this case
- if mgmt_gw_enabled:
- prom_services = [f'{get_mgmt_gw_internal_endpoint(self.mgr)}/prometheus']
+ return prom_services
+ def get_loki_host(self) -> str:
daemons = self.mgr.cache.get_daemons_by_service('loki')
- loki_host = ''
for i, dd in enumerate(daemons):
assert dd.hostname is not None
if i == 0:
addr = dd.ip if dd.ip else self.mgr.get_fqdn(dd.hostname)
- loki_host = build_url(scheme='http', host=addr, port=3100)
-
- deps.append(dd.name())
-
- root_cert = self.mgr.cert_mgr.get_root_ca()
- cert, pkey = self.prepare_certificates(daemon_spec)
- oneline_root_cert = '\\n'.join([line.strip() for line in root_cert.splitlines()])
- oneline_cert = '\\n'.join([line.strip() for line in cert.splitlines()])
- oneline_key = '\\n'.join([line.strip() for line in pkey.splitlines()])
- grafana_data_sources = self.mgr.template.render('services/grafana/ceph-dashboard.yml.j2',
- {'hosts': prom_services,
- 'prometheus_user': prometheus_user,
- 'prometheus_password': prometheus_password,
- 'cephadm_root_ca': oneline_root_cert,
- 'cert': oneline_cert,
- 'key': oneline_key,
- 'security_enabled': security_enabled,
- 'loki_host': loki_host})
-
- spec: GrafanaSpec = cast(
- GrafanaSpec, self.mgr.spec_store.active_specs[daemon_spec.service_name])
-
- grafana_port = daemon_spec.ports[0] if daemon_spec.ports else self.DEFAULT_SERVICE_PORT
- grafana_ip = daemon_spec.ip if daemon_spec.ip else ''
-
- if spec.only_bind_port_on_networks and spec.networks:
- assert daemon_spec.host is not None
- ip_to_bind_to = self.mgr.get_first_matching_network_ip(daemon_spec.host, spec)
- if ip_to_bind_to:
- daemon_spec.port_ips = {str(grafana_port): ip_to_bind_to}
- grafana_ip = ip_to_bind_to
+ return build_url(scheme='http', host=addr, port=3100)
- grafana_ini = self.mgr.template.render(
- 'services/grafana/grafana.ini.j2', {
- 'anonymous_access': spec.anonymous_access,
- 'initial_admin_password': spec.initial_admin_password,
- 'http_port': grafana_port,
- 'protocol': spec.protocol,
- 'http_addr': grafana_ip,
- 'use_url_prefix': mgmt_gw_enabled,
- 'domain': daemon_spec.host,
- })
+ return ''
- if 'dashboard' in self.mgr.get('mgr_map')['modules'] and spec.initial_admin_password:
- self.mgr.check_mon_command(
- {'prefix': 'dashboard set-grafana-api-password'}, inbuf=spec.initial_admin_password)
+ def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
+ assert self.TYPE == daemon_spec.daemon_type
+ cert, pkey = self.prepare_certificates(daemon_spec)
+ security_enabled, mgmt_gw_enabled, oauth2_enabled = self.mgr._get_security_config()
+ deps = self.calculate_grafana_deps(security_enabled)
+ grafana_ini = self.generate_grafana_ini(daemon_spec, mgmt_gw_enabled, oauth2_enabled)
+ grafana_data_sources = self.generate_data_sources(security_enabled, mgmt_gw_enabled, cert, pkey)
# the path of the grafana dashboards are assumed from the providers.yml.j2 file by grafana
grafana_dashboards_path = self.mgr.grafana_dashboards_path or '/etc/grafana/dashboards/ceph-dashboard/'
- grafana_providers = self.mgr.template.render(
- 'services/grafana/providers.yml.j2', {
- 'grafana_dashboards_path': grafana_dashboards_path
- }
- )
config_file = {
'files': {
@@ -119,10 +140,18 @@ class GrafanaService(CephadmService):
'provisioning/datasources/ceph-dashboard.yml': grafana_data_sources,
'certs/cert_file': '# generated by cephadm\n%s' % cert,
'certs/cert_key': '# generated by cephadm\n%s' % pkey,
- 'provisioning/dashboards/default.yml': grafana_providers
+ 'provisioning/dashboards/default.yml': self.mgr.template.render(
+ 'services/grafana/providers.yml.j2', {
+ 'grafana_dashboards_path': grafana_dashboards_path
+ }
+ )
}
}
+ spec: GrafanaSpec = cast(GrafanaSpec, self.mgr.spec_store.active_specs[daemon_spec.service_name])
+ if 'dashboard' in self.mgr.get('mgr_map')['modules'] and spec.initial_admin_password:
+ self.mgr.check_mon_command({'prefix': 'dashboard set-grafana-api-password'}, inbuf=spec.initial_admin_password)
+
# include dashboards, if present in the container
if os.path.exists(grafana_dashboards_path):
files = os.listdir(grafana_dashboards_path)
@@ -203,7 +232,7 @@ class GrafanaService(CephadmService):
port = dd.ports[0] if dd.ports else self.DEFAULT_SERVICE_PORT
spec = cast(GrafanaSpec, self.mgr.spec_store[dd.service_name()].spec)
- mgmt_gw_external_endpoint = get_mgmt_gw_external_endpoint(self.mgr)
+ mgmt_gw_external_endpoint = self.mgr.get_mgmt_gw_external_endpoint()
if mgmt_gw_external_endpoint is not None:
self._set_value_on_dashboard(
'Grafana',
@@ -279,15 +308,18 @@ class AlertmanagerService(CephadmService):
# add a dependency since url_prefix depends on the existence of mgmt-gateway
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('mgmt-gateway')]
+ # add a dependency since enabling basic-auth (or not) depends on the existence of 'oauth2-proxy'
+ deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('oauth2-proxy')]
+
# scan all mgrs to generate deps and to get standbys too.
for dd in self.mgr.cache.get_daemons_by_service('mgr'):
# we consider mgr a dep even if the dashboard is disabled
# in order to be consistent with _calc_daemon_deps().
deps.append(dd.name())
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, mgmt_gw_enabled, oauth2_enabled = self.mgr._get_security_config()
if mgmt_gw_enabled:
- dashboard_urls = [f'{get_mgmt_gw_internal_endpoint(self.mgr)}/dashboard']
+ dashboard_urls = [f'{self.mgr.get_mgmt_gw_internal_endpoint()}/dashboard']
else:
dashboard_urls = get_dashboard_urls(self)
@@ -326,7 +358,7 @@ class AlertmanagerService(CephadmService):
cert, key = self.get_alertmanager_certificates(daemon_spec)
context = {
'enable_mtls': mgmt_gw_enabled,
- 'enable_basic_auth': True, # TODO(redo): disable when ouath2-proxy is enabled
+ 'enable_basic_auth': not oauth2_enabled,
'alertmanager_web_user': alertmanager_user,
'alertmanager_web_password': password_hash(alertmanager_password),
}
@@ -363,14 +395,14 @@ class AlertmanagerService(CephadmService):
assert dd.hostname is not None
addr = dd.ip if dd.ip else self.mgr.get_fqdn(dd.hostname)
port = dd.ports[0] if dd.ports else self.DEFAULT_SERVICE_PORT
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, mgmt_gw_enabled, _ = self.mgr._get_security_config()
protocol = 'https' if security_enabled else 'http'
if mgmt_gw_enabled:
self._set_value_on_dashboard(
'AlertManager',
'dashboard get-alertmanager-api-host',
'dashboard set-alertmanager-api-host',
- f'{get_mgmt_gw_internal_endpoint(self.mgr)}/alertmanager'
+ f'{self.mgr.get_mgmt_gw_internal_endpoint()}/alertmanager'
)
self._set_value_on_dashboard(
'Alertmanager',
@@ -415,7 +447,7 @@ class PrometheusService(CephadmService):
# we shouldn't get here (mon will tell the mgr to respawn), but no
# harm done if we do.
- def get_mgr_prometheus_certificates(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[str, str]:
+ def get_prometheus_certificates(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[str, str]:
node_ip = self.mgr.inventory.get_addr(daemon_spec.host)
host_fqdn = self.mgr.get_fqdn(daemon_spec.host)
cert, key = self.mgr.cert_mgr.generate_cert([host_fqdn, 'prometheus_servers'], node_ip)
@@ -454,7 +486,7 @@ class PrometheusService(CephadmService):
retention_size = '0'
# build service discovery end-point
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, mgmt_gw_enabled, oauth2_enabled = self.mgr._get_security_config()
port = self.mgr.service_discovery_port
mgr_addr = wrap_ipv6(self.mgr.get_mgr_ip())
protocol = 'https' if security_enabled else 'http'
@@ -504,13 +536,17 @@ class PrometheusService(CephadmService):
web_context = {
'enable_mtls': mgmt_gw_enabled,
- 'enable_basic_auth': True, # TODO(redo): disable when ouath2-proxy is enabled
+ 'enable_basic_auth': not oauth2_enabled,
'prometheus_web_user': prometheus_user,
'prometheus_web_password': password_hash(prometheus_password),
}
if security_enabled:
- cert, key = self.get_mgr_prometheus_certificates(daemon_spec)
+ # Following key/cert are needed for:
+ # 1- run the prometheus server (web.yml config)
+ # 2- use mTLS to scrape node-exporter (prometheus acts as client)
+ # 3- use mTLS to send alerts to alertmanager (prometheus acts as client)
+ cert, key = self.get_prometheus_certificates(daemon_spec)
r: Dict[str, Any] = {
'files': {
'prometheus.yml': self.mgr.template.render('services/prometheus/prometheus.yml.j2', context),
@@ -570,7 +606,7 @@ class PrometheusService(CephadmService):
# re-deploy prometheus if the mgr has changed (due to a fail-over i.e).
deps.append(self.mgr.get_active_mgr().name())
deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}')
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, _, _ = self.mgr._get_security_config()
if security_enabled:
alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials()
prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
@@ -581,6 +617,8 @@ class PrometheusService(CephadmService):
# add a dependency since url_prefix depends on the existence of mgmt-gateway
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('mgmt-gateway')]
+ # add a dependency since enabling basic-auth (or not) depends on the existence of 'oauth2-proxy'
+ deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('oauth2-proxy')]
# add dependency on ceph-exporter daemons
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('ceph-exporter')]
@@ -601,14 +639,14 @@ class PrometheusService(CephadmService):
assert dd.hostname is not None
addr = dd.ip if dd.ip else self.mgr.get_fqdn(dd.hostname)
port = dd.ports[0] if dd.ports else self.DEFAULT_SERVICE_PORT
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, mgmt_gw_enabled, _ = self.mgr._get_security_config()
protocol = 'https' if security_enabled else 'http'
if mgmt_gw_enabled:
self._set_value_on_dashboard(
'Prometheus',
'dashboard get-prometheus-api-host',
'dashboard set-prometheus-api-host',
- f'{get_mgmt_gw_internal_endpoint(self.mgr)}/prometheus'
+ f'{self.mgr.get_mgmt_gw_internal_endpoint()}/prometheus'
)
self._set_value_on_dashboard(
'Prometheus',
@@ -655,7 +693,7 @@ class NodeExporterService(CephadmService):
deps = []
deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('mgmt-gateway')]
deps += [f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}']
- security_enabled, mgmt_gw_enabled = self.mgr._get_security_config()
+ security_enabled, mgmt_gw_enabled, _ = self.mgr._get_security_config()
if security_enabled:
cert, key = self.get_node_exporter_certificates(daemon_spec)
r = {
diff --git a/src/pybind/mgr/cephadm/services/oauth2_proxy.py b/src/pybind/mgr/cephadm/services/oauth2_proxy.py
new file mode 100644
index 00000000000..a84f44817ee
--- /dev/null
+++ b/src/pybind/mgr/cephadm/services/oauth2_proxy.py
@@ -0,0 +1,86 @@
+import logging
+from typing import List, Any, Tuple, Dict, cast, Optional
+import os
+import base64
+
+from orchestrator import DaemonDescription
+from ceph.deployment.service_spec import OAuth2ProxySpec
+from cephadm.services.cephadmservice import CephadmService, CephadmDaemonDeploySpec
+
+logger = logging.getLogger(__name__)
+
+
+class OAuth2ProxyService(CephadmService):
+ TYPE = 'oauth2-proxy'
+ SVC_TEMPLATE_PATH = 'services/oauth2-proxy/oauth2-proxy.conf.j2'
+
+ def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:
+ assert self.TYPE == daemon_spec.daemon_type
+ daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
+ return daemon_spec
+
+ def get_service_ips_and_hosts(self, service_name: str) -> List[str]:
+ entries = set()
+ for dd in self.mgr.cache.get_daemons_by_service(service_name):
+ assert dd.hostname is not None
+ addr = dd.ip if dd.ip else self.mgr.inventory.get_addr(dd.hostname)
+ entries.add(dd.hostname)
+ entries.add(addr)
+ return sorted(list(entries))
+
+ def get_redirect_url(self) -> Optional[str]:
+ external_endpoint = self.mgr.get_mgmt_gw_external_endpoint()
+ return f"{external_endpoint}/oauth2/callback" if external_endpoint else None
+
+ def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
+ if daemon_descrs:
+ return daemon_descrs[0]
+ # if empty list provided, return empty Daemon Desc
+ return DaemonDescription()
+
+ def get_certificates(self, svc_spec: OAuth2ProxySpec, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[str, str]:
+ cert = self.mgr.cert_key_store.get_cert('oauth2_proxy_cert')
+ key = self.mgr.cert_key_store.get_key('oauth2_proxy_key')
+ if not (cert and key):
+ # not available on store, check if provided on the spec
+ if svc_spec.ssl_certificate and svc_spec.ssl_certificate_key:
+ cert = svc_spec.ssl_certificate
+ key = svc_spec.ssl_certificate_key
+ else:
+ # not provided on the spec, let's generate self-signed certificates
+ addr = self.mgr.inventory.get_addr(daemon_spec.host)
+ host_fqdn = self.mgr.get_fqdn(daemon_spec.host)
+ cert, key = self.mgr.cert_mgr.generate_cert(host_fqdn, addr)
+ # save certificates
+ if cert and key:
+ self.mgr.cert_key_store.save_cert('oauth2_proxy_cert', cert)
+ self.mgr.cert_key_store.save_key('oauth2_proxy_key', key)
+ else:
+ logger.error("Failed to obtain certificate and key for the oauth2-proxy service.")
+ return cert, key
+
+ def generate_random_secret(self) -> str:
+ random_bytes = os.urandom(32)
+ base64_secret = base64.urlsafe_b64encode(random_bytes).rstrip(b'=').decode('utf-8')
+ return base64_secret
+
+ def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
+ assert self.TYPE == daemon_spec.daemon_type
+ svc_spec = cast(OAuth2ProxySpec, self.mgr.spec_store[daemon_spec.service_name].spec)
+ context = {
+ 'spec': svc_spec,
+ 'cookie_secret': svc_spec.cookie_secret or self.generate_random_secret(),
+ 'whitelist_domains': self.get_service_ips_and_hosts('mgmt-gateway'),
+ 'redirect_url': svc_spec.redirect_url or self.get_redirect_url()
+ }
+
+ cert, key = self.get_certificates(svc_spec, daemon_spec)
+ daemon_config = {
+ "files": {
+ "oauth2-proxy.conf": self.mgr.template.render(self.SVC_TEMPLATE_PATH, context),
+ "oauth2-proxy.crt": cert,
+ "oauth2-proxy.key": key,
+ }
+ }
+
+ return daemon_config, []
diff --git a/src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2 b/src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2
index 69569ec7b63..972ef22e7b5 100644
--- a/src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2
+++ b/src/pybind/mgr/cephadm/templates/services/grafana/grafana.ini.j2
@@ -14,7 +14,7 @@
cert_key = /etc/grafana/certs/cert_key
http_port = {{ http_port }}
http_addr = {{ http_addr }}
-{% if use_url_prefix %}
+{% if mgmt_gw_enabled %}
root_url = %(protocol)s://%(domain)s/grafana/
{% endif %}
[snapshots]
@@ -29,3 +29,17 @@
cookie_secure = true
cookie_samesite = none
allow_embedding = true
+{% if oauth2_enabled %}
+[auth]
+ disable_login_form = true
+[auth.proxy]
+ enabled = true
+ header_name = X-WEBAUTH-USER
+ header_property = username
+ auto_sign_up = true
+ sync_ttl = 15
+ whitelist = {{ mgmt_gw_ip }}
+ headers_encoded = false
+ enable_login_token = false
+ headers = Role:X-WEBAUTH-ROLE
+{% endif %}
diff --git a/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/external_server.conf.j2 b/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/external_server.conf.j2
index 29da8954ccc..260e7418e2d 100644
--- a/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/external_server.conf.j2
+++ b/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/external_server.conf.j2
@@ -44,13 +44,66 @@ server {
add_header X-XSS-Protection "1; mode=block";
## Content-Security-Policy (CSP): FIXME
# add_header Content-Security-Policy "default-src 'self'; script-src 'self'; object-src 'none'; base-uri 'none'; require-trusted-types-for 'script'; frame-ancestors 'self';";
+{% endif %}
+
+{% if oauth2_proxy_url %}
+ location /oauth2/ {
+ proxy_pass {{ oauth2_proxy_url }};
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Scheme $scheme;
+ # Check for original-uri header
+ proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
+ }
+ location = /oauth2/auth {
+ internal;
+ proxy_pass {{ oauth2_proxy_url }};
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Scheme $scheme;
+ # nginx auth_request includes headers but not body
+ proxy_set_header Content-Length "";
+ proxy_pass_request_body off;
+ }
{% endif %}
{% if dashboard_endpoints %}
location / {
proxy_pass {{ dashboard_scheme }}://dashboard_servers;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
+ {% if oauth2_proxy_url %}
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-Email $email;
+
+ auth_request_set $groups $upstream_http_x_auth_request_groups;
+ proxy_set_header X-User-Groups $groups;
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ proxy_set_header X-User $user;
+
+ auth_request_set $token $upstream_http_x_auth_request_access_token;
+ proxy_set_header X-Access-Token $token;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
+
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Host $host:80;
+ proxy_set_header X-Forwarded-Port 80;
+ proxy_set_header X-Forwarded-Server $host;
+ proxy_set_header X-Forwarded-Groups $groups;
+
+ proxy_http_version 1.1;
+
+ proxy_set_header X-Forwarded-Proto "https";
+ proxy_ssl_verify off;
+ {% endif %}
}
{% endif %}
@@ -61,6 +114,30 @@ server {
# clear any Authorization header as Prometheus and Alertmanager are using basic-auth browser
# will send this header if Grafana is running on the same node as one of those services
proxy_set_header Authorization "";
+ {% if oauth2_proxy_url %}
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ proxy_set_header X-Original-URI "/";
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-WEBAUTH-USER $user;
+ proxy_set_header X-WEBAUTH-EMAIL $email;
+
+ # Pass role header to Grafana
+ proxy_set_header X-WEBAUTH-ROLE $http_x_auth_request_role;
+
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
+ {% endif %}
}
{% endif %}
@@ -73,6 +150,18 @@ server {
proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
proxy_ssl_verify on;
proxy_ssl_verify_depth 2;
+ {% if oauth2_proxy_url %}
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-User $user;
+ proxy_set_header X-Email $email;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
+ {% endif %}
}
{% endif %}
@@ -85,6 +174,18 @@ server {
proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
proxy_ssl_verify on;
proxy_ssl_verify_depth 2;
+ {% if oauth2_proxy_url %}
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-User $user;
+ proxy_set_header X-Email $email;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
+ {% endif %}
}
{% endif %}
}
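
All of the `auth_request` wiring above is guarded by the `oauth2_proxy_url` template variable, so the external server configuration is left untouched unless an oauth2-proxy endpoint is known. The snippet below renders a stripped-down stand-in for that conditional with Jinja2, purely to illustrate the toggle; it is not the code path cephadm itself uses:

    # Illustrative sketch (assumes the jinja2 package is installed); a stripped-down
    # stand-in for external_server.conf.j2, not the template cephadm renders.
    from jinja2 import Template

    snippet = Template(
        "location / {\n"
        "    proxy_pass {{ dashboard_scheme }}://dashboard_servers;\n"
        "{% if oauth2_proxy_url %}\n"
        "    auth_request /oauth2/auth;\n"
        "    error_page 401 = /oauth2/sign_in;\n"
        "{% endif %}\n"
        "}\n"
    )

    # With an oauth2-proxy endpoint the auth_request directives are emitted...
    print(snippet.render(dashboard_scheme="https",
                         oauth2_proxy_url="https://192.168.100.102:4180"))
    # ...and without one the location block stays as it was.
    print(snippet.render(dashboard_scheme="https", oauth2_proxy_url=None))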
diff --git a/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/internal_server.conf.j2 b/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/internal_server.conf.j2
index f48582c2ce1..f2c32f87977 100644
--- a/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/internal_server.conf.j2
+++ b/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/internal_server.conf.j2
@@ -4,8 +4,9 @@ server {
listen [::]:{{ internal_port }} ssl;
ssl_certificate /etc/nginx/ssl/nginx_internal.crt;
ssl_certificate_key /etc/nginx/ssl/nginx_internal.key;
- ssl_protocols TLSv1.2 TLSv1.3;
- ssl_ciphers AES128-SHA:AES256-SHA:RC4-SHA:DES-CBC3-SHA:RC4-MD5;
+ ssl_protocols TLSv1.3;
+ # from: https://ssl-config.mozilla.org/#server=nginx
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
ssl_prefer_server_ciphers on;
{% if dashboard_endpoints %}
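
The internal server now accepts only TLSv1.3 with the Mozilla-recommended cipher list, replacing the old TLSv1.2 setting and its weak ciphers. As a quick client-side check, a sketch like the following confirms that a TLSv1.2 handshake is refused; the host and port are placeholders for a real mgmt-gateway internal endpoint:

    # Illustrative sketch (Python 3.7+, standard library); replace host/port with a
    # reachable internal server.
    import socket
    import ssl

    def handshake(max_version: ssl.TLSVersion) -> str:
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE   # internal certs are issued by the cephadm root CA
        ctx.maximum_version = max_version
        try:
            with socket.create_connection(("ceph-node", 29443), timeout=5) as sock:
                with ctx.wrap_socket(sock) as tls:
                    return tls.version() or "unknown"
        except (ssl.SSLError, OSError) as exc:
            return f"handshake rejected: {exc}"

    print(handshake(ssl.TLSVersion.TLSv1_3))  # expected: TLSv1.3
    print(handshake(ssl.TLSVersion.TLSv1_2))  # expected: rejected, the server is TLSv1.3-only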
diff --git a/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/nginx.conf.j2 b/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/nginx.conf.j2
index 9ce6eb9867d..0c2a6b98c3b 100644
--- a/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/nginx.conf.j2
+++ b/src/pybind/mgr/cephadm/templates/services/mgmt-gateway/nginx.conf.j2
@@ -7,6 +7,15 @@ events {
}
http {
+
+ client_header_buffer_size 32K;
+ large_client_header_buffers 4 32k;
+ proxy_busy_buffers_size 512k;
+ proxy_buffers 4 512k;
+ proxy_buffer_size 256K;
+ proxy_headers_hash_max_size 1024;
+ proxy_headers_hash_bucket_size 128;
+
{% if dashboard_endpoints %}
upstream dashboard_servers {
{% for ep in dashboard_endpoints %}
diff --git a/src/pybind/mgr/cephadm/templates/services/oauth2-proxy/oauth2-proxy.conf.j2 b/src/pybind/mgr/cephadm/templates/services/oauth2-proxy/oauth2-proxy.conf.j2
new file mode 100644
index 00000000000..20ca8cb6504
--- /dev/null
+++ b/src/pybind/mgr/cephadm/templates/services/oauth2-proxy/oauth2-proxy.conf.j2
@@ -0,0 +1,37 @@
+
+# Listen address for incoming HTTPS traffic (defaults to 0.0.0.0:4180).
+https_address= "{{ spec.https_address or '0.0.0.0:4180' }}"
+
+skip_provider_button= true
+skip_jwt_bearer_tokens= true
+
+# OIDC provider configuration.
+provider= "oidc"
+provider_display_name= "{{ spec.provider_display_name }}"
+client_id= "{{ spec.client_id }}"
+client_secret= "{{ spec.client_secret }}"
+oidc_issuer_url= "{{ spec.oidc_issuer_url }}"
+{% if redirect_url %}
+redirect_url= "{{ redirect_url }}"
+{% endif %}
+
+ssl_insecure_skip_verify=true
+
+# The following configuration is needed to avoid getting Forbidden errors
+# with Chrome-like browsers, as they handle third-party cookies more
+# strictly than Firefox
+cookie_samesite= "none"
+cookie_secure= true
+cookie_expire= "5h"
+cookie_refresh= "2h"
+
+pass_access_token= true
+pass_authorization_header= true
+pass_basic_auth= true
+pass_user_headers= true
+set_xauthrequest= true
+
+# Secret value for encrypting cookies.
+cookie_secret= "{{ cookie_secret }}"
+email_domains= "*"
+whitelist_domains= "{{ whitelist_domains | join(',') }}"
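
The `cookie_secret` consumed by this template must decode to 16, 24 or 32 bytes, which the validation added to `OAuth2ProxySpec` further below enforces. A minimal way to generate a suitable value with the Python standard library:

    # Generate a cookie_secret that satisfies the 16/24/32-byte requirement.
    import base64
    import os

    cookie_secret = base64.urlsafe_b64encode(os.urandom(32)).decode()
    print(cookie_secret)   # place this value in the oauth2-proxy service spec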
diff --git a/src/pybind/mgr/cephadm/tests/fixtures.py b/src/pybind/mgr/cephadm/tests/fixtures.py
index c49c637e6ed..dd858c6c7da 100644
--- a/src/pybind/mgr/cephadm/tests/fixtures.py
+++ b/src/pybind/mgr/cephadm/tests/fixtures.py
@@ -95,6 +95,7 @@ def with_cephadm_module(module_options=None, store=None):
mock.patch('cephadm.module.CephadmOrchestrator.get_module_option_ex', get_module_option_ex), \
mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
mock.patch("cephadm.module.CephadmOrchestrator.remote"), \
+ mock.patch("cephadm.module.CephadmOrchestrator.get_fqdn", lambda a, b: 'host_fqdn'), \
mock.patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1'), \
mock.patch("cephadm.agent.CephadmAgentHelpers._request_agent_acks"), \
mock.patch("cephadm.agent.CephadmAgentHelpers._apply_agent", return_value=False), \
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 9774e107bce..b3dc921ae56 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -854,7 +854,7 @@ class TestCephadm(object):
with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd:
CephadmServe(cephadm_module)._check_daemons()
_mon_cmd.assert_any_call(
- {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://[1::4]:3000'},
+ {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://host_fqdn:3000'},
None)
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@@ -1727,6 +1727,7 @@ class TestCephadm(object):
'iscsi_ssl_cert': False,
'ingress_ssl_cert': False,
'mgmt_gw_cert': False,
+ 'oauth2_proxy_cert': False,
'cephadm_root_ca_cert': False,
'grafana_cert': False,
'nvmeof_client_cert': False,
@@ -1779,6 +1780,7 @@ class TestCephadm(object):
expected_ls = {
'grafana_key': False,
'mgmt_gw_key': False,
+ 'oauth2_proxy_key': False,
'cephadm_root_ca_key': False,
'iscsi_ssl_key': False,
'ingress_ssl_key': False,
diff --git a/src/pybind/mgr/cephadm/tests/test_services.py b/src/pybind/mgr/cephadm/tests/test_services.py
index 87ba8eb1344..c7f795a1c03 100644
--- a/src/pybind/mgr/cephadm/tests/test_services.py
+++ b/src/pybind/mgr/cephadm/tests/test_services.py
@@ -36,6 +36,7 @@ from ceph.deployment.service_spec import (
ServiceSpec,
TracingSpec,
MgmtGatewaySpec,
+ OAuth2ProxySpec
)
from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect
@@ -1219,6 +1220,7 @@ class TestMonitoring:
@patch("cephadm.serve.CephadmServe._run_cephadm")
@patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4')
+ @patch("cephadm.module.CephadmOrchestrator.get_fqdn", lambda a, b: 'host_fqdn')
@patch("cephadm.services.monitoring.verify_tls", lambda *_: None)
def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
_run_cephadm.side_effect = async_side_effect(("{}", "", 0))
@@ -1241,7 +1243,7 @@ class TestMonitoring:
org_name = 'Main Org.'
org_role = 'Viewer'
[server]
- domain = 'test'
+ domain = 'host_fqdn'
protocol = https
cert_file = /etc/grafana/certs/cert_file
cert_key = /etc/grafana/certs/cert_key
@@ -1253,7 +1255,7 @@ class TestMonitoring:
disable_initial_admin_creation = true
cookie_secure = true
cookie_samesite = none
- allow_embedding = true""").lstrip(), # noqa: W291
+ allow_embedding = true\n""").lstrip(), # noqa: W291
'provisioning/datasources/ceph-dashboard.yml': dedent("""
# This file is generated by cephadm.
apiVersion: 1
@@ -1267,7 +1269,7 @@ class TestMonitoring:
type: 'prometheus'
access: 'proxy'
orgId: 1
- url: 'http://[1::4]:9095'
+ url: 'http://host_fqdn:9095'
basicAuth: false
isDefault: true
editable: false
@@ -1349,7 +1351,7 @@ class TestMonitoring:
" org_name = 'Main Org.'\n"
" org_role = 'Viewer'\n"
'[server]\n'
- " domain = 'test'\n"
+ " domain = 'host_fqdn'\n"
' protocol = https\n'
' cert_file = /etc/grafana/certs/cert_file\n'
' cert_key = /etc/grafana/certs/cert_key\n'
@@ -1362,7 +1364,7 @@ class TestMonitoring:
' admin_password = secure\n'
' cookie_secure = true\n'
' cookie_samesite = none\n'
- ' allow_embedding = true',
+ ' allow_embedding = true\n',
'provisioning/datasources/ceph-dashboard.yml':
"# This file is generated by cephadm.\n"
"apiVersion: 1\n\n"
@@ -1411,7 +1413,7 @@ class TestMonitoring:
'[users]\n'
' default_theme = light\n'
'[server]\n'
- " domain = 'test'\n"
+ " domain = 'host_fqdn'\n"
' protocol = https\n'
' cert_file = /etc/grafana/certs/cert_file\n'
' cert_key = /etc/grafana/certs/cert_key\n'
@@ -1424,7 +1426,7 @@ class TestMonitoring:
' admin_password = secure\n'
' cookie_secure = true\n'
' cookie_samesite = none\n'
- ' allow_embedding = true',
+ ' allow_embedding = true\n',
'provisioning/datasources/ceph-dashboard.yml':
"# This file is generated by cephadm.\n"
"apiVersion: 1\n\n"
@@ -3268,7 +3270,7 @@ class TestMgmtGateway:
@patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
@patch('cephadm.cert_mgr.CertMgr.get_root_ca', lambda instance: cephadm_root_ca)
@patch("cephadm.services.mgmt_gateway.get_dashboard_endpoints", lambda _: (["ceph-node-2:8443", "ceph-node-2:8443"], "https"))
- def test_mgmt_gateway_config(self, get_service_endpoints_mock: List[str], _run_cephadm, cephadm_module: CephadmOrchestrator):
+ def test_mgmt_gateway_config_no_auth(self, get_service_endpoints_mock: List[str], _run_cephadm, cephadm_module: CephadmOrchestrator):
def get_services_endpoints(name):
if name == 'prometheus':
@@ -3314,6 +3316,238 @@ class TestMgmtGateway:
}
http {
+
+ client_header_buffer_size 32K;
+ large_client_header_buffers 4 32k;
+ proxy_busy_buffers_size 512k;
+ proxy_buffers 4 512k;
+ proxy_buffer_size 256K;
+ proxy_headers_hash_max_size 1024;
+ proxy_headers_hash_bucket_size 128;
+
+ upstream dashboard_servers {
+ server ceph-node-2:8443;
+ server ceph-node-2:8443;
+ }
+
+ upstream grafana_servers {
+ server ceph-node-2:3000;
+ server ceph-node-2:3000;
+ }
+
+ upstream prometheus_servers {
+ server 192.168.100.100:9095;
+ server 192.168.100.101:9095;
+ }
+
+ upstream alertmanager_servers {
+ server 192.168.100.100:9093;
+ server 192.168.100.102:9093;
+ }
+
+ include /etc/nginx_external_server.conf;
+ include /etc/nginx_internal_server.conf;
+ }"""),
+ "nginx_external_server.conf": dedent("""
+ server {
+ listen 5555 ssl;
+ listen [::]:5555 ssl;
+ ssl_certificate /etc/nginx/ssl/nginx.crt;
+ ssl_certificate_key /etc/nginx/ssl/nginx.key;
+ ssl_protocols TLSv1.3;
+ # from: https://ssl-config.mozilla.org/#server=nginx
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+
+ # Only return Nginx in server header, no extra info will be provided
+ server_tokens off;
+
+ # Perfect Forward Secrecy(PFS) is frequently compromised without this
+ ssl_prefer_server_ciphers on;
+
+ # Enable SSL session caching for improved performance
+ ssl_session_tickets off;
+ ssl_session_timeout 1d;
+ ssl_session_cache shared:SSL:10m;
+
+ # OCSP stapling
+ ssl_stapling on;
+ ssl_stapling_verify on;
+ resolver_timeout 5s;
+
+ # Security headers
+ ## X-Content-Type-Options: avoid MIME type sniffing
+ add_header X-Content-Type-Options nosniff;
+ ## Strict Transport Security (HSTS): Yes
+ add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";
+ ## Enables the Cross-site scripting (XSS) filter in browsers.
+ add_header X-XSS-Protection "1; mode=block";
+ ## Content-Security-Policy (CSP): FIXME
+ # add_header Content-Security-Policy "default-src 'self'; script-src 'self'; object-src 'none'; base-uri 'none'; require-trusted-types-for 'script'; frame-ancestors 'self';";
+
+
+ location / {
+ proxy_pass https://dashboard_servers;
+ proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
+ }
+
+ location /grafana {
+ rewrite ^/grafana/(.*) /$1 break;
+ proxy_pass https://grafana_servers;
+ # clear any Authorization header as Prometheus and Alertmanager are using basic-auth browser
+ # will send this header if Grafana is running on the same node as one of those services
+ proxy_set_header Authorization "";
+ }
+
+ location /prometheus {
+ proxy_pass https://prometheus_servers;
+
+ proxy_ssl_certificate /etc/nginx/ssl/nginx_internal.crt;
+ proxy_ssl_certificate_key /etc/nginx/ssl/nginx_internal.key;
+ proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
+ proxy_ssl_verify on;
+ proxy_ssl_verify_depth 2;
+ }
+
+ location /alertmanager {
+ proxy_pass https://alertmanager_servers;
+
+ proxy_ssl_certificate /etc/nginx/ssl/nginx_internal.crt;
+ proxy_ssl_certificate_key /etc/nginx/ssl/nginx_internal.key;
+ proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
+ proxy_ssl_verify on;
+ proxy_ssl_verify_depth 2;
+ }
+ }"""),
+ "nginx_internal_server.conf": dedent("""
+ server {
+ listen 29443 ssl;
+ listen [::]:29443 ssl;
+ ssl_certificate /etc/nginx/ssl/nginx_internal.crt;
+ ssl_certificate_key /etc/nginx/ssl/nginx_internal.key;
+ ssl_protocols TLSv1.3;
+ # from: https://ssl-config.mozilla.org/#server=nginx
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+ ssl_prefer_server_ciphers on;
+
+ location /internal/dashboard {
+ rewrite ^/internal/dashboard/(.*) /$1 break;
+ proxy_pass https://dashboard_servers;
+ proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
+ }
+
+ location /internal/grafana {
+ rewrite ^/internal/grafana/(.*) /$1 break;
+ proxy_pass https://grafana_servers;
+ }
+
+ location /internal/prometheus {
+ rewrite ^/internal/prometheus/(.*) /prometheus/$1 break;
+ proxy_pass https://prometheus_servers;
+
+ proxy_ssl_certificate /etc/nginx/ssl/nginx_internal.crt;
+ proxy_ssl_certificate_key /etc/nginx/ssl/nginx_internal.key;
+ proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
+ proxy_ssl_verify on;
+ proxy_ssl_verify_depth 2;
+ }
+
+ location /internal/alertmanager {
+ rewrite ^/internal/alertmanager/(.*) /alertmanager/$1 break;
+ proxy_pass https://alertmanager_servers;
+
+ proxy_ssl_certificate /etc/nginx/ssl/nginx_internal.crt;
+ proxy_ssl_certificate_key /etc/nginx/ssl/nginx_internal.key;
+ proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
+ proxy_ssl_verify on;
+ proxy_ssl_verify_depth 2;
+ }
+ }"""),
+ "nginx_internal.crt": f"{ceph_generated_cert}",
+ "nginx_internal.key": f"{ceph_generated_key}",
+ "ca.crt": f"{cephadm_root_ca}",
+ "nginx.crt": f"{ceph_generated_cert}",
+ "nginx.key": f"{ceph_generated_key}",
+ }
+ }
+ }
+
+ with with_host(cephadm_module, 'ceph-node'):
+ with with_service(cephadm_module, spec):
+ _run_cephadm.assert_called_with(
+ 'ceph-node',
+ 'mgmt-gateway.ceph-node',
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps(expected),
+ use_current_daemon_image=False,
+ )
+
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("cephadm.services.mgmt_gateway.MgmtGatewayService.get_service_endpoints")
+ @patch("cephadm.services.mgmt_gateway.MgmtGatewayService.get_external_certificates",
+ lambda instance, svc_spec, dspec: (ceph_generated_cert, ceph_generated_key))
+ @patch("cephadm.services.mgmt_gateway.MgmtGatewayService.get_internal_certificates",
+ lambda instance, dspec: (ceph_generated_cert, ceph_generated_key))
+ @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
+ @patch('cephadm.cert_mgr.CertMgr.get_root_ca', lambda instance: cephadm_root_ca)
+ @patch("cephadm.services.mgmt_gateway.get_dashboard_endpoints", lambda _: (["ceph-node-2:8443", "ceph-node-2:8443"], "https"))
+ @patch("cephadm.services.mgmt_gateway.MgmtGatewayService.get_oauth2_service_url", lambda _: "https://192.168.100.102:4180")
+ def test_mgmt_gateway_config_with_auth(self, get_service_endpoints_mock: List[str], _run_cephadm, cephadm_module: CephadmOrchestrator):
+
+ def get_services_endpoints(name):
+ if name == 'prometheus':
+ return ["192.168.100.100:9095", "192.168.100.101:9095"]
+ elif name == 'grafana':
+ return ["ceph-node-2:3000", "ceph-node-2:3000"]
+ elif name == 'alertmanager':
+ return ["192.168.100.100:9093", "192.168.100.102:9093"]
+ return []
+
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+ get_service_endpoints_mock.side_effect = get_services_endpoints
+
+ server_port = 5555
+ spec = MgmtGatewaySpec(port=server_port,
+ ssl_certificate=ceph_generated_cert,
+ ssl_certificate_key=ceph_generated_key,
+ enable_auth=True)
+
+ expected = {
+ "fsid": "fsid",
+ "name": "mgmt-gateway.ceph-node",
+ "image": "",
+ "deploy_arguments": [],
+ "params": {"tcp_ports": [server_port]},
+ "meta": {
+ "service_name": "mgmt-gateway",
+ "ports": [server_port],
+ "ip": None,
+ "deployed_by": [],
+ "rank": None,
+ "rank_generation": None,
+ "extra_container_args": None,
+ "extra_entrypoint_args": None
+ },
+ "config_blobs": {
+ "files": {
+ "nginx.conf": dedent("""
+ # This file is generated by cephadm.
+ worker_rlimit_nofile 8192;
+
+ events {
+ worker_connections 4096;
+ }
+
+ http {
+
+ client_header_buffer_size 32K;
+ large_client_header_buffers 4 32k;
+ proxy_busy_buffers_size 512k;
+ proxy_buffers 4 512k;
+ proxy_buffer_size 256K;
+ proxy_headers_hash_max_size 1024;
+ proxy_headers_hash_bucket_size 128;
+
upstream dashboard_servers {
server ceph-node-2:8443;
server ceph-node-2:8443;
@@ -3373,10 +3607,59 @@ class TestMgmtGateway:
## Content-Security-Policy (CSP): FIXME
# add_header Content-Security-Policy "default-src 'self'; script-src 'self'; object-src 'none'; base-uri 'none'; require-trusted-types-for 'script'; frame-ancestors 'self';";
+ location /oauth2/ {
+ proxy_pass https://192.168.100.102:4180;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Scheme $scheme;
+                    # Pass the original request URI so oauth2-proxy can redirect back after sign-in
+ proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
+ }
+
+ location = /oauth2/auth {
+ internal;
+ proxy_pass https://192.168.100.102:4180;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Scheme $scheme;
+ # nginx auth_request includes headers but not body
+ proxy_set_header Content-Length "";
+ proxy_pass_request_body off;
+ }
location / {
proxy_pass https://dashboard_servers;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-Email $email;
+
+ auth_request_set $groups $upstream_http_x_auth_request_groups;
+ proxy_set_header X-User-Groups $groups;
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ proxy_set_header X-User $user;
+
+ auth_request_set $token $upstream_http_x_auth_request_access_token;
+ proxy_set_header X-Access-Token $token;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
+
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Host $host:80;
+ proxy_set_header X-Forwarded-Port 80;
+ proxy_set_header X-Forwarded-Server $host;
+ proxy_set_header X-Forwarded-Groups $groups;
+
+ proxy_http_version 1.1;
+
+ proxy_set_header X-Forwarded-Proto "https";
+ proxy_ssl_verify off;
}
location /grafana {
@@ -3385,6 +3668,28 @@ class TestMgmtGateway:
# clear any Authorization header as Prometheus and Alertmanager are using basic-auth browser
# will send this header if Grafana is running on the same node as one of those services
proxy_set_header Authorization "";
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ proxy_set_header X-Original-URI "/";
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-WEBAUTH-USER $user;
+ proxy_set_header X-WEBAUTH-EMAIL $email;
+
+ # Pass role header to Grafana
+ proxy_set_header X-WEBAUTH-ROLE $http_x_auth_request_role;
+
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
}
location /prometheus {
@@ -3395,6 +3700,16 @@ class TestMgmtGateway:
proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
proxy_ssl_verify on;
proxy_ssl_verify_depth 2;
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-User $user;
+ proxy_set_header X-Email $email;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
}
location /alertmanager {
@@ -3405,6 +3720,16 @@ class TestMgmtGateway:
proxy_ssl_trusted_certificate /etc/nginx/ssl/ca.crt;
proxy_ssl_verify on;
proxy_ssl_verify_depth 2;
+ auth_request /oauth2/auth;
+ error_page 401 = /oauth2/sign_in;
+
+ auth_request_set $user $upstream_http_x_auth_request_user;
+ auth_request_set $email $upstream_http_x_auth_request_email;
+ proxy_set_header X-User $user;
+ proxy_set_header X-Email $email;
+
+ auth_request_set $auth_cookie $upstream_http_set_cookie;
+ add_header Set-Cookie $auth_cookie;
}
}"""),
"nginx_internal_server.conf": dedent("""
@@ -3413,8 +3738,9 @@ class TestMgmtGateway:
listen [::]:29443 ssl;
ssl_certificate /etc/nginx/ssl/nginx_internal.crt;
ssl_certificate_key /etc/nginx/ssl/nginx_internal.key;
- ssl_protocols TLSv1.2 TLSv1.3;
- ssl_ciphers AES128-SHA:AES256-SHA:RC4-SHA:DES-CBC3-SHA:RC4-MD5;
+ ssl_protocols TLSv1.3;
+ # from: https://ssl-config.mozilla.org/#server=nginx
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
ssl_prefer_server_ciphers on;
location /internal/dashboard {
@@ -3469,3 +3795,109 @@ class TestMgmtGateway:
stdin=json.dumps(expected),
use_current_daemon_image=False,
)
+
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("cephadm.services.mgmt_gateway.MgmtGatewayService.get_service_endpoints")
+ @patch("cephadm.services.mgmt_gateway.MgmtGatewayService.get_external_certificates",
+ lambda instance, svc_spec, dspec: (ceph_generated_cert, ceph_generated_key))
+ @patch("cephadm.services.mgmt_gateway.MgmtGatewayService.get_internal_certificates",
+ lambda instance, dspec: (ceph_generated_cert, ceph_generated_key))
+ @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1')
+ @patch('cephadm.cert_mgr.CertMgr.get_root_ca', lambda instance: cephadm_root_ca)
+ @patch("cephadm.services.mgmt_gateway.get_dashboard_endpoints", lambda _: (["ceph-node-2:8443", "ceph-node-2:8443"], "https"))
+ def test_oauth2_proxy_service(self, get_service_endpoints_mock: List[str], _run_cephadm, cephadm_module: CephadmOrchestrator):
+
+ def get_services_endpoints(name):
+ if name == 'prometheus':
+ return ["192.168.100.100:9095", "192.168.100.101:9095"]
+ elif name == 'grafana':
+ return ["ceph-node-2:3000", "ceph-node-2:3000"]
+ elif name == 'alertmanager':
+ return ["192.168.100.100:9093", "192.168.100.102:9093"]
+ return []
+
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+ get_service_endpoints_mock.side_effect = get_services_endpoints
+
+ server_port = 5555
+ mgmt_gw_spec = MgmtGatewaySpec(port=server_port,
+ ssl_certificate=ceph_generated_cert,
+ ssl_certificate_key=ceph_generated_key,
+ enable_auth=True)
+
+ oauth2_spec = OAuth2ProxySpec(provider_display_name='my_idp_provider',
+ client_id='my_client_id',
+ client_secret='my_client_secret',
+ oidc_issuer_url='http://192.168.10.10:8888/dex',
+ cookie_secret='kbAEM9opAmuHskQvt0AW8oeJRaOM2BYy5Loba0kZ0SQ=',
+ ssl_certificate=ceph_generated_cert,
+ ssl_certificate_key=ceph_generated_key)
+ expected = {
+ "fsid": "fsid",
+ "name": "oauth2-proxy.ceph-node",
+ "image": "",
+ "deploy_arguments": [],
+ "params": {"tcp_ports": [4180]},
+ "meta": {
+ "service_name": "oauth2-proxy",
+ "ports": [4180],
+ "ip": None,
+ "deployed_by": [],
+ "rank": None,
+ "rank_generation": None,
+ "extra_container_args": None,
+ "extra_entrypoint_args": None
+ },
+ "config_blobs": {
+ "files": {
+ "oauth2-proxy.conf": dedent("""
+                # Listen address for incoming HTTPS traffic (defaults to 0.0.0.0:4180).
+ https_address= "0.0.0.0:4180"
+
+ skip_provider_button= true
+ skip_jwt_bearer_tokens= true
+
+ # OIDC provider configuration.
+ provider= "oidc"
+ provider_display_name= "my_idp_provider"
+ client_id= "my_client_id"
+ client_secret= "my_client_secret"
+ oidc_issuer_url= "http://192.168.10.10:8888/dex"
+ redirect_url= "https://host_fqdn:5555/oauth2/callback"
+
+ ssl_insecure_skip_verify=true
+
+                # The following configuration is needed to avoid getting Forbidden errors
+                # with Chrome-like browsers, as they handle third-party cookies more
+                # strictly than Firefox
+ cookie_samesite= "none"
+ cookie_secure= true
+ cookie_expire= "5h"
+ cookie_refresh= "2h"
+
+ pass_access_token= true
+ pass_authorization_header= true
+ pass_basic_auth= true
+ pass_user_headers= true
+ set_xauthrequest= true
+
+ # Secret value for encrypting cookies.
+ cookie_secret= "kbAEM9opAmuHskQvt0AW8oeJRaOM2BYy5Loba0kZ0SQ="
+ email_domains= "*"
+ whitelist_domains= "1::4,ceph-node\""""),
+ "oauth2-proxy.crt": f"{ceph_generated_cert}",
+ "oauth2-proxy.key": f"{ceph_generated_key}",
+ }
+ }
+ }
+
+ with with_host(cephadm_module, 'ceph-node'):
+ with with_service(cephadm_module, mgmt_gw_spec) as _, with_service(cephadm_module, oauth2_spec):
+ _run_cephadm.assert_called_with(
+ 'ceph-node',
+ 'oauth2-proxy.ceph-node',
+ ['_orch', 'deploy'],
+ [],
+ stdin=json.dumps(expected),
+ use_current_daemon_image=False,
+ )
diff --git a/src/pybind/mgr/orchestrator/_interface.py b/src/pybind/mgr/orchestrator/_interface.py
index cc389545c45..c33f38cfdd4 100644
--- a/src/pybind/mgr/orchestrator/_interface.py
+++ b/src/pybind/mgr/orchestrator/_interface.py
@@ -43,6 +43,7 @@ from ceph.deployment.service_spec import (
SMBSpec,
SNMPGatewaySpec,
MgmtGatewaySpec,
+ OAuth2ProxySpec,
ServiceSpec,
TunedProfileSpec,
)
@@ -600,6 +601,7 @@ class Orchestrator(object):
'host': self.add_host,
'smb': self.apply_smb,
'mgmt-gateway': self.apply_mgmt_gateway,
+ 'oauth2-proxy': self.apply_oauth2_proxy,
}
def merge(l: OrchResult[List[str]], r: OrchResult[str]) -> OrchResult[List[str]]: # noqa: E741
@@ -849,6 +851,10 @@ class Orchestrator(object):
"""Update an existing cluster gateway service"""
raise NotImplementedError()
+ def apply_oauth2_proxy(self, spec: OAuth2ProxySpec) -> OrchResult[str]:
+        """Update an existing oauth2-proxy service"""
+ raise NotImplementedError()
+
def apply_smb(self, spec: SMBSpec) -> OrchResult[str]:
"""Update a smb gateway service"""
raise NotImplementedError()
@@ -933,6 +939,7 @@ def daemon_type_to_service(dtype: str) -> str:
'iscsi': 'iscsi',
'nvmeof': 'nvmeof',
'mgmt-gateway': 'mgmt-gateway',
+ 'oauth2-proxy': 'oauth2-proxy',
'rbd-mirror': 'rbd-mirror',
'cephfs-mirror': 'cephfs-mirror',
'nfs': 'nfs',
@@ -969,6 +976,7 @@ def service_to_daemon_types(stype: str) -> List[str]:
'iscsi': ['iscsi'],
'nvmeof': ['nvmeof'],
'mgmt-gateway': ['mgmt-gateway'],
+ 'oauth2-proxy': ['oauth2-proxy'],
'rbd-mirror': ['rbd-mirror'],
'cephfs-mirror': ['cephfs-mirror'],
'nfs': ['nfs'],
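
With the mappings above in place, the generic orchestrator helpers resolve the new daemon type like any other. A quick sanity check, assuming the mgr's `orchestrator` package is importable (as it is inside the ceph-mgr environment):

    # Sketch only; assumes src/pybind/mgr/orchestrator is on the Python path.
    from orchestrator import daemon_type_to_service, service_to_daemon_types

    assert daemon_type_to_service('oauth2-proxy') == 'oauth2-proxy'
    assert service_to_daemon_types('oauth2-proxy') == ['oauth2-proxy']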
diff --git a/src/pybind/mgr/orchestrator/module.py b/src/pybind/mgr/orchestrator/module.py
index 2bddefabe9a..32c379492ea 100644
--- a/src/pybind/mgr/orchestrator/module.py
+++ b/src/pybind/mgr/orchestrator/module.py
@@ -47,6 +47,7 @@ from ._interface import (
SMBSpec,
SNMPGatewaySpec,
MgmtGatewaySpec,
+ OAuth2ProxySpec,
ServiceDescription,
TunedProfileSpec,
_cli_read_command,
@@ -1806,6 +1807,7 @@ Usage:
def _apply_mgmt_gateway(self,
port: Optional[int] = None,
disable_https: Optional[bool] = False,
+ enable_auth: Optional[bool] = False,
placement: Optional[str] = None,
unmanaged: bool = False,
dry_run: bool = False,
@@ -1821,6 +1823,7 @@ Usage:
unmanaged=unmanaged,
port=port,
disable_https=disable_https,
+ enable_auth=enable_auth,
preview_only=dry_run
)
@@ -1828,6 +1831,27 @@ Usage:
return self._apply_misc([spec], dry_run, format, no_overwrite)
+ @_cli_write_command('orch apply oauth2-proxy')
+ def _apply_oauth2_proxy(self,
+ https_address: Optional[str] = None,
+ placement: Optional[str] = None,
+ unmanaged: bool = False,
+ dry_run: bool = False,
+ format: Format = Format.plain,
+ no_overwrite: bool = False,
+ inbuf: Optional[str] = None) -> HandleCommandResult:
+        """Add an oauth2-proxy service (cephadm only)"""
+
+ spec = OAuth2ProxySpec(
+ placement=PlacementSpec.from_string(placement),
+ unmanaged=unmanaged,
+ https_address=https_address
+ )
+
+ spec.validate() # force any validation exceptions to be caught correctly
+
+ return self._apply_misc([spec], dry_run, format, no_overwrite)
+
@_cli_write_command('orch apply nvmeof')
def _apply_nvmeof(self,
pool: str,
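
Note that `orch apply oauth2-proxy` only exposes `https_address` as a command-line flag; a complete spec (client_id, client_secret, oidc_issuer_url, cookie_secret, ...) is normally supplied as a service spec file. The sketch below builds the equivalent `OAuth2ProxySpec` directly in Python and runs the same `validate()` step the handler performs; the field values are placeholders borrowed from the tests, not working IdP credentials:

    # Sketch only; assumes src/python-common (ceph.deployment) is importable.
    from ceph.deployment.service_spec import OAuth2ProxySpec

    spec = OAuth2ProxySpec(
        provider_display_name='my_idp_provider',
        client_id='my_client_id',
        client_secret='my_client_secret',
        oidc_issuer_url='http://192.168.10.10:8888/dex',
        cookie_secret='kbAEM9opAmuHskQvt0AW8oeJRaOM2BYy5Loba0kZ0SQ=',
        https_address='0.0.0.0:4180',
    )
    spec.validate()   # raises SpecValidationError on empty fields, bad URLs or a short cookie_secret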
diff --git a/src/python-common/ceph/deployment/service_spec.py b/src/python-common/ceph/deployment/service_spec.py
index 274ada81d93..bbc78105548 100644
--- a/src/python-common/ceph/deployment/service_spec.py
+++ b/src/python-common/ceph/deployment/service_spec.py
@@ -766,6 +766,7 @@ class ServiceSpec(object):
'grafana',
'ingress',
'mgmt-gateway',
+ 'oauth2-proxy',
'iscsi',
'jaeger-agent',
'jaeger-collector',
@@ -821,6 +822,7 @@ class ServiceSpec(object):
'alertmanager': AlertManagerSpec,
'ingress': IngressSpec,
'mgmt-gateway': MgmtGatewaySpec,
+ 'oauth2-proxy': OAuth2ProxySpec,
'container': CustomContainerSpec,
'grafana': GrafanaSpec,
'node-exporter': MonitoringSpec,
@@ -1788,6 +1790,7 @@ class MgmtGatewaySpec(ServiceSpec):
networks: Optional[List[str]] = None,
placement: Optional[PlacementSpec] = None,
disable_https: Optional[bool] = False,
+ enable_auth: Optional[bool] = False,
port: Optional[int] = None,
ssl_certificate: Optional[str] = None,
ssl_certificate_key: Optional[str] = None,
@@ -1819,6 +1822,8 @@ class MgmtGatewaySpec(ServiceSpec):
)
#: Is a flag to disable HTTPS. If True, the server will use unsecure HTTP
self.disable_https = disable_https
+        #: Is a flag to enable SSO auth. Requires an active oauth2-proxy service.
+ self.enable_auth = enable_auth
#: The port number on which the server will listen
self.port = port
#: A multi-line string that contains the SSL certificate
@@ -1909,6 +1914,125 @@ class MgmtGatewaySpec(ServiceSpec):
yaml.add_representer(MgmtGatewaySpec, ServiceSpec.yaml_representer)
+class OAuth2ProxySpec(ServiceSpec):
+ def __init__(self,
+ service_type: str = 'oauth2-proxy',
+ service_id: Optional[str] = None,
+ config: Optional[Dict[str, str]] = None,
+ networks: Optional[List[str]] = None,
+ placement: Optional[PlacementSpec] = None,
+ https_address: Optional[str] = None,
+ provider_display_name: Optional[str] = None,
+ client_id: Optional[str] = None,
+ client_secret: Optional[str] = None,
+ oidc_issuer_url: Optional[str] = None,
+ redirect_url: Optional[str] = None,
+ cookie_secret: Optional[str] = None,
+ ssl_certificate: Optional[str] = None,
+ ssl_certificate_key: Optional[str] = None,
+ unmanaged: bool = False,
+ extra_container_args: Optional[GeneralArgList] = None,
+ extra_entrypoint_args: Optional[GeneralArgList] = None,
+ custom_configs: Optional[List[CustomConfig]] = None,
+ ):
+ assert service_type == 'oauth2-proxy'
+
+ super(OAuth2ProxySpec, self).__init__(
+ 'oauth2-proxy', service_id=service_id,
+ placement=placement, config=config,
+ networks=networks,
+ extra_container_args=extra_container_args,
+ extra_entrypoint_args=extra_entrypoint_args,
+ custom_configs=custom_configs
+ )
+ #: The address for HTTPS connections, formatted as 'host:port'.
+ self.https_address = https_address
+ #: The display name for the identity provider (IDP) in the UI.
+ self.provider_display_name = provider_display_name
+ #: The client ID for authenticating with the identity provider.
+ self.client_id = client_id
+ #: The client secret for authenticating with the identity provider.
+ self.client_secret = client_secret
+ #: The URL of the OpenID Connect (OIDC) issuer.
+ self.oidc_issuer_url = oidc_issuer_url
+        #: The URL oauth2-proxy will redirect to after a successful login. If not
+        # provided, cephadm will automatically calculate the value of this URL.
+ self.redirect_url = redirect_url
+ #: The secret key used for signing cookies. Its length must be 16,
+ # 24, or 32 bytes to create an AES cipher.
+ self.cookie_secret = cookie_secret
+ #: The multi-line SSL certificate for encrypting communications.
+ self.ssl_certificate = ssl_certificate
+ #: The multi-line SSL certificate private key for decrypting communications.
+ self.ssl_certificate_key = ssl_certificate_key
+ self.unmanaged = unmanaged
+
+ def get_port_start(self) -> List[int]:
+ ports = [4180]
+ return ports
+
+ def validate(self) -> None:
+ super(OAuth2ProxySpec, self).validate()
+ self._validate_non_empty_string(self.provider_display_name, "provider_display_name")
+ self._validate_non_empty_string(self.client_id, "client_id")
+ self._validate_non_empty_string(self.client_secret, "client_secret")
+ self._validate_cookie_secret(self.cookie_secret)
+ self._validate_url(self.oidc_issuer_url, "oidc_issuer_url")
+ if self.redirect_url is not None:
+ self._validate_url(self.redirect_url, "redirect_url")
+ if self.https_address is not None:
+ self._validate_https_address(self.https_address)
+
+ def _validate_non_empty_string(self, value: Optional[str], field_name: str) -> None:
+ if not value or not isinstance(value, str) or not value.strip():
+ raise SpecValidationError(f"Invalid {field_name}: Must be a non-empty string.")
+
+ def _validate_url(self, url: Optional[str], field_name: str) -> None:
+ from urllib.parse import urlparse
+ try:
+ result = urlparse(url)
+ except Exception as e:
+ raise SpecValidationError(f"Invalid {field_name}: {e}. Must be a valid URL.")
+ else:
+ if not all([result.scheme, result.netloc]):
+ raise SpecValidationError(f"Error parsing {field_name} field: Must be a valid URL.")
+
+ def _validate_https_address(self, https_address: Optional[str]) -> None:
+ from urllib.parse import urlparse
+ result = urlparse(f'http://{https_address}')
+ # Check if netloc contains a valid IP or hostname and a port
+ if not result.netloc or ':' not in result.netloc:
+ raise SpecValidationError("Invalid https_address: Valid format [IP|hostname]:port.")
+ # Split netloc into hostname and port
+ hostname, port = result.netloc.rsplit(':', 1)
+ # Validate port
+ if not port.isdigit() or not (0 <= int(port) <= 65535):
+ raise SpecValidationError("Invalid https_address: Port must be between 0 and 65535.")
+
+ def _validate_cookie_secret(self, cookie_secret: Optional[str]) -> None:
+ if cookie_secret is None:
+ return
+ if not isinstance(cookie_secret, str):
+ raise SpecValidationError("Invalid cookie_secret: Must be a non-empty string.")
+
+ import base64
+ import binascii
+ try:
+ # Try decoding the cookie_secret as base64
+ decoded_secret = base64.urlsafe_b64decode(cookie_secret)
+ length = len(decoded_secret)
+ except binascii.Error:
+ # If decoding fails, consider it as a plain string
+ length = len(cookie_secret.encode('utf-8'))
+
+ if length not in [16, 24, 32]:
+ raise SpecValidationError(f"cookie_secret is {length} bytes "
+ "but must be 16, 24, or 32 bytes to create an AES cipher.")
+
+
+yaml.add_representer(OAuth2ProxySpec, ServiceSpec.yaml_representer)
+
+
class InitContainerSpec(object):
"""An init container is not a service that lives on its own, but rather
is used to run and exit prior to a service container starting in order