Diffstat (limited to 'src/python-common')
-rw-r--r--  src/python-common/CMakeLists.txt    2
-rw-r--r--  src/python-common/ceph/cephadm/__init__.py    2
-rw-r--r--  src/python-common/ceph/cephadm/images.py    57
-rw-r--r--  src/python-common/ceph/deployment/drive_group.py    4
-rw-r--r--  src/python-common/ceph/deployment/drive_selection/filter.py    6
-rw-r--r--  src/python-common/ceph/deployment/drive_selection/matchers.py    5
-rw-r--r--  src/python-common/ceph/deployment/drive_selection/selector.py    2
-rw-r--r--  src/python-common/ceph/deployment/inventory.py    2
-rw-r--r--  src/python-common/ceph/deployment/service_spec.py    228
-rw-r--r--  src/python-common/ceph/deployment/translate.py    2
-rw-r--r--  src/python-common/ceph/deployment/utils.py    51
-rw-r--r--  src/python-common/ceph/fs/earmarking.py    20
-rw-r--r--  src/python-common/ceph/tests/utils.py    3
-rw-r--r--  src/python-common/requirements-lint.txt    2
-rw-r--r--  src/python-common/tox.ini    12
15 files changed, 278 insertions, 120 deletions
diff --git a/src/python-common/CMakeLists.txt b/src/python-common/CMakeLists.txt
index e89bbe2feef..08660342a6a 100644
--- a/src/python-common/CMakeLists.txt
+++ b/src/python-common/CMakeLists.txt
@@ -3,5 +3,5 @@ distutils_install_module(ceph)
if(WITH_TESTS)
include(AddCephTest)
- add_tox_test(python-common TOX_ENVS py3 lint)
+ add_tox_test(python-common TOX_ENVS __tox_defaults__)
endif()
diff --git a/src/python-common/ceph/cephadm/__init__.py b/src/python-common/ceph/cephadm/__init__.py
new file mode 100644
index 00000000000..3c74dfd3941
--- /dev/null
+++ b/src/python-common/ceph/cephadm/__init__.py
@@ -0,0 +1,2 @@
+# this directory is meant for things that will be shared only between
+# the cephadm binary and cephadm mgr module
diff --git a/src/python-common/ceph/cephadm/images.py b/src/python-common/ceph/cephadm/images.py
new file mode 100644
index 00000000000..5b3c7421205
--- /dev/null
+++ b/src/python-common/ceph/cephadm/images.py
@@ -0,0 +1,57 @@
+# Default container images -----------------------------------------------------
+
+from typing import NamedTuple
+from enum import Enum
+
+
+class ContainerImage(NamedTuple):
+ image_ref: str # reference to default container image
+ key: str # image key
+ desc: str # description of image
+
+ def __repr__(self) -> str:
+ return self.image_ref
+
+
+def _create_image(image_ref: str, key: str) -> ContainerImage:
+ _img_prefix = 'container_image_'
+ description = key.replace('_', ' ').capitalize()
+ return ContainerImage(
+ image_ref,
+ f'{_img_prefix}{key}',
+ f'{description} container image'
+ )
+
+
+class DefaultImages(Enum):
+ PROMETHEUS = _create_image('quay.io/prometheus/prometheus:v2.51.0', 'prometheus')
+ LOKI = _create_image('docker.io/grafana/loki:3.0.0', 'loki')
+ PROMTAIL = _create_image('docker.io/grafana/promtail:3.0.0', 'promtail')
+ NODE_EXPORTER = _create_image('quay.io/prometheus/node-exporter:v1.7.0', 'node_exporter')
+ ALERTMANAGER = _create_image('quay.io/prometheus/alertmanager:v0.27.0', 'alertmanager')
+ GRAFANA = _create_image('quay.io/ceph/grafana:10.4.8', 'grafana')
+ HAPROXY = _create_image('quay.io/ceph/haproxy:2.3', 'haproxy')
+ KEEPALIVED = _create_image('quay.io/ceph/keepalived:2.2.4', 'keepalived')
+ NVMEOF = _create_image('quay.io/ceph/nvmeof:1.4', 'nvmeof')
+ SNMP_GATEWAY = _create_image('docker.io/maxwo/snmp-notifier:v1.2.1', 'snmp_gateway')
+ ELASTICSEARCH = _create_image('quay.io/omrizeneva/elasticsearch:6.8.23', 'elasticsearch')
+ JAEGER_COLLECTOR = _create_image('quay.io/jaegertracing/jaeger-collector:1.29',
+ 'jaeger_collector')
+ JAEGER_AGENT = _create_image('quay.io/jaegertracing/jaeger-agent:1.29', 'jaeger_agent')
+ JAEGER_QUERY = _create_image('quay.io/jaegertracing/jaeger-query:1.29', 'jaeger_query')
+ SAMBA = _create_image('quay.io/samba.org/samba-server:devbuilds-centos-amd64', 'samba')
+ SAMBA_METRICS = _create_image('quay.io/samba.org/samba-metrics:latest', 'samba_metrics')
+ NGINX = _create_image('quay.io/ceph/nginx:sclorg-nginx-126', 'nginx')
+ OAUTH2_PROXY = _create_image('quay.io/oauth2-proxy/oauth2-proxy:v7.6.0', 'oauth2_proxy')
+
+ @property
+ def image_ref(self) -> str:
+ return self.value.image_ref
+
+ @property
+ def key(self) -> str:
+ return self.value.key
+
+ @property
+ def desc(self) -> str:
+ return self.value.desc
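
Note: the DefaultImages enum above gives each default image a stable config key (prefixed with container_image_) and a description derived from that key, so the cephadm binary and the mgr module can share one list. A minimal usage sketch based only on the code in this new file:

    from ceph.cephadm.images import DefaultImages

    # Look up one default image and its derived metadata.
    prom = DefaultImages.PROMETHEUS
    print(prom.image_ref)   # quay.io/prometheus/prometheus:v2.51.0
    print(prom.key)         # container_image_prometheus
    print(prom.desc)        # Prometheus container image

    # Or iterate over all defaults, e.g. when registering config options.
    for img in DefaultImages:
        print(f'{img.key} -> {img.image_ref}')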
diff --git a/src/python-common/ceph/deployment/drive_group.py b/src/python-common/ceph/deployment/drive_group.py
index c68ee01a728..43175aa79fb 100644
--- a/src/python-common/ceph/deployment/drive_group.py
+++ b/src/python-common/ceph/deployment/drive_group.py
@@ -2,7 +2,7 @@ import enum
import yaml
from ceph.deployment.inventory import Device
-from ceph.deployment.service_spec import (
+from ceph.deployment.service_spec import ( # noqa: F401 (type comments)
CustomConfig,
GeneralArgList,
PlacementSpec,
@@ -11,7 +11,7 @@ from ceph.deployment.service_spec import (
from ceph.deployment.hostspec import SpecValidationError
try:
- from typing import Optional, List, Dict, Any, Union
+ from typing import Optional, List, Dict, Any, Union # noqa: F401
except ImportError:
pass
diff --git a/src/python-common/ceph/deployment/drive_selection/filter.py b/src/python-common/ceph/deployment/drive_selection/filter.py
index 0da1b5c3901..28f63ddc2f2 100644
--- a/src/python-common/ceph/deployment/drive_selection/filter.py
+++ b/src/python-common/ceph/deployment/drive_selection/filter.py
@@ -15,12 +15,10 @@ logger = logging.getLogger(__name__)
class FilterGenerator(object):
- def __init__(self, device_filter):
- # type: (DeviceSelection) -> None
+ def __init__(self, device_filter: DeviceSelection) -> None:
self.device_filter = device_filter
- def __iter__(self):
- # type: () -> Generator[Matcher, None, None]
+ def __iter__(self) -> Generator[Matcher, None, None]:
if self.device_filter.actuators:
yield EqualityMatcher('actuators', self.device_filter.actuators)
if self.device_filter.size:
diff --git a/src/python-common/ceph/deployment/drive_selection/matchers.py b/src/python-common/ceph/deployment/drive_selection/matchers.py
index df502410aeb..a6a2147ce9e 100644
--- a/src/python-common/ceph/deployment/drive_selection/matchers.py
+++ b/src/python-common/ceph/deployment/drive_selection/matchers.py
@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
-from typing import Tuple, Optional, Any, Union, Iterator
+# TODO: remove noqa and update to python3/mypy style type annotations
+from typing import Tuple, Optional, Any, Union, Iterator # noqa: F401
-from ceph.deployment.inventory import Device
+from ceph.deployment.inventory import Device # noqa: F401
import re
import logging
diff --git a/src/python-common/ceph/deployment/drive_selection/selector.py b/src/python-common/ceph/deployment/drive_selection/selector.py
index 59ebbb6347e..85fc95cf394 100644
--- a/src/python-common/ceph/deployment/drive_selection/selector.py
+++ b/src/python-common/ceph/deployment/drive_selection/selector.py
@@ -3,7 +3,7 @@ import logging
from typing import List, Optional, Dict, Callable
from ..inventory import Device
-from ..drive_group import DriveGroupSpec, DeviceSelection, DriveGroupValidationError
+from ..drive_group import DriveGroupSpec, DeviceSelection, DriveGroupValidationError # noqa: F401
from .filter import FilterGenerator
from .matchers import _MatchInvalid
diff --git a/src/python-common/ceph/deployment/inventory.py b/src/python-common/ceph/deployment/inventory.py
index e2c1a5605f9..29475e94d82 100644
--- a/src/python-common/ceph/deployment/inventory.py
+++ b/src/python-common/ceph/deployment/inventory.py
@@ -1,5 +1,5 @@
try:
- from typing import List, Optional, Dict, Any, Union
+ from typing import List, Optional, Dict, Any, Union # noqa: F401
except ImportError:
pass # for type checking
diff --git a/src/python-common/ceph/deployment/service_spec.py b/src/python-common/ceph/deployment/service_spec.py
index 459ab7df1a0..1ac9fa49e32 100644
--- a/src/python-common/ceph/deployment/service_spec.py
+++ b/src/python-common/ceph/deployment/service_spec.py
@@ -25,7 +25,9 @@ from typing import (
import yaml
from ceph.deployment.hostspec import HostSpec, SpecValidationError, assert_valid_host
-from ceph.deployment.utils import unwrap_ipv6, valid_addr
+from ceph.deployment.utils import unwrap_ipv6, valid_addr, verify_non_negative_int
+from ceph.deployment.utils import verify_positive_int, verify_non_negative_number
+from ceph.deployment.utils import verify_boolean, verify_enum
from ceph.utils import is_hex
ServiceSpecT = TypeVar('ServiceSpecT', bound='ServiceSpec')
@@ -527,8 +529,8 @@ pattern_type=PatternType.fnmatch))
labels = [x for x in strings if 'label:' in x]
if len(labels) > 1:
raise SpecValidationError('more than one label provided: {}'.format(labels))
- for l in labels:
- strings.remove(l)
+ for lbl in labels:
+ strings.remove(lbl)
label = labels[0][6:] if labels else None
host_patterns = strings
@@ -701,7 +703,7 @@ class ArgumentSpec:
if isinstance(data, str):
return cls(data, split=True, origin=cls.OriginalType.STRING)
if 'argument' not in data:
- raise SpecValidationError(f'ArgumentSpec must have an "argument" field')
+ raise SpecValidationError('ArgumentSpec must have an "argument" field')
for k in data.keys():
if k not in cls._fields:
raise SpecValidationError(f'ArgumentSpec got an unknown field {k!r}')
@@ -1229,6 +1231,7 @@ class RGWSpec(ServiceSpec):
rgw_bucket_counters_cache: Optional[bool] = False,
rgw_bucket_counters_cache_size: Optional[int] = None,
generate_cert: bool = False,
+ disable_multisite_sync_traffic: Optional[bool] = None,
):
assert service_type == 'rgw', service_type
@@ -1281,6 +1284,8 @@ class RGWSpec(ServiceSpec):
self.rgw_bucket_counters_cache_size = rgw_bucket_counters_cache_size
#: Whether we should generate a cert/key for the user if not provided
self.generate_cert = generate_cert
+ #: Whether to disable multisite sync traffic on this RGW so it can be dedicated to IO
+ self.disable_multisite_sync_traffic = disable_multisite_sync_traffic
def get_port_start(self) -> List[int]:
return [self.get_port()]
@@ -1313,6 +1318,10 @@ class RGWSpec(ServiceSpec):
raise SpecValidationError('"ssl" field must be set to true when "generate_cert" '
'is set to true')
+ if self.generate_cert and self.rgw_frontend_ssl_certificate:
+ raise SpecValidationError('"generate_cert" field and "rgw_frontend_ssl_certificate" '
+ 'field are mutually exclusive')
+
yaml.add_representer(RGWSpec, ServiceSpec.yaml_representer)
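
Note: the new check above makes generate_cert and a user-supplied rgw_frontend_ssl_certificate mutually exclusive. A hedged sketch of how this surfaces to callers; the constructor arguments other than generate_cert are assumptions inferred from the surrounding validation code, not part of this diff:

    from ceph.deployment.service_spec import RGWSpec
    from ceph.deployment.hostspec import SpecValidationError

    # service_id, ssl and rgw_frontend_ssl_certificate keyword arguments are
    # assumed; the error text comes from the validation added above.
    spec = RGWSpec(
        service_id='myrealm.myzone',
        ssl=True,
        generate_cert=True,
        rgw_frontend_ssl_certificate='-----BEGIN CERTIFICATE-----...',
    )
    try:
        spec.validate()
    except SpecValidationError as e:
        print(e)  # '"generate_cert" field and "rgw_frontend_ssl_certificate" field are mutually exclusive'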
@@ -1324,31 +1333,50 @@ class NvmeofServiceSpec(ServiceSpec):
name: Optional[str] = None,
group: Optional[str] = None,
addr: Optional[str] = None,
+ addr_map: Optional[Dict[str, str]] = None,
port: Optional[int] = None,
pool: Optional[str] = None,
enable_auth: bool = False,
state_update_notify: Optional[bool] = True,
state_update_interval_sec: Optional[int] = 5,
enable_spdk_discovery_controller: Optional[bool] = False,
+ enable_key_encryption: Optional[bool] = True,
+ encryption_key: Optional[str] = None,
+ rebalance_period_sec: Optional[int] = 7,
+ max_gws_in_grp: Optional[int] = 16,
+ max_ns_to_change_lb_grp: Optional[int] = 8,
omap_file_lock_duration: Optional[int] = 20,
omap_file_lock_retries: Optional[int] = 30,
omap_file_lock_retry_sleep_interval: Optional[float] = 1.0,
omap_file_update_reloads: Optional[int] = 10,
enable_prometheus_exporter: Optional[bool] = True,
+ prometheus_port: Optional[int] = 10008,
+ prometheus_stats_interval: Optional[int] = 10,
bdevs_per_cluster: Optional[int] = 32,
verify_nqns: Optional[bool] = True,
+ verify_keys: Optional[bool] = True,
allowed_consecutive_spdk_ping_failures: Optional[int] = 1,
spdk_ping_interval_in_seconds: Optional[float] = 2.0,
ping_spdk_under_lock: Optional[bool] = False,
+ max_hosts_per_namespace: Optional[int] = 8,
+ max_namespaces_with_netmask: Optional[int] = 1000,
+ max_subsystems: Optional[int] = 128,
+ max_namespaces: Optional[int] = 1024,
+ max_namespaces_per_subsystem: Optional[int] = 256,
+ max_hosts_per_subsystem: Optional[int] = 32,
server_key: Optional[str] = None,
server_cert: Optional[str] = None,
client_key: Optional[str] = None,
client_cert: Optional[str] = None,
root_ca_cert: Optional[str] = None,
+ # unused and duplicate of tgt_path below, consider removing
spdk_path: Optional[str] = None,
+ spdk_mem_size: Optional[int] = None,
tgt_path: Optional[str] = None,
spdk_timeout: Optional[float] = 60.0,
- spdk_log_level: Optional[str] = 'WARNING',
+ spdk_log_level: Optional[str] = '',
+ spdk_protocol_log_level: Optional[str] = 'WARNING',
+ spdk_log_file_dir: Optional[str] = '',
rpc_socket_dir: Optional[str] = '/var/tmp/',
rpc_socket_name: Optional[str] = 'spdk.sock',
conn_retries: Optional[int] = 10,
@@ -1357,6 +1385,7 @@ class NvmeofServiceSpec(ServiceSpec):
{"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7},
tgt_cmd_extra_args: Optional[str] = None,
discovery_addr: Optional[str] = None,
+ discovery_addr_map: Optional[Dict[str, str]] = None,
discovery_port: Optional[int] = None,
log_level: Optional[str] = 'INFO',
log_files_enabled: Optional[bool] = True,
@@ -1368,6 +1397,7 @@ class NvmeofServiceSpec(ServiceSpec):
log_directory: Optional[str] = '/var/log/ceph/',
monitor_timeout: Optional[float] = 1.0,
enable_monitor_client: bool = True,
+ monitor_client_log_file_dir: Optional[str] = '',
placement: Optional[PlacementSpec] = None,
unmanaged: bool = False,
preview_only: bool = False,
@@ -1390,6 +1420,8 @@ class NvmeofServiceSpec(ServiceSpec):
self.pool = pool
#: ``addr`` address of the nvmeof gateway
self.addr = addr
+ #: ``addr_map`` per node address map of the nvmeof gateways
+ self.addr_map = addr_map
#: ``port`` port of the nvmeof gateway
self.port = port or 5500
#: ``name`` name of the nvmeof gateway
@@ -1404,10 +1436,26 @@ class NvmeofServiceSpec(ServiceSpec):
self.state_update_interval_sec = state_update_interval_sec
#: ``enable_spdk_discovery_controller`` SPDK or ceph-nvmeof discovery service
self.enable_spdk_discovery_controller = enable_spdk_discovery_controller
+ #: ``enable_key_encryption`` encrypt DHCHAP and PSK keys before saving in OMAP
+ self.enable_key_encryption = enable_key_encryption
+ #: ``encryption_key`` gateway encryption key
+ self.encryption_key = encryption_key
+ #: ``rebalance_period_sec`` number of seconds between cycles of auto namespace rebalancing
+ self.rebalance_period_sec = rebalance_period_sec
+ #: ``max_gws_in_grp`` max number of gateways in one group
+ self.max_gws_in_grp = max_gws_in_grp
+ #: ``max_ns_to_change_lb_grp`` max number of namespaces before switching to a new lb group
+ self.max_ns_to_change_lb_grp = max_ns_to_change_lb_grp
#: ``enable_prometheus_exporter`` enables Prometheus exporter
self.enable_prometheus_exporter = enable_prometheus_exporter
+ #: ``prometheus_port`` Prometheus port
+ self.prometheus_port = prometheus_port or 10008
+ #: ``prometheus_stats_interval`` Prometheus get stats interval
+ self.prometheus_stats_interval = prometheus_stats_interval
#: ``verify_nqns`` enables verification of subsystem and host NQNs for validity
self.verify_nqns = verify_nqns
+ #: ``verify_keys`` enables verification of PSK and DHCHAP keys in the gateway
+ self.verify_keys = verify_keys
#: ``omap_file_lock_duration`` number of seconds before automatically unlock OMAP file lock
self.omap_file_lock_duration = omap_file_lock_duration
#: ``omap_file_lock_retries`` number of retries to lock OMAP file before giving up
@@ -1416,6 +1464,18 @@ class NvmeofServiceSpec(ServiceSpec):
self.omap_file_lock_retry_sleep_interval = omap_file_lock_retry_sleep_interval
#: ``omap_file_update_reloads`` number of attempt to reload OMAP when it differs from local
self.omap_file_update_reloads = omap_file_update_reloads
+ #: ``max_hosts_per_namespace`` max number of hosts per namespace
+ self.max_hosts_per_namespace = max_hosts_per_namespace
+ #: ``max_namespaces_with_netmask`` max number of namespaces which are not auto visible
+ self.max_namespaces_with_netmask = max_namespaces_with_netmask
+ #: ``max_subsystems`` max number of subsystems
+ self.max_subsystems = max_subsystems
+ #: ``max_namespaces`` max number of namespaces on all subsystems
+ self.max_namespaces = max_namespaces
+ #: ``max_namespaces_per_subsystem`` max number of namespaces per subsystem
+ self.max_namespaces_per_subsystem = max_namespaces_per_subsystem
+ #: ``max_hosts_per_subsystem`` max number of hosts per subsystem
+ self.max_hosts_per_subsystem = max_hosts_per_subsystem
#: ``allowed_consecutive_spdk_ping_failures`` # of ping failures before aborting gateway
self.allowed_consecutive_spdk_ping_failures = allowed_consecutive_spdk_ping_failures
#: ``spdk_ping_interval_in_seconds`` sleep interval in seconds between SPDK pings
@@ -1434,14 +1494,20 @@ class NvmeofServiceSpec(ServiceSpec):
self.client_cert = client_cert
#: ``root_ca_cert`` CA cert for server/client certs
self.root_ca_cert = root_ca_cert
- #: ``spdk_path`` path to SPDK
+ #: ``spdk_path`` path is unused and duplicate of tgt_path below, consider removing
self.spdk_path = spdk_path or '/usr/local/bin/nvmf_tgt'
+ #: ``spdk_mem_size`` memory size in MB for DPDK
+ self.spdk_mem_size = spdk_mem_size
#: ``tgt_path`` nvmeof target path
self.tgt_path = tgt_path or '/usr/local/bin/nvmf_tgt'
#: ``spdk_timeout`` SPDK connectivity timeout
self.spdk_timeout = spdk_timeout
#: ``spdk_log_level`` the SPDK log level
- self.spdk_log_level = spdk_log_level or 'WARNING'
+ self.spdk_log_level = spdk_log_level
+ #: ``spdk_protocol_log_level`` the SPDK protocol log level
+ self.spdk_protocol_log_level = spdk_protocol_log_level or 'WARNING'
+ #: ``spdk_log_file_dir`` the SPDK log output file directory
+ self.spdk_log_file_dir = spdk_log_file_dir
#: ``rpc_socket_dir`` the SPDK RPC socket file directory
self.rpc_socket_dir = rpc_socket_dir or '/var/tmp/'
#: ``rpc_socket_name`` the SPDK RPC socket file name
@@ -1456,6 +1522,8 @@ class NvmeofServiceSpec(ServiceSpec):
self.tgt_cmd_extra_args = tgt_cmd_extra_args
#: ``discovery_addr`` address of the discovery service
self.discovery_addr = discovery_addr
+ #: ``discovery_addr_map`` per node address map of the discovery service
+ self.discovery_addr_map = discovery_addr_map
#: ``discovery_port`` port of the discovery service
self.discovery_port = discovery_port or 8009
#: ``log_level`` the nvmeof gateway log level
@@ -1478,9 +1546,11 @@ class NvmeofServiceSpec(ServiceSpec):
self.monitor_timeout = monitor_timeout
#: ``enable_monitor_client`` whether to connect to the ceph monitor or not
self.enable_monitor_client = enable_monitor_client
+ #: ``monitor_client_log_file_dir`` the monitor client log output file directory
+ self.monitor_client_log_file_dir = monitor_client_log_file_dir
def get_port_start(self) -> List[int]:
- return [5500, 4420, 8009]
+ return [self.port, 4420, self.discovery_port]
def validate(self) -> None:
# TODO: what other parameters should be validated as part of this function?
@@ -1489,6 +1559,7 @@ class NvmeofServiceSpec(ServiceSpec):
if not self.pool:
raise SpecValidationError('Cannot add NVMEOF: No Pool specified')
+ verify_boolean(self.enable_auth, "Enable authentication")
if self.enable_auth:
if not all([self.server_key, self.server_cert, self.client_key,
self.client_cert, self.root_ca_cert]):
@@ -1503,102 +1574,65 @@ class NvmeofServiceSpec(ServiceSpec):
if self.transports not in ['tcp']:
raise SpecValidationError('Invalid transport. Valid values are tcp')
- if self.log_level:
- if self.log_level.lower() not in ['debug',
- 'info',
- 'warning',
- 'error',
- 'critical']:
- raise SpecValidationError(
- 'Invalid log level. Valid values are: debug, info, warning, error, critial')
-
- if self.spdk_log_level:
- if self.spdk_log_level.lower() not in ['debug',
- 'info',
- 'warning',
- 'error',
- 'notice']:
- raise SpecValidationError(
- 'Invalid SPDK log level. Valid values are: '
- 'DEBUG, INFO, WARNING, ERROR, NOTICE')
+ verify_enum(self.log_level, "log level", ['debug', 'info', 'warning', 'error', 'critical'])
+ verify_enum(self.spdk_log_level, "SPDK log level",
+ ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'NOTICE'])
+ verify_enum(self.spdk_protocol_log_level, "SPDK protocol log level",
+ ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'NOTICE'])
+ verify_positive_int(self.bdevs_per_cluster, "Bdevs per cluster")
+ if self.bdevs_per_cluster is not None and self.bdevs_per_cluster < 1:
+ raise SpecValidationError("Bdevs per cluster should be at least 1")
+ verify_non_negative_number(self.spdk_ping_interval_in_seconds, "SPDK ping interval")
if (
- self.spdk_ping_interval_in_seconds
+ self.spdk_ping_interval_in_seconds is not None
and self.spdk_ping_interval_in_seconds < 1.0
):
raise SpecValidationError("SPDK ping interval should be at least 1 second")
+ verify_non_negative_int(self.allowed_consecutive_spdk_ping_failures,
+ "Allowed consecutive SPDK ping failures")
if (
- self.allowed_consecutive_spdk_ping_failures
+ self.allowed_consecutive_spdk_ping_failures is not None
and self.allowed_consecutive_spdk_ping_failures < 1
):
raise SpecValidationError("Allowed consecutive SPDK ping failures should be at least 1")
- if (
- self.state_update_interval_sec
- and self.state_update_interval_sec < 0
- ):
- raise SpecValidationError("State update interval can't be negative")
-
- if (
- self.omap_file_lock_duration
- and self.omap_file_lock_duration < 0
- ):
- raise SpecValidationError("OMAP file lock duration can't be negative")
-
- if (
- self.omap_file_lock_retries
- and self.omap_file_lock_retries < 0
- ):
- raise SpecValidationError("OMAP file lock retries can't be negative")
-
- if (
- self.omap_file_update_reloads
- and self.omap_file_update_reloads < 0
- ):
- raise SpecValidationError("OMAP file reloads can't be negative")
-
- if (
- self.spdk_timeout
- and self.spdk_timeout < 0.0
- ):
- raise SpecValidationError("SPDK timeout can't be negative")
-
- if (
- self.conn_retries
- and self.conn_retries < 0
- ):
- raise SpecValidationError("Connection retries can't be negative")
-
- if (
- self.max_log_file_size_in_mb
- and self.max_log_file_size_in_mb < 0
- ):
- raise SpecValidationError("Log file size can't be negative")
-
- if (
- self.max_log_files_count
- and self.max_log_files_count < 0
- ):
- raise SpecValidationError("Log files count can't be negative")
-
- if (
- self.max_log_directory_backups
- and self.max_log_directory_backups < 0
- ):
- raise SpecValidationError("Log file directory backups can't be negative")
-
- if (
- self.monitor_timeout
- and self.monitor_timeout < 0.0
- ):
- raise SpecValidationError("Monitor timeout can't be negative")
-
- if self.port and self.port < 0:
- raise SpecValidationError("Port can't be negative")
-
- if self.discovery_port and self.discovery_port < 0:
- raise SpecValidationError("Discovery port can't be negative")
+ verify_non_negative_int(self.state_update_interval_sec, "State update interval")
+ verify_non_negative_int(self.rebalance_period_sec, "Rebalance period")
+ verify_non_negative_int(self.max_gws_in_grp, "Max gateways in group")
+ verify_non_negative_int(self.max_ns_to_change_lb_grp,
+ "Max namespaces to change load balancing group")
+ verify_non_negative_int(self.omap_file_lock_duration, "OMAP file lock duration")
+ verify_non_negative_number(self.omap_file_lock_retry_sleep_interval,
+ "OMAP file lock sleep interval")
+ verify_non_negative_int(self.omap_file_lock_retries, "OMAP file lock retries")
+ verify_non_negative_int(self.omap_file_update_reloads, "OMAP file reloads")
+ verify_non_negative_number(self.spdk_timeout, "SPDK timeout")
+ verify_non_negative_int(self.max_log_file_size_in_mb, "Log file size")
+ verify_non_negative_int(self.max_log_files_count, "Log files count")
+ verify_non_negative_int(self.max_log_directory_backups, "Log file directory backups")
+ verify_non_negative_int(self.max_hosts_per_namespace, "Max hosts per namespace")
+ verify_non_negative_int(self.max_namespaces_with_netmask, "Max namespaces with netmask")
+ verify_positive_int(self.max_subsystems, "Max subsystems")
+ verify_positive_int(self.max_namespaces, "Max namespaces")
+ verify_positive_int(self.max_namespaces_per_subsystem, "Max namespaces per subsystem")
+ verify_positive_int(self.max_hosts_per_subsystem, "Max hosts per subsystem")
+ verify_non_negative_number(self.monitor_timeout, "Monitor timeout")
+ verify_non_negative_int(self.port, "Port")
+ verify_non_negative_int(self.discovery_port, "Discovery port")
+ verify_non_negative_int(self.prometheus_port, "Prometheus port")
+ verify_non_negative_int(self.prometheus_stats_interval, "Prometheus stats interval")
+ verify_boolean(self.state_update_notify, "State update notify")
+ verify_boolean(self.enable_spdk_discovery_controller, "Enable SPDK discovery controller")
+ verify_boolean(self.enable_key_encryption, "Enable key encryption")
+ verify_boolean(self.enable_prometheus_exporter, "Enable Prometheus exporter")
+ verify_boolean(self.verify_nqns, "Verify NQNs")
+ verify_boolean(self.verify_keys, "Verify Keys")
+ verify_boolean(self.log_files_enabled, "Log files enabled")
+ verify_boolean(self.log_files_rotation_enabled, "Log files rotation enabled")
+ verify_boolean(self.verbose_log_messages, "Verbose log messages")
+ verify_boolean(self.enable_monitor_client, "Enable monitor client")
yaml.add_representer(NvmeofServiceSpec, ServiceSpec.yaml_representer)
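
Note: besides deduplicating the open-coded range checks, the switch to ``is not None`` guards means a value of 0 (falsy in Python) is no longer silently skipped by validate(). A sketch of the resulting behaviour, assuming the usual service_type/service_id constructor arguments that are not visible in this hunk:

    from ceph.deployment.service_spec import NvmeofServiceSpec
    from ceph.deployment.hostspec import SpecValidationError

    # service_type and service_id keyword arguments are assumed; pool and
    # spdk_ping_interval_in_seconds appear in the diff above.
    spec = NvmeofServiceSpec(
        service_type='nvmeof',
        service_id='mypool.group1',
        pool='mypool',
        spdk_ping_interval_in_seconds=0.0,  # falsy, previously skipped by the old check
    )
    try:
        spec.validate()
    except SpecValidationError as e:
        print(e)  # SPDK ping interval should be at least 1 second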
@@ -1762,7 +1796,7 @@ class IngressSpec(ServiceSpec):
if not self.keepalive_only and not self.frontend_port:
raise SpecValidationError(
'Cannot add ingress: No frontend_port specified')
- if not self.monitor_port:
+ if not self.keepalive_only and not self.monitor_port:
raise SpecValidationError(
'Cannot add ingress: No monitor_port specified')
if not self.virtual_ip and not self.virtual_ips_list:
@@ -1805,6 +1839,7 @@ class MgmtGatewaySpec(ServiceSpec):
ssl_protocols: Optional[List[str]] = None,
ssl_ciphers: Optional[List[str]] = None,
enable_health_check_endpoint: bool = False,
+ virtual_ip: Optional[str] = None,
preview_only: bool = False,
unmanaged: bool = False,
extra_container_args: Optional[GeneralArgList] = None,
@@ -1851,6 +1886,7 @@ class MgmtGatewaySpec(ServiceSpec):
#: List of supported secure SSL ciphers. Changing this list may reduce system security.
self.ssl_ciphers = ssl_ciphers
self.enable_health_check_endpoint = enable_health_check_endpoint
+ self.virtual_ip = virtual_ip
def get_port_start(self) -> List[int]:
ports = []
@@ -2295,6 +2331,7 @@ class AlertManagerSpec(MonitoringSpec):
user_data: Optional[Dict[str, Any]] = None,
config: Optional[Dict[str, str]] = None,
networks: Optional[List[str]] = None,
+ only_bind_port_on_networks: bool = False,
port: Optional[int] = None,
secure: bool = False,
extra_container_args: Optional[GeneralArgList] = None,
@@ -2325,6 +2362,7 @@ class AlertManagerSpec(MonitoringSpec):
# <webhook_configs> configuration.
self.user_data = user_data or {}
self.secure = secure
+ self.only_bind_port_on_networks = only_bind_port_on_networks
def get_port_start(self) -> List[int]:
return [self.get_port(), 9094]
@@ -2371,7 +2409,7 @@ class GrafanaSpec(MonitoringSpec):
self.protocol = protocol
# whether ports daemons for this service bind to should
- # bind to only hte networks listed in networks param, or
+ # bind to only the networks listed in networks param, or
# to all networks. Defaults to false which is saying to bind
# on all networks.
self.only_bind_port_on_networks = only_bind_port_on_networks
diff --git a/src/python-common/ceph/deployment/translate.py b/src/python-common/ceph/deployment/translate.py
index 49fb17da725..9dfe7cfcf81 100644
--- a/src/python-common/ceph/deployment/translate.py
+++ b/src/python-common/ceph/deployment/translate.py
@@ -5,7 +5,7 @@ try:
except ImportError:
pass
-from ceph.deployment.drive_selection.selector import DriveSelection
+from ceph.deployment.drive_selection.selector import DriveSelection # noqa: F401
logger = logging.getLogger(__name__)
diff --git a/src/python-common/ceph/deployment/utils.py b/src/python-common/ceph/deployment/utils.py
index f800e373897..758eddc9412 100644
--- a/src/python-common/ceph/deployment/utils.py
+++ b/src/python-common/ceph/deployment/utils.py
@@ -1,7 +1,9 @@
import ipaddress
import socket
-from typing import Tuple, Optional
+from typing import Tuple, Optional, Any
from urllib.parse import urlparse
+from ceph.deployment.hostspec import SpecValidationError
+from numbers import Number
def unwrap_ipv6(address):
@@ -100,3 +102,50 @@ def valid_addr(addr: str) -> Tuple[bool, str]:
if addr[0].isalpha() and '.' in addr:
return _dns_lookup(addr, port)
return _ip_lookup(addr, port)
+
+
+def verify_numeric(field: Any, field_name: str) -> None:
+ if field is not None:
+ if not isinstance(field, Number) or isinstance(field, bool):
+ raise SpecValidationError(f"{field_name} must be a number")
+
+
+def verify_non_negative_int(field: Any, field_name: str) -> None:
+ verify_numeric(field, field_name)
+ if field is not None:
+ if not isinstance(field, int) or isinstance(field, bool):
+ raise SpecValidationError(f"{field_name} must be an integer")
+ if field < 0:
+ raise SpecValidationError(f"{field_name} can't be negative")
+
+
+def verify_positive_int(field: Any, field_name: str) -> None:
+ verify_non_negative_int(field, field_name)
+ if field is not None and field <= 0:
+ raise SpecValidationError(f"{field_name} must be greater than zero")
+
+
+def verify_non_negative_number(field: Any, field_name: str) -> None:
+ verify_numeric(field, field_name)
+ if field is not None:
+ if field < 0.0:
+ raise SpecValidationError(f"{field_name} can't be negative")
+
+
+def verify_boolean(field: Any, field_name: str) -> None:
+ if field is not None:
+ if not isinstance(field, bool):
+ raise SpecValidationError(f"{field_name} must be a boolean")
+
+
+def verify_enum(field: Any, field_name: str, allowed: list) -> None:
+ if field:
+ allowed_lower = []
+ if not isinstance(field, str):
+ raise SpecValidationError(f"{field_name} must be a string")
+ for val in allowed:
+ assert isinstance(val, str)
+ allowed_lower.append(val.lower())
+ if field.lower() not in allowed_lower:
+ raise SpecValidationError(
+ f'Invalid {field_name}. Valid values are: {", ".join(allowed)}')
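
Note: these helpers centralize the argument checks that NvmeofServiceSpec.validate() previously open-coded. verify_enum compares case-insensitively, and the integer variants explicitly reject bool (a subclass of int). A quick illustration using only the functions defined above:

    from ceph.deployment.utils import (
        verify_enum,
        verify_non_negative_int,
        verify_positive_int,
    )
    from ceph.deployment.hostspec import SpecValidationError

    verify_non_negative_int(30, "OMAP file lock retries")                   # passes
    verify_enum('warning', "log level",
                ['debug', 'info', 'warning', 'error', 'critical'])          # case-insensitive, passes
    try:
        verify_positive_int(0, "Max subsystems")
    except SpecValidationError as e:
        print(e)  # Max subsystems must be greater than zero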
diff --git a/src/python-common/ceph/fs/earmarking.py b/src/python-common/ceph/fs/earmarking.py
index c5d4a59a4d5..f4fd4ddf96c 100644
--- a/src/python-common/ceph/fs/earmarking.py
+++ b/src/python-common/ceph/fs/earmarking.py
@@ -19,13 +19,25 @@ supported top-level scopes.
import errno
import enum
import logging
-from typing import List, NamedTuple, Optional, Tuple
+from typing import List, NamedTuple, Optional, Tuple, Protocol
log = logging.getLogger(__name__)
XATTR_SUBVOLUME_EARMARK_NAME = 'user.ceph.subvolume.earmark'
+class FSOperations(Protocol):
+ """Protocol class representing the file system operations earmarking
+ classes will perform.
+ """
+
+ def setxattr(
+ self, path: str, key: str, value: bytes, flags: int
+ ) -> None: ...
+
+ def getxattr(self, path: str, key: str) -> bytes: ...
+
+
class EarmarkTopScope(enum.Enum):
NFS = "nfs"
SMB = "smb"
@@ -53,11 +65,11 @@ class EarmarkParseError(ValueError):
class CephFSVolumeEarmarking:
- def __init__(self, fs, path: str) -> None:
+ def __init__(self, fs: FSOperations, path: str) -> None:
self.fs = fs
self.path = path
- def _handle_cephfs_error(self, e: Exception, action: str) -> None:
+ def _handle_cephfs_error(self, e: Exception, action: str) -> Optional[str]:
if isinstance(e, ValueError):
raise EarmarkException(errno.EINVAL, f"Invalid earmark specified: {e}") from e
elif isinstance(e, OSError):
@@ -135,7 +147,7 @@ class CephFSVolumeEarmarking:
except Exception as e:
return self._handle_cephfs_error(e, "getting")
- def set_earmark(self, earmark: str):
+ def set_earmark(self, earmark: str) -> None:
# Validate the earmark before attempting to set it
if not self._validate_earmark(earmark):
raise EarmarkException(
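
Note: the FSOperations protocol added above documents the minimal xattr interface CephFSVolumeEarmarking relies on, so tests can substitute a lightweight fake for a real CephFS binding. A sketch of such a fake; the FakeFS class and its dict backing are illustrative, not part of this change:

    from ceph.fs.earmarking import CephFSVolumeEarmarking

    class FakeFS:
        """In-memory stand-in satisfying the FSOperations protocol."""
        def __init__(self) -> None:
            self._xattrs: dict = {}

        def setxattr(self, path: str, key: str, value: bytes, flags: int) -> None:
            self._xattrs[(path, key)] = value

        def getxattr(self, path: str, key: str) -> bytes:
            return self._xattrs[(path, key)]

    fs = FakeFS()
    em = CephFSVolumeEarmarking(fs, '/volumes/_nogroup/subvol1')
    em.set_earmark('nfs')   # validated, then stored via FakeFS.setxattr()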
diff --git a/src/python-common/ceph/tests/utils.py b/src/python-common/ceph/tests/utils.py
index 04b8a4e3895..20a39e4666b 100644
--- a/src/python-common/ceph/tests/utils.py
+++ b/src/python-common/ceph/tests/utils.py
@@ -35,8 +35,7 @@ def _mk_device(rotational=True,
)]
-def _mk_inventory(devices):
- # type: (Any) -> List[Device]
+def _mk_inventory(devices: Any) -> List[Device]:
devs = []
for dev_, name in zip(devices, map(chr, range(ord('a'), ord('z')))):
dev = Device.from_json(dev_.to_json())
diff --git a/src/python-common/requirements-lint.txt b/src/python-common/requirements-lint.txt
deleted file mode 100644
index 2a7142182c2..00000000000
--- a/src/python-common/requirements-lint.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-flake8==3.7.8
-rstcheck==3.3.1
diff --git a/src/python-common/tox.ini b/src/python-common/tox.ini
index 313a4334d51..e0b59c700ca 100644
--- a/src/python-common/tox.ini
+++ b/src/python-common/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py3, mypy, lint
+envlist = lint, rstcheck, mypy, py3
skip_missing_interpreters = true
[testenv:py3]
@@ -26,9 +26,13 @@ exclude =
__pycache__
[testenv:lint]
-deps =
- -rrequirements-lint.txt
+deps =
+ flake8
commands =
flake8 {posargs:ceph}
- rstcheck --report info --debug README.rst
+[testenv:rstcheck]
+deps =
+ rstcheck
+commands =
+ rstcheck --report-level info README.rst