summaryrefslogtreecommitdiffstats
path: root/src/ceph-volume/ceph_volume
diff options
context:
space:
mode:
Diffstat (limited to 'src/ceph-volume/ceph_volume')
-rw-r--r--src/ceph-volume/ceph_volume/__init__.py1
-rw-r--r--src/ceph-volume/ceph_volume/api/lvm.py17
-rw-r--r--src/ceph-volume/ceph_volume/devices/lvm/zap.py321
-rw-r--r--src/ceph-volume/ceph_volume/devices/raw/list.py147
-rw-r--r--src/ceph-volume/ceph_volume/main.py10
-rw-r--r--src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py2
-rw-r--r--src/ceph-volume/ceph_volume/tests/conftest.py2
-rw-r--r--src/ceph-volume/ceph_volume/tests/devices/lvm/data_zap.py81
-rw-r--r--src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py223
-rw-r--r--src/ceph-volume/ceph_volume/tests/devices/raw/data_list.py102
-rw-r--r--src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py114
-rw-r--r--src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py1
-rw-r--r--src/ceph-volume/ceph_volume/tests/test_inventory.py3
-rw-r--r--src/ceph-volume/ceph_volume/util/arg_validators.py14
-rw-r--r--src/ceph-volume/ceph_volume/util/device.py34
-rw-r--r--src/ceph-volume/ceph_volume/util/disk.py47
-rw-r--r--src/ceph-volume/ceph_volume/util/prepare.py3
17 files changed, 749 insertions, 373 deletions
diff --git a/src/ceph-volume/ceph_volume/__init__.py b/src/ceph-volume/ceph_volume/__init__.py
index b10100c0218..814619cfddd 100644
--- a/src/ceph-volume/ceph_volume/__init__.py
+++ b/src/ceph-volume/ceph_volume/__init__.py
@@ -6,6 +6,7 @@ from collections import namedtuple
sys_info = namedtuple('sys_info', ['devices'])
sys_info.devices = dict()
logger = logging.getLogger(__name__)
+BEING_REPLACED_HEADER: str = 'CEPH_DEVICE_BEING_REPLACED'
class AllowLoopDevices:
diff --git a/src/ceph-volume/ceph_volume/api/lvm.py b/src/ceph-volume/ceph_volume/api/lvm.py
index 16cbc08b262..fc376f891fd 100644
--- a/src/ceph-volume/ceph_volume/api/lvm.py
+++ b/src/ceph-volume/ceph_volume/api/lvm.py
@@ -10,6 +10,8 @@ from itertools import repeat
from math import floor
from ceph_volume import process, util, conf
from ceph_volume.exceptions import SizeAllocationError
+from typing import Any, Dict
+
logger = logging.getLogger(__name__)
@@ -807,13 +809,16 @@ LV_CMD_OPTIONS = ['--noheadings', '--readonly', '--separator=";"', '-a',
'--units=b', '--nosuffix']
-class Volume(object):
+class Volume:
"""
Represents a Logical Volume from LVM, with some top-level attributes like
``lv_name`` and parsed tags as a dictionary of key/value pairs.
"""
- def __init__(self, **kw):
+ def __init__(self, **kw: str) -> None:
+ self.lv_path: str = ''
+ self.lv_name: str = ''
+ self.lv_uuid: str = ''
for k, v in kw.items():
setattr(self, k, v)
self.lv_api = kw
@@ -824,13 +829,13 @@ class Volume(object):
self.encrypted = self.tags.get('ceph.encrypted', '0') == '1'
self.used_by_ceph = 'ceph.osd_id' in self.tags
- def __str__(self):
+ def __str__(self) -> str:
return '<%s>' % self.lv_api['lv_path']
- def __repr__(self):
+ def __repr__(self) -> str:
return self.__str__()
- def as_dict(self):
+ def as_dict(self) -> Dict[str, Any]:
obj = {}
obj.update(self.lv_api)
obj['tags'] = self.tags
@@ -839,7 +844,7 @@ class Volume(object):
obj['path'] = self.lv_path
return obj
- def report(self):
+ def report(self) -> Dict[str, Any]:
if not self.used_by_ceph:
return {
'name': self.lv_name,
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/zap.py b/src/ceph-volume/ceph_volume/devices/lvm/zap.py
index 2b6925f5b27..a6d82c7f0fa 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/zap.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/zap.py
@@ -5,12 +5,13 @@ import time
from textwrap import dedent
-from ceph_volume import decorators, terminal, process
+from ceph_volume import decorators, terminal, process, BEING_REPLACED_HEADER
from ceph_volume.api import lvm as api
from ceph_volume.util import system, encryption, disk, arg_validators, str_to_int, merge_dict
from ceph_volume.util.device import Device
from ceph_volume.systemd import systemctl
-from typing import List
+from ceph_volume.devices.raw.list import direct_report
+from typing import Any, Dict, List, Set
logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)
@@ -96,84 +97,126 @@ def zap_data(path):
])
-def find_associated_devices(osd_id=None, osd_fsid=None):
- """
- From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
- system that match those tag values, further detect if any partitions are
- part of the OSD, and then return the set of LVs and partitions (if any).
- """
- lv_tags = {}
- if osd_id:
- lv_tags['ceph.osd_id'] = osd_id
- if osd_fsid:
- lv_tags['ceph.osd_fsid'] = osd_fsid
-
- lvs = api.get_lvs(tags=lv_tags)
- if not lvs:
- raise RuntimeError('Unable to find any LV for zapping OSD: '
- '%s' % osd_id or osd_fsid)
+class Zap:
+ help = 'Removes all data and filesystems from a logical volume or partition.'
- devices_to_zap = ensure_associated_lvs(lvs, lv_tags)
- return [Device(path) for path in set(devices_to_zap) if path]
+ def __init__(self, argv: List[str]) -> None:
+ self.argv = argv
+ self.osd_ids_to_zap: List[str] = []
+ def ensure_associated_raw(self, raw_report: Dict[str, Any]) -> List[str]:
+ osd_id: str = self.args.osd_id
+ osd_uuid: str = self.args.osd_fsid
+ raw_devices: Set[str] = set()
-def ensure_associated_lvs(lvs, lv_tags={}):
- """
- Go through each LV and ensure if backing devices (journal, wal, block)
- are LVs or partitions, so that they can be accurately reported.
- """
- # look for many LVs for each backing type, because it is possible to
- # receive a filtering for osd.1, and have multiple failed deployments
- # leaving many journals with osd.1 - usually, only a single LV will be
- # returned
-
- db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
- wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
- backing_devices = [(db_lvs, 'db'),
- (wal_lvs, 'wal')]
-
- verified_devices = []
-
- for lv in lvs:
- # go through each lv and append it, otherwise query `blkid` to find
- # a physical device. Do this for each type (journal,db,wal) regardless
- # if they have been processed in the previous LV, so that bad devices
- # with the same ID can be caught
- for ceph_lvs, _type in backing_devices:
- if ceph_lvs:
- verified_devices.extend([l.lv_path for l in ceph_lvs])
- continue
-
- # must be a disk partition, by querying blkid by the uuid we are
- # ensuring that the device path is always correct
- try:
- device_uuid = lv.tags['ceph.%s_uuid' % _type]
- except KeyError:
- # Bluestore will not have ceph.journal_uuid, and Filestore
- # will not not have ceph.db_uuid
- continue
+ if len([details.get('osd_id') for _, details in raw_report.items() if details.get('osd_id') == osd_id]) > 1:
+ if not osd_uuid:
+ raise RuntimeError(f'Multiple OSDs found with id {osd_id}, pass --osd-fsid')
- osd_device = disk.get_device_from_partuuid(device_uuid)
- if not osd_device:
- # if the osd_device is not found by the partuuid, then it is
- # not possible to ensure this device exists anymore, so skip it
- continue
- verified_devices.append(osd_device)
+ if not osd_uuid:
+ for _, details in raw_report.items():
+ if details.get('osd_id') == int(osd_id):
+ osd_uuid = details.get('osd_uuid')
+ break
- verified_devices.append(lv.lv_path)
+ for _, details in raw_report.items():
+ device: str = details.get('device')
+ if details.get('osd_uuid') == osd_uuid:
+ raw_devices.add(device)
- # reduce the list from all the duplicates that were added
- return list(set(verified_devices))
+ return list(raw_devices)
+ def find_associated_devices(self) -> List[api.Volume]:
+ """From an ``osd_id`` and/or an ``osd_fsid``, filter out all the Logical Volumes (LVs) in the
+ system that match those tag values, further detect if any partitions are
+ part of the OSD, and then return the set of LVs and partitions (if any).
-class Zap(object):
+ The function first queries the LVM-based OSDs using the provided `osd_id` or `osd_fsid`.
+ If no matches are found, it then searches the system for RAW-based OSDs.
- help = 'Removes all data and filesystems from a logical volume or partition.'
+ Raises:
+ SystemExit: If no OSDs are found, the function raises a `SystemExit` with an appropriate message.
- def __init__(self, argv):
- self.argv = argv
+ Returns:
+ List[api.Volume]: A list of `api.Volume` objects corresponding to the OSD's Logical Volumes (LVs)
+ or partitions that are associated with the given `osd_id` or `osd_fsid`.
- def unmount_lv(self, lv):
+ Notes:
+ - If neither `osd_id` nor `osd_fsid` is provided, the function will not be able to find OSDs.
+ - The search proceeds from LVM-based OSDs to RAW-based OSDs if no Logical Volumes are found.
+ """
+ lv_tags = {}
+ lv_tags = {key: value for key, value in {
+ 'ceph.osd_id': self.args.osd_id,
+ 'ceph.osd_fsid': self.args.osd_fsid
+ }.items() if value}
+ devices_to_zap: List[str] = []
+ lvs = api.get_lvs(tags=lv_tags)
+
+ if lvs:
+ devices_to_zap = self.ensure_associated_lvs(lvs, lv_tags)
+ else:
+ mlogger.debug(f'No OSD identified by "{self.args.osd_id or self.args.osd_fsid}" was found among LVM-based OSDs.')
+ mlogger.debug('Proceeding to check RAW-based OSDs.')
+ raw_osds: Dict[str, Any] = direct_report()
+ if raw_osds:
+ devices_to_zap = self.ensure_associated_raw(raw_osds)
+ if not devices_to_zap:
+ raise SystemExit('No OSD were found.')
+
+ return [Device(path) for path in set(devices_to_zap) if path]
+
+ def ensure_associated_lvs(self,
+ lvs: List[api.Volume],
+ lv_tags: Dict[str, Any] = {}) -> List[str]:
+ """
+ Go through each LV and ensure if backing devices (journal, wal, block)
+ are LVs or partitions, so that they can be accurately reported.
+ """
+ # look for many LVs for each backing type, because it is possible to
+ # receive a filtering for osd.1, and have multiple failed deployments
+ # leaving many journals with osd.1 - usually, only a single LV will be
+ # returned
+
+ db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
+ wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
+ backing_devices = [(db_lvs, 'db'),
+ (wal_lvs, 'wal')]
+
+ verified_devices = []
+
+ for lv in lvs:
+ # go through each lv and append it, otherwise query `blkid` to find
+ # a physical device. Do this for each type (journal,db,wal) regardless
+ # if they have been processed in the previous LV, so that bad devices
+ # with the same ID can be caught
+ for ceph_lvs, _type in backing_devices:
+ if ceph_lvs:
+ verified_devices.extend([l.lv_path for l in ceph_lvs])
+ continue
+
+ # must be a disk partition, by querying blkid by the uuid we are
+ # ensuring that the device path is always correct
+ try:
+ device_uuid = lv.tags['ceph.%s_uuid' % _type]
+ except KeyError:
+ # Bluestore will not have ceph.journal_uuid, and Filestore
+ # will not have ceph.db_uuid
+ continue
+
+ osd_device = disk.get_device_from_partuuid(device_uuid)
+ if not osd_device:
+ # if the osd_device is not found by the partuuid, then it is
+ # not possible to ensure this device exists anymore, so skip it
+ continue
+ verified_devices.append(osd_device)
+
+ verified_devices.append(lv.lv_path)
+
+ # reduce the list from all the duplicates that were added
+ return list(set(verified_devices))
+
+ def unmount_lv(self, lv: api.Volume) -> None:
if lv.tags.get('ceph.cluster_name') and lv.tags.get('ceph.osd_id'):
lv_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
else:
@@ -186,39 +229,95 @@ class Zap(object):
if dmcrypt and dmcrypt_uuid:
self.dmcrypt_close(dmcrypt_uuid)
- def zap_lv(self, device):
+ def _write_replacement_header(self, device: str) -> None:
+ """Write a replacement header to a device.
+
+ This method writes the string defined in `BEING_REPLACED_HEADER`
+ to the specified device. This header indicates that the device
+ is in the process of being replaced.
+
+ Args:
+ device (str): The path to the device on which the replacement
+ header will be written.
+ """
+ disk._dd_write(device,
+ BEING_REPLACED_HEADER)
+
+ def clear_replace_header(self) -> bool:
+ """Safely erase the replacement header on a device if it is marked as being replaced.
+
+ This method checks whether the given device is marked as being replaced
+ (`device.is_being_replaced`). If true, it proceeds to erase the replacement header
+ from the device using the `_erase_replacement_header` method. The method returns
+ a boolean indicating whether any action was taken.
+
+ Note:
+ The device is not passed as a parameter; it is read from
+ ``self.args.clear_replace_header`` (a Device object carrying the
+ device's path and its is-being-replaced status).
+
+ Returns:
+ bool: True if the replacement header was successfully erased, False if the
+ device was not marked as being replaced or no action was necessary.
+ """
+ result: bool = False
+ device: Device = self.args.clear_replace_header
+ if device.is_being_replaced:
+ self._erase_replacement_header(device.path)
+ result = True
+ return result
+
+ def _erase_replacement_header(self, device: str) -> None:
+ """Erase the replacement header on a device.
+
+ This method writes a sequence of null bytes (`0x00`) over the area of the device
+ where the replacement header is stored, effectively erasing it.
+
+ Args:
+ device (str): The path to the device from which the replacement header will be erased.
+ """
+ disk._dd_write(device,
+ b'\x00' * len(BEING_REPLACED_HEADER))
+
+ def zap_lv(self, device: Device) -> None:
"""
Device examples: vg-name/lv-name, /dev/vg-name/lv-name
Requirements: Must be a logical volume (LV)
"""
lv: api.Volume = device.lv_api
self.unmount_lv(lv)
-
+ self.parent_device: str = disk.get_parent_device_from_mapper(lv.lv_path)
zap_device(device.path)
if self.args.destroy:
lvs = api.get_lvs(filters={'vg_name': device.vg_name})
- if lvs == []:
- mlogger.info('No LVs left, exiting', device.vg_name)
- return
- elif len(lvs) <= 1:
+ if len(lvs) <= 1:
mlogger.info('Only 1 LV left in VG, will proceed to destroy '
'volume group %s', device.vg_name)
pvs = api.get_pvs(filters={'lv_uuid': lv.lv_uuid})
api.remove_vg(device.vg_name)
for pv in pvs:
api.remove_pv(pv.pv_name)
+ replacement_args: Dict[str, bool] = {
+ 'block': self.args.replace_block,
+ 'db': self.args.replace_db,
+ 'wal': self.args.replace_wal
+ }
+ if replacement_args.get(lv.tags.get('ceph.type'), False):
+ mlogger.info(f'Marking {self.parent_device} as being replaced')
+ self._write_replacement_header(self.parent_device)
else:
mlogger.info('More than 1 LV left in VG, will proceed to '
'destroy LV only')
mlogger.info('Removing LV because --destroy was given: %s',
device.path)
+ if self.args.replace_block:
+ mlogger.info(f'--replace-block passed but the device still has {str(len(lvs))} LV(s)')
api.remove_lv(device.path)
elif lv:
# just remove all lvm metadata, leaving the LV around
lv.clear_tags()
- def zap_partition(self, device):
+ def zap_partition(self, device: Device) -> None:
"""
Device example: /dev/sda1
Requirements: Must be a partition
@@ -246,7 +345,7 @@ class Zap(object):
mlogger.info("Destroying partition since --destroy was used: %s" % device.path)
disk.remove_partition(device)
- def zap_lvm_member(self, device):
+ def zap_lvm_member(self, device: Device) -> None:
"""
An LVM member may have more than one LV and or VG, for example if it is
a raw device with multiple partitions each belonging to a different LV
@@ -266,7 +365,7 @@ class Zap(object):
- def zap_raw_device(self, device):
+ def zap_raw_device(self, device: Device) -> None:
"""
Any whole (raw) device passed in as input will be processed here,
checking for LVM membership and partitions (if any).
@@ -286,11 +385,19 @@ class Zap(object):
self.zap_partition(Device('/dev/%s' % part_name))
zap_device(device.path)
+ # TODO(guits): I leave this commented out, this should be part of a separate patch in order to
+ # support device replacement with raw-based OSDs
+ # if self.args.replace_block:
+ # disk._dd_write(device.path, 'CEPH_DEVICE_BEING_REPLACED')
@decorators.needs_root
- def zap(self, devices=None):
- devices = devices or self.args.devices
+ def zap(self) -> None:
+ """Zap a device.
+ Raises:
+ SystemExit: When the device is a mapper and not a mpath device.
+ """
+ devices = self.args.devices
for device in devices:
mlogger.info("Zapping: %s", device.path)
if device.is_mapper and not device.is_mpath:
@@ -316,21 +423,21 @@ class Zap(object):
)
@decorators.needs_root
- def zap_osd(self):
+ def zap_osd(self) -> None:
if self.args.osd_id and not self.args.no_systemd:
osd_is_running = systemctl.osd_is_active(self.args.osd_id)
if osd_is_running:
mlogger.error("OSD ID %s is running, stop it with:" % self.args.osd_id)
mlogger.error("systemctl stop ceph-osd@%s" % self.args.osd_id)
raise SystemExit("Unable to zap devices associated with OSD ID: %s" % self.args.osd_id)
- devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
- self.zap(devices)
+ self.args.devices = self.find_associated_devices()
+ self.zap()
- def dmcrypt_close(self, dmcrypt_uuid):
+ def dmcrypt_close(self, dmcrypt_uuid: str) -> None:
mlogger.info("Closing encrypted volume %s", dmcrypt_uuid)
encryption.dmcrypt_close(mapping=dmcrypt_uuid, skip_path_check=True)
- def main(self):
+ def main(self) -> None:
sub_command_help = dedent("""
Zaps the given logical volume(s), raw device(s) or partition(s) for reuse by ceph-volume.
If given a path to a logical volume it must be in the format of vg/lv. Any
@@ -418,12 +525,56 @@ class Zap(object):
help='Skip systemd unit checks',
)
+ parser.add_argument(
+ '--replace-block',
+ dest='replace_block',
+ action='store_true',
+ help='Mark the block device as unavailable.'
+ )
+
+ parser.add_argument(
+ '--replace-db',
+ dest='replace_db',
+ action='store_true',
+ help='Mark the db device as unavailable.'
+ )
+
+ parser.add_argument(
+ '--replace-wal',
+ dest='replace_wal',
+ action='store_true',
+ help='Mark the wal device as unavailable.'
+ )
+
+ parser.add_argument(
+ '--clear-replace-header',
+ dest='clear_replace_header',
+ type=arg_validators.ValidClearReplaceHeaderDevice(),
+ help='clear the replace header on devices.'
+ )
+
if len(self.argv) == 0:
print(sub_command_help)
return
self.args = parser.parse_args(self.argv)
+ if self.args.clear_replace_header:
+ rc: bool = False
+ try:
+ rc = self.clear_replace_header()
+ except Exception as e:
+ raise SystemExit(e)
+ if rc:
+ mlogger.info(f'Replacement header cleared on {self.args.clear_replace_header}')
+ else:
+ mlogger.info(f'No replacement header detected on {self.args.clear_replace_header}, nothing to do.')
+ raise SystemExit(not rc)
+
+ if self.args.replace_block or self.args.replace_db or self.args.replace_wal:
+ self.args.destroy = True
+ mlogger.info('--replace-block|db|wal passed, enforcing --destroy.')
+
if self.args.osd_id or self.args.osd_fsid:
self.zap_osd()
else:
diff --git a/src/ceph-volume/ceph_volume/devices/raw/list.py b/src/ceph-volume/ceph_volume/devices/raw/list.py
index f6ac08eab98..68923216a41 100644
--- a/src/ceph-volume/ceph_volume/devices/raw/list.py
+++ b/src/ceph-volume/ceph_volume/devices/raw/list.py
@@ -5,12 +5,14 @@ import logging
from textwrap import dedent
from ceph_volume import decorators, process
from ceph_volume.util import disk
-from typing import Any, Dict, List as _List
+from ceph_volume.util.device import Device
+from typing import Any, Dict, Optional, List as _List
+from concurrent.futures import ThreadPoolExecutor
logger = logging.getLogger(__name__)
-def direct_report(devices):
+def direct_report(devices: Optional[_List[str]] = None) -> Dict[str, Any]:
"""
Other non-cli consumers of listing information will want to consume the
report without the need to parse arguments or other flags. This helper
@@ -20,27 +22,29 @@ def direct_report(devices):
_list = List([])
return _list.generate(devices)
-def _get_bluestore_info(dev: str) -> Dict[str, Any]:
+def _get_bluestore_info(devices: _List[str]) -> Dict[str, Any]:
result: Dict[str, Any] = {}
- out, err, rc = process.call([
- 'ceph-bluestore-tool', 'show-label',
- '--dev', dev], verbose_on_failure=False)
+ command: _List[str] = ['ceph-bluestore-tool',
+ 'show-label', '--bdev_aio_poll_ms=1']
+ for device in devices:
+ command.extend(['--dev', device])
+ out, err, rc = process.call(command, verbose_on_failure=False)
if rc:
- # ceph-bluestore-tool returns an error (below) if device is not bluestore OSD
- # > unable to read label for <device>: (2) No such file or directory
- # but it's possible the error could be for a different reason (like if the disk fails)
- logger.debug(f'assuming device {dev} is not BlueStore; ceph-bluestore-tool failed to get info from device: {out}\n{err}')
+ logger.debug(f"ceph-bluestore-tool couldn't detect any BlueStore device.\n{out}\n{err}")
else:
oj = json.loads(''.join(out))
- if dev not in oj:
- # should be impossible, so warn
- logger.warning(f'skipping device {dev} because it is not reported in ceph-bluestore-tool output: {out}')
- try:
- result = disk.bluestore_info(dev, oj)
- except KeyError as e:
- # this will appear for devices that have a bluestore header but aren't valid OSDs
- # for example, due to incomplete rollback of OSDs: https://tracker.ceph.com/issues/51869
- logger.error(f'device {dev} does not have all BlueStore data needed to be a valid OSD: {out}\n{e}')
+ for device in devices:
+ if device not in oj:
+ # should be impossible, so warn
+ logger.warning(f'skipping device {device} because it is not reported in ceph-bluestore-tool output: {out}')
+ if oj.get(device):
+ try:
+ osd_uuid = oj[device]['osd_uuid']
+ result[osd_uuid] = disk.bluestore_info(device, oj)
+ except KeyError as e:
+ # this will appear for devices that have a bluestore header but aren't valid OSDs
+ # for example, due to incomplete rollback of OSDs: https://tracker.ceph.com/issues/51869
+ logger.error(f'device {device} does not have all BlueStore data needed to be a valid OSD: {out}\n{e}')
return result
@@ -50,68 +54,67 @@ class List(object):
def __init__(self, argv: _List[str]) -> None:
self.argv = argv
-
- def is_atari_partitions(self, _lsblk: Dict[str, Any]) -> bool:
- dev = _lsblk['NAME']
- if _lsblk.get('PKNAME'):
- parent = _lsblk['PKNAME']
- try:
- if disk.has_bluestore_label(parent):
- logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent),
- 'device is likely a phantom Atari partition. device info: {}'.format(_lsblk)))
- return True
- except OSError as e:
- logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev),
- 'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e)))
- return True
- return False
-
- def exclude_atari_partitions(self, _lsblk_all: Dict[str, Any]) -> _List[Dict[str, Any]]:
- return [_lsblk for _lsblk in _lsblk_all if not self.is_atari_partitions(_lsblk)]
-
- def generate(self, devs=None):
+ self.info_devices: _List[Dict[str, str]] = []
+ self.devices_to_scan: _List[str] = []
+
+ def exclude_atari_partitions(self) -> None:
+ result: _List[str] = []
+ for info_device in self.info_devices:
+ path = info_device['NAME']
+ parent_device = info_device.get('PKNAME')
+ if parent_device:
+ try:
+ if disk.has_bluestore_label(parent_device):
+ logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(path, parent_device),
+ 'device is likely a phantom Atari partition. device info: {}'.format(info_device)))
+ continue
+ except OSError as e:
+ logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(path),
+ 'failed to determine if parent device {} is BlueStore. err: {}'.format(parent_device, e)))
+ continue
+ result.append(path)
+ self.devices_to_scan = result
+
+ def exclude_lvm_osd_devices(self) -> None:
+ with ThreadPoolExecutor() as pool:
+ filtered_devices_to_scan = pool.map(self.filter_lvm_osd_devices, self.devices_to_scan)
+ self.devices_to_scan = [device for device in filtered_devices_to_scan if device is not None]
+
+ def filter_lvm_osd_devices(self, device: str) -> Optional[str]:
+ d = Device(device)
+ return d.path if not d.ceph_device_lvm else None
+
+ def generate(self, devices: Optional[_List[str]] = None) -> Dict[str, Any]:
logger.debug('Listing block devices via lsblk...')
- info_devices = []
- if not devs or not any(devs):
+ if not devices or not any(devices):
# If no devs are given initially, we want to list ALL devices including children and
# parents. Parent disks with child partitions may be the appropriate device to return if
# the parent disk has a bluestore header, but children may be the most appropriate
# devices to return if the parent disk does not have a bluestore header.
- info_devices = disk.lsblk_all(abspath=True)
- devs = [device['NAME'] for device in info_devices if device.get('NAME',)]
+ self.info_devices = disk.lsblk_all(abspath=True)
+ # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
+ # bluestore's on-disk format as an Atari partition table. These false Atari partitions
+ # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
+ # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
+ # parent, it is a child. If the parent is a valid bluestore OSD, the child will only
+ # exist if it is a phantom Atari partition, and the child should be ignored. If the
+ # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
+ # determine whether a parent is bluestore, we should err on the side of not reporting
+ # the child so as not to give a false negative.
+ self.exclude_atari_partitions()
+ self.exclude_lvm_osd_devices()
+
else:
- for dev in devs:
- info_devices.append(disk.lsblk(dev, abspath=True))
-
- # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
- # bluestore's on-disk format as an Atari partition table. These false Atari partitions
- # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
- # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
- # parent, it is a child. If the parent is a valid bluestore OSD, the child will only
- # exist if it is a phantom Atari partition, and the child should be ignored. If the
- # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
- # determine whether a parent is bluestore, we should err on the side of not reporting
- # the child so as not to give a false negative.
- info_devices = self.exclude_atari_partitions(info_devices)
-
- result = {}
- logger.debug('inspecting devices: {}'.format(devs))
- for info_device in info_devices:
- bs_info = _get_bluestore_info(info_device['NAME'])
- if not bs_info:
- # None is also returned in the rare event that there is an issue reading info from
- # a BlueStore disk, so be sure to log our assumption that it isn't bluestore
- logger.info('device {} does not have BlueStore information'.format(info_device['NAME']))
- continue
- uuid = bs_info['osd_uuid']
- if uuid not in result:
- result[uuid] = {}
- result[uuid].update(bs_info)
+ self.devices_to_scan = devices
+
+ result: Dict[str, Any] = {}
+ logger.debug('inspecting devices: {}'.format(self.devices_to_scan))
+ result = _get_bluestore_info(self.devices_to_scan)
return result
@decorators.needs_root
- def list(self, args):
+ def list(self, args: argparse.Namespace) -> None:
report = self.generate(args.device)
if args.format == 'json':
print(json.dumps(report, indent=4, sort_keys=True))
@@ -120,7 +123,7 @@ class List(object):
raise SystemExit('No valid Ceph devices found')
raise RuntimeError('not implemented yet')
- def main(self):
+ def main(self) -> None:
sub_command_help = dedent("""
List OSDs on raw devices with raw device labels (usually the first
block of the device).
diff --git a/src/ceph-volume/ceph_volume/main.py b/src/ceph-volume/ceph_volume/main.py
index f8eca65ec49..4f27f429e89 100644
--- a/src/ceph-volume/ceph_volume/main.py
+++ b/src/ceph-volume/ceph_volume/main.py
@@ -11,8 +11,16 @@ try:
from importlib.metadata import entry_points
def get_entry_points(group: str): # type: ignore
- return entry_points().get(group, []) # type: ignore
+ eps = entry_points()
+ if hasattr(eps, 'select'):
+ # New importlib.metadata uses .select()
+ return eps.select(group=group)
+ else:
+ # Fallback to older EntryPoints that returns dicts
+ return eps.get(group, []) # type: ignore
+
except ImportError:
+ # Fallback to `pkg_resources` for older versions
from pkg_resources import iter_entry_points as entry_points # type: ignore
def get_entry_points(group: str): # type: ignore
diff --git a/src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py b/src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py
index ba3719cd3f3..aa11d553723 100644
--- a/src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py
+++ b/src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py
@@ -367,7 +367,7 @@ class LvmBlueStore(BlueStore):
if is_encrypted:
osd_lv_path = '/dev/mapper/%s' % osd_block_lv.__dict__['lv_uuid']
lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
- self.with_tpm = bool(osd_block_lv.tags.get('ceph.with_tpm', 0))
+ self.with_tpm = osd_block_lv.tags.get('ceph.with_tpm') == '1'
if not self.with_tpm:
encryption_utils.write_lockbox_keyring(osd_id,
osd_fsid,
diff --git a/src/ceph-volume/ceph_volume/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/conftest.py
index ee58081d97d..e6bf31737b6 100644
--- a/src/ceph-volume/ceph_volume/tests/conftest.py
+++ b/src/ceph-volume/ceph_volume/tests/conftest.py
@@ -360,7 +360,7 @@ def device_info(monkeypatch, patch_bluestore_label):
has_bluestore_label=False):
if devices:
for dev in devices.keys():
- devices[dev]['device_nodes'] = os.path.basename(dev)
+ devices[dev]['device_nodes'] = [os.path.basename(dev)]
else:
devices = {}
lsblk = lsblk if lsblk else {}
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/data_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/data_zap.py
new file mode 100644
index 00000000000..c971b7776ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/data_zap.py
@@ -0,0 +1,81 @@
+ceph_bluestore_tool_output = '''
+{
+ "/dev/sdb": {
+ "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b6",
+ "size": 1099511627776,
+ "btime": "2021-07-23T16:02:22.809186+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "1099511627776",
+ "bluefs": "1",
+ "ceph_fsid": "sdb-fsid",
+ "ceph_version_when_created": "ceph version 19.3.0-5537-gb9ba4e48 (b9ba4e48633d6d90d5927a4e66b9ecbb4d7e6e73) squid (dev)",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "type": "bluestore",
+ "whoami": "0"
+ },
+ "/dev/vdx": {
+ "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b7",
+ "size": 214748364800,
+ "btime": "2024-10-16T10:51:05.955279+0000",
+ "description": "main",
+ "bfm_blocks": "52428800",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "214748364800",
+ "bluefs": "1",
+ "ceph_fsid": "2d20bc8c-8a0c-11ef-aaba-525400e54507",
+ "ceph_version_when_created": "ceph version 19.3.0-5537-gb9ba4e48 (b9ba4e48633d6d90d5927a4e66b9ecbb4d7e6e73) squid (dev)",
+ "created_at": "2024-10-16T10:51:09.121455Z",
+ "elastic_shared_blobs": "1",
+ "epoch": "16",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "multi": "yes",
+ "osd_key": "AQCZmg9nxOKTCBAA6EQftuqMuKMHqypSAfqBsQ==",
+ "ready": "ready",
+ "type": "bluestore",
+ "whoami": "5"
+ },
+ "/dev/vdy": {
+ "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b6",
+ "size": 214748364800,
+ "btime": "2024-10-16T10:51:05.961279+0000",
+ "description": "bluefs db"
+ },
+ "/dev/vdz": {
+ "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b6",
+ "size": 214748364800,
+ "btime": "2024-10-16T10:51:05.961279+0000",
+ "description": "bluefs wal"
+ }
+}
+'''.split('\n')
+
+lsblk_all = ['NAME="/dev/sdb" KNAME="/dev/sdb" PKNAME="" PARTLABEL=""',
+ 'NAME="/dev/sdx" KNAME="/dev/sdx" PKNAME="" PARTLABEL=""',
+ 'NAME="/dev/sdy" KNAME="/dev/sdy" PKNAME="" PARTLABEL=""',
+ 'NAME="/dev/sdz" KNAME="/dev/sdz" PKNAME="" PARTLABEL=""']
+
+blkid_output = ['/dev/ceph-1172bba3-3e0e-45e5-ace6-31ae8401221f/osd-block-5050a85c-d1a7-4d66-b4ba-2e9b1a2970ae: TYPE="ceph_bluestore" USAGE="other"']
+
+udevadm_property = '''DEVNAME=/dev/sdb
+DEVTYPE=disk
+ID_ATA=1
+ID_BUS=ata
+ID_MODEL=SK_hynix_SC311_SATA_512GB
+ID_PART_TABLE_TYPE=gpt
+ID_PART_TABLE_UUID=c8f91d57-b26c-4de1-8884-0c9541da288c
+ID_PATH=pci-0000:00:17.0-ata-3
+ID_PATH_TAG=pci-0000_00_17_0-ata-3
+ID_REVISION=70000P10
+ID_SERIAL=SK_hynix_SC311_SATA_512GB_MS83N71801150416A
+TAGS=:systemd:
+USEC_INITIALIZED=16117769'''.split('\n') \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
index d630a7a6bf8..d9b3bdfd239 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
@@ -1,3 +1,4 @@
+# type: ignore
import os
import pytest
from copy import deepcopy
@@ -5,16 +6,54 @@ from mock.mock import patch, call, Mock
from ceph_volume import process
from ceph_volume.api import lvm as api
from ceph_volume.devices.lvm import zap
-
-
-class TestZap(object):
- def test_invalid_osd_id_passed(self):
+from . import data_zap
+from typing import Tuple, List
+
+
+def process_call(command, **kw):
+    result: Tuple[List[str], List[str], int] = ([], [], 0)
+ if 'udevadm' in command:
+ result = data_zap.udevadm_property, [], 0
+ if 'ceph-bluestore-tool' in command:
+ result = data_zap.ceph_bluestore_tool_output, [], 0
+ if 'is-active' in command:
+ result = [], [], 1
+ if 'lsblk' in command:
+ result = data_zap.lsblk_all, [], 0
+ if 'blkid' in command:
+ result = data_zap.blkid_output, [], 0
+ if 'pvs' in command:
+ result = [], [], 0
+ return result
+
+
+class TestZap:
+ def test_invalid_osd_id_passed(self) -> None:
with pytest.raises(SystemExit):
zap.Zap(argv=['--osd-id', 'foo']).main()
-class TestFindAssociatedDevices(object):
-
- def test_no_lvs_found_that_match_id(self, monkeypatch, device_info):
+ @patch('ceph_volume.util.disk._dd_write', Mock())
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_clear_replace_header_is_being_replaced(self, m_device: Mock) -> None:
+ m_dev = m_device.return_value
+ m_dev.is_being_replaced = True
+ with pytest.raises(SystemExit) as e:
+ zap.Zap(argv=['--clear', '/dev/foo']).main()
+ assert e.value.code == 0
+
+ @patch('ceph_volume.util.disk._dd_write', Mock())
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_clear_replace_header_is_not_being_replaced(self, m_device: Mock) -> None:
+ m_dev = m_device.return_value
+ m_dev.is_being_replaced = False
+ with pytest.raises(SystemExit) as e:
+ zap.Zap(argv=['--clear', '/dev/foo']).main()
+ assert e.value.code == 1
+
+ @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+ @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_no_lvs_and_raw_found_that_match_id(self, is_root, monkeypatch, device_info):
tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_tags=tags, lv_path='/dev/VolGroup/lv')
@@ -22,10 +61,15 @@ class TestFindAssociatedDevices(object):
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
- with pytest.raises(RuntimeError):
- zap.find_associated_devices(osd_id=10)
+ z = zap.Zap(['--osd-id', '10'])
- def test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info):
+ with pytest.raises(SystemExit):
+ z.main()
+
+ @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+ @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_no_lvs_and_raw_found_that_match_fsid(self, is_root, monkeypatch):
tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
@@ -34,10 +78,15 @@ class TestFindAssociatedDevices(object):
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
- with pytest.raises(RuntimeError):
- zap.find_associated_devices(osd_fsid='aaaa-lkjh')
+ z = zap.Zap(['--osd-fsid', 'aaaa-lkjh'])
- def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info):
+ with pytest.raises(SystemExit):
+ z.main()
+
+ @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+ @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_no_lvs_and_raw_found_that_match_id_fsid(self, is_root, monkeypatch):
tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
@@ -46,45 +95,82 @@ class TestFindAssociatedDevices(object):
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
- with pytest.raises(RuntimeError):
- zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh')
+ z = zap.Zap(['--osd-id', '9', '--osd-fsid', 'aaaa-lkjh'])
- def test_no_ceph_lvs_found(self, monkeypatch):
+ with pytest.raises(SystemExit):
+ z.main()
+
+ @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+ def test_no_ceph_lvs_and_no_ceph_raw_found(self, is_root, monkeypatch):
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='',
lv_path='/dev/VolGroup/lv')
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
- with pytest.raises(RuntimeError):
- zap.find_associated_devices(osd_id=100)
+ z = zap.Zap(['--osd-id', '100'])
- def test_lv_is_matched_id(self, monkeypatch):
+ with pytest.raises(SystemExit):
+ z.main()
+
+ @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_lv_is_matched_id(self, mock_zap, monkeypatch, is_root):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = [osd]
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+
+ z = zap.Zap(['--osd-id', '0'])
+ z.main()
+ assert z.args.devices[0].path == '/dev/VolGroup/lv'
+ mock_zap.assert_called_once()
+
+ # @patch('ceph_volume.devices.lvm.zap.disk.has_bluestore_label', Mock(return_value=True))
+ @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+ @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_raw_is_matched_id(self, mock_zap, monkeypatch, is_root):
volumes = []
- volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
- monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
- result = zap.find_associated_devices(osd_id='0')
- assert result[0].path == '/dev/VolGroup/lv'
+ z = zap.Zap(['--osd-id', '0'])
+ z.main()
+ assert z.args.devices[0].path == '/dev/sdb'
+ mock_zap.assert_called_once()
- def test_lv_is_matched_fsid(self, monkeypatch):
+ @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+ def test_lv_is_matched_fsid(self, mock_zap, monkeypatch, is_root):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
- volumes = []
- volumes.append(osd)
+ volumes = [osd]
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes))
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
- result = zap.find_associated_devices(osd_fsid='asdf-lkjh')
- assert result[0].path == '/dev/VolGroup/lv'
+ z = zap.Zap(['--osd-fsid', 'asdf-lkjh'])
+ z.main()
+
+ assert z.args.devices[0].path == '/dev/VolGroup/lv'
+        mock_zap.assert_called_once()
+
+ @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+ @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_raw_is_matched_fsid(self, mock_zap, monkeypatch, is_root):
+ volumes = []
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+
+ z = zap.Zap(['--osd-fsid', 'd5a496bc-dcb9-4ad0-a12c-393d3200d2b6'])
+ z.main()
+
+ assert z.args.devices[0].path == '/dev/sdb'
+        mock_zap.assert_called_once()
- def test_lv_is_matched_id_fsid(self, monkeypatch):
+ @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+ def test_lv_is_matched_id_fsid(self, mock_zap, monkeypatch, is_root):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
@@ -94,26 +180,43 @@ class TestFindAssociatedDevices(object):
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
- result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh')
- assert result[0].path == '/dev/VolGroup/lv'
+ z = zap.Zap(['--osd-id', '0', '--osd-fsid', 'asdf-lkjh', '--no-systemd'])
+ z.main()
+ assert z.args.devices[0].path == '/dev/VolGroup/lv'
+        mock_zap.assert_called_once()
-class TestEnsureAssociatedLVs(object):
-
- @patch('ceph_volume.devices.lvm.zap.api', Mock(return_value=[]))
- def test_nothing_is_found(self):
+ @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+ @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_raw_is_matched_id_fsid(self, mock_zap, monkeypatch, is_root):
volumes = []
- result = zap.ensure_associated_lvs(volumes)
- assert result == []
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
- def test_data_is_found(self, fake_call):
- tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
- osd = api.Volume(
- lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)
+ z = zap.Zap(['--osd-id', '0', '--osd-fsid', 'd5a496bc-dcb9-4ad0-a12c-393d3200d2b6'])
+ z.main()
+
+ assert z.args.devices[0].path == '/dev/sdb'
+        mock_zap.assert_called_once()
+
+ @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+ @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(side_effect=['/dev/vdx', '/dev/vdy', '/dev/vdz', None]))
+ @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+ def test_raw_multiple_devices(self, mock_zap, monkeypatch, is_root):
volumes = []
- volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
- assert result == ['/dev/VolGroup/data']
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ z = zap.Zap(['--osd-id', '5'])
+ z.main()
+
+        assert set([device.path for device in z.args.devices]) == {'/dev/vdx', '/dev/vdy', '/dev/vdz'}
+        mock_zap.assert_called_once()
+
+ @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+ @patch('ceph_volume.devices.lvm.zap.api.get_lvs', Mock(return_value=[]))
+ def test_nothing_is_found(self, is_root):
+ z = zap.Zap(['--osd-id', '0'])
+ with pytest.raises(SystemExit):
+ z.main()
def test_block_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
@@ -121,7 +224,7 @@ class TestEnsureAssociatedLVs(object):
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
volumes = []
volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
+ result = zap.Zap([]).ensure_associated_lvs(volumes)
assert result == ['/dev/VolGroup/block']
def test_success_message_for_fsid(self, factory, is_root, capsys):
@@ -140,28 +243,6 @@ class TestEnsureAssociatedLVs(object):
out, err = capsys.readouterr()
assert "Zapping successful for OSD: 1" in err
- def test_journal_is_found(self, fake_call):
- tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
- osd = api.Volume(
- lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
- volumes = []
- volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
- assert result == ['/dev/VolGroup/lv']
-
- @patch('ceph_volume.api.lvm.process.call', Mock(return_value=('', '', 0)))
- def test_multiple_journals_are_found(self):
- tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
- volumes = []
- for i in range(3):
- osd = api.Volume(
- lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
- volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
- assert '/dev/VolGroup/lv0' in result
- assert '/dev/VolGroup/lv1' in result
- assert '/dev/VolGroup/lv2' in result
-
@patch('ceph_volume.api.lvm.process.call', Mock(return_value=('', '', 0)))
def test_multiple_dbs_are_found(self):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db'
@@ -170,7 +251,7 @@ class TestEnsureAssociatedLVs(object):
osd = api.Volume(
lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
+ result = zap.Zap([]).ensure_associated_lvs(volumes)
assert '/dev/VolGroup/lv0' in result
assert '/dev/VolGroup/lv1' in result
assert '/dev/VolGroup/lv2' in result
@@ -183,7 +264,7 @@ class TestEnsureAssociatedLVs(object):
osd = api.Volume(
lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
+ result = zap.Zap([]).ensure_associated_lvs(volumes)
assert '/dev/VolGroup/lv0' in result
assert '/dev/VolGroup/lv1' in result
assert '/dev/VolGroup/lv2' in result
@@ -196,14 +277,14 @@ class TestEnsureAssociatedLVs(object):
osd = api.Volume(
lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags)
volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
+ result = zap.Zap([]).ensure_associated_lvs(volumes)
assert '/dev/VolGroup/lvjournal' in result
assert '/dev/VolGroup/lvwal' in result
assert '/dev/VolGroup/lvdb' in result
@patch('ceph_volume.devices.lvm.zap.api.get_lvs')
def test_ensure_associated_lvs(self, m_get_lvs):
- zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
+ zap.Zap([]).ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
calls = [
call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/data_list.py b/src/ceph-volume/ceph_volume/tests/devices/raw/data_list.py
new file mode 100644
index 00000000000..e1d1a48967a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/data_list.py
@@ -0,0 +1,102 @@
+ceph_bluestore_tool_show_label_output: str = '''{
+ "/dev/sdb": {
+ "osd_uuid": "sdb-uuid",
+ "size": 1099511627776,
+ "btime": "2021-07-23T16:02:22.809186+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "1099511627776",
+ "bluefs": "1",
+ "ceph_fsid": "sdb-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "type": "bluestore",
+ "whoami": "0"
+ },
+ "/dev/sdb2": {
+ "osd_uuid": "sdb2-uuid",
+ "size": 1099511627776,
+ "btime": "2021-07-23T16:02:22.809186+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "1099511627776",
+ "bluefs": "1",
+ "ceph_fsid": "sdb2-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "type": "bluestore",
+ "whoami": "2"
+ },
+ "/dev/sde1": {
+ "osd_uuid": "sde1-uuid",
+ "size": 214747316224,
+ "btime": "2023-07-26T13:20:19.509457+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "214747316224",
+ "bluefs": "1",
+ "ceph_fsid": "sde1-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQCSHcFkUeLIMBAAjKqANkXafjvVISkXt6FGCA==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "type": "bluestore",
+ "whoami": "1"
+ },
+ "/dev/mapper/ceph--osd--block--1": {
+ "osd_uuid": "lvm-1-uuid",
+ "size": 549751619584,
+ "btime": "2021-07-23T16:04:37.881060+0000",
+ "description": "main",
+ "bfm_blocks": "134216704",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "549751619584",
+ "bluefs": "1",
+ "ceph_fsid": "lvm-1-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "type": "bluestore",
+ "whoami": "2"
+ },
+ "/dev/mapper/ceph--osd--block--1": {
+ "osd_uuid": "lvm-1-uuid",
+ "size": 549751619584,
+ "btime": "2021-07-23T16:04:37.881060+0000",
+ "description": "main",
+ "bfm_blocks": "134216704",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "549751619584",
+ "bluefs": "1",
+ "ceph_fsid": "lvm-1-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "type": "bluestore",
+ "whoami": "2"
+ }
+}''' \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
index 604fb4faa3e..23d2bfdaa2c 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
@@ -1,5 +1,7 @@
+# type: ignore
import pytest
-from mock.mock import patch
+from .data_list import ceph_bluestore_tool_show_label_output
+from mock.mock import patch, Mock
from ceph_volume.devices import raw
# Sample lsblk output is below that overviews the test scenario. (--json output for reader clarity)
@@ -74,98 +76,6 @@ def _lsblk_output(dev, parent=None):
ret = 'NAME="{}" KNAME="{}" PKNAME="{}"'.format(dev, dev, parent)
return [ret] # needs to be in a list form
-def _bluestore_tool_label_output_sdb():
- return '''{
- "/dev/sdb": {
- "osd_uuid": "sdb-uuid",
- "size": 1099511627776,
- "btime": "2021-07-23T16:02:22.809186+0000",
- "description": "main",
- "bfm_blocks": "268435456",
- "bfm_blocks_per_key": "128",
- "bfm_bytes_per_block": "4096",
- "bfm_size": "1099511627776",
- "bluefs": "1",
- "ceph_fsid": "sdb-fsid",
- "kv_backend": "rocksdb",
- "magic": "ceph osd volume v026",
- "mkfs_done": "yes",
- "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
- "ready": "ready",
- "require_osd_release": "16",
- "whoami": "0"
- }
-}'''
-
-def _bluestore_tool_label_output_sdb2():
- return '''{
- "/dev/sdb2": {
- "osd_uuid": "sdb2-uuid",
- "size": 1099511627776,
- "btime": "2021-07-23T16:02:22.809186+0000",
- "description": "main",
- "bfm_blocks": "268435456",
- "bfm_blocks_per_key": "128",
- "bfm_bytes_per_block": "4096",
- "bfm_size": "1099511627776",
- "bluefs": "1",
- "ceph_fsid": "sdb2-fsid",
- "kv_backend": "rocksdb",
- "magic": "ceph osd volume v026",
- "mkfs_done": "yes",
- "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
- "ready": "ready",
- "require_osd_release": "16",
- "whoami": "2"
- }
-}'''
-
-def _bluestore_tool_label_output_sde1():
- return '''{
- "/dev/sde1": {
- "osd_uuid": "sde1-uuid",
- "size": 214747316224,
- "btime": "2023-07-26T13:20:19.509457+0000",
- "description": "main",
- "bfm_blocks": "268435456",
- "bfm_blocks_per_key": "128",
- "bfm_bytes_per_block": "4096",
- "bfm_size": "214747316224",
- "bluefs": "1",
- "ceph_fsid": "sde1-fsid",
- "kv_backend": "rocksdb",
- "magic": "ceph osd volume v026",
- "mkfs_done": "yes",
- "osd_key": "AQCSHcFkUeLIMBAAjKqANkXafjvVISkXt6FGCA==",
- "ready": "ready",
- "require_osd_release": "16",
- "whoami": "1"
- }
-}'''
-
-def _bluestore_tool_label_output_dm_okay():
- return '''{
- "/dev/mapper/ceph--osd--block--1": {
- "osd_uuid": "lvm-1-uuid",
- "size": 549751619584,
- "btime": "2021-07-23T16:04:37.881060+0000",
- "description": "main",
- "bfm_blocks": "134216704",
- "bfm_blocks_per_key": "128",
- "bfm_bytes_per_block": "4096",
- "bfm_size": "549751619584",
- "bluefs": "1",
- "ceph_fsid": "lvm-1-fsid",
- "kv_backend": "rocksdb",
- "magic": "ceph osd volume v026",
- "mkfs_done": "yes",
- "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
- "ready": "ready",
- "require_osd_release": "16",
- "whoami": "2"
- }
-}'''
-
def _process_call_side_effect(command, **kw):
if "lsblk" in command:
if "/dev/" in command[-1]:
@@ -186,19 +96,7 @@ def _process_call_side_effect(command, **kw):
pytest.fail('command {} needs behavior specified for it'.format(command))
if "ceph-bluestore-tool" in command:
- if "/dev/sdb" in command:
- # sdb is a bluestore OSD
- return _bluestore_tool_label_output_sdb(), '', 0
- if "/dev/sdb2" in command:
- # sdb2 is a phantom atari partition that appears to have some valid bluestore info
- return _bluestore_tool_label_output_sdb2(), '', 0
- if "/dev/sde1" in command:
- return _bluestore_tool_label_output_sde1(), '', 0
- if "/dev/mapper/ceph--osd--block--1" in command:
- # dm device 1 is a valid bluestore OSD (the other is corrupted/invalid)
- return _bluestore_tool_label_output_dm_okay(), '', 0
- # sda and children, sdb's children, sdc, sdd, dm device 2 all do NOT have bluestore OSD data
- return [], 'fake No such file or directory error', 1
+ return ceph_bluestore_tool_show_label_output, '', 0
pytest.fail('command {} needs behavior specified for it'.format(command))
def _has_bluestore_label_side_effect(disk_path):
@@ -224,6 +122,7 @@ def _has_bluestore_label_side_effect(disk_path):
class TestList(object):
+ @patch('ceph_volume.devices.raw.list.List.exclude_lvm_osd_devices', Mock())
@patch('ceph_volume.util.device.disk.get_devices')
@patch('ceph_volume.util.disk.has_bluestore_label')
@patch('ceph_volume.process.call')
@@ -257,6 +156,7 @@ class TestList(object):
assert sde1['ceph_fsid'] == 'sde1-fsid'
assert sde1['type'] == 'bluestore'
+ @patch('ceph_volume.devices.raw.list.List.exclude_lvm_osd_devices', Mock())
@patch('ceph_volume.util.device.disk.get_devices')
@patch('ceph_volume.util.disk.has_bluestore_label')
@patch('ceph_volume.process.call')
@@ -275,4 +175,4 @@ class TestList(object):
result = raw.list.List([]).generate()
assert len(result) == 2
- assert 'sdb-uuid' in result
+ assert {'sdb-uuid', 'sde1-uuid'} == set(result.keys())
diff --git a/src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py b/src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py
index f4f50b06f8a..fd7c468037c 100644
--- a/src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py
+++ b/src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py
@@ -159,6 +159,7 @@ class TestRawBlueStore:
@patch('ceph_volume.objectstore.rawbluestore.encryption_utils.rename_mapper', Mock(return_value=MagicMock()))
@patch('ceph_volume.util.disk.get_bluestore_header')
+ @patch('ceph_volume.objectstore.rawbluestore.encryption_utils.luks_close', Mock(return_value=MagicMock()))
@patch('ceph_volume.objectstore.rawbluestore.encryption_utils.luks_open', Mock(return_value=MagicMock()))
def test_activate_dmcrypt_tpm(self, m_bs_header, rawbluestore, fake_lsblk_all, mock_raw_direct_report, is_root) -> None:
m_bs_header.return_value = {
diff --git a/src/ceph-volume/ceph_volume/tests/test_inventory.py b/src/ceph-volume/ceph_volume/tests/test_inventory.py
index 785d8b56e86..29cd1fc4e4d 100644
--- a/src/ceph-volume/ceph_volume/tests/test_inventory.py
+++ b/src/ceph-volume/ceph_volume/tests/test_inventory.py
@@ -118,7 +118,7 @@ def device_data(device_info):
class TestInventory(object):
expected_keys = [
- 'ceph_device',
+ 'ceph_device_lvm',
'path',
'rejected_reasons',
'sys_api',
@@ -126,6 +126,7 @@ class TestInventory(object):
'lvs',
'device_id',
'lsm_data',
+ 'being_replaced'
]
expected_sys_api_keys = [
diff --git a/src/ceph-volume/ceph_volume/util/arg_validators.py b/src/ceph-volume/ceph_volume/util/arg_validators.py
index 99e7d039e74..e75b34e550e 100644
--- a/src/ceph-volume/ceph_volume/util/arg_validators.py
+++ b/src/ceph-volume/ceph_volume/util/arg_validators.py
@@ -7,6 +7,9 @@ from ceph_volume.util import disk
from ceph_volume.util.encryption import set_dmcrypt_no_workqueue
+mlogger = terminal.MultiLogger(__name__)
+
+
def valid_osd_id(val):
return str(int(val))
@@ -70,6 +73,17 @@ class ValidZapDevice(ValidDevice):
return self._device
+class ValidClearReplaceHeaderDevice(ValidDevice):
+ def __call__(self, dev_path: str) -> str:
+ super().get_device(dev_path)
+ return self._format_device(self._is_valid_device())
+
+ def _is_valid_device(self) -> Device:
+ if not self._device.is_being_replaced:
+ mlogger.info(f'{self.dev_path} has no replacement header.')
+ return self._device
+
+
class ValidDataDevice(ValidDevice):
def __call__(self, dev_path):
super().get_device(dev_path)
diff --git a/src/ceph-volume/ceph_volume/util/device.py b/src/ceph-volume/ceph_volume/util/device.py
index 9c2c11e7f31..04eefeac750 100644
--- a/src/ceph-volume/ceph_volume/util/device.py
+++ b/src/ceph-volume/ceph_volume/util/device.py
@@ -1,13 +1,14 @@
# -*- coding: utf-8 -*-
-
+# type: ignore
import logging
import os
from functools import total_ordering
-from ceph_volume import sys_info, allow_loop_devices
+from ceph_volume import sys_info, allow_loop_devices, BEING_REPLACED_HEADER
from ceph_volume.api import lvm
from ceph_volume.util import disk, system
from ceph_volume.util.lsmdisk import LSMDisk
from ceph_volume.util.constants import ceph_disk_guids
+from typing import List, Tuple
logger = logging.getLogger(__name__)
@@ -85,13 +86,14 @@ class Device(object):
{attr:<25} {value}"""
report_fields = [
- 'ceph_device',
+ 'ceph_device_lvm',
'rejected_reasons',
'available',
'path',
'sys_api',
'device_id',
'lsm_data',
+ 'being_replaced'
]
pretty_report_sys_fields = [
'actuators',
@@ -135,7 +137,8 @@ class Device(object):
self.blkid_api = None
self._exists = None
self._is_lvm_member = None
- self.ceph_device = False
+ self.ceph_device_lvm = False
+ self.being_replaced: bool = self.is_being_replaced
self._parse()
if self.path in sys_info.devices.keys():
self.device_nodes = sys_info.devices[self.path]['device_nodes']
@@ -233,7 +236,7 @@ class Device(object):
self.path = lv.lv_path
self.vg_name = lv.vg_name
self.lv_name = lv.name
- self.ceph_device = lvm.is_ceph_device(lv)
+ self.ceph_device_lvm = lvm.is_ceph_device(lv)
else:
self.lvs = []
if self.lsblk_all:
@@ -298,7 +301,7 @@ class Device(object):
rot=self.rotational,
available=self.available,
model=self.model,
- device_nodes=self.device_nodes
+ device_nodes=','.join(self.device_nodes)
)
def json_report(self):
@@ -363,7 +366,7 @@ class Device(object):
self._is_lvm_member = True
self.lvs.extend(lvm.get_device_lvs(path))
if self.lvs:
- self.ceph_device = any([True if lv.tags.get('ceph.osd_id') else False for lv in self.lvs])
+ self.ceph_device_lvm = any([True if lv.tags.get('ceph.osd_id') else False for lv in self.lvs])
def _get_partitions(self):
"""
@@ -590,7 +593,7 @@ class Device(object):
return [vg_free]
@property
- def has_partitions(self):
+ def has_partitions(self) -> bool:
'''
Boolean to determine if a given device has partitions.
'''
@@ -598,7 +601,14 @@ class Device(object):
return True
return False
- def _check_generic_reject_reasons(self):
+ @property
+ def is_being_replaced(self) -> bool:
+ '''
+ Boolean to indicate if the device is being replaced.
+ '''
+ return disk._dd_read(self.path, 26) == BEING_REPLACED_HEADER
+
+ def _check_generic_reject_reasons(self) -> List[str]:
reasons = [
('id_bus', 'usb', 'id_bus'),
('ro', '1', 'read-only'),
@@ -639,9 +649,11 @@ class Device(object):
rejected.append('Has partitions')
if self.has_fs:
rejected.append('Has a FileSystem')
+ if self.is_being_replaced:
+ rejected.append('Is being replaced')
return rejected
- def _check_lvm_reject_reasons(self):
+ def _check_lvm_reject_reasons(self) -> Tuple[bool, List[str]]:
rejected = []
if self.vgs:
available_vgs = [vg for vg in self.vgs if int(vg.vg_free_count) > 10]
@@ -654,7 +666,7 @@ class Device(object):
return len(rejected) == 0, rejected
- def _check_raw_reject_reasons(self):
+ def _check_raw_reject_reasons(self) -> Tuple[bool, List[str]]:
rejected = self._check_generic_reject_reasons()
if len(self.vgs) > 0:
rejected.append('LVM detected')
diff --git a/src/ceph-volume/ceph_volume/util/disk.py b/src/ceph-volume/ceph_volume/util/disk.py
index 78c140597d6..921e61a4534 100644
--- a/src/ceph-volume/ceph_volume/util/disk.py
+++ b/src/ceph-volume/ceph_volume/util/disk.py
@@ -7,7 +7,7 @@ import json
from ceph_volume import process, allow_loop_devices
from ceph_volume.api import lvm
from ceph_volume.util.system import get_file_contents
-from typing import Dict, List, Any
+from typing import Dict, List, Any, Union, Optional
logger = logging.getLogger(__name__)
@@ -251,7 +251,9 @@ def lsblk(device, columns=None, abspath=False):
return result[0]
-def lsblk_all(device='', columns=None, abspath=False):
+def lsblk_all(device: str = '',
+ columns: Optional[List[str]] = None,
+ abspath: bool = False) -> List[Dict[str, str]]:
"""
Create a dictionary of identifying values for a device using ``lsblk``.
Each supported column is a key, in its *raw* format (all uppercase
@@ -332,7 +334,6 @@ def lsblk_all(device='', columns=None, abspath=False):
if device:
base_command.append('--nodeps')
base_command.append(device)
-
out, err, rc = process.call(base_command)
if rc != 0:
@@ -346,12 +347,21 @@ def lsblk_all(device='', columns=None, abspath=False):
return result
-def is_device(dev):
+def is_device(dev: str) -> bool:
"""
- Boolean to determine if a given device is a block device (**not**
- a partition!)
+ Determines whether the given path corresponds to a block device (not a partition).
+
+ This function checks whether the provided device path represents a valid block device,
+ such as a physical disk (/dev/sda) or an allowed loop device, but excludes partitions
+ (/dev/sdc1). It performs several validation steps, including file existence, path format,
+ device type, and additional checks for loop devices if allowed.
+
+ Args:
+ dev (str): The path to the device (e.g., "/dev/sda").
- For example: /dev/sda would return True, but not /dev/sdc1
+ Returns:
+ bool: True if the path corresponds to a valid block device (not a partition),
+ otherwise False.
"""
if not os.path.exists(dev):
return False
@@ -363,7 +373,7 @@ def is_device(dev):
TYPE = lsblk(dev).get('TYPE')
if TYPE:
- return TYPE in ['disk', 'mpath']
+ return TYPE in ['disk', 'mpath', 'loop']
# fallback to stat
return _stat_is_device(os.lstat(dev).st_mode) and not is_partition(dev)
@@ -857,13 +867,14 @@ def get_devices(_sys_block_path='/sys/block', device=''):
device_slaves = os.listdir(os.path.join(sysdir, 'slaves'))
metadata['partitions'] = get_partitions_facts(sysdir)
+ metadata['device_nodes'] = []
if device_slaves:
- metadata['device_nodes'] = ','.join(device_slaves)
+ metadata['device_nodes'].extend(device_slaves)
else:
if block[2] == 'part':
- metadata['device_nodes'] = block[3]
+ metadata['device_nodes'].append(block[3])
else:
- metadata['device_nodes'] = devname
+ metadata['device_nodes'].append(devname)
metadata['actuators'] = None
if os.path.isdir(sysdir + "/queue/independent_access_ranges/"):
@@ -979,7 +990,7 @@ def _dd_read(device: str, count: int, skip: int = 0) -> str:
return result
-def _dd_write(device: str, data: str, skip: int = 0) -> None:
+def _dd_write(device: str, data: Union[str, bytes], skip: int = 0) -> None:
"""Write bytes to a device
Args:
@@ -991,10 +1002,14 @@ def _dd_write(device: str, data: str, skip: int = 0) -> None:
OSError: If there is an error opening or writing to the device.
Exception: If any other error occurs during the write operation.
"""
+
+ if isinstance(data, str):
+ data = data.encode('utf-8')
+
try:
with open(device, 'r+b') as b:
b.seek(skip)
- b.write(data.encode('utf-8'))
+ b.write(data)
except OSError:
logger.warning(f"Can't write to {device}")
raise
@@ -1370,8 +1385,8 @@ class UdevData:
"""
result: str = self.path
if self.is_lvm:
- vg: str = self.environment.get('DM_VG_NAME')
- lv: str = self.environment.get('DM_LV_NAME')
+ vg: str = self.environment.get('DM_VG_NAME', '')
+ lv: str = self.environment.get('DM_LV_NAME', '')
result = f'/dev/{vg}/{lv}'
return result
@@ -1385,6 +1400,6 @@ class UdevData:
"""
result: str = self.path
if self.is_lvm:
- name: str = self.environment.get('DM_NAME')
+ name: str = self.environment.get('DM_NAME', '')
result = f'/dev/mapper/{name}'
return result
diff --git a/src/ceph-volume/ceph_volume/util/prepare.py b/src/ceph-volume/ceph_volume/util/prepare.py
index 9c863b83d93..ff7fc023fc4 100644
--- a/src/ceph-volume/ceph_volume/util/prepare.py
+++ b/src/ceph-volume/ceph_volume/util/prepare.py
@@ -9,6 +9,7 @@ import logging
import json
from ceph_volume import process, conf, terminal
from ceph_volume.util import system, constants, str_to_int, disk
+from typing import Optional
logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)
@@ -121,7 +122,7 @@ def get_block_wal_size(lv_format=True):
return wal_size
-def create_id(fsid, json_secrets, osd_id=None):
+def create_id(fsid: str, json_secrets: str, osd_id: Optional[str]=None) -> str:
"""
:param fsid: The osd fsid to create, always required
:param json_secrets: a json-ready object with whatever secrets are wanted