path: root/qa
author    Loic Dachary <loic@dachary.org>  2016-09-22 18:13:00 +0200
committer GitHub <noreply@github.com>      2016-09-22 18:13:00 +0200
commit    eb968f886e02fa683b0d3965e71d1ead2677a059 (patch)
tree      16ac3445af90f24b4ec6c04fce27a07cd576f250 /qa
parent    Merge pull request #10371 from Yan-waller/yj-wip-msgsimple-0720 (diff)
parent    ceph-disk: update/add ceph-disk test cases for bluestore (diff)
Merge pull request #10135 from david-z/wip-enhance-ceph-disk-bluestore
ceph-disk: support creating block.db and block.wal with customized size for bluestore

Reviewed-by: Loic Dachary <ldachary@redhat.com>
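As background for the feature this merge brings in, here is a minimal sketch (not part of the commit) of how a customized-size scenario could be written in the same TestCephDisk style as the cases added in the diff below. It would live inside TestCephDisk in ceph-disk-test.py (where uuid is already imported), and it assumes the ceph.conf keys "bluestore block db size" and "bluestore block wal size" are what ceph-disk prepare consults when sizing the block.db and block.wal partitions:

    def test_activate_bluestore_custom_db_wal_size(self):  # hypothetical case, not in this PR
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        # Assumed options: sizes (in bytes) used when creating block.db and block.wal.
        c.conf['global']['bluestore block db size'] = 2 * 1024 * 1024 * 1024
        c.conf['global']['bluestore block wal size'] = 1024 * 1024 * 1024
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'block.db')
        c.check_osd_status(osd_uuid, 'block.wal')
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk)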
Diffstat (limited to 'qa')
-rw-r--r--  qa/workunits/ceph-disk/ceph-disk-test.py  83
1 file changed, 83 insertions, 0 deletions
diff --git a/qa/workunits/ceph-disk/ceph-disk-test.py b/qa/workunits/ceph-disk/ceph-disk-test.py
index 0e4a50b2df2..137869b6713 100644
--- a/qa/workunits/ceph-disk/ceph-disk-test.py
+++ b/qa/workunits/ceph-disk/ceph-disk-test.py
@@ -143,6 +143,15 @@ class CephDisk:
     def get_journal_partition(self, uuid):
         return self.get_space_partition('journal', uuid)
 
+    def get_block_partition(self, uuid):
+        return self.get_space_partition('block', uuid)
+
+    def get_blockdb_partition(self, uuid):
+        return self.get_space_partition('block.db', uuid)
+
+    def get_blockwal_partition(self, uuid):
+        return self.get_space_partition('block.wal', uuid)
+
     def get_space_partition(self, name, uuid):
         data_partition = self.get_osd_partition(uuid)
         space_dev = data_partition[name + '_dev']
@@ -454,10 +463,84 @@ class TestCephDisk(object):
              " " + disk)
         c.wait_for_osd_up(osd_uuid)
         device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
+        assert len(device['partitions']) == 4
+        c.check_osd_status(osd_uuid, 'block')
+        c.check_osd_status(osd_uuid, 'block.wal')
+        c.check_osd_status(osd_uuid, 'block.db')
+        c.helper("pool_read_write")
+        c.destroy_osd(osd_uuid)
+        c.sh("ceph-disk --verbose zap " + disk)
+
+    def test_activate_bluestore_seperated_block_db_wal(self):
+        c = CephDisk()
+        disk1 = c.unused_disks()[0]
+        disk2 = c.unused_disks()[1]
+        osd_uuid = str(uuid.uuid1())
+        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
+        c.conf['global']['osd objectstore'] = 'bluestore'
+        c.save_conf()
+        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
+             " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
+        c.wait_for_osd_up(osd_uuid)
+        device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
+        assert len(device['partitions']) == 2
+        device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
         assert len(device['partitions']) == 2
         c.check_osd_status(osd_uuid, 'block')
+        c.check_osd_status(osd_uuid, 'block.wal')
+        c.check_osd_status(osd_uuid, 'block.db')
         c.helper("pool_read_write")
         c.destroy_osd(osd_uuid)
+        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
+
+    def test_activate_bluestore_reuse_db_wal_partition(self):
+        c = CephDisk()
+        disks = c.unused_disks()
+        block_disk = disks[0]
+        db_wal_disk = disks[1]
+        #
+        # Create an OSD with two disks (one for block, the other for
+        # block.db and block.wal) and then destroy the OSD.
+        #
+        osd_uuid1 = str(uuid.uuid1())
+        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
+        c.conf['global']['osd objectstore'] = 'bluestore'
+        c.save_conf()
+        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
+             osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk +
+             " --block.wal " + db_wal_disk)
+        c.wait_for_osd_up(osd_uuid1)
+        blockdb_partition = c.get_blockdb_partition(osd_uuid1)
+        blockdb_path = blockdb_partition['path']
+        blockwal_partition = c.get_blockwal_partition(osd_uuid1)
+        blockwal_path = blockwal_partition['path']
+        c.destroy_osd(osd_uuid1)
+        c.sh("ceph-disk --verbose zap " + block_disk)
+        #
+        # Create another OSD that reuses the block.db and block.wal
+        # partitions of the previous OSD.
+        #
+        osd_uuid2 = str(uuid.uuid1())
+        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
+             osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path +
+             " --block.wal " + blockwal_path)
+        c.wait_for_osd_up(osd_uuid2)
+        device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
+        assert len(device['partitions']) == 2
+        device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
+        assert len(device['partitions']) == 2
+        c.check_osd_status(osd_uuid2, 'block')
+        c.check_osd_status(osd_uuid2, 'block.wal')
+        c.check_osd_status(osd_uuid2, 'block.db')
+        blockdb_partition = c.get_blockdb_partition(osd_uuid2)
+        blockwal_partition = c.get_blockwal_partition(osd_uuid2)
+        #
+        # Verify the previous OSD's db/wal partitions have been reused.
+        #
+        assert blockdb_partition['path'] == blockdb_path
+        assert blockwal_partition['path'] == blockwal_path
+        c.destroy_osd(osd_uuid2)
+        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
 
     def test_activate_with_journal_dev_is_symlink(self):
         c = CephDisk()
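The three accessors added in the first hunk are what the reuse test above builds on. As a usage illustration only (not part of the commit; print_bluestore_devices is a hypothetical helper, and it assumes an OSD with the given uuid is already up), they can be combined to report where a bluestore OSD keeps its block, block.db and block.wal data:

    def print_bluestore_devices(c, osd_uuid):  # hypothetical helper, not in the diff
        # c is a CephDisk instance from ceph-disk-test.py; each getter returns
        # the partition dict built by get_space_partition(), including 'path'.
        for name, getter in (('block', c.get_block_partition),
                             ('block.db', c.get_blockdb_partition),
                             ('block.wal', c.get_blockwal_partition)):
            partition = getter(osd_uuid)
            print(name + " -> " + partition['path'])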