summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--qa/tasks/qemu.py262
1 files changed, 139 insertions, 123 deletions
diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py
index b0eafab82c7..7392ecc5bc6 100644
--- a/qa/tasks/qemu.py
+++ b/qa/tasks/qemu.py
@@ -23,21 +23,62 @@ DEFAULT_IMAGE_SIZE = 10240 # in megabytes
DEFAULT_CPUS = 1
DEFAULT_MEM = 4096 # in megabytes
-def create_images(ctx, config, managers):
+def normalize_disks(config):
+ # normalize the 'disks' parameter into a list of dictionaries
for client, client_config in config.items():
+ clone = client_config.get('clone', False)
+ image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
+ device_type = client_config.get('type', 'filesystem')
+
disks = client_config.get('disks', DEFAULT_NUM_DISKS)
if not isinstance(disks, list):
- disks = [{} for n in range(int(disks))]
- clone = client_config.get('clone', False)
+ disks = [{'image_name': '{client}.{num}'.format(client=client,
+ num=i)}
+ for i in range(int(disks))]
+ client_config['disks'] = disks
+
+ for i, disk in enumerate(disks):
+ if 'action' not in disk:
+ disk['action'] = 'create'
+ assert disk['action'] in ['none', 'create', 'clone'], 'invalid disk action'
+ assert disk['action'] != 'clone' or 'parent_name' in disk, 'parent_name required for clone'
+
+ if 'image_size' not in disk:
+ disk['image_size'] = DEFAULT_IMAGE_SIZE
+ disk['image_size'] = int(disk['image_size'])
+
+ if 'image_url' not in disk and i == 0:
+ disk['image_url'] = image_url
+
+ if 'device_type' not in disk:
+ disk['device_type'] = device_type
+
+ disk['device_letter'] = chr(ord('a') + i)
+
assert disks, 'at least one rbd device must be used'
- for i, disk in enumerate(disks[1:]):
+
+ if clone:
+ for disk in disks:
+ if disk['action'] != 'create':
+ continue
+ clone = dict(disk)
+ clone['action'] = 'clone'
+ clone['parent_name'] = clone['image_name']
+ clone['image_name'] += '-clone'
+ del disk['device_letter']
+ disks.append(clone)
+
+def create_images(ctx, config, managers):
+ for client, client_config in config.items():
+ disks = client_config['disks']
+ for disk in disks:
+ if disk.get('action') != 'create' or 'image_url' in disk:
+ continue
create_config = {
client: {
- 'image_name': '{client}.{num}'.format(client=client,
- num=i + 1),
- 'image_format': 2 if clone else 1,
- 'image_size': (disk or {}).get('image_size',
- DEFAULT_IMAGE_SIZE),
+ 'image_name': disk['image_name'],
+ 'image_format': 2,
+ 'image_size': disk['image_size'],
}
}
managers.append(
@@ -47,24 +88,21 @@ def create_images(ctx, config, managers):
def create_clones(ctx, config, managers):
for client, client_config in config.items():
- clone = client_config.get('clone', False)
- if clone:
- num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
- if isinstance(num_disks, list):
- num_disks = len(num_disks)
- for i in range(num_disks):
- create_config = {
- client: {
- 'image_name':
- '{client}.{num}-clone'.format(client=client, num=i),
- 'parent_name':
- '{client}.{num}'.format(client=client, num=i),
- }
+ disks = client_config['disks']
+ for disk in disks:
+ if disk['action'] != 'clone':
+ continue
+
+ create_config = {
+ client: {
+ 'image_name': disk['image_name'],
+ 'parent_name': disk['parent_name']
}
- managers.append(
- lambda create_config=create_config:
- rbd.clone_image(ctx=ctx, config=create_config)
- )
+ }
+ managers.append(
+ lambda create_config=create_config:
+ rbd.clone_image(ctx=ctx, config=create_config)
+ )
@contextlib.contextmanager
def create_dirs(ctx, config):
@@ -132,13 +170,15 @@ def generate_iso(ctx, config):
test_teardown = ''.join(f.readlines())
user_data = test_setup
- if client_config.get('type', 'filesystem') == 'filesystem':
- num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
- if isinstance(num_disks, list):
- num_disks = len(num_disks)
- for i in range(1, num_disks):
- dev_letter = chr(ord('a') + i)
- user_data += """
+
+ disks = client_config['disks']
+ for disk in disks:
+ if disk['device_type'] != 'filesystem' or \
+ 'device_letter' not in disk or \
+ 'image_url' in disk:
+ continue
+ dev_letter = disk['device_letter']
+ user_data += """
- |
#!/bin/bash
mkdir /mnt/test_{dev_letter}
@@ -219,49 +259,58 @@ def download_image(ctx, config):
"""Downland base image, remove image file when done"""
log.info('downloading base image')
testdir = teuthology.get_testdir(ctx)
+
+ client_base_files = {}
for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
- base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
- image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
- remote.run(
- args=[
- 'wget', '-nv', '-O', base_file, image_url,
- ]
- )
- disks = client_config.get('disks', None)
- if not isinstance(disks, list):
- disks = [{}]
- image_name = '{client}.0'.format(client=client)
- image_size = (disks[0] or {}).get('image_size', DEFAULT_IMAGE_SIZE)
- remote.run(
- args=[
- 'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
- base_file, 'rbd:rbd/{image_name}'.format(image_name=image_name)
- ]
- )
- remote.run(
- args=[
- 'rbd', 'resize',
- '--size={image_size}M'.format(image_size=image_size),
- image_name,
- ]
- )
+ client_base_files[client] = []
+ disks = client_config['disks']
+ for disk in disks:
+ if disk['action'] != 'create' or 'image_url' not in disk:
+ continue
+
+ base_file = '{tdir}/qemu/base.{name}.qcow2'.format(tdir=testdir,
+ name=disk['image_name'])
+ client_base_files[client].append(base_file)
+
+ remote.run(
+ args=[
+ 'wget', '-nv', '-O', base_file, disk['image_url'],
+ ]
+ )
+ remote.run(
+ args=[
+ 'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
+ base_file, 'rbd:rbd/{image_name}'.format(image_name=disk['image_name'])
+ ]
+ )
+
+ for disk in disks:
+ if disk['action'] == 'clone' or \
+ (disk['action'] == 'create' and 'image_url' not in disk):
+ continue
+
+ remote.run(
+ args=[
+ 'rbd', 'resize',
+ '--size={image_size}M'.format(image_size=disk['image_size']),
+ disk['image_name'], run.Raw('||'), 'true'
+ ]
+ )
+
try:
yield
finally:
log.debug('cleaning up base image files')
- for client in config.keys():
- base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
- tdir=testdir,
- client=client,
- )
+ for client, base_files in client_base_files.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'rm', '-f', base_file,
- ],
- )
+ for base_file in base_files:
+ remote.run(
+ args=[
+ 'rm', '-f', base_file,
+ ],
+ )
def _setup_nfs_mount(remote, client, service_name, mount_dir):
@@ -392,17 +441,15 @@ def run_qemu(ctx, config):
else:
cachemode = 'writethrough'
- clone = client_config.get('clone', False)
- num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
- if isinstance(num_disks, list):
- num_disks = len(num_disks)
- for i in range(num_disks):
- suffix = '-clone' if clone else ''
+ disks = client_config['disks']
+ for disk in disks:
+ if 'device_letter' not in disk:
+ continue
+
args.extend([
'-drive',
'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format(
- img='{client}.{num}{suffix}'.format(client=client, num=i,
- suffix=suffix),
+ img=disk['image_name'],
id=client[len('client.'):],
cachemode=cachemode,
),
@@ -487,40 +534,26 @@ def task(ctx, config):
all:
test: http://download.ceph.com/qa/test.sh
- For tests that don't need a filesystem, set type to block::
+    For tests that want to explicitly describe the RBD images to connect::
tasks:
- ceph:
- qemu:
client.0:
- test: http://download.ceph.com/qa/test.sh
- type: block
-
- The test should be configured to run on /dev/vdb and later
- devices.
-
- If you want to run a test that uses more than one rbd image,
- specify how many images to use::
-
- tasks:
- - ceph:
- - qemu:
- client.0:
- test: http://download.ceph.com/qa/test.sh
- type: block
- disks: 2
-
- - or -
-
- tasks:
- - ceph:
- - qemu:
- client.0:
- test: http://ceph.com/qa/test.sh
- type: block
- disks:
- - image_size: 1024
- - image_size: 2048
+ test: http://download.ceph.com/qa/test.sh
+ clone: True/False (optionally clone all created disks),
+ image_url: <URL> (optional default image URL)
+ type: filesystem / block (optional default device type)
+ disks: [
+ {
+ action: create / clone / none (optional, defaults to create)
+ image_name: <image name> (optional)
+ parent_name: <parent_name> (if action == clone),
+            type: filesystem / block (optional, defaults to filesystem)
+ image_url: <URL> (optional),
+ image_size: <MiB> (optional)
+ }, ...
+ ]
You can set the amount of CPUs and memory the VM has (default is 1 CPU and
4096 MB)::
@@ -533,15 +566,6 @@ def task(ctx, config):
cpus: 4
memory: 512 # megabytes
- If you want to run a test against a cloned rbd image, set clone to true::
-
- tasks:
- - ceph:
- - qemu:
- client.0:
- test: http://download.ceph.com/qa/test.sh
- clone: true
-
If you need to configure additional cloud-config options, set cloud_config
to the required data set::
@@ -558,20 +582,12 @@ def task(ctx, config):
test data
type: text/plain
filename: /tmp/data
-
- If you need to override the default cloud image, set image_url:
-
- tasks:
- - ceph
- - qemu:
- client.0:
- test: http://ceph.com/qa/test.sh
- image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
"""
assert isinstance(config, dict), \
"task qemu only supports a dictionary for configuration"
config = teuthology.replace_all_with_clients(ctx.cluster, config)
+ normalize_disks(config)
managers = []
create_images(ctx=ctx, config=config, managers=managers)