Diffstat (limited to 'qa')
-rw-r--r--  qa/tasks/cbt.py               22
-rw-r--r--  qa/tasks/cbt_performance.py  152
2 files changed, 171 insertions(+), 3 deletions(-)
diff --git a/qa/tasks/cbt.py b/qa/tasks/cbt.py
index 2e7d89d5cc0..b3d2fd829d5 100644
--- a/qa/tasks/cbt.py
+++ b/qa/tasks/cbt.py
@@ -1,6 +1,7 @@
import logging
import os
import yaml
+from tasks.cbt_performance import CBTperformance
from teuthology import misc
from teuthology.orchestra import run
@@ -41,6 +42,7 @@ class CBT(Task):
            iterations=self.config.get('cluster', {}).get('iterations', 1),
            tmp_dir='/tmp/cbt',
            pool_profiles=self.config.get('cluster', {}).get('pool_profiles'),
+            pid_dir=self.config.get('cluster', {}).get('pid_dir', '/var/run/ceph'),
        )

        benchmark_config = self.config.get('benchmarks')
@@ -62,11 +64,13 @@ class CBT(Task):
                   (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
            benchmark_config['cosbench']['auth'] = "username=cosbench:operator;password=intel2012;url=http://%s:80/auth/v1.0;retry=9" %(ips[0])
        client_endpoints_config = self.config.get('client_endpoints', None)
+        monitoring_profiles = self.config.get('monitoring_profiles', None)

        return dict(
            cluster=cluster_config,
            benchmarks=benchmark_config,
            client_endpoints = client_endpoints_config,
+            monitoring_profiles = monitoring_profiles,
        )

    def install_dependencies(self):
@@ -74,7 +78,7 @@ class CBT(Task):
        if system_type == 'rpm':
            install_cmd = ['sudo', 'yum', '-y', 'install']
-            cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-devel', 'pdsh', 'pdsh-rcmd-ssh']
+            cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-devel', 'pdsh', 'pdsh-rcmd-ssh', 'perf']
            self.log.info('Installing collectl')
            collectl_location = "https://sourceforge.net/projects/collectl/files/collectl/collectl-4.3.1/collectl-4.3.1.src.tar.gz/download"
            self.first_mon.run(
@@ -89,7 +93,7 @@ class CBT(Task):
            )
        else:
            install_cmd = ['sudo', 'apt-get', '-y', '--force-yes', 'install']
-            cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl']
+            cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl', 'linux-tools-generic']
        self.first_mon.run(args=install_cmd + cbt_depends)

        benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
@@ -227,6 +231,16 @@ class CBT(Task):
    def begin(self):
        super(CBT, self).begin()
        testdir = misc.get_testdir(self.ctx)
+        # disable perf_event_paranoid to allow perf to run
+        self.first_mon.run(
+            args=[
+                'sudo',
+                '/sbin/sysctl',
+                '-q',
+                '-w',
+                'kernel.perf_event_paranoid=0',
+            ],
+        )
        self.first_mon.run(
            args=[
                '{tdir}/cbt/cbt.py'.format(tdir=testdir),
@@ -300,6 +314,8 @@
                '{tdir}/xml'.format(tdir=testdir),
            ]
        )
-
+        # Collect cbt performance data
+        cbt_performance = CBTperformance()
+        cbt_performance.collect(self.ctx, self.config)

task = CBT
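
For orientation, here is a minimal, hypothetical sketch of the teuthology task configuration shape that the config-generation code above consumes. Only the key names (cluster, pid_dir, pool_profiles, benchmarks, client_endpoints, monitoring_profiles) come from the diff; every nested value, including the 'perf' monitoring profile, is an illustrative assumption rather than CBT's documented schema.

# Hypothetical example only -- key names from the diff above, values invented.
cbt_task_config = {
    'cluster': {
        'iterations': 1,
        'pid_dir': '/var/run/ceph',   # new key; defaults to /var/run/ceph when omitted
        'pool_profiles': {'rbd': {'pg_size': 128, 'pgp_size': 128, 'replication': 3}},
    },
    'benchmarks': {
        'radosbench': {'op_size': [4096], 'time': 60},
    },
    'client_endpoints': None,
    'monitoring_profiles': {          # new key; forwarded into the generated CBT config unchanged
        'perf': {'nodes': ['osds'], 'args': 'stat -o {perf_dir}/perf_stat.{pid} -p {pid}'},
    },
}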
diff --git a/qa/tasks/cbt_performance.py b/qa/tasks/cbt_performance.py
new file mode 100644
index 00000000000..44f6d9ade3d
--- /dev/null
+++ b/qa/tasks/cbt_performance.py
@@ -0,0 +1,152 @@
+import logging
+import requests
+import re
+import os
+import json
+
+from pathlib import Path
+from io import StringIO
+from teuthology import misc
+
+server = "http://mira118.front.sepia.ceph.com"
+api_port = "4000"
+grafana_port = "3000"
+schema_name = "public"
+table_name = "cbt_performance"
+user_name = "postgres"
+password = "root"
+
+class CBTperformance:
+    def __init__(self):
+        self.log = logging.getLogger(__name__)
+        self.auth = (user_name, password)
+        self.endpoint_url = f'{server}:{api_port}/{table_name}'
+        self.headers = {'Content-Type': 'application/json'}
+
+    def collect(self, ctx, config):
+        self.log.info('Collecting CBT performance data')
+
+        tasks = ctx.config.get('tasks', None)
+        for task in tasks:
+            if "cbt" in task:
+                benchmark = task["cbt"]
+                break
+
+        cbt_results_array = self.read_results(ctx, config)
+        for cbt_results in cbt_results_array:
+            cbt_results = json.loads(json.dumps(cbt_results))
+            if cbt_results:
+                data = {
+                    "job_id" : ctx.config.get('job_id', None),
+                    "started_at" : ctx.config.get('timestamp', None),
+                    "benchmark_mode" : cbt_results.get("Benchmark_mode", None),
+                    "seq" : cbt_results.get("seq", None),
+                    "total_cpu_cycles" : cbt_results.get("total_cpu_cycles", None),
+                    "branch" : ctx.config.get('branch', None),
+                    "sha1" : ctx.config.get('sha1', None),
+                    "os_type" : ctx.config.get('os_type', None),
+                    "os_version" : ctx.config.get('os_version', None),
+                    "machine_type" : ctx.config.get('machine_type', None),
+                    "benchmark" : benchmark["benchmarks"],
+                    "results" : cbt_results.get("results", None),
+                }
+                response = requests.post(self.endpoint_url, json=data, headers=self.headers, auth=self.auth)
+                if response.status_code == 201:
+                    self.log.info("Data inserted successfully.")
+                    ctx.summary['cbt_perf_url'] = self.create_cbt_perf_url(ctx, config)
+                else:
+                    self.log.error(f"Error inserting data: {response}")
+
+
+    def read_results(self, ctx, config):
+        results = []
+        if not config.get('pref_read', True):  # set to False to disable
+            return results
+
+        self.log.info('reading cbt results')
+        testdir = misc.get_testdir(ctx)
+        first_mon = next(iter(ctx.cluster.only(misc.get_first_mon(ctx, config)).remotes.keys()))
+
+        # find all json_output files on the remote
+        proc = first_mon.run(
+            args=[
+                'find', '{tdir}'.format(tdir=testdir), '-name',
+                'json_output.*'
+            ],
+            stdout=StringIO(),
+            wait=True
+        )
+        json_output_paths = proc.stdout.getvalue().split('\n')
+
+        for json_output_path in json_output_paths:
+            if json_output_path:
+                path_full = Path(json_output_path)
+                match = re.search(r'/json_output\.(?P<json>\d+)', json_output_path)
+                if match:
+                    Benchmark_mode = path_full.parent.name if path_full.parent.name in ['rand', 'write', 'seq'] else 'fio'
+                    seq = match.group('json')
+
+                    results.append(
+                        {"results": json.loads(first_mon.read_file(json_output_path).decode('utf-8')),
+                         "Benchmark_mode": Benchmark_mode,
+                         "seq": seq,
+                         "total_cpu_cycles": self.read_total_cpu_cycles(ctx, config, os.path.dirname(json_output_path)),
+                        }
+                    )
+        return results
+
+    def read_total_cpu_cycles(self, ctx, config, testdir):
+        if not config.get('pref_read', True):  # set to False to disable
+            return None
+
+        self.log.info('reading total cpu cycles')
+        first_mon = next(iter(ctx.cluster.only(misc.get_first_mon(ctx, config)).remotes.keys()))
+
+        # find all perf_stat files on the remote
+        proc = first_mon.run(
+            args=[
+                'find', '{tdir}'.format(tdir=testdir), '-name',
+                'perf_stat.*'
+            ],
+            stdout=StringIO(),
+            wait=True
+        )
+
+        cpu_cycles_paths = proc.stdout.getvalue().split('\n')
+        self.log.info(f'cpu_cycles_paths: {cpu_cycles_paths}')
+        total_cpu_cycles = 0
+        for cpu_cycles_path in cpu_cycles_paths:
+            if not cpu_cycles_path:
+                continue
+
+            match = re.search(r'(.*) cycles(.*?) .*', first_mon.read_file(cpu_cycles_path).decode('utf-8'), re.M | re.I)
+            if not match:
+                continue
+
+            cpu_cycles = match.group(1).strip()
+            total_cpu_cycles = total_cpu_cycles + int(cpu_cycles.replace(',', ''))
+
+        self.log.info(f'total cpu cycles: {total_cpu_cycles}')
+        return total_cpu_cycles
+
+    def create_cbt_perf_url(self, ctx, config):
+        tasks = ctx.config.get('tasks', None)
+        for task in tasks:
+            if "cbt" in task:
+                benchmark = task["cbt"]
+                break
+
+        is_fio = benchmark["benchmarks"].get("librbdfio")
+        if is_fio:
+            Dash_id = '2Jx1MmfVk/fio'
+        else:
+            is_radosbench = benchmark["benchmarks"].get("radosbench")
+            if is_radosbench:
+                Dash_id = 'Rfy7LkB4k/rados-bench'
+            else:
+                return None
+
+        job_id = ctx.config.get('job_id', None)
+        branch = ctx.config.get('branch', None)
+
+        return f'{server}:{grafana_port}/d/{Dash_id}?orgId=1&var-branch_name={branch}&var-job_id_selected={job_id}'
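
As a standalone illustration, the cycle-count parsing in read_total_cpu_cycles() can be exercised in isolation. Only the regular expression below comes from the code above; the sample text is invented to mimic the usual `perf stat -o` output format.

import re

# Invented sample in the usual `perf stat` output format; only the regex is
# taken from read_total_cpu_cycles() above.
sample_perf_stat = """
 Performance counter stats for process id '1234':

     1,234,567,890      cycles                    #    3.500 GHz
"""

total_cpu_cycles = 0
match = re.search(r'(.*) cycles(.*?) .*', sample_perf_stat, re.M | re.I)
if match:
    total_cpu_cycles += int(match.group(1).strip().replace(',', ''))

print(total_cpu_cycles)  # -> 1234567890

In the task itself, this sum is accumulated across every perf_stat.* file found under the test directory and stored as total_cpu_cycles in the posted record.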