diff options
author | Thomas Bechtold <tbechtold@suse.com> | 2019-12-09 16:17:23 +0100 |
---|---|---|
committer | Thomas Bechtold <tbechtold@suse.com> | 2019-12-12 10:21:01 +0100 |
commit | 0127cd1e8817b05b1c3150540b021f9a24b47089 (patch) | |
tree | 4d70688e81f80ed40abe6bc126c1067eb1d46de8 | |
parent | Merge pull request #32175 from rzarzynski/wip-crimson-errorator-do_with-frien... (diff) | |
download | ceph-0127cd1e8817b05b1c3150540b021f9a24b47089.tar.xz ceph-0127cd1e8817b05b1c3150540b021f9a24b47089.zip |
qa: Enable flake8 tox and fix failures
flake8 found a number of problems in the qa/
directory (most of them are now fixed). Enabling flake8 during the usual
check runs should help avoid introducing new issues in the future.
Signed-off-by: Thomas Bechtold <tbechtold@suse.com>
66 files changed, 82 insertions, 184 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 2fe35f0091a..bab40cc4965 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -628,6 +628,8 @@ add_custom_target(check add_subdirectory(src) +add_subdirectory(qa) + add_subdirectory(doc) if(WITH_MANPAGE) add_subdirectory(man) diff --git a/qa/CMakeLists.txt b/qa/CMakeLists.txt new file mode 100644 index 00000000000..06de6620b95 --- /dev/null +++ b/qa/CMakeLists.txt @@ -0,0 +1,9 @@ +set(CEPH_BUILD_VIRTUALENV $ENV{TMPDIR}) +if(NOT CEPH_BUILD_VIRTUALENV) + set(CEPH_BUILD_VIRTUALENV ${CMAKE_BINARY_DIR}) +endif() + +if(WITH_TESTS) + include(AddCephTest) + add_tox_test(qa flake8) +endif() diff --git a/qa/standalone/special/ceph_objectstore_tool.py b/qa/standalone/special/ceph_objectstore_tool.py index 952eda3b3ae..21cf5e3bb99 100755 --- a/qa/standalone/special/ceph_objectstore_tool.py +++ b/qa/standalone/special/ceph_objectstore_tool.py @@ -46,7 +46,7 @@ if sys.version_info[0] >= 3: def decode(s): return s.decode('utf-8') - def check_output(*args, **kwargs): + def check_output(*args, **kwargs): # noqa return decode(subprocess.check_output(*args, **kwargs)) else: def decode(s): @@ -337,7 +337,7 @@ def check_entry_transactions(entry, enum): def check_transaction_ops(ops, enum, tnum): - if len(ops) is 0: + if len(ops) == 0: logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum)) errors = 0 for onum in range(len(ops)): @@ -376,7 +376,7 @@ def test_dump_journal(CFSD_PREFIX, osds): os.unlink(TMPFILE) journal_errors = check_journal(jsondict) - if journal_errors is not 0: + if journal_errors != 0: logging.error(jsondict) ERRORS += journal_errors @@ -520,7 +520,7 @@ def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path): for line in output.strip().split('\n'): print(line) linev = re.split('\s+', line) - if linev[0] is '': + if linev[0] == '': linev.pop(0) print('linev %s' % linev) weights.append(float(linev[2])) diff --git a/qa/tasks/barbican.py b/qa/tasks/barbican.py index 37c8f34aa43..0ce4aefb7d0 100644 --- 
a/qa/tasks/barbican.py +++ b/qa/tasks/barbican.py @@ -4,14 +4,12 @@ Deploy and configure Barbican for Teuthology import argparse import contextlib import logging -import string import httplib from urlparse import urlparse import json from teuthology import misc as teuthology from teuthology import contextutil -from teuthology import safepath from teuthology.orchestra import run from teuthology.exceptions import ConfigError @@ -201,7 +199,6 @@ def run_barbican(ctx, config): # start the public endpoint client_public_with_id = 'barbican.public' + '.' + client_id - client_public_with_cluster = cluster_name + '.' + client_public_with_id run_cmd = ['cd', get_barbican_dir(ctx), run.Raw('&&'), '.', '.barbicanenv/bin/activate', run.Raw('&&'), @@ -248,8 +245,6 @@ def create_secrets(ctx, config): keystone_role = cconfig.get('use-keystone-role', None) keystone_host, keystone_port = ctx.keystone.public_endpoints[keystone_role] - keystone_url = 'http://{host}:{port}/v2.0'.format(host=keystone_host, - port=keystone_port) barbican_host, barbican_port = ctx.barbican.endpoints[cclient] barbican_url = 'http://{host}:{port}'.format(host=barbican_host, port=barbican_port) @@ -482,7 +477,6 @@ def task(ctx, config): config = all_clients if isinstance(config, list): config = dict.fromkeys(config) - clients = config.keys() overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. 
diff --git a/qa/tasks/cbt.py b/qa/tasks/cbt.py index 606a0c4db3d..c529dd6eef9 100644 --- a/qa/tasks/cbt.py +++ b/qa/tasks/cbt.py @@ -3,7 +3,6 @@ import os import yaml from teuthology import misc -from teuthology.config import config as teuth_config from teuthology.orchestra import run from teuthology.task import Task diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py index 2baca41c4fa..364c6aceac4 100644 --- a/qa/tasks/ceph.py +++ b/qa/tasks/ceph.py @@ -85,18 +85,18 @@ def ceph_crash(ctx, config): path = os.path.join(ctx.archive, 'remote') try: os.makedirs(path) - except OSError as e: + except OSError: pass for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) try: os.makedirs(sub) - except OSError as e: + except OSError: pass try: teuthology.pull_directory(remote, '/var/lib/ceph/crash', os.path.join(sub, 'crash')) - except ReadError as e: + except ReadError: pass @@ -270,13 +270,13 @@ def ceph_log(ctx, config): path = os.path.join(ctx.archive, 'remote') try: os.makedirs(path) - except OSError as e: + except OSError: pass for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) try: os.makedirs(sub) - except OSError as e: + except OSError: pass teuthology.pull_directory(remote, '/var/log/ceph', os.path.join(sub, 'log')) @@ -397,8 +397,6 @@ def create_rbd_pool(ctx, config): @contextlib.contextmanager def cephfs_setup(ctx, config): cluster_name = config['cluster'] - testdir = teuthology.get_testdir(ctx) - coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) first_mon = teuthology.get_first_mon(ctx, config, cluster_name) (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() diff --git a/qa/tasks/ceph_fuse.py b/qa/tasks/ceph_fuse.py index 08254fed3c3..03f5a56e4a7 100644 --- a/qa/tasks/ceph_fuse.py +++ b/qa/tasks/ceph_fuse.py @@ -7,7 +7,6 @@ import logging from teuthology import misc as teuthology from cephfs.fuse_mount import FuseMount -from tasks.cephfs.filesystem import Filesystem log = 
logging.getLogger(__name__) diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py index 6d8bebeb07d..d2d7f43632f 100644 --- a/qa/tasks/ceph_manager.py +++ b/qa/tasks/ceph_manager.py @@ -679,7 +679,7 @@ class OSDThrasher(Thrasher): Decrease the size of the pool """ pool = self.ceph_manager.get_pool() - orig_pg_num = self.ceph_manager.get_pool_pg_num(pool) + _ = self.ceph_manager.get_pool_pg_num(pool) self.log("Shrinking pool %s" % (pool,)) if self.ceph_manager.contract_pool( pool, @@ -906,7 +906,7 @@ class OSDThrasher(Thrasher): Random action selector. """ chance_down = self.config.get('chance_down', 0.4) - chance_test_min_size = self.config.get('chance_test_min_size', 0) + _ = self.config.get('chance_test_min_size', 0) chance_test_backfill_full = \ self.config.get('chance_test_backfill_full', 0) if isinstance(chance_down, int): @@ -1653,7 +1653,7 @@ class CephManager: while True: proc = self.admin_socket(service_type, service_id, args, check_status=False, stdout=stdout) - if proc.exitstatus is 0: + if proc.exitstatus == 0: return proc else: tries += 1 diff --git a/qa/tasks/cephadm.py b/qa/tasks/cephadm.py index fb2d24d22f4..911068fe5f4 100644 --- a/qa/tasks/cephadm.py +++ b/qa/tasks/cephadm.py @@ -6,27 +6,17 @@ from cStringIO import StringIO import argparse import configobj import contextlib -import errno import logging import os import json -import time -import gevent -import re -import socket import uuid -from paramiko import SSHException -from ceph_manager import CephManager, write_conf +from ceph_manager import CephManager from tarfile import ReadError -from tasks.cephfs.filesystem import Filesystem from teuthology import misc as teuthology from teuthology import contextutil -from teuthology import exceptions from teuthology.orchestra import run -import ceph_client as cclient from teuthology.orchestra.daemon import DaemonGroup -from tasks.daemonwatchdog import DaemonWatchdog from teuthology.config import config as teuth_config # these items we use 
from ceph.py should probably eventually move elsewhere @@ -93,7 +83,6 @@ def normalize_hostnames(ctx): @contextlib.contextmanager def download_cephadm(ctx, config, ref): cluster_name = config['cluster'] - testdir = teuthology.get_testdir(ctx) if config.get('cephadm_mode') != 'cephadm-package': ref = config.get('cephadm_branch', ref) @@ -179,13 +168,13 @@ def ceph_log(ctx, config): path = os.path.join(ctx.archive, 'remote') try: os.makedirs(path) - except OSError as e: + except OSError: pass for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.name) try: os.makedirs(sub) - except OSError as e: + except OSError: pass teuthology.pull_directory(remote, '/var/log/ceph/' + fsid, os.path.join(sub, 'log')) @@ -207,19 +196,19 @@ def ceph_crash(ctx, config): path = os.path.join(ctx.archive, 'remote') try: os.makedirs(path) - except OSError as e: + except OSError: pass for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.name) try: os.makedirs(sub) - except OSError as e: + except OSError: pass try: teuthology.pull_directory(remote, '/var/lib/ceph/%s/crash' % fsid, os.path.join(sub, 'crash')) - except ReadError as e: + except ReadError: pass @contextlib.contextmanager @@ -387,7 +376,6 @@ def ceph_mons(ctx, config): """ cluster_name = config['cluster'] fsid = ctx.ceph[cluster_name].fsid - testdir = teuthology.get_testdir(ctx) num_mons = 1 try: @@ -457,7 +445,6 @@ def ceph_mgrs(ctx, config): """ cluster_name = config['cluster'] fsid = ctx.ceph[cluster_name].fsid - testdir = teuthology.get_testdir(ctx) try: nodes = [] @@ -549,7 +536,6 @@ def ceph_mdss(ctx, config): """ cluster_name = config['cluster'] fsid = ctx.ceph[cluster_name].fsid - testdir = teuthology.get_testdir(ctx) nodes = [] daemons = {} @@ -628,7 +614,6 @@ def shell(ctx, config): """ Execute (shell) commands """ - testdir = teuthology.get_testdir(ctx) cluster_name = config.get('cluster', 'ceph') if 'all' in config and len(config) == 1: @@ -724,7 +709,7 @@ def restart(ctx, 
config): healthy(ctx=ctx, config=dict(cluster=cluster)) if config.get('wait-for-osds-up', False): for cluster in clusters: - wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster)) + ctx.managers[cluster].wait_for_all_osds_up() yield @contextlib.contextmanager diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index be0fc9197a7..5c778231f0b 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -1191,7 +1191,7 @@ class Filesystem(MDSCluster): def dirfrag_exists(self, ino, frag): try: self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)]) - except CommandFailedError as e: + except CommandFailedError: return False else: return True diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py index 3392b1762c5..c7eb0ff7ef2 100644 --- a/qa/tasks/cephfs/fuse_mount.py +++ b/qa/tasks/cephfs/fuse_mount.py @@ -1,4 +1,3 @@ - from StringIO import StringIO import json import time @@ -10,7 +9,6 @@ from teuthology.contextutil import MaxWhileTries from teuthology.orchestra import run from teuthology.orchestra.run import CommandFailedError from .mount import CephFSMount -from tasks.cephfs.filesystem import Filesystem log = logging.getLogger(__name__) @@ -166,7 +164,7 @@ class FuseMount(CephFSMount): try: self.inst = status['inst_str'] self.addr = status['addr_str'] - except KeyError as e: + except KeyError: sessions = self.fs.rank_asok(['session', 'ls']) for s in sessions: if s['id'] == self.id: diff --git a/qa/tasks/cephfs/test_cephfs_shell.py b/qa/tasks/cephfs/test_cephfs_shell.py index 527301bbfad..5835e3f1ac8 100644 --- a/qa/tasks/cephfs/test_cephfs_shell.py +++ b/qa/tasks/cephfs/test_cephfs_shell.py @@ -12,8 +12,6 @@ from re import search as re_search from time import sleep from StringIO import StringIO from tasks.cephfs.cephfs_test_case import CephFSTestCase -from tasks.cephfs.fuse_mount import FuseMount -from teuthology.exceptions import CommandFailedError from teuthology.misc import sudo_write_file log = 
logging.getLogger(__name__) @@ -608,8 +606,8 @@ class TestDU(TestCephFSShell): path_prefix='') args = ['du', '/'] - for path in path_to_files: - args.append(path) + for p in path_to_files: + args.append(p) du_output = self.get_cephfs_shell_cmd_output(args) for expected_output in expected_patterns_in_output: diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py index 2bad19d8afe..fdee8fc7cf2 100644 --- a/qa/tasks/cephfs/test_client_recovery.py +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -591,7 +591,7 @@ class TestClientRecovery(CephFSTestCase): SESSION_AUTOCLOSE = 50 time_at_beg = time.time() mount_a_gid = self.mount_a.get_global_id() - mount_a_pid = self.mount_a.client_pid + _ = self.mount_a.client_pid self.fs.set_var('session_timeout', SESSION_TIMEOUT) self.fs.set_var('session_autoclose', SESSION_AUTOCLOSE) self.assert_session_count(2, self.fs.mds_asok(['session', 'ls'])) diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py index 3ffdb553cf0..7d2a3425a89 100644 --- a/qa/tasks/cephfs/test_exports.py +++ b/qa/tasks/cephfs/test_exports.py @@ -150,7 +150,6 @@ class TestExports(CephFSTestCase): status = self.fs.wait_for_daemons() rank1 = self.fs.get_rank(rank=1, status=status) - name1 = 'mds.'+rank1['name'] # Create a directory that is pre-exported to rank 1 self.mount_a.run_shell(["mkdir", "-p", "a/aa"]) diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py index 786980ba087..f3ce939328d 100644 --- a/qa/tasks/cephfs/test_failover.py +++ b/qa/tasks/cephfs/test_failover.py @@ -1,12 +1,10 @@ import time import signal -import json import logging from random import randint from cephfs_test_case import CephFSTestCase from teuthology.exceptions import CommandFailedError -from teuthology import misc as teuthology from tasks.cephfs.fuse_mount import FuseMount log = logging.getLogger(__name__) @@ -24,7 +22,7 @@ class TestClusterResize(CephFSTestCase): log.info("status = 
{0}".format(status)) original_ranks = set([info['gid'] for info in status.get_ranks(fscid)]) - original_standbys = set([info['gid'] for info in status.get_standbys()]) + _ = set([info['gid'] for info in status.get_standbys()]) oldmax = self.fs.get_var('max_mds') self.assertTrue(n > oldmax) @@ -44,7 +42,7 @@ class TestClusterResize(CephFSTestCase): log.info("status = {0}".format(status)) original_ranks = set([info['gid'] for info in status.get_ranks(fscid)]) - original_standbys = set([info['gid'] for info in status.get_standbys()]) + _ = set([info['gid'] for info in status.get_standbys()]) oldmax = self.fs.get_var('max_mds') self.assertTrue(n < oldmax) @@ -360,7 +358,7 @@ class TestStandbyReplay(CephFSTestCase): def _confirm_no_replay(self): status = self.fs.status() - standby_count = len(list(status.get_standbys())) + _ = len(list(status.get_standbys())) self.assertEqual(0, len(list(self.fs.get_replays(status=status)))) return status diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py index cca9bb617d4..a22169071d9 100644 --- a/qa/tasks/cephfs/test_misc.py +++ b/qa/tasks/cephfs/test_misc.py @@ -6,7 +6,6 @@ import errno import time import json import logging -import time log = logging.getLogger(__name__) @@ -77,7 +76,7 @@ class TestMisc(CephFSTestCase): def get_pool_df(fs, name): try: return fs.get_pool_df(name)['objects'] > 0 - except RuntimeError as e: + except RuntimeError: return False self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30) @@ -171,8 +170,7 @@ class TestMisc(CephFSTestCase): out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get', pool_name, 'size', '-f', 'json-pretty') - j = json.loads(out) - pool_size = int(j['size']) + _ = json.loads(out) proc = self.mount_a.run_shell(['df', '.']) output = proc.stdout.getvalue() diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py index 1684d170c8e..36b4e58ec8c 100644 --- 
a/qa/tasks/cephfs/test_recovery_pool.py +++ b/qa/tasks/cephfs/test_recovery_pool.py @@ -1,17 +1,13 @@ - """ Test our tools for recovering metadata from the data pool into an alternate pool """ -import json import logging -import os -from textwrap import dedent import traceback -from collections import namedtuple, defaultdict +from collections import namedtuple from teuthology.orchestra.run import CommandFailedError -from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology +from tasks.cephfs.cephfs_test_case import CephFSTestCase log = logging.getLogger(__name__) diff --git a/qa/tasks/cephfs/test_scrub.py b/qa/tasks/cephfs/test_scrub.py index d96f5691ba2..e4f0cb9beb3 100644 --- a/qa/tasks/cephfs/test_scrub.py +++ b/qa/tasks/cephfs/test_scrub.py @@ -2,12 +2,9 @@ Test CephFS scrub (distinct from OSD scrub) functionality """ import logging -import os -import traceback from collections import namedtuple -from teuthology.orchestra.run import CommandFailedError -from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology +from tasks.cephfs.cephfs_test_case import CephFSTestCase log = logging.getLogger(__name__) diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py index d6947805347..3b67c36b022 100644 --- a/qa/tasks/cephfs/test_scrub_checks.py +++ b/qa/tasks/cephfs/test_scrub_checks.py @@ -43,7 +43,7 @@ class TestScrubControls(CephFSTestCase): log.info("client_path: {0}".format(client_path)) log.info("Cloning repo into place") - repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path) + TestScrubChecks.clone_repo(self.mount_a, client_path) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -66,7 +66,7 @@ class TestScrubControls(CephFSTestCase): log.info("client_path: {0}".format(client_path)) log.info("Cloning repo into place") - repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path) + _ = 
TestScrubChecks.clone_repo(self.mount_a, client_path) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -94,7 +94,7 @@ class TestScrubControls(CephFSTestCase): log.info("client_path: {0}".format(client_path)) log.info("Cloning repo into place") - repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path) + _ = TestScrubChecks.clone_repo(self.mount_a, client_path) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -307,7 +307,7 @@ class TestScrubChecks(CephFSTestCase): success, errstring = validator(jout, 0) if not success: - raise AsokCommandFailedError(command, rout, jout, errstring) + raise AsokCommandFailedError(command, 0, jout, errstring) return jout def asok_command(self, mds_rank, command, validator): diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py index db3d5dfe6cd..8bacffdfbfb 100644 --- a/qa/tasks/cephfs/test_sessionmap.py +++ b/qa/tasks/cephfs/test_sessionmap.py @@ -1,4 +1,3 @@ -from StringIO import StringIO import time import json import logging diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py index ae5a58ca898..067c7b1fb92 100644 --- a/qa/tasks/cephfs/test_snapshots.py +++ b/qa/tasks/cephfs/test_snapshots.py @@ -1,8 +1,6 @@ import sys import logging import signal -import time -import errno from textwrap import dedent from tasks.cephfs.fuse_mount import FuseMount from tasks.cephfs.cephfs_test_case import CephFSTestCase @@ -527,7 +525,7 @@ class TestSnapshots(CephFSTestCase): self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)]) try: self.create_snap_dir(sname) - except CommandFailedError as e: + except CommandFailedError: # after reducing limit we expect the new snapshot creation to fail pass self.delete_dir_and_snaps("accounts", new_limit + 1) diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py 
index 56964880e50..a294cc46087 100644 --- a/qa/tasks/cephfs/test_strays.py +++ b/qa/tasks/cephfs/test_strays.py @@ -4,7 +4,6 @@ import logging from textwrap import dedent import datetime import gevent -import datetime from teuthology.orchestra.run import CommandFailedError, Raw from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology @@ -138,7 +137,7 @@ class TestStrays(CephFSTestCase): size_unit = 1024 # small, numerous files file_multiplier = 200 else: - raise NotImplemented(throttle_type) + raise NotImplementedError(throttle_type) # Pick up config changes self.fs.mds_fail_restart() @@ -225,7 +224,7 @@ class TestStrays(CephFSTestCase): num_strays_purging, mds_max_purge_files )) else: - raise NotImplemented(throttle_type) + raise NotImplementedError(throttle_type) log.info("Waiting for purge to complete {0}/{1}, {2}/{3}".format( num_strays_purging, num_strays, diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py index d33223b2bc9..0c59225dfc7 100644 --- a/qa/tasks/cephfs/test_volume_client.py +++ b/qa/tasks/cephfs/test_volume_client.py @@ -1,6 +1,5 @@ import json import logging -import time import os from textwrap import dedent from tasks.cephfs.cephfs_test_case import CephFSTestCase @@ -1078,7 +1077,7 @@ vc.disconnect() volume_prefix = "/myprefix" group_id = "grpid" volume_id = "volid" - mount_path = self._volume_client_python(vc_mount, dedent(""" + self._volume_client_python(vc_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False) print(create_result['mount_path']) diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py index a0fece8d5e4..19ea2418c4f 100644 --- a/qa/tasks/cephfs/test_volumes.py +++ b/qa/tasks/cephfs/test_volumes.py @@ -291,7 +291,7 @@ class TestVolumes(CephFSTestCase): nsize = usedsize/2 try: self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) - except 
CommandFailedError as ce: + except CommandFailedError: raise RuntimeError("expected the 'fs subvolume resize' command to succeed") # verify the quota diff --git a/qa/tasks/cephfs/xfstests_dev.py b/qa/tasks/cephfs/xfstests_dev.py index bf15b02a3a0..d8520d3bf21 100644 --- a/qa/tasks/cephfs/xfstests_dev.py +++ b/qa/tasks/cephfs/xfstests_dev.py @@ -118,7 +118,7 @@ class XFSTestsDev(CephFSTestCase): else: raise RuntimeError('expected a yum based or a apt based system') - proc = self.mount_a.client_remote.run(args=args, omit_sudo=False) + self.mount_a.client_remote.run(args=args, omit_sudo=False) def create_reqd_users(self): self.mount_a.client_remote.run(args=['sudo', 'useradd', 'fsgqa'], diff --git a/qa/tasks/check_counter.py b/qa/tasks/check_counter.py index b15dc6fe21c..fc877f285b6 100644 --- a/qa/tasks/check_counter.py +++ b/qa/tasks/check_counter.py @@ -4,7 +4,6 @@ import json from teuthology.task import Task from teuthology import misc -import ceph_manager log = logging.getLogger(__name__) diff --git a/qa/tasks/create_verify_lfn_objects.py b/qa/tasks/create_verify_lfn_objects.py index 01ab1a370b7..53254158128 100644 --- a/qa/tasks/create_verify_lfn_objects.py +++ b/qa/tasks/create_verify_lfn_objects.py @@ -35,7 +35,7 @@ def task(ctx, config): for ns in namespace: def object_name(i): nslength = 0 - if namespace is not '': + if namespace != '': nslength = len(namespace) numstr = str(i) fillerlen = l - nslength - len(prefix) - len(numstr) diff --git a/qa/tasks/daemonwatchdog.py b/qa/tasks/daemonwatchdog.py index b0212db1a00..fcb1bffb2e3 100644 --- a/qa/tasks/daemonwatchdog.py +++ b/qa/tasks/daemonwatchdog.py @@ -1,7 +1,6 @@ import logging import signal import time -import random from gevent import sleep from gevent.greenlet import Greenlet diff --git a/qa/tasks/divergent_priors.py b/qa/tasks/divergent_priors.py index 7a4d1327020..b565c774c44 100644 --- a/qa/tasks/divergent_priors.py +++ b/qa/tasks/divergent_priors.py @@ -155,6 +155,6 @@ def task(ctx, config): for i 
in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, '/tmp/existing']) - assert exit_status is 0 + assert exit_status == 0 log.info("success") diff --git a/qa/tasks/divergent_priors2.py b/qa/tasks/divergent_priors2.py index fa2fae9e7dc..12a9fd4a9af 100644 --- a/qa/tasks/divergent_priors2.py +++ b/qa/tasks/divergent_priors2.py @@ -183,7 +183,7 @@ def task(ctx, config): for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, '/tmp/existing']) - assert exit_status is 0 + assert exit_status == 0 cmd = 'rm {file}'.format(file=expfile) exp_remote.run(args=cmd, wait=True) diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py index 8b6d2c7d540..237d9127fc1 100644 --- a/qa/tasks/dump_stuck.py +++ b/qa/tasks/dump_stuck.py @@ -2,7 +2,6 @@ Dump_stuck command """ import logging -import re import time import ceph_manager diff --git a/qa/tasks/exec_on_cleanup.py b/qa/tasks/exec_on_cleanup.py index 0aecf78e5b3..a7c7ee5dae9 100644 --- a/qa/tasks/exec_on_cleanup.py +++ b/qa/tasks/exec_on_cleanup.py @@ -5,7 +5,6 @@ import logging import contextlib from teuthology import misc as teuthology -from teuthology import contextutil log = logging.getLogger(__name__) diff --git a/qa/tasks/fs.py b/qa/tasks/fs.py index 4286318527e..4b47e754bfa 100644 --- a/qa/tasks/fs.py +++ b/qa/tasks/fs.py @@ -2,10 +2,8 @@ CephFS sub-tasks. 
""" -import contextlib import logging import re -import time from tasks.cephfs.filesystem import Filesystem diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py index 5961165eb0d..522f1673da2 100644 --- a/qa/tasks/keystone.py +++ b/qa/tasks/keystone.py @@ -9,7 +9,6 @@ from cStringIO import StringIO from teuthology import misc as teuthology from teuthology import contextutil from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user from teuthology.packaging import install_package from teuthology.packaging import remove_package from teuthology.exceptions import ConfigError @@ -209,7 +208,6 @@ def run_keystone(ctx, config): # start the public endpoint client_public_with_id = 'keystone.public' + '.' + client_id - client_public_with_cluster = cluster_name + '.' + client_public_with_id public_host, public_port = ctx.keystone.public_endpoints[client] run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public', diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py index 2647eba761c..29e2c351346 100644 --- a/qa/tasks/mds_creation_failure.py +++ b/qa/tasks/mds_creation_failure.py @@ -1,4 +1,5 @@ - +# FIXME: this file has many undefined vars which are accessed! 
+# flake8: noqa import logging import contextlib import time diff --git a/qa/tasks/mgr/dashboard/test_cephfs.py b/qa/tasks/mgr/dashboard/test_cephfs.py index bd8666ea499..47a39d18bf6 100644 --- a/qa/tasks/mgr/dashboard/test_cephfs.py +++ b/qa/tasks/mgr/dashboard/test_cephfs.py @@ -62,7 +62,7 @@ class CephfsTest(DashboardTestCase): def test_cephfs_evict_client_does_not_exist(self): fs_id = self.get_fs_id() - data = self._delete("/api/cephfs/{}/client/1234".format(fs_id)) + self._delete("/api/cephfs/{}/client/1234".format(fs_id)) self.assertStatus(404) def test_cephfs_get(self): diff --git a/qa/tasks/mgr/dashboard/test_ganesha.py b/qa/tasks/mgr/dashboard/test_ganesha.py index 6b89ca508fb..cd869a00e40 100644 --- a/qa/tasks/mgr/dashboard/test_ganesha.py +++ b/qa/tasks/mgr/dashboard/test_ganesha.py @@ -3,9 +3,8 @@ from __future__ import absolute_import -import time -from .helper import DashboardTestCase, JObj, JLeaf, JList +from .helper import DashboardTestCase class GaneshaTest(DashboardTestCase): diff --git a/qa/tasks/mgr/dashboard/test_orchestrator.py b/qa/tasks/mgr/dashboard/test_orchestrator.py index 348bde14b72..99c004711bd 100644 --- a/qa/tasks/mgr/dashboard/test_orchestrator.py +++ b/qa/tasks/mgr/dashboard/test_orchestrator.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import -import os import json from .helper import DashboardTestCase diff --git a/qa/tasks/mgr/dashboard/test_rbd.py b/qa/tasks/mgr/dashboard/test_rbd.py index 7ac313a5650..4623873a61d 100644 --- a/qa/tasks/mgr/dashboard/test_rbd.py +++ b/qa/tasks/mgr/dashboard/test_rbd.py @@ -764,7 +764,7 @@ class RbdTest(DashboardTestCase): id = self.create_image_in_trash('rbd', 'test_rbd') self.assertStatus(200) - img = self.get_image('rbd', None, 'test_rbd') + self.get_image('rbd', None, 'test_rbd') self.assertStatus(404) time.sleep(1) diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py index a1fa5515e5f..2830829e725 100644 --- a/qa/tasks/mgr/mgr_test_case.py 
+++ b/qa/tasks/mgr/mgr_test_case.py @@ -97,9 +97,9 @@ class MgrTestCase(CephTestCase): assert cls.mgr_cluster is not None if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED: - self.skipTest("Only have {0} manager daemons, " - "{1} are required".format( - len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED)) + cls.skipTest( + "Only have {0} manager daemons, {1} are required".format( + len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED)) cls.setup_mgrs() diff --git a/qa/tasks/mgr/test_cephadm_orchestrator.py b/qa/tasks/mgr/test_cephadm_orchestrator.py index 7c1bc826d8c..16966b2afbc 100644 --- a/qa/tasks/mgr/test_cephadm_orchestrator.py +++ b/qa/tasks/mgr/test_cephadm_orchestrator.py @@ -1,7 +1,4 @@ -import json import logging -from tempfile import NamedTemporaryFile -from teuthology.exceptions import CommandFailedError from mgr_test_case import MgrTestCase log = logging.getLogger(__name__) diff --git a/qa/tasks/mgr/test_orchestrator_cli.py b/qa/tasks/mgr/test_orchestrator_cli.py index e62af60ceab..0de542ff738 100644 --- a/qa/tasks/mgr/test_orchestrator_cli.py +++ b/qa/tasks/mgr/test_orchestrator_cli.py @@ -1,7 +1,6 @@ import errno import json import logging -from tempfile import NamedTemporaryFile from time import sleep from teuthology.exceptions import CommandFailedError diff --git a/qa/tasks/mon_clock_skew_check.py b/qa/tasks/mon_clock_skew_check.py index 5c4088c7369..f7862cb1354 100644 --- a/qa/tasks/mon_clock_skew_check.py +++ b/qa/tasks/mon_clock_skew_check.py @@ -2,11 +2,8 @@ Handle clock skews in monitors. """ import logging -import contextlib import ceph_manager import time -import gevent -from StringIO import StringIO from teuthology import misc as teuthology log = logging.getLogger(__name__) diff --git a/qa/tasks/netem.py b/qa/tasks/netem.py index 95018150da9..4fa08bbc0a0 100644 --- a/qa/tasks/netem.py +++ b/qa/tasks/netem.py @@ -6,10 +6,7 @@ Reference:https://wiki.linuxfoundation.org/networking/netem. 
import logging import contextlib -from teuthology import misc as teuthology from cStringIO import StringIO -from teuthology.orchestra import run -from teuthology import contextutil from paramiko import SSHException import socket import time @@ -168,7 +165,7 @@ class Toggle: try: self.packet_drop() log.info('link down') - except SSHException as e: + except SSHException: log.debug('Failed to run command') self.stop_event.wait(timeout=self.interval) @@ -176,7 +173,7 @@ class Toggle: try: delete_dev(self.remote, self.interface) log.info('link up') - except SSHException as e: + except SSHException: log.debug('Failed to run command') def begin(self, gname): diff --git a/qa/tasks/osd_max_pg_per_osd.py b/qa/tasks/osd_max_pg_per_osd.py index 03ea218f569..739959e2fbd 100644 --- a/qa/tasks/osd_max_pg_per_osd.py +++ b/qa/tasks/osd_max_pg_per_osd.py @@ -76,7 +76,6 @@ def test_create_from_peer(ctx, config): 4. delete a pool, verify pgs go active. """ pg_num = config.get('pg_num', 1) - pool_size = config.get('pool_size', 2) from_primary = config.get('from_primary', True) manager = ctx.managers['ceph'] @@ -121,7 +120,6 @@ def test_create_from_peer(ctx, config): def task(ctx, config): assert isinstance(config, dict), \ 'osd_max_pg_per_osd task only accepts a dict for config' - manager = ctx.managers['ceph'] if config.get('test_create_from_mon', True): test_create_from_mon(ctx, config) else: diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py index 94862090322..d73b9476314 100644 --- a/qa/tasks/radosbench.py +++ b/qa/tasks/radosbench.py @@ -81,7 +81,7 @@ def task(ctx, config): pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name) osize = config.get('objectsize', 65536) - if osize is 0: + if osize == 0: objectsize = [] else: objectsize = ['-O', str(osize)] @@ -135,5 +135,5 @@ def task(ctx, config): log.info('joining radosbench (timing out after %ss)', timeout) run.wait(radosbench.itervalues(), timeout=timeout) - if pool is not 'data' and 
create_pool: + if pool != 'data' and create_pool: manager.remove_pool(pool) diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py index 6dab13f058f..e9cfc369668 100644 --- a/qa/tasks/radosgw_admin.py +++ b/qa/tasks/radosgw_admin.py @@ -10,7 +10,6 @@ Rgw admin testing against a running instance # python qa/tasks/radosgw_admin.py [USER] HOSTNAME # -import copy import json import logging import time @@ -24,11 +23,9 @@ from cStringIO import StringIO import boto.exception import boto.s3.connection import boto.s3.acl -from boto.utils import RequestHook import httplib2 -import util.rgw as rgw_utils from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops @@ -291,7 +288,6 @@ def task(ctx, config): display_name2='Fud' display_name3='Bar' email='foo@foo.com' - email2='bar@bar.com' access_key='9te6NH5mcdcq0Tc5i8i1' secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' access_key2='p5YnriCv1nAtykxBrupQ' @@ -1052,8 +1048,6 @@ def task(ctx, config): # TESTCASE 'zonegroup-info', 'zonegroup', 'get', 'get zonegroup info', 'succeeds' (err, out) = rgwadmin(ctx, client, ['zonegroup', 'get'], check_status=True) -import sys -from tasks.radosgw_admin import task from teuthology.config import config from teuthology.orchestra import cluster, remote import argparse; diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py index 5d8bf18687b..50f88ea8570 100644 --- a/qa/tasks/radosgw_admin_rest.py +++ b/qa/tasks/radosgw_admin_rest.py @@ -7,9 +7,8 @@ To extract the inventory (in csv format) use the command: grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' """ -from cStringIO import StringIO import logging -import json + import boto.exception import boto.s3.connection diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py index 67b16851b7b..dffd10a0dc8 100644 --- a/qa/tasks/ragweed.py +++ b/qa/tasks/ragweed.py @@ -10,13 +10,10 @@ import os import random import string -import util.rgw as rgw_utils - from teuthology import misc as teuthology from 
teuthology import contextutil from teuthology.config import config as teuth_config from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user log = logging.getLogger(__name__) diff --git a/qa/tasks/rbd_fsx.py b/qa/tasks/rbd_fsx.py index 12e50d98b05..396d8fed2a2 100644 --- a/qa/tasks/rbd_fsx.py +++ b/qa/tasks/rbd_fsx.py @@ -4,7 +4,7 @@ Run fsx on an rbd image import contextlib import logging -from teuthology.orchestra import run +from teuthology.exceptions import ConfigError from teuthology.parallel import parallel from teuthology import misc as teuthology diff --git a/qa/tasks/rbd_mirror_thrash.py b/qa/tasks/rbd_mirror_thrash.py index d7fdf5607f9..a42d19e7083 100644 --- a/qa/tasks/rbd_mirror_thrash.py +++ b/qa/tasks/rbd_mirror_thrash.py @@ -13,9 +13,7 @@ from gevent import sleep from gevent.greenlet import Greenlet from gevent.event import Event -from teuthology import misc from teuthology.exceptions import CommandFailedError -from teuthology.task import Task from teuthology.orchestra import run from tasks.thrasher import Thrasher diff --git a/qa/tasks/reg11184.py b/qa/tasks/reg11184.py index 7bb304608b0..3a1daa0a385 100644 --- a/qa/tasks/reg11184.py +++ b/qa/tasks/reg11184.py @@ -232,7 +232,7 @@ def task(ctx, config): for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, '/tmp/existing']) - assert exit_status is 0 + assert exit_status == 0 (remote,) = ctx.\ cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py index d666a5e6a01..aceef8894f8 100644 --- a/qa/tasks/rgw.py +++ b/qa/tasks/rgw.py @@ -3,11 +3,7 @@ rgw routines """ import argparse import contextlib -import json import logging -import os -import errno -import util.rgw as rgw_utils from teuthology.orchestra import run from teuthology import misc as teuthology @@ -15,9 +11,9 @@ from teuthology import contextutil from teuthology.exceptions import ConfigError 
from util import get_remote_for_role from util.rgw import rgwadmin, wait_for_radosgw -from util.rados import (rados, create_ec_pool, - create_replicated_pool, - create_cache_pool) +from util.rados import (create_ec_pool, + create_replicated_pool, + create_cache_pool) log = logging.getLogger(__name__) diff --git a/qa/tasks/rgw_multisite.py b/qa/tasks/rgw_multisite.py index 3c4c4da142e..5400020db75 100644 --- a/qa/tasks/rgw_multisite.py +++ b/qa/tasks/rgw_multisite.py @@ -2,7 +2,6 @@ rgw multisite configuration routines """ import argparse -import contextlib import logging import random import string @@ -223,7 +222,7 @@ class Gateway(multisite.Gateway): # insert zone args before the first | pipe = args.index(run.Raw('|')) args = args[0:pipe] + zone.zone_args() + args[pipe:] - except ValueError, e: + except ValueError: args += zone.zone_args() self.daemon.command_kwargs['args'] = args diff --git a/qa/tasks/rgw_multisite_tests.py b/qa/tasks/rgw_multisite_tests.py index dade6e47483..dee6bfaa303 100644 --- a/qa/tasks/rgw_multisite_tests.py +++ b/qa/tasks/rgw_multisite_tests.py @@ -2,7 +2,6 @@ rgw multisite testing """ import logging -import sys import nose.core import nose.config diff --git a/qa/tasks/s3a_hadoop.py b/qa/tasks/s3a_hadoop.py index d3c503b0d07..6756f2cdbbf 100644 --- a/qa/tasks/s3a_hadoop.py +++ b/qa/tasks/s3a_hadoop.py @@ -1,6 +1,5 @@ import contextlib import logging -import time from teuthology import misc from teuthology.orchestra import run diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py index 52b7915d9d5..95ee8be5a8c 100644 --- a/qa/tasks/s3tests.py +++ b/qa/tasks/s3tests.py @@ -14,7 +14,6 @@ from teuthology import misc as teuthology from teuthology import contextutil from teuthology.config import config as teuth_config from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user from teuthology.exceptions import ConfigError log = logging.getLogger(__name__) diff --git a/qa/tasks/s3tests_java.py 
b/qa/tasks/s3tests_java.py index 47f9de1d2fc..e14bb5ea7dd 100644 --- a/qa/tasks/s3tests_java.py +++ b/qa/tasks/s3tests_java.py @@ -9,14 +9,11 @@ import os import random import string import yaml -import socket import getpass from teuthology import misc as teuthology -from teuthology.exceptions import ConfigError from teuthology.task import Task from teuthology.orchestra import run -from teuthology.orchestra.remote import Remote log = logging.getLogger(__name__) @@ -352,7 +349,7 @@ class S3tests_java(Task): stdout=StringIO() ) - if gr is not 'All': + if gr != 'All': self.ctx.cluster.only(client).run( args=args + ['--tests'] + [gr] + extra_args, stdout=StringIO() diff --git a/qa/tasks/swift.py b/qa/tasks/swift.py index e89fcf03e48..402172c8cdc 100644 --- a/qa/tasks/swift.py +++ b/qa/tasks/swift.py @@ -13,7 +13,6 @@ from teuthology import misc as teuthology from teuthology import contextutil from teuthology.config import config as teuth_config from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user log = logging.getLogger(__name__) diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py index 4dfaa44d445..71e80bc6471 100644 --- a/qa/tasks/tempest.py +++ b/qa/tasks/tempest.py @@ -6,7 +6,7 @@ import logging from teuthology import misc as teuthology from teuthology import contextutil -from teuthology.config import config as teuth_config +from teuthology.exceptions import ConfigError from teuthology.orchestra import run log = logging.getLogger(__name__) @@ -247,7 +247,6 @@ def task(ctx, config): config = all_clients if isinstance(config, list): config = dict.fromkeys(config) - clients = config.keys() overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. 
diff --git a/qa/tasks/tox.py b/qa/tasks/tox.py index 46b4f565dc0..81d712f44b1 100644 --- a/qa/tasks/tox.py +++ b/qa/tasks/tox.py @@ -3,7 +3,6 @@ import contextlib import logging from teuthology import misc as teuthology -from teuthology import contextutil from teuthology.orchestra import run log = logging.getLogger(__name__) diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py index d3abf1ced3b..d1ea39d1cd0 100644 --- a/qa/tasks/util/rgw.py +++ b/qa/tasks/util/rgw.py @@ -1,14 +1,8 @@ from cStringIO import StringIO import logging import json -import requests import time -from requests.packages.urllib3 import PoolManager -from requests.packages.urllib3.util import Retry -from urlparse import urlparse - -from teuthology.orchestra.connection import split_user from teuthology import misc as teuthology log = logging.getLogger(__name__) diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py index 965ad9717f6..e5d1c4c6ea6 100644 --- a/qa/tasks/vstart_runner.py +++ b/qa/tasks/vstart_runner.py @@ -41,7 +41,6 @@ import shutil import re import os import time -import json import sys import errno from unittest import suite, loader @@ -623,9 +622,9 @@ class LocalKernelMount(KernelMount): except Exception as e: self.client_remote.run(args=[ 'sudo', - run.Raw('PATH=/usr/sbin:$PATH'), + Raw('PATH=/usr/sbin:$PATH'), 'lsof', - run.Raw(';'), + Raw(';'), 'ps', 'auxf', ], timeout=(15*60), omit_sudo=False) raise e @@ -1215,7 +1214,7 @@ def scan_tests(modules): max_required_mgr = 0 require_memstore = False - for suite, case in enumerate_methods(overall_suite): + for suite_, case in enumerate_methods(overall_suite): max_required_mds = max(max_required_mds, getattr(case, "MDSS_REQUIRED", 0)) max_required_clients = max(max_required_clients, @@ -1292,7 +1291,6 @@ def exec_test(): opt_teardown_cluster = False global opt_log_ps_output opt_log_ps_output = False - opt_clear_old_log = False use_kernel_client = False args = sys.argv[1:] @@ -1312,7 +1310,6 @@ def exec_test(): elif f 
== '--log-ps-output': opt_log_ps_output = True elif f == '--clear-old-log': - opt_clear_old_log = True clear_old_log() elif f == "--kclient": use_kernel_client = True diff --git a/qa/workunits/fs/multiclient_sync_read_eof.py b/qa/workunits/fs/multiclient_sync_read_eof.py index d3e0f8e652e..1d5bb650621 100755 --- a/qa/workunits/fs/multiclient_sync_read_eof.py +++ b/qa/workunits/fs/multiclient_sync_read_eof.py @@ -2,8 +2,6 @@ import argparse import os -import sys -import time def main(): parser = argparse.ArgumentParser() diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py index 6b26d7a296f..20bb9e912dc 100644 --- a/qa/workunits/mon/caps.py +++ b/qa/workunits/mon/caps.py @@ -2,10 +2,8 @@ from __future__ import print_function -import json import subprocess import shlex -from StringIO import StringIO import errno import sys import os @@ -13,8 +11,7 @@ import io import re -import rados -from ceph_argparse import * +from ceph_argparse import * # noqa keyring_base = '/tmp/cephtest-caps.keyring' diff --git a/qa/workunits/restart/test-backtraces.py b/qa/workunits/restart/test-backtraces.py index 2fa67a23f38..07fe8845f4e 100755 --- a/qa/workunits/restart/test-backtraces.py +++ b/qa/workunits/restart/test-backtraces.py @@ -9,13 +9,9 @@ import time import sys if sys.version_info[0] == 2: - from cStringIO import StringIO - range = xrange elif sys.version_info[0] == 3: - from io import StringIO - range = range import rados as rados @@ -47,8 +43,6 @@ def set_mds_config_param(ceph, param): if r != 0: raise Exception -import ConfigParser -import contextlib class _TrimIndentFile(object): def __init__(self, fp): @@ -150,10 +144,10 @@ def verify(rados_ioctx, ino, values, pool): bt = decode(binbt) + ind = 0 if bt['ino'] != ino: raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format( bi=bt['ancestors'][ind]['dname'], ino=ino, bt=bt, i=ino, v=values)) - ind = 0 for (n, i) in values: if 
bt['ancestors'][ind]['dirino'] != i: raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format( |