author     David Zafman <dzafman@redhat.com>    2015-05-22 20:52:00 +0200
committer  David Zafman <dzafman@redhat.com>    2015-06-02 02:41:11 +0200
commit     90202a70f8c32120625c99ef5131778b3c9f6303 (patch)
tree       9b68a02d5b7eb2175b204386a2523a47ce5e05be    /src/test/ceph_objectstore_tool.py
parent     ceph-objectstore-tool: Invalidate pg stats when objects were skipped during p... (diff)
ceph-objectstore-tool: Allow --pgid specified on import (for post split)
Revamp split import testing to use --pgid option post split

Signed-off-by: David Zafman <dzafman@redhat.com>
Diffstat (limited to 'src/test/ceph_objectstore_tool.py')
-rwxr-xr-x  src/test/ceph_objectstore_tool.py | 223
1 file changed, 158 insertions(+), 65 deletions(-)
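
What this patch exercises: ceph-objectstore-tool now accepts --pgid together with --op import, so an export taken from a parent PG can be imported into a specific child PG after a pool split, and it rejects a pgid that names a different pool or a seed larger than the current pg_num. A minimal sketch of the new invocation in the test's own style; CFSD_PREFIX here is a stand-in for the per-OSD prefix the script builds earlier, and the paths and pgids are illustrative placeholders, not values from this patch:

    # Illustrative sketch only: CFSD_PREFIX, osd, pgid and file paths are hypothetical.
    from subprocess import call

    CFSD_PREFIX = ("./ceph-objectstore-tool --data-path dev/{osd} "
                   "--journal-path dev/{osd}.journal ")

    osd = "osd0"
    fname = "/tmp/parent_pg.export"   # export taken from the parent PG before the split
    child_pg = "1.1"                  # a child PG that exists after pg_num was raised

    # New in this change: --pgid may be combined with --op import to target
    # a specific (post-split) PG.
    cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(
        osd=osd, pg=child_pg, file=fname)
    call(cmd, shell=True)

    # Rejected cases covered by the failure tests below:
    #   --pgid from another pool -> "Can't specify a different pgid pool, must be ..."
    #   --pgid <pool>.80         -> "Illegal pgid, the seed is larger than current pg_num"
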
diff --git a/src/test/ceph_objectstore_tool.py b/src/test/ceph_objectstore_tool.py
index 035ceb3703b..7c75c69131a 100755
--- a/src/test/ceph_objectstore_tool.py
+++ b/src/test/ceph_objectstore_tool.py
@@ -295,6 +295,36 @@ def kill_daemons():
call("./init-ceph -c {conf} stop osd mon > /dev/null 2>&1".format(conf=CEPH_CONF), shell=True)
+def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME):
+ repcount = 0
+ ERRORS = 0
+ for nsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]:
+ nspace = nsfile.split("-")[0]
+ file = nsfile.split("-")[1]
+ path = os.path.join(DATADIR, nsfile)
+ tmpfd = open(TMPFILE, "w")
+ cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=tmpfd)
+ if ret:
+ logging.critical("INTERNAL ERROR")
+ return 1
+ tmpfd.close()
+ obj_locs = get_lines(TMPFILE)
+ if len(obj_locs) == 0:
+ logging.error("Can't find imported object {name}".format(name=file))
+ ERRORS += 1
+ for obj_loc in obj_locs:
+ repcount += 1
+ cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True)
+ if ret != 0:
+ logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
+ ERRORS += 1
+ return ERRORS, repcount
+
+
def main(argv):
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
nullfd = open(os.devnull, "w")
@@ -528,13 +558,20 @@ def main(argv):
cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG)
ERRORS += test_failure_tty(cmd, "stdout is a tty and no --file filename specified")
+ # Prep a valid export file for import failure tests
OTHERFILE = "/tmp/foo.{pid}".format(pid=pid)
- foofd = open(OTHERFILE, "w")
- foofd.close()
+ cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE)
+ logging.debug(cmd)
+ call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
- # On import can't specify a PG
- cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {FOO}").format(osd=ONEOSD, pg=ONEPG, FOO=OTHERFILE)
- ERRORS += test_failure(cmd, "--pgid option invalid with import")
+ # On import can't specify a PG with a non-existent pool
+ cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg="10.0", file=OTHERFILE)
+ ERRORS += test_failure(cmd, "Can't specify a different pgid pool, must be")
+
+ # On import can't specify a PG with a bad seed
+ TMPPG="{pool}.80".format(pool=REPID)
+ cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE)
+ ERRORS += test_failure(cmd, "Illegal pgid, the seed is larger than current pg_num")
os.unlink(OTHERFILE)
cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE)
@@ -1019,7 +1056,7 @@ def main(argv):
if pg == PGS[0]:
cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd)
elif pg == PGS[1]:
- cmd = (CFSD_PREFIX + "--op import --file - < {file}").format(osd=osd, file=file)
+ cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg)
else:
cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
logging.debug(cmd)
@@ -1035,29 +1072,8 @@ def main(argv):
if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
print "Verify replicated import data"
- for nsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(REP_NAME) == 0]:
- nspace = nsfile.split("-")[0]
- file = nsfile.split("-")[1]
- path = os.path.join(DATADIR, nsfile)
- tmpfd = open(TMPFILE, "w")
- cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace)
- logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=tmpfd)
- if ret:
- logging.critical("INTERNAL ERROR")
- return 1
- tmpfd.close()
- obj_locs = get_lines(TMPFILE)
- if len(obj_locs) == 0:
- logging.error("Can't find imported object {name}".format(name=file))
- ERRORS += 1
- for obj_loc in obj_locs:
- cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc)
- logging.debug(cmd)
- ret = call(cmd, shell=True)
- if ret != 0:
- logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc))
- ERRORS += 1
+ data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME)
+ ERRORS += data_errors
else:
logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES")
@@ -1105,55 +1121,132 @@ def main(argv):
else:
logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES")
- # Cause REP_POOL to split and test import with object/log filtering
- cmd = "./ceph osd pool set {pool} pg_num 32".format(pool=REP_POOL)
- logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
- time.sleep(15)
- cmd = "./ceph osd pool set {pool} pgp_num 32".format(pool=REP_POOL)
+ # Clear directories of previous portion
+ call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
+ call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)
+ os.mkdir(TESTDIR)
+ os.mkdir(DATADIR)
+
+ # Cause SPLIT_POOL to split and test import with object/log filtering
+ print "Testing import all objects after a split"
+ SPLIT_POOL = "split_pool"
+ PG_COUNT = 1
+ SPLIT_OBJ_COUNT = 5
+ SPLIT_NSPACE_COUNT = 2
+ SPLIT_NAME = "split"
+ cmd = "./ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT)
logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
- wait_for_health()
- kill_daemons()
-
- print "Remove pgs for another import"
+ call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
+ SPLITID = get_pool_id(SPLIT_POOL, nullfd)
+ pool_size = int(check_output("./ceph osd pool get {pool} size".format(pool=SPLIT_POOL), shell=True, stderr=nullfd).split(" ")[1])
+ EXP_ERRORS = 0
RM_ERRORS = 0
- for pg in ALLREPPGS:
- for osd in get_osds(pg, OSDDIR):
- cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd)
+ IMP_ERRORS = 0
+
+ objects = range(1, SPLIT_OBJ_COUNT + 1)
+ nspaces = range(SPLIT_NSPACE_COUNT)
+ for n in nspaces:
+ nspace = get_nspace(n)
+
+ for i in objects:
+ NAME = SPLIT_NAME + "{num}".format(num=i)
+ LNAME = nspace + "-" + NAME
+ DDNAME = os.path.join(DATADIR, LNAME)
+
+ cmd = "rm -f " + DDNAME
logging.debug(cmd)
- ret = call(cmd, shell=True, stdout=nullfd)
+ call(cmd, shell=True)
+
+ if i == 1:
+ dataline = range(DATALINECOUNT)
+ else:
+ dataline = range(1)
+ fd = open(DDNAME, "w")
+ data = "This is the split data for " + LNAME + "\n"
+ for _ in dataline:
+ fd.write(data)
+ fd.close()
+
+ cmd = "./rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stderr=nullfd)
if ret != 0:
- logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
- RM_ERRORS += 1
+ logging.critical("Rados put command failed with {ret}".format(ret=ret))
+ return 1
- ERRORS += RM_ERRORS
+ wait_for_health()
+ kill_daemons()
- IMP_ERRORS = 0
- if RM_ERRORS == 0:
- print "Test pg import after PGs have split"
- for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and string.find(f, "osd") == 0]:
- dir = os.path.join(TESTDIR, osd)
- PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
- for pg in PGS:
- if pg not in ALLREPPGS:
- continue
- file = os.path.join(dir, pg)
- cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file)
+ for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and string.find(f, "osd") == 0]:
+ os.mkdir(os.path.join(TESTDIR, osd))
+
+ pg = "{pool}.0".format(pool=SPLITID)
+ EXPORT_PG = pg
+
+ export_osds = get_osds(pg, OSDDIR)
+ for osd in export_osds:
+ mydir = os.path.join(TESTDIR, osd)
+ fname = os.path.join(mydir, pg)
+ cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
+ if ret != 0:
+ logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret))
+ EXP_ERRORS += 1
+
+ ERRORS += EXP_ERRORS
+
+ if EXP_ERRORS == 0:
+ vstart(new=False)
+ wait_for_health()
+
+ time.sleep(20)
+
+ cmd = "./ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd)
+ time.sleep(5)
+ wait_for_health()
+
+ time.sleep(15)
+
+ kill_daemons()
+
+ # Now 2 PGs, poolid.0 and poolid.1
+ for seed in range(2):
+ pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed)
+
+ which = 0
+ for osd in get_osds(pg, OSDDIR):
+ cmd = (CFSD_PREFIX + "--op remove --pgid {pg}").format(pg=pg, osd=osd)
+ logging.debug(cmd)
+ ret = call(cmd, shell=True, stdout=nullfd)
+
+ # This is weird. The export files are based on only the EXPORT_PG
+ # and where that pg was before the split. Use 'which' to use all
+ # export copies in import.
+ mydir = os.path.join(TESTDIR, export_osds[which])
+ fname = os.path.join(mydir, EXPORT_PG)
+ which += 1
+ cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname)
logging.debug(cmd)
ret = call(cmd, shell=True, stdout=nullfd)
if ret != 0:
logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret))
IMP_ERRORS += 1
- else:
- logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
- ERRORS += IMP_ERRORS
+ ERRORS += IMP_ERRORS
- # Start up again to make sure imports didn't corrupt anything
- if IMP_ERRORS == 0:
- vstart(new=False)
- wait_for_health()
+ # Start up again to make sure imports didn't corrupt anything
+ if IMP_ERRORS == 0:
+ print "Verify split import data"
+ data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME)
+ ERRORS += data_errors
+ if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size):
+ logging.error("Incorrect number of replicas seen {count}".format(count=count))
+ ERRORS += 1
+ vstart(new=False)
+ wait_for_health()
call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True)
call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True)