Diffstat (limited to 'src/test')
-rw-r--r-- | src/test/common/CMakeLists.txt                  |   4
-rw-r--r-- | src/test/common/test_versioned_variant.cc       | 341
-rw-r--r-- | src/test/librbd/CMakeLists.txt                  |   3
-rw-r--r-- | src/test/librbd/io/test_mock_ImageRequest.cc    |  31
-rw-r--r-- | src/test/librbd/io/test_mock_ObjectRequest.cc   | 115
-rw-r--r-- | src/test/librbd/journal/test_Entries.cc         |  63
-rw-r--r-- | src/test/librbd/journal/test_Stress.cc          | 121
-rw-r--r-- | src/test/librbd/test_librbd.cc                  | 365
-rw-r--r-- | src/test/librbd/test_main.cc                    |   2
-rw-r--r-- | src/test/librbd/test_mock_Journal.cc            |   2
-rw-r--r-- | src/test/objectstore/Allocator_test.cc          |   3
-rw-r--r-- | src/test/objectstore/fastbmap_allocator_test.cc | 181
-rw-r--r-- | src/test/objectstore/store_test.cc              | 208
-rw-r--r-- | src/test/pybind/test_rbd.py                     |  22
-rw-r--r-- | src/test/rgw/test_rgw_lc.cc                     | 237
-rw-r--r-- | src/test/rgw/test_rgw_lua.cc                    |  53
-rw-r--r-- | src/test/test_c2c.cc                            |   2
-rw-r--r-- | src/test/test_mempool.cc                        |   2
-rw-r--r-- | src/test/test_rgw_admin_meta.cc                 |   2
19 files changed, 1467 insertions, 290 deletions
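Before the full diff, here is a minimal sketch of the round trip that the new unittest_versioned_variant exercises. It assumes only what the test itself uses from common/versioned_variant.h (ceph::versioned_variant::encode()/decode() and ceph::bufferlist); it illustrates the versioning property under test rather than documenting the header's API.

#include "common/versioned_variant.h"
#include <cassert>
#include <string>
#include <variant>

int main() {
  using V1 = std::variant<int>;               // original wire format
  using V2 = std::variant<int, std::string>;  // later version appends a type

  ceph::bufferlist bl;
  // Encode an alternative that V1 and V2 share.
  ceph::versioned_variant::encode(V2{42}, bl);

  // An old decoder built against V1 can still read it: the encoding records
  // which alternative is held, and index 0 exists in both versions.
  V1 out;
  auto p = bl.cbegin();
  ceph::versioned_variant::decode(out, p);
  assert(std::get<int>(out) == 42);

  // Encoding the new std::string alternative instead would make V1's decode
  // throw buffer::malformed_input, as the EncodeNew test below asserts.
  return 0;
}

The full diff follows.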
diff --git a/src/test/common/CMakeLists.txt b/src/test/common/CMakeLists.txt index c044daf662a..b2ed06ee306 100644 --- a/src/test/common/CMakeLists.txt +++ b/src/test/common/CMakeLists.txt @@ -390,6 +390,10 @@ target_link_libraries(unittest_blocked_completion Boost::system GTest::GTest) add_executable(unittest_allocate_unique test_allocate_unique.cc) add_ceph_unittest(unittest_allocate_unique) +add_executable(unittest_versioned_variant test_versioned_variant.cc) +add_ceph_unittest(unittest_versioned_variant) +target_link_libraries(unittest_versioned_variant common) + if(WITH_SYSTEMD) add_executable(unittest_journald_logger test_journald_logger.cc) target_link_libraries(unittest_journald_logger ceph-common) diff --git a/src/test/common/test_versioned_variant.cc b/src/test/common/test_versioned_variant.cc new file mode 100644 index 00000000000..81f12c23c2b --- /dev/null +++ b/src/test/common/test_versioned_variant.cc @@ -0,0 +1,341 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright contributors to the Ceph project + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#include "common/versioned_variant.h" +#include <bitset> +#include <string> +#include <gtest/gtest.h> + +namespace { + +// type with custom encoding +struct custom_type { + void encode(bufferlist& bl) const { + ENCODE_START(0, 0, bl); + ENCODE_FINISH(bl); + } + void decode(bufferlist::const_iterator& bl) { + DECODE_START(0, bl); + DECODE_FINISH(bl); + } +}; +WRITE_CLASS_ENCODER(custom_type); + +} // anonymous namespace + +namespace ceph { + +TEST(VersionedVariant, Monostate) +{ + using Variant = std::variant<std::monostate>; + bufferlist bl; + { + Variant in; + versioned_variant::encode(in, bl); + } + { + Variant out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + EXPECT_TRUE(std::holds_alternative<std::monostate>(out)); + } +} + +TEST(VersionedVariant, Custom) +{ + using Variant = std::variant<std::monostate, custom_type>; + bufferlist bl; + { + Variant in = custom_type{}; + versioned_variant::encode(in, bl); + } + { + Variant out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + EXPECT_TRUE(std::holds_alternative<custom_type>(out)); + } +} + +TEST(VersionedVariant, DuplicateFirst) +{ + using Variant = std::variant<int, int>; + bufferlist bl; + { + Variant in; + in.emplace<0>(42); + versioned_variant::encode(in, bl); + } + { + Variant out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + ASSERT_EQ(0, out.index()); + EXPECT_EQ(42, std::get<0>(out)); + } +} + +TEST(VersionedVariant, DuplicateSecond) +{ + using Variant = std::variant<int, int>; + bufferlist bl; + { + Variant in; + in.emplace<1>(42); + versioned_variant::encode(in, bl); + } + { + Variant out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + ASSERT_EQ(1, out.index()); + EXPECT_EQ(42, std::get<1>(out)); + } +} + +TEST(VersionedVariant, EncodeOld) +{ + using V1 = std::variant<int>; + using V2 = std::variant<int, std::string>; + + bufferlist bl; + { + // use V1 to encode the initial type + V1 in = 42; + versioned_variant::encode(in, bl); + } + { + // can decode as V1 + V1 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + 
ASSERT_TRUE(std::holds_alternative<int>(out)); + EXPECT_EQ(42, std::get<int>(out)); + } + { + // can also decode as V2 + V2 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + ASSERT_TRUE(std::holds_alternative<int>(out)); + EXPECT_EQ(42, std::get<int>(out)); + } +} + +TEST(VersionedVariant, EncodeExisting) +{ + using V1 = std::variant<int>; + using V2 = std::variant<int, std::string>; + + bufferlist bl; + { + // use V2 to encode the type shared with V1 + V2 in = 42; + versioned_variant::encode(in, bl); + } + { + // can decode as V2 + V2 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + ASSERT_TRUE(std::holds_alternative<int>(out)); + EXPECT_EQ(42, std::get<int>(out)); + } + { + // can also decode as V1 + V1 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + ASSERT_TRUE(std::holds_alternative<int>(out)); + EXPECT_EQ(42, std::get<int>(out)); + } +} + +TEST(VersionedVariant, EncodeNew) +{ + using V1 = std::variant<int>; + using V2 = std::variant<int, std::string>; + + bufferlist bl; + { + // use V2 to encode the new string type + V2 in = "42"; + versioned_variant::encode(in, bl); + } + { + // can decode as V2 + V2 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(versioned_variant::decode(out, p)); + ASSERT_TRUE(std::holds_alternative<std::string>(out)); + EXPECT_EQ("42", std::get<std::string>(out)); + } + { + // can't decode as V1 + V1 out; + auto p = bl.cbegin(); + EXPECT_THROW(versioned_variant::decode(out, p), buffer::malformed_input); + } +} + + +TEST(ConvertedVariant, Custom) +{ + using Variant = std::variant<custom_type>; + bufferlist bl; + { + Variant in = custom_type{}; + converted_variant::encode(in, bl); + } + { + Variant out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(converted_variant::decode(out, p)); + EXPECT_TRUE(std::holds_alternative<custom_type>(out)); + } +} + +TEST(ConvertedVariant, DuplicateFirst) +{ + using Variant = std::variant<custom_type, int, int>; + bufferlist bl; + { + Variant in; + in.emplace<1>(42); + converted_variant::encode(in, bl); + } + { + Variant out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(converted_variant::decode(out, p)); + ASSERT_EQ(1, out.index()); + EXPECT_EQ(42, std::get<1>(out)); + } +} + +TEST(ConvertedVariant, DuplicateSecond) +{ + using Variant = std::variant<custom_type, int, int>; + bufferlist bl; + { + Variant in; + in.emplace<2>(42); + converted_variant::encode(in, bl); + } + { + Variant out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(converted_variant::decode(out, p)); + ASSERT_EQ(2, out.index()); + EXPECT_EQ(42, std::get<2>(out)); + } +} + +TEST(ConvertedVariant, EncodeOld) +{ + using V1 = custom_type; + using V2 = std::variant<custom_type, int>; + + bufferlist bl; + { + // use V1 to encode the initial type + V1 in; + encode(in, bl); + } + { + // can decode as V1 + V1 out; + auto p = bl.cbegin(); + EXPECT_NO_THROW(decode(out, p)); + } + { + // can also decode as V2 + V2 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(converted_variant::decode(out, p)); + EXPECT_TRUE(std::holds_alternative<custom_type>(out)); + } +} + +TEST(ConvertedVariant, EncodeExisting) +{ + using V1 = custom_type; + using V2 = std::variant<custom_type, int>; + + bufferlist bl; + { + // use V2 to encode the type shared with V1 + V2 in; + converted_variant::encode(in, bl); + } + { + // can decode as V2 + V2 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(converted_variant::decode(out, p)); + EXPECT_TRUE(std::holds_alternative<custom_type>(out)); + } + { + // can also 
decode as V1 + V1 out; + auto p = bl.cbegin(); + EXPECT_NO_THROW(decode(out, p)); + } +} + +TEST(ConvertedVariant, EncodeNew) +{ + using V1 = custom_type; + using V2 = std::variant<custom_type, int>; + + bufferlist bl; + { + // use V2 to encode the new type + V2 in = 42; + converted_variant::encode(in, bl); + } + { + // can decode as V2 + V2 out; + auto p = bl.cbegin(); + ASSERT_NO_THROW(converted_variant::decode(out, p)); + ASSERT_TRUE(std::holds_alternative<int>(out)); + EXPECT_EQ(42, std::get<int>(out)); + } + { + // can't decode as V1 + V1 out; + auto p = bl.cbegin(); + EXPECT_THROW(decode(out, p), buffer::malformed_input); + } +} + +TEST(Variant, GenerateTestInstances) +{ + using Variant = std::variant<int, bool, double>; + + std::bitset<std::variant_size_v<Variant>> bits; + ASSERT_TRUE(bits.none()); + + std::list<Variant> instances; + generate_test_instances(instances); + + for (const auto& v : instances) { + bits.set(v.index()); + } + + EXPECT_TRUE(bits.all()); +} + +} // namespace ceph diff --git a/src/test/librbd/CMakeLists.txt b/src/test/librbd/CMakeLists.txt index 0ae29b8bf18..c3f0edbea5d 100644 --- a/src/test/librbd/CMakeLists.txt +++ b/src/test/librbd/CMakeLists.txt @@ -19,7 +19,8 @@ set(librbd_test test_Operations.cc test_Trash.cc journal/test_Entries.cc - journal/test_Replay.cc) + journal/test_Replay.cc + journal/test_Stress.cc) add_library(rbd_test STATIC ${librbd_test}) target_link_libraries(rbd_test PRIVATE rbd_test_support diff --git a/src/test/librbd/io/test_mock_ImageRequest.cc b/src/test/librbd/io/test_mock_ImageRequest.cc index 9d6423d66c4..6ee67fe5f1c 100644 --- a/src/test/librbd/io/test_mock_ImageRequest.cc +++ b/src/test/librbd/io/test_mock_ImageRequest.cc @@ -16,12 +16,15 @@ namespace { struct MockTestImageCtx; struct MockTestJournal : public MockJournal { - MOCK_METHOD4(append_write_event, uint64_t(uint64_t, size_t, + MOCK_METHOD3(append_write_event, uint64_t(const io::Extents&, const bufferlist &, bool)); + MOCK_METHOD3(append_write_same_event, uint64_t(const io::Extents&, + const bufferlist &, bool)); MOCK_METHOD5(append_compare_and_write_event, uint64_t(uint64_t, size_t, const bufferlist &, const bufferlist &, bool)); + MOCK_METHOD3(append_discard_event, uint64_t(const io::Extents&, uint32_t, bool)); MOCK_METHOD5(append_io_event_mock, uint64_t(const journal::EventEntry&, uint64_t, size_t, bool, int)); uint64_t append_io_event(journal::EventEntry &&event_entry, @@ -119,9 +122,10 @@ struct TestMockIoImageRequest : public TestMockFixture { } } - void expect_journal_append_io_event(MockTestJournal &mock_journal, uint64_t journal_tid, - uint64_t offset, size_t length) { - EXPECT_CALL(mock_journal, append_io_event_mock(_, offset, length, _, _)) + void expect_journal_append_discard_event(MockTestJournal &mock_journal, + uint64_t journal_tid, + const io::Extents& extents) { + EXPECT_CALL(mock_journal, append_discard_event(extents, _, _)) .WillOnce(Return(journal_tid)); } @@ -386,8 +390,8 @@ TEST_F(TestMockIoImageRequest, PartialDiscardJournalAppendEnabled) { InSequence seq; expect_get_modify_timestamp(mock_image_ctx, false); expect_is_journal_appending(mock_journal, true); - expect_journal_append_io_event(mock_journal, 0, 16, 63); - expect_journal_append_io_event(mock_journal, 1, 84, 100); + expect_journal_append_discard_event(mock_journal, 0, + {{16, 63}, {84, 100}}); expect_object_discard_request(mock_image_ctx, 0, 16, 63, 0); expect_object_discard_request(mock_image_ctx, 0, 84, 100, 0); @@ -419,8 +423,8 @@ TEST_F(TestMockIoImageRequest, 
TailDiscardJournalAppendEnabled) { InSequence seq; expect_get_modify_timestamp(mock_image_ctx, false); expect_is_journal_appending(mock_journal, true); - expect_journal_append_io_event( - mock_journal, 0, ictx->layout.object_size - 1024, 1024); + expect_journal_append_discard_event( + mock_journal, 0, {{ictx->layout.object_size - 1024, 1024}}); expect_object_discard_request( mock_image_ctx, 0, ictx->layout.object_size - 1024, 1024, 0); @@ -452,7 +456,7 @@ TEST_F(TestMockIoImageRequest, PruneRequiredDiscardJournalAppendEnabled) { InSequence seq; expect_get_modify_timestamp(mock_image_ctx, false); expect_is_journal_appending(mock_journal, true); - EXPECT_CALL(mock_journal, append_io_event_mock(_, _, _, _, _)).Times(0); + EXPECT_CALL(mock_journal, append_discard_event(_, _, _)).Times(0); EXPECT_CALL(*mock_image_ctx.io_object_dispatcher, send(_)).Times(0); C_SaferCond aio_comp_ctx; @@ -482,7 +486,7 @@ TEST_F(TestMockIoImageRequest, LengthModifiedDiscardJournalAppendEnabled) { InSequence seq; expect_get_modify_timestamp(mock_image_ctx, false); expect_is_journal_appending(mock_journal, true); - expect_journal_append_io_event(mock_journal, 0, 32, 32); + expect_journal_append_discard_event(mock_journal, 0, {{32, 32}}); expect_object_discard_request(mock_image_ctx, 0, 32, 32, 0); C_SaferCond aio_comp_ctx; @@ -513,10 +517,9 @@ TEST_F(TestMockIoImageRequest, DiscardGranularityJournalAppendEnabled) { InSequence seq; expect_get_modify_timestamp(mock_image_ctx, false); expect_is_journal_appending(mock_journal, true); - expect_journal_append_io_event(mock_journal, 0, 32, 32); - expect_journal_append_io_event(mock_journal, 1, 96, 64); - expect_journal_append_io_event( - mock_journal, 2, ictx->layout.object_size - 32, 32); + expect_journal_append_discard_event( + mock_journal, 0, + {{32, 32}, {96, 64}, {ictx->layout.object_size - 32, 32}}); expect_object_discard_request(mock_image_ctx, 0, 32, 32, 0); expect_object_discard_request(mock_image_ctx, 0, 96, 64, 0); expect_object_discard_request( diff --git a/src/test/librbd/io/test_mock_ObjectRequest.cc b/src/test/librbd/io/test_mock_ObjectRequest.cc index 0690b7722a0..c20c825018b 100644 --- a/src/test/librbd/io/test_mock_ObjectRequest.cc +++ b/src/test/librbd/io/test_mock_ObjectRequest.cc @@ -1926,7 +1926,7 @@ TEST_F(TestMockIoObjectRequest, ListSnapsWholeObject) { ASSERT_EQ(0, open_image(m_image_name, &ictx)); MockTestImageCtx mock_image_ctx(*ictx); - mock_image_ctx.parent = &mock_image_ctx; + mock_image_ctx.snaps = {3}; InSequence seq; @@ -1937,13 +1937,120 @@ TEST_F(TestMockIoObjectRequest, ListSnapsWholeObject) { clone_info.cloneid = 3; clone_info.snaps = {3}; clone_info.overlap = std::vector<std::pair<uint64_t,uint64_t>>{{0, 1}}; - clone_info.size = 4194304; + clone_info.size = mock_image_ctx.layout.object_size; snap_set.clones.push_back(clone_info); clone_info.cloneid = CEPH_NOSNAP; clone_info.snaps = {}; clone_info.overlap = {}; - clone_info.size = 4194304; + clone_info.size = mock_image_ctx.layout.object_size; + snap_set.clones.push_back(clone_info); + + expect_list_snaps(mock_image_ctx, snap_set, 0); + + { + SnapshotDelta snapshot_delta; + C_SaferCond ctx; + auto req = MockObjectListSnapsRequest::create( + &mock_image_ctx, 0, {{0, mock_image_ctx.layout.object_size - 1}}, + {3, CEPH_NOSNAP}, 0, {}, &snapshot_delta, &ctx); + req->send(); + ASSERT_EQ(0, ctx.wait()); + + SnapshotDelta expected_snapshot_delta; + expected_snapshot_delta[{CEPH_NOSNAP,CEPH_NOSNAP}].insert( + 1, mock_image_ctx.layout.object_size - 2, + {SPARSE_EXTENT_STATE_DATA, 
mock_image_ctx.layout.object_size - 2}); + EXPECT_EQ(expected_snapshot_delta, snapshot_delta); + } + + expect_list_snaps(mock_image_ctx, snap_set, 0); + + { + SnapshotDelta snapshot_delta; + C_SaferCond ctx; + auto req = MockObjectListSnapsRequest::create( + &mock_image_ctx, 0, {{0, mock_image_ctx.layout.object_size - 1}}, + {3, CEPH_NOSNAP}, LIST_SNAPS_FLAG_WHOLE_OBJECT, {}, &snapshot_delta, + &ctx); + req->send(); + ASSERT_EQ(0, ctx.wait()); + + SnapshotDelta expected_snapshot_delta; + expected_snapshot_delta[{CEPH_NOSNAP,CEPH_NOSNAP}].insert( + 0, mock_image_ctx.layout.object_size - 1, + {SPARSE_EXTENT_STATE_DATA, mock_image_ctx.layout.object_size - 1}); + EXPECT_EQ(expected_snapshot_delta, snapshot_delta); + } +} + +TEST_F(TestMockIoObjectRequest, ListSnapsWholeObjectEndSize) { + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(m_image_name, &ictx)); + + MockTestImageCtx mock_image_ctx(*ictx); + mock_image_ctx.snaps = {3}; + + InSequence seq; + + librados::snap_set_t snap_set; + snap_set.seq = 3; + librados::clone_info_t clone_info; + + clone_info.cloneid = CEPH_NOSNAP; + clone_info.snaps = {}; + clone_info.overlap = {}; + // smaller than object extent (i.e. the op) to test end_size handling + clone_info.size = mock_image_ctx.layout.object_size - 2; + snap_set.clones.push_back(clone_info); + + expect_list_snaps(mock_image_ctx, snap_set, 0); + + { + SnapshotDelta snapshot_delta; + C_SaferCond ctx; + auto req = MockObjectListSnapsRequest::create( + &mock_image_ctx, 0, {{0, mock_image_ctx.layout.object_size - 1}}, + {4, CEPH_NOSNAP}, 0, {}, &snapshot_delta, &ctx); + req->send(); + ASSERT_EQ(0, ctx.wait()); + + EXPECT_TRUE(snapshot_delta.empty()); + } + + expect_list_snaps(mock_image_ctx, snap_set, 0); + + { + SnapshotDelta snapshot_delta; + C_SaferCond ctx; + auto req = MockObjectListSnapsRequest::create( + &mock_image_ctx, 0, {{0, mock_image_ctx.layout.object_size - 1}}, + {4, CEPH_NOSNAP}, LIST_SNAPS_FLAG_WHOLE_OBJECT, {}, &snapshot_delta, + &ctx); + req->send(); + ASSERT_EQ(0, ctx.wait()); + + EXPECT_TRUE(snapshot_delta.empty()); + } +} + +TEST_F(TestMockIoObjectRequest, ListSnapsNoSnapsInSnapSet) { + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(m_image_name, &ictx)); + + MockTestImageCtx mock_image_ctx(*ictx); + mock_image_ctx.snaps = {3}; + + InSequence seq; + + librados::snap_set_t snap_set; + snap_set.seq = 3; + librados::clone_info_t clone_info; + + clone_info.cloneid = 3; + clone_info.snaps = {}; + clone_info.overlap = {}; + clone_info.size = 0; snap_set.clones.push_back(clone_info); expect_list_snaps(mock_image_ctx, snap_set, 0); @@ -1960,7 +2067,7 @@ TEST_F(TestMockIoObjectRequest, ListSnapsWholeObject) { expected_snapshot_delta[{CEPH_NOSNAP,CEPH_NOSNAP}].insert( 0, mock_image_ctx.layout.object_size - 1, {SPARSE_EXTENT_STATE_DATA, mock_image_ctx.layout.object_size - 1}); - ASSERT_EQ(expected_snapshot_delta, snapshot_delta); + EXPECT_EQ(expected_snapshot_delta, snapshot_delta); } } // namespace io diff --git a/src/test/librbd/journal/test_Entries.cc b/src/test/librbd/journal/test_Entries.cc index c392fb9f88a..bb4b06c0368 100644 --- a/src/test/librbd/journal/test_Entries.cc +++ b/src/test/librbd/journal/test_Entries.cc @@ -196,6 +196,69 @@ TEST_F(TestJournalEntries, AioDiscard) { ASSERT_EQ(234U, aio_discard_event.length); } +TEST_F(TestJournalEntries, AioDiscardWithPrune) { + REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); + + // The discard path can create multiple image extents (ImageRequest.cc) in the + // case where the discard request needs to be pruned and multiple objects 
are + // involved in the request. This test ensures that journal event entries are + // queued up for each image extent. + + // Create an image that is multiple objects so that we can force multiple + // image extents on the discard path. + CephContext* cct = reinterpret_cast<CephContext*>(_rados.cct()); + auto object_size = 1ull << cct->_conf.get_val<uint64_t>("rbd_default_order"); + auto image_size = 4 * object_size; + + auto image_name = get_temp_image_name(); + ASSERT_EQ(0, create_image_pp(m_rbd, m_ioctx, image_name, image_size)); + + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(image_name, &ictx)); + + ::journal::Journaler *journaler = create_journaler(ictx); + ASSERT_TRUE(journaler != NULL); + + C_SaferCond cond_ctx; + auto c = librbd::io::AioCompletion::create(&cond_ctx); + c->get(); + // We offset the discard by -4096 bytes and set discard granularity to 8192; + // this should cause two image extents to be formed in + // AbstractImageWriteRequest<I>::send_request(). + api::Io<>::aio_discard(*ictx, c, object_size - 4096, 2 * object_size, 8192, + true); + ASSERT_EQ(0, c->wait_for_complete()); + c->put(); + + for (uint64_t chunk = 0; chunk < 2; chunk++) { + auto offset = object_size; + auto size = object_size; + if (chunk == 1) { + offset = object_size * 2; + size = object_size - 8192; + } + + ::journal::ReplayEntry replay_entry; + if (!journaler->try_pop_front(&replay_entry)) { + ASSERT_TRUE(wait_for_entries_available(ictx)); + ASSERT_TRUE(journaler->try_pop_front(&replay_entry)); + } + + librbd::journal::EventEntry event_entry; + ASSERT_TRUE(get_event_entry(replay_entry, &event_entry)); + + ASSERT_EQ(librbd::journal::EVENT_TYPE_AIO_DISCARD, + event_entry.get_event_type()); + + librbd::journal::AioDiscardEvent aio_discard_event = + boost::get<librbd::journal::AioDiscardEvent>(event_entry.event); + ASSERT_EQ(offset, aio_discard_event.offset); + ASSERT_EQ(size, aio_discard_event.length); + + journaler->committed(replay_entry); + } +} + TEST_F(TestJournalEntries, AioFlush) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); diff --git a/src/test/librbd/journal/test_Stress.cc b/src/test/librbd/journal/test_Stress.cc new file mode 100644 index 00000000000..d3df9147ae6 --- /dev/null +++ b/src/test/librbd/journal/test_Stress.cc @@ -0,0 +1,121 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "test/librados/test_cxx.h" +#include "test/librbd/test_fixture.h" +#include "test/librbd/test_support.h" +#include "cls/rbd/cls_rbd_types.h" +#include "cls/journal/cls_journal_types.h" +#include "cls/journal/cls_journal_client.h" +#include "journal/Journaler.h" +#include "librbd/ExclusiveLock.h" +#include "librbd/ImageCtx.h" +#include "librbd/ImageState.h" +#include "librbd/ImageWatcher.h" +#include "librbd/internal.h" +#include "librbd/Journal.h" +#include "librbd/Operations.h" +#include "librbd/api/Io.h" +#include "librbd/api/Snapshot.h" +#include "librbd/io/AioCompletion.h" +#include "librbd/io/ImageDispatchSpec.h" +#include "librbd/io/ImageRequest.h" +#include "librbd/io/ReadResult.h" +#include "librbd/journal/Types.h" +#include <boost/scope_exit.hpp> + +void register_test_journal_stress() { +} + +namespace librbd { +namespace journal { + +class TestJournalStress : public TestFixture { +}; + +TEST_F(TestJournalStress, DiscardWithPruneWriteOverlap) { + REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); + + // Overlap discards and writes while discard pruning is occurring. 
This tests + // the conditions under which https://tracker.ceph.com/issues/63422 occurred. + + // Create an image that is multiple objects so that we can force multiple + // image extents on the discard path. + int order = 22; + auto object_size = uint64_t{1} << order; + auto image_size = 4 * object_size; + + // Write-around cache required for overlapping I/O delays. + std::map<std::string, std::string> config; + config["rbd_cache"] = "true"; + config["rbd_cache_policy"] = "writearound"; + config["rbd_cache_max_dirty"] = std::to_string(image_size); + config["rbd_cache_writethrough_until_flush"] = "false"; + // XXX: Work around https://tracker.ceph.com/issues/63681, which this test + // exposes when run under Valgrind. + config["librados_thread_count"] = "15"; + + librados::Rados rados; + ASSERT_EQ("", connect_cluster_pp(rados, config)); + + librados::IoCtx ioctx; + ASSERT_EQ(0, rados.ioctx_create(_pool_name.c_str(), ioctx)); + + uint64_t features; + ASSERT_TRUE(::get_features(&features)); + auto image_name = get_temp_image_name(); + ASSERT_EQ(0, create_image_full_pp(m_rbd, ioctx, image_name, image_size, + features, false, &order)); + + auto ictx = new librbd::ImageCtx(image_name, "", nullptr, ioctx, false); + ASSERT_EQ(0, ictx->state->open(0)); + BOOST_SCOPE_EXIT(ictx) { + ictx->state->close(); + } BOOST_SCOPE_EXIT_END; + + std::thread write_thread( + [ictx, object_size]() { + std::string payload(object_size, '1'); + + for (auto i = 0; i < 200; i++) { + // Alternate overlaps with the two objects that the discard below + // touches. + for (auto offset = object_size; + offset < object_size * 3; + offset += object_size) { + bufferlist payload_bl; + payload_bl.append(payload); + auto aio_comp = new librbd::io::AioCompletion(); + api::Io<>::aio_write(*ictx, aio_comp, offset, payload.size(), + std::move(payload_bl), 0, true); + ASSERT_EQ(0, aio_comp->wait_for_complete()); + aio_comp->release(); + } + } + } + ); + + auto discard_exit = false; + std::thread discard_thread( + [ictx, object_size, &discard_exit]() { + while (!discard_exit) { + // We offset the discard by -4096 bytes and set discard granularity to + // 8192; this should cause two image extents to be formed in + // AbstractImageWriteRequest<I>::send_request() on objects 1 and 2, + // overlapping with the writes above. 
+ auto aio_comp = new librbd::io::AioCompletion(); + api::Io<>::aio_discard(*ictx, aio_comp, object_size - 4096, + 2 * object_size, 8192, true); + ASSERT_EQ(0, aio_comp->wait_for_complete()); + aio_comp->release(); + } + } + ); + + write_thread.join(); + discard_exit = true; + discard_thread.join(); +} + +} // namespace journal +} // namespace librbd diff --git a/src/test/librbd/test_librbd.cc b/src/test/librbd/test_librbd.cc index f4bb74fe397..9d17c4d85cf 100644 --- a/src/test/librbd/test_librbd.cc +++ b/src/test/librbd/test_librbd.cc @@ -7358,61 +7358,6 @@ interval_set<uint64_t> round_diff_interval(const interval_set<uint64_t>& diff, return rounded_diff; } -TEST_F(TestLibRBD, SnapDiff) -{ - REQUIRE_FEATURE(RBD_FEATURE_FAST_DIFF); - - rados_ioctx_t ioctx; - rados_ioctx_create(_cluster, m_pool_name.c_str(), &ioctx); - - rbd_image_t image; - int order = 0; - std::string image_name = get_temp_image_name(); - uint64_t size = 100 << 20; - ASSERT_EQ(0, create_image(ioctx, image_name.c_str(), size, &order)); - ASSERT_EQ(0, rbd_open(ioctx, image_name.c_str(), &image, nullptr)); - - char test_data[TEST_IO_SIZE + 1]; - for (size_t i = 0; i < TEST_IO_SIZE; ++i) { - test_data[i] = (char) (rand() % (126 - 33) + 33); - } - test_data[TEST_IO_SIZE] = '\0'; - - ASSERT_PASSED(write_test_data, image, test_data, 0, - TEST_IO_SIZE, LIBRADOS_OP_FLAG_FADVISE_NOCACHE); - - interval_set<uint64_t> diff; - ASSERT_EQ(0, rbd_diff_iterate2(image, nullptr, 0, size, true, true, - iterate_cb, &diff)); - EXPECT_EQ(1 << order, diff.size()); - - ASSERT_EQ(0, rbd_snap_create(image, "snap1")); - ASSERT_EQ(0, rbd_snap_create(image, "snap2")); - - diff.clear(); - ASSERT_EQ(0, rbd_diff_iterate2(image, nullptr, 0, size, true, true, - iterate_cb, &diff)); - EXPECT_EQ(1 << order, diff.size()); - - diff.clear(); - ASSERT_EQ(0, rbd_diff_iterate2(image, "snap1", 0, size, true, true, - iterate_cb, &diff)); - EXPECT_EQ(0, diff.size()); - - diff.clear(); - ASSERT_EQ(0, rbd_diff_iterate2(image, "snap2", 0, size, true, true, - iterate_cb, &diff)); - EXPECT_EQ(0, diff.size()); - - ASSERT_EQ(0, rbd_snap_remove(image, "snap1")); - ASSERT_EQ(0, rbd_snap_remove(image, "snap2")); - - ASSERT_EQ(0, rbd_close(image)); - ASSERT_EQ(0, rbd_remove(ioctx, image_name.c_str())); - - rados_ioctx_destroy(ioctx); -} - template <typename T> class DiffIterateTest : public TestLibRBD { public: @@ -7501,12 +7446,276 @@ ostream& operator<<(ostream & o, const diff_extent& e) { int vector_iterate_cb(uint64_t off, size_t len, int exists, void *arg) { - cout << "iterate_cb " << off << "~" << len << std::endl; + //cout << "iterate_cb " << off << "~" << len << std::endl; vector<diff_extent> *diff = static_cast<vector<diff_extent> *>(arg); diff->push_back(diff_extent(off, len, exists, 0)); return 0; } +TYPED_TEST(DiffIterateTest, DiffIterateDeterministic) +{ + REQUIRE(!is_feature_enabled(RBD_FEATURE_STRIPINGV2)); + + rados_ioctx_t ioctx; + ASSERT_EQ(0, rados_ioctx_create(this->_cluster, this->m_pool_name.c_str(), + &ioctx)); + + rbd_image_t image; + int order = 22; + std::string name = this->get_temp_image_name(); + uint64_t size = 20 << 20; + + ASSERT_EQ(0, create_image(ioctx, name.c_str(), size, &order)); + ASSERT_EQ(0, rbd_open(ioctx, name.c_str(), &image, NULL)); + + uint64_t object_size = 0; + if (this->whole_object) { + object_size = 1 << order; + } + + std::vector<diff_extent> extents; + ASSERT_EQ(0, rbd_diff_iterate2(image, NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(0u, extents.size()); + + ASSERT_EQ(0, 
rbd_snap_create(image, "snap1")); + + std::string buf(256, '1'); + ASSERT_EQ(256, rbd_write(image, 0, 256, buf.data())); + ASSERT_EQ(0, rbd_diff_iterate2(image, NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + extents.clear(); + + ASSERT_EQ(0, rbd_snap_create(image, "snap2")); + + ASSERT_EQ(256, rbd_write(image, 1 << order, 256, buf.data())); + ASSERT_EQ(0, rbd_diff_iterate2(image, NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + ASSERT_EQ(0, rbd_snap_create(image, "snap3")); + + // 1. beginning of time -> HEAD + ASSERT_EQ(0, rbd_diff_iterate2(image, NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 2. snap1 -> HEAD + ASSERT_EQ(0, rbd_diff_iterate2(image, "snap1", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 3. snap2 -> HEAD + ASSERT_EQ(0, rbd_diff_iterate2(image, "snap2", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[0]); + extents.clear(); + + // 4. snap3 -> HEAD + ASSERT_EQ(0, rbd_diff_iterate2(image, "snap3", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(0u, extents.size()); + + ASSERT_PASSED(this->validate_object_map, image); + ASSERT_EQ(0, rbd_snap_set(image, "snap3")); + + // 5. beginning of time -> snap3 + ASSERT_EQ(0, rbd_diff_iterate2(image, NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 6. snap1 -> snap3 + ASSERT_EQ(0, rbd_diff_iterate2(image, "snap1", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 7. snap2 -> snap3 + ASSERT_EQ(0, rbd_diff_iterate2(image, "snap2", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[0]); + extents.clear(); + + ASSERT_PASSED(this->validate_object_map, image); + ASSERT_EQ(0, rbd_snap_set(image, "snap2")); + + // 8. beginning of time -> snap2 + ASSERT_EQ(0, rbd_diff_iterate2(image, NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + extents.clear(); + + // 9. 
snap1 -> snap2 + ASSERT_EQ(0, rbd_diff_iterate2(image, "snap1", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + extents.clear(); + + ASSERT_PASSED(this->validate_object_map, image); + ASSERT_EQ(0, rbd_snap_set(image, "snap1")); + + // 10. beginning of time -> snap1 + ASSERT_EQ(0, rbd_diff_iterate2(image, NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(0u, extents.size()); + + ASSERT_PASSED(this->validate_object_map, image); + + ASSERT_EQ(0, rbd_close(image)); + rados_ioctx_destroy(ioctx); +} + +TYPED_TEST(DiffIterateTest, DiffIterateDeterministicPP) +{ + REQUIRE(!is_feature_enabled(RBD_FEATURE_STRIPINGV2)); + + librados::IoCtx ioctx; + ASSERT_EQ(0, this->_rados.ioctx_create(this->m_pool_name.c_str(), ioctx)); + + librbd::RBD rbd; + librbd::Image image; + int order = 22; + std::string name = this->get_temp_image_name(); + uint64_t size = 20 << 20; + + ASSERT_EQ(0, create_image_pp(rbd, ioctx, name.c_str(), size, &order)); + ASSERT_EQ(0, rbd.open(ioctx, image, name.c_str(), NULL)); + + uint64_t object_size = 0; + if (this->whole_object) { + object_size = 1 << order; + } + + std::vector<diff_extent> extents; + ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(0u, extents.size()); + + ASSERT_EQ(0, image.snap_create("snap1")); + + ceph::bufferlist bl; + bl.append(std::string(256, '1')); + ASSERT_EQ(256, image.write(0, 256, bl)); + ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + extents.clear(); + + ASSERT_EQ(0, image.snap_create("snap2")); + + ASSERT_EQ(256, image.write(1 << order, 256, bl)); + ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + ASSERT_EQ(0, image.snap_create("snap3")); + + // 1. beginning of time -> HEAD + ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 2. snap1 -> HEAD + ASSERT_EQ(0, image.diff_iterate2("snap1", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 3. snap2 -> HEAD + ASSERT_EQ(0, image.diff_iterate2("snap2", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[0]); + extents.clear(); + + // 4. snap3 -> HEAD + ASSERT_EQ(0, image.diff_iterate2("snap3", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(0u, extents.size()); + + ASSERT_PASSED(this->validate_object_map, image); + ASSERT_EQ(0, image.snap_set("snap3")); + + // 5. 
beginning of time -> snap3 + ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 6. snap1 -> snap3 + ASSERT_EQ(0, image.diff_iterate2("snap1", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(2u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[1]); + extents.clear(); + + // 7. snap2 -> snap3 + ASSERT_EQ(0, image.diff_iterate2("snap2", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(1 << order, 256, true, object_size), extents[0]); + extents.clear(); + + ASSERT_PASSED(this->validate_object_map, image); + ASSERT_EQ(0, image.snap_set("snap2")); + + // 8. beginning of time -> snap2 + ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + extents.clear(); + + // 9. snap1 -> snap2 + ASSERT_EQ(0, image.diff_iterate2("snap1", 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(1u, extents.size()); + ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); + extents.clear(); + + ASSERT_PASSED(this->validate_object_map, image); + ASSERT_EQ(0, image.snap_set("snap1")); + + // 10. beginning of time -> snap1 + ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, + vector_iterate_cb, &extents)); + ASSERT_EQ(0u, extents.size()); + + ASSERT_PASSED(this->validate_object_map, image); +} + TYPED_TEST(DiffIterateTest, DiffIterateDiscard) { librados::IoCtx ioctx; @@ -7655,50 +7864,6 @@ TYPED_TEST(DiffIterateTest, DiffIterateStress) ASSERT_PASSED(this->validate_object_map, image); } -TYPED_TEST(DiffIterateTest, DiffIterateRegression6926) -{ - librados::IoCtx ioctx; - ASSERT_EQ(0, this->_rados.ioctx_create(this->m_pool_name.c_str(), ioctx)); - - librbd::RBD rbd; - librbd::Image image; - int order = 0; - std::string name = this->get_temp_image_name(); - uint64_t size = 20 << 20; - - ASSERT_EQ(0, create_image_pp(rbd, ioctx, name.c_str(), size, &order)); - ASSERT_EQ(0, rbd.open(ioctx, image, name.c_str(), NULL)); - - uint64_t object_size = 0; - if (this->whole_object) { - object_size = 1 << order; - } - vector<diff_extent> extents; - ceph::bufferlist bl; - - ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, - vector_iterate_cb, (void *) &extents)); - ASSERT_EQ(0u, extents.size()); - - ASSERT_EQ(0, image.snap_create("snap1")); - char data[256]; - memset(data, 1, sizeof(data)); - bl.append(data, 256); - ASSERT_EQ(256, image.write(0, 256, bl)); - - extents.clear(); - ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, - vector_iterate_cb, (void *) &extents)); - ASSERT_EQ(1u, extents.size()); - ASSERT_EQ(diff_extent(0, 256, true, object_size), extents[0]); - - ASSERT_EQ(0, image.snap_set("snap1")); - extents.clear(); - ASSERT_EQ(0, image.diff_iterate2(NULL, 0, size, true, this->whole_object, - vector_iterate_cb, (void *) &extents)); - ASSERT_EQ(static_cast<size_t>(0), extents.size()); -} - TYPED_TEST(DiffIterateTest, DiffIterateParent) { REQUIRE_FEATURE(RBD_FEATURE_LAYERING); diff --git 
a/src/test/librbd/test_main.cc b/src/test/librbd/test_main.cc index 2ff9f69dea9..82b72b1ef7e 100644 --- a/src/test/librbd/test_main.cc +++ b/src/test/librbd/test_main.cc @@ -17,6 +17,7 @@ extern void register_test_image_watcher(); extern void register_test_internal(); extern void register_test_journal_entries(); extern void register_test_journal_replay(); +extern void register_test_journal_stress(); extern void register_test_migration(); extern void register_test_mirroring(); extern void register_test_mirroring_watcher(); @@ -37,6 +38,7 @@ int main(int argc, char **argv) register_test_internal(); register_test_journal_entries(); register_test_journal_replay(); + register_test_journal_stress(); register_test_migration(); register_test_mirroring(); register_test_mirroring_watcher(); diff --git a/src/test/librbd/test_mock_Journal.cc b/src/test/librbd/test_mock_Journal.cc index 2fe74d2fe46..589695c50b3 100644 --- a/src/test/librbd/test_mock_Journal.cc +++ b/src/test/librbd/test_mock_Journal.cc @@ -460,7 +460,7 @@ public: bl.append_zero(length); std::shared_lock owner_locker{mock_image_ctx.owner_lock}; - return mock_journal->append_write_event(0, length, bl, false); + return mock_journal->append_write_event({{0, length}}, bl, false); } uint64_t when_append_compare_and_write_event( diff --git a/src/test/objectstore/Allocator_test.cc b/src/test/objectstore/Allocator_test.cc index 8204179b531..0e76c479002 100644 --- a/src/test/objectstore/Allocator_test.cc +++ b/src/test/objectstore/Allocator_test.cc @@ -587,8 +587,7 @@ TEST_P(AllocTest, test_alloc_47883) PExtentVector extents; auto need = 0x3f980000; auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents); - EXPECT_GT(got, 0); - EXPECT_EQ(got, 0x630000); + EXPECT_GE(got, 0x630000); } TEST_P(AllocTest, test_alloc_50656_best_fit) diff --git a/src/test/objectstore/fastbmap_allocator_test.cc b/src/test/objectstore/fastbmap_allocator_test.cc index c5953198505..710b3798f7a 100644 --- a/src/test/objectstore/fastbmap_allocator_test.cc +++ b/src/test/objectstore/fastbmap_allocator_test.cc @@ -625,6 +625,8 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); { + // Original free space disposition (start chunk, count): + // <NC/2, NC/2> size_t to_release = 2 * _1m + 0x1000; // release 2M + 4K at the beginning interval_vector_t r; @@ -637,6 +639,8 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <0, 513>, <NC / 2, NC / 2> // allocate 4K within the deallocated range uint64_t allocated4 = 0; interval_vector_t a4; @@ -652,79 +656,91 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { - // allocate 1M - should go to the second 1M chunk + // Original free space disposition (start chunk, count): + // <1, 512>, <NC / 2, NC / 2> + // allocate 1M - should go to offset 4096 uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(_1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, _1m); - ASSERT_EQ(a4[0].offset, _1m); + ASSERT_EQ(a4[0].offset, 4096); ASSERT_EQ(a4[0].length, _1m); bins_overall.clear(); al2.collect_stats(bins_overall); - ASSERT_EQ(bins_overall.size(), 3u); - ASSERT_EQ(bins_overall[0], 1u); - ASSERT_EQ(bins_overall[cbits((_1m - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); 
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <257, 256>, <NC / 2, NC / 2> // and allocate yet another 8K within the deallocated range uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(0x2000, 0x1000, &allocated4, &a4); ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(allocated4, 0x2000u); - ASSERT_EQ(a4[0].offset, 0x1000u); + ASSERT_EQ(a4[0].offset, _1m + 0x1000u); ASSERT_EQ(a4[0].length, 0x2000u); bins_overall.clear(); al2.collect_stats(bins_overall); - ASSERT_EQ(bins_overall[0], 1u); - ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { - // release just allocated 1M + // Original free space disposition (start chunk, count): + // <259, 254>, <NC / 2, NC / 2> + // release 4K~1M interval_vector_t r; - r.emplace_back(_1m, _1m); + r.emplace_back(0x1000, _1m); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); - ASSERT_EQ(bins_overall.size(), 2u); - ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall.size(), 3u); + //ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { - // allocate 3M - should go to the second 1M chunk and @capacity/2 + // Original free space disposition (start chunk, count): + // <1, 257>, <259, 254>, <NC / 2, NC / 2> + // allocate 3M - should go to the first 1M chunk and @capacity/2 uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(3 * _1m, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 2u); ASSERT_EQ(allocated4, 3 * _1m); - ASSERT_EQ(a4[0].offset, _1m); + ASSERT_EQ(a4[0].offset, 0x1000); ASSERT_EQ(a4[0].length, _1m); ASSERT_EQ(a4[1].offset, capacity / 2); ASSERT_EQ(a4[1].length, 2 * _1m); bins_overall.clear(); al2.collect_stats(bins_overall); - ASSERT_EQ(bins_overall.size(), 3u); - ASSERT_EQ(bins_overall[0], 1u); - ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u); } { - // release allocated 1M in the second meg chunk except + // Original free space disposition (start chunk, count): + // <259, 254>, <NC / 2 - 512, NC / 2 - 512> + // release allocated 1M in the first meg chunk except // the first 4K chunk interval_vector_t r; - r.emplace_back(_1m + 0x1000, _1m); + r.emplace_back(0x1000, _1m); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); - ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <1, 256>, <259, 254>, <NC / 2 - 512, NC / 2 - 512> // release 2M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 2 * _1m); @@ -733,10 +749,12 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); - 
ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks) / 2) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <1, 256>, <259, 254>, <NC / 2, NC / 2> // allocate 4x512K - should go to the second halves of // the first and second 1M chunks and @(capacity / 2) uint64_t allocated4 = 0; @@ -744,51 +762,54 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) al2.allocate_l2(2 * _1m, _1m / 2, &allocated4, &a4); ASSERT_EQ(a4.size(), 3u); ASSERT_EQ(allocated4, 2 * _1m); - ASSERT_EQ(a4[0].offset, _1m / 2); + ASSERT_EQ(a4[1].offset, 0x1000); + ASSERT_EQ(a4[1].length, _1m); + ASSERT_EQ(a4[0].offset, _1m + 0x3000); ASSERT_EQ(a4[0].length, _1m / 2); - ASSERT_EQ(a4[1].offset, _1m + _1m / 2); - ASSERT_EQ(a4[1].length, _1m / 2); ASSERT_EQ(a4[2].offset, capacity / 2); - ASSERT_EQ(a4[2].length, _1m); + ASSERT_EQ(a4[2].length, _1m / 2); bins_overall.clear(); al2.collect_stats(bins_overall); - ASSERT_EQ(bins_overall.size(), 3u); - ASSERT_EQ(bins_overall[0], 1u); - // below we have 512K - 4K & 512K - 12K chunks which both fit into - // the same bin = 6 - ASSERT_EQ(bins_overall[6], 2u); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u); } { - // cleanup first 2M except except the last 4K chunk + // Original free space disposition (start chunk, count): + // <387, 126>, <NC / 2 + 128, NC / 2 - 128> + // cleanup first 1536K except the last 4K chunk interval_vector_t r; - r.emplace_back(0, 2 * _1m - 0x1000); + r.emplace_back(0, _1m + _1m / 2 - 0x1000); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); - ASSERT_EQ(bins_overall[0], 1u); - ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u); } { - // release 2M @(capacity / 2) + // Original free space disposition (start chunk, count): + // <0, 383> <387, 126>, <NC / 2 + 128, NC / 2 - 128> + // release 512K @(capacity / 2) interval_vector_t r; - r.emplace_back(capacity / 2, 2 * _1m); + r.emplace_back(capacity / 2, _1m / 2); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); - ASSERT_EQ(bins_overall[0], 1u); - ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u); ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { - // allocate 132M using 4M granularity should go to (capacity / 2) + // Original free space disposition (start chunk, count): + // <0, 383> <387, 126>, <NC / 2, NC / 2> + // allocate 132M (=33792*4096) = using 4M granularity should go to (capacity / 2) uint64_t allocated4 = 0; interval_vector_t a4; al2.allocate_l2(132 * _1m, 4 * _1m , &allocated4, &a4); @@ -799,24 +820,40 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u); + 
ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u); } { - // cleanup left 4K chunk in the first 2M + // Original free space disposition (start chunk, count): + // <0, 383> <387, 126>, <NC / 2 + 33792, NC / 2 - 33792> + // cleanup remaining 4*4K chunks in the first 2M interval_vector_t r; - r.emplace_back(2 * _1m - 0x1000, 0x1000); + r.emplace_back(383 * 4096, 4 * 0x1000); al2.free_l2(r); bins_overall.clear(); al2.collect_stats(bins_overall); ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits((2 * _1m + 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <0, 513>, <NC / 2 + 33792, NC / 2 - 33792> // release 132M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 132 * _1m); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits((2 * _1m + 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <0, 513>, <NC / 2, NC / 2> // allocate 132M using 2M granularity should go to the first chunk and to // (capacity / 2) uint64_t allocated4 = 0; @@ -827,14 +864,31 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(a4[0].length, 2 * _1m); ASSERT_EQ(a4[1].offset, capacity / 2); ASSERT_EQ(a4[1].length, 130 * _1m); + + bins_overall.clear(); + al2.collect_stats(bins_overall); + + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits(0)], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <512, 1>, <NC / 2 + 33792, NC / 2 - 33792> // release 130M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 132 * _1m); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits(0)], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { + // Original free space disposition (start chunk, count): + // <512,1>, <NC / 2, NC / 2> // release 4K~16K // release 28K~32K // release 68K~24K @@ -843,21 +897,46 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) r.emplace_back(0x7000, 0x8000); r.emplace_back(0x11000, 0x6000); al2.free_l2(r); + + bins_overall.clear(); + al2.collect_stats(bins_overall); + + ASSERT_EQ(bins_overall.size(), 4u); + ASSERT_EQ(bins_overall[cbits(0)], 1u); + ASSERT_EQ(bins_overall[cbits(0x4000 / 0x1000) - 1], 2u); // accounts both 0x4000 & 0x6000 + ASSERT_EQ(bins_overall[cbits(0x8000 / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { - // allocate 32K using 16K granularity - should bypass the first - // unaligned extent, use the second free extent partially given - // the 16K alignment and then fallback to capacity / 2 + // Original free space disposition (start chunk, count): + // <1, 4>, <7, 8>, <17, 6> <512,1>, <NC / 2, NC / 2> + // allocate 80K using 16K granularity uint64_t allocated4 = 0; interval_vector_t a4; - al2.allocate_l2(0x8000, 0x4000, &allocated4, &a4); - ASSERT_EQ(a4.size(), 2u); - ASSERT_EQ(a4[0].offset, 0x8000u); - ASSERT_EQ(a4[0].length, 0x4000u); - ASSERT_EQ(a4[1].offset, capacity / 2); + al2.allocate_l2(0x14000, 0x4000, &allocated4, &a4); + + ASSERT_EQ(a4.size(), 4); + ASSERT_EQ(a4[1].offset, 0x1000u); ASSERT_EQ(a4[1].length, 0x4000u); - } + ASSERT_EQ(a4[0].offset, 0x7000u); + 
ASSERT_EQ(a4[0].length, 0x8000u); + ASSERT_EQ(a4[2].offset, 0x11000u); + ASSERT_EQ(a4[2].length, 0x4000u); + ASSERT_EQ(a4[3].offset, capacity / 2); + ASSERT_EQ(a4[3].length, 0x4000u); + + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[cbits(0)], 1u); + ASSERT_EQ(bins_overall[cbits(0x2000 / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 1) - 1], 1u); + } + { + // Original free space disposition (start chunk, count): + // <21, 2> <512,1>, <NC / 2 + 1, NC / 2 - 1> + } } std::cout << "Done L2 cont aligned" << std::endl; } @@ -913,7 +992,7 @@ TEST(TestAllocatorLevel01, test_4G_alloc_bug2) al2.allocate_l2(0x3e000000, _1m, &allocated4, &a4); ASSERT_EQ(a4.size(), 2u); ASSERT_EQ(allocated4, 0x3e000000u); - ASSERT_EQ(a4[0].offset, 0x5fed00000u); + ASSERT_EQ(a4[0].offset, 0x5fec30000u); ASSERT_EQ(a4[0].length, 0x1300000u); ASSERT_EQ(a4[1].offset, 0x628000000u); ASSERT_EQ(a4[1].length, 0x3cd00000u); diff --git a/src/test/objectstore/store_test.cc b/src/test/objectstore/store_test.cc index 03dc1a87e1f..25e1721c403 100644 --- a/src/test/objectstore/store_test.cc +++ b/src/test/objectstore/store_test.cc @@ -92,7 +92,23 @@ static bool bl_eq(bufferlist& expected, bufferlist& actual) return false; } +void dump_bluefs_stats() +{ + AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); + ceph_assert(admin_socket); + + ceph::bufferlist in, out; + ostringstream err; + auto r = admin_socket->execute_command( + { "{\"prefix\": \"bluefs stats\"}" }, + in, err, &out); + if (r != 0) { + cerr << "failure querying: " << cpp_strerror(r) << std::endl; + } else { + std::cout << std::string(out.c_str(), out.length()) << std::endl; + } +} template <typename T> int queue_transaction( @@ -9607,9 +9623,9 @@ TEST_P(StoreTestSpecificAUSize, BluestoreRepairSharedBlobTest) { string key; _key_encode_u64(1, &key); bluestore_shared_blob_t sb(1); - sb.ref_map.get(0x2000, block_size); - sb.ref_map.get(0x4000, block_size); - sb.ref_map.get(0x4000, block_size); + sb.ref_map.get(0x822000, block_size); + sb.ref_map.get(0x824000, block_size); + sb.ref_map.get(0x824000, block_size); bufferlist bl; encode(sb, bl); bstore->inject_broken_shared_blob_key(key, bl); @@ -10462,24 +10478,11 @@ void doManySetAttr(ObjectStore* store, std::cout << "done" << std::endl; do_check_fn(store); - AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); - ceph_assert(admin_socket); - - ceph::bufferlist in, out; - ostringstream err; - - auto r = admin_socket->execute_command( - { "{\"prefix\": \"bluefs stats\"}" }, - in, err, &out); - if (r != 0) { - cerr << "failure querying: " << cpp_strerror(r) << std::endl; - } else { - std::cout << std::string(out.c_str(), out.length()) << std::endl; - } + dump_bluefs_stats(); test_obj.shutdown(); } -TEST_P(StoreTestSpecificAUSize, SpilloverTest) { +TEST_P(StoreTestSpecificAUSize, SpilloverLegacyTest) { if (string(GetParam()) != "bluestore") return; if (smr) { @@ -10509,24 +10512,12 @@ TEST_P(StoreTestSpecificAUSize, SpilloverTest) { ceph_assert(bstore); bstore->compact(); const PerfCounters* logger = bstore->get_bluefs_perf_counters(); - //experimentally it was discovered that this case results in 400+MB spillover - //using lower 300MB threshold just to be safe enough - std::cout << "DB used:" << logger->get(l_bluefs_db_used_bytes) << std::endl; - std::cout << "SLOW used:" << logger->get(l_bluefs_slow_used_bytes) << std::endl; - ASSERT_GE(logger->get(l_bluefs_slow_used_bytes), 16 * 1024 * 1024); - - struct 
store_statfs_t statfs; - osd_alert_list_t alerts; - int r = store->statfs(&statfs, &alerts); - ASSERT_EQ(r, 0); - ASSERT_EQ(alerts.count("BLUEFS_SPILLOVER"), 1); - std::cout << "spillover_alert:" << alerts.find("BLUEFS_SPILLOVER")->second - << std::endl; + ASSERT_GT(logger->get(l_bluefs_slow_used_bytes), 0); } ); } -TEST_P(StoreTestSpecificAUSize, SpilloverFixedTest) { +TEST_P(StoreTestSpecificAUSize, SpilloverLegacyFixedByFitToFastTest) { if (string(GetParam()) != "bluestore") return; if (smr) { @@ -10536,8 +10527,15 @@ TEST_P(StoreTestSpecificAUSize, SpilloverFixedTest) { SetVal(g_conf(), "bluestore_block_db_create", "true"); SetVal(g_conf(), "bluestore_block_db_size", "3221225472"); - SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra"); - SetVal(g_conf(), "bluestore_volume_selection_reserved", "1"); // just use non-zero to enable + SetVal(g_conf(), "bluestore_volume_selection_policy", "fit_to_fast"); + // original RocksDB settings used before https://github.com/ceph/ceph/pull/47221/ + // which enable BlueFS spillover. + SetVal(g_conf(), "bluestore_rocksdb_options", + "compression=kNoCompression,max_write_buffer_number=4," + "min_write_buffer_number_to_merge=1,recycle_log_file_num=4," + "write_buffer_size=268435456,writable_file_max_buffer_size=0," + "compaction_readahead_size=2097152,max_background_compactions=2," + "max_total_wal_size=1073741824"); g_conf().apply_changes(nullptr); @@ -10549,12 +10547,28 @@ TEST_P(StoreTestSpecificAUSize, SpilloverFixedTest) { ceph_assert(bstore); bstore->compact(); const PerfCounters* logger = bstore->get_bluefs_perf_counters(); - ASSERT_EQ(0, logger->get(l_bluefs_slow_used_bytes)); + ASSERT_EQ(logger->get(l_bluefs_slow_used_bytes), 0); } ); } -TEST_P(StoreTestSpecificAUSize, SpilloverFixed2Test) { +void do_bluefs_write(BlueFS* _fs, + const char* dirname, + const char* filename, + uint64_t to_write) +{ + BlueFS::FileWriter* h; + ASSERT_EQ(0, _fs->open_for_write(dirname, filename, &h, false)); + uint64_t buf_size = 1ull << 20; + string buf(buf_size, 'a'); + for (uint64_t w = 0; w < to_write; w += buf_size) { + h->append(buf.c_str(), buf_size); + _fs->fsync(h); + } + _fs->close_writer(h); +} + +TEST_P(StoreTestSpecificAUSize, SpilloverTest) { if (string(GetParam()) != "bluestore") return; if (smr) { @@ -10564,27 +10578,31 @@ TEST_P(StoreTestSpecificAUSize, SpilloverFixed2Test) { SetVal(g_conf(), "bluestore_block_db_create", "true"); SetVal(g_conf(), "bluestore_block_db_size", "3221225472"); - SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra"); - //default 2.0 factor results in too high threshold, using less value - // that results in less but still present spillover. 
- SetVal(g_conf(), "bluestore_volume_selection_reserved_factor", "0.5"); + SetVal(g_conf(), "bluestore_volume_selection_policy", "rocksdb_original"); g_conf().apply_changes(nullptr); StartDeferred(65536); - doManySetAttr(store.get(), - [&](ObjectStore* _store) { + BlueStore* bstore = dynamic_cast<BlueStore*> (store.get()); + ceph_assert(bstore); + BlueFS* fs = bstore->get_bluefs(); + do_bluefs_write(fs, "db", "file1", 1ull << 30); // 1GB + do_bluefs_write(fs, "db.slow", "file2", 1ull << 30); // 1 GB - BlueStore* bstore = dynamic_cast<BlueStore*> (_store); - ceph_assert(bstore); - bstore->compact(); - const PerfCounters* logger = bstore->get_bluefs_perf_counters(); - ASSERT_LE(logger->get(l_bluefs_slow_used_bytes), 300 * 1024 * 1024); // see SpilloverTest for 300MB choice rationale - } - ); + dump_bluefs_stats(); + const PerfCounters* logger = bstore->get_bluefs_perf_counters(); + ASSERT_EQ(1ull << 30, logger->get(l_bluefs_slow_used_bytes)); + + struct store_statfs_t statfs; + osd_alert_list_t alerts; + int r = store->statfs(&statfs, &alerts); + ASSERT_EQ(r, 0); + ASSERT_EQ(alerts.count("BLUEFS_SPILLOVER"), 1); + std::cout << "spillover_alert:" << alerts.find("BLUEFS_SPILLOVER")->second + << std::endl; } -TEST_P(StoreTestSpecificAUSize, SpilloverFixed3Test) { +TEST_P(StoreTestSpecificAUSize, SpilloverFixedCompletelyTest) { if (string(GetParam()) != "bluestore") return; if (smr) { @@ -10594,21 +10612,60 @@ TEST_P(StoreTestSpecificAUSize, SpilloverFixed3Test) { SetVal(g_conf(), "bluestore_block_db_create", "true"); SetVal(g_conf(), "bluestore_block_db_size", "3221225472"); - SetVal(g_conf(), "bluestore_volume_selection_policy", "fit_to_fast"); + SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra"); + SetVal(g_conf(), "bluestore_volume_selection_reserved", "1"); // just use non-zero to enable g_conf().apply_changes(nullptr); StartDeferred(65536); - doManySetAttr(store.get(), - [&](ObjectStore* _store) { + BlueStore* bstore = dynamic_cast<BlueStore*> (store.get()); + ceph_assert(bstore); + BlueFS* fs = bstore->get_bluefs(); + do_bluefs_write(fs, "db", "file1", 1ull << 30); // 1GB + do_bluefs_write(fs, "db.slow", "file2", 1ull << 30); // 1 GB - BlueStore* bstore = dynamic_cast<BlueStore*> (_store); - ceph_assert(bstore); - bstore->compact(); - const PerfCounters* logger = bstore->get_bluefs_perf_counters(); - ASSERT_EQ(logger->get(l_bluefs_slow_used_bytes), 0); // reffering to SpilloverFixedTest - } - ); + dump_bluefs_stats(); + const PerfCounters* logger = bstore->get_bluefs_perf_counters(); + ASSERT_EQ(0, logger->get(l_bluefs_slow_used_bytes)); +} + +TEST_P(StoreTestSpecificAUSize, SpilloverFixedPartialTest) { + if (string(GetParam()) != "bluestore") + return; + if (smr) { + cout << "SKIP: (FIXME?) adjust me for smr at some point?" << std::endl; + return; + } + + SetVal(g_conf(), "bluestore_block_db_create", "true"); + SetVal(g_conf(), "bluestore_block_db_size", stringify(3ull << 30).c_str()); + SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra"); + //default 2.0 factor results in too high threshold, using less value + // that results in a reduced but existing spillover. 
+ // + SetVal(g_conf(), "bluestore_volume_selection_reserved_factor", "1"); + + g_conf().apply_changes(nullptr); + + StartDeferred(65536); + BlueStore* bstore = dynamic_cast<BlueStore*> (store.get()); + ceph_assert(bstore); + BlueFS* fs = bstore->get_bluefs(); + do_bluefs_write(fs, "db", "file1", 1ull << 30); // 1 GB + do_bluefs_write(fs, "db.slow", "file2", 1ull << 30); // 1 GB + + dump_bluefs_stats(); + const PerfCounters* logger = bstore->get_bluefs_perf_counters(); + ASSERT_LT(100ull << 20, logger->get(l_bluefs_slow_used_bytes)); + ASSERT_GT(1ull << 30, logger->get(l_bluefs_slow_used_bytes)); + + struct store_statfs_t statfs; + osd_alert_list_t alerts; + int r = store->statfs(&statfs, &alerts); + ASSERT_EQ(r, 0); + ASSERT_EQ(alerts.count("BLUEFS_SPILLOVER"), 1); + std::cout << "spillover_alert:" << alerts.find("BLUEFS_SPILLOVER")->second + << std::endl; } TEST_P(StoreTestSpecificAUSize, Ticket45195Repro) { @@ -10783,19 +10840,7 @@ TEST_P(StoreTestSpecificAUSize, BluefsWriteInSingleDiskEnvTest) { bstore->inject_bluefs_file("db.wal", "store_test_injection_wal", 1 << 20ul); bstore->inject_bluefs_file("db", "store_test_injection_wal", 1 << 20ul); - AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); - ceph_assert(admin_socket); - - ceph::bufferlist in, out; - ostringstream err; - auto r = admin_socket->execute_command( - { "{\"prefix\": \"bluefs stats\"}" }, - in, err, &out); - if (r != 0) { - cerr << "failure querying: " << cpp_strerror(r) << std::endl; - } else { - std::cout << std::string(out.c_str(), out.length()) << std::endl; - } + dump_bluefs_stats(); } TEST_P(StoreTestSpecificAUSize, BluefsWriteInNoWalDiskEnvTest) { @@ -10816,20 +10861,7 @@ TEST_P(StoreTestSpecificAUSize, BluefsWriteInNoWalDiskEnvTest) { bstore->inject_bluefs_file("db.wal", "store_test_injection_wal", 1 << 20ul); bstore->inject_bluefs_file("db", "store_test_injection_wal", 1 << 20ul); - AdminSocket* admin_socket = g_ceph_context->get_admin_socket(); - ceph_assert(admin_socket); - - ceph::bufferlist in, out; - ostringstream err; - auto r = admin_socket->execute_command( - { "{\"prefix\": \"bluefs stats\"}" }, - in, err, &out); - if (r != 0) { - cerr << "failure querying: " << cpp_strerror(r) << std::endl; - } - else { - std::cout << std::string(out.c_str(), out.length()) << std::endl; - } + dump_bluefs_stats(); } TEST_P(StoreTestOmapUpgrade, NoOmapHeader) { @@ -11005,6 +11037,8 @@ int main(int argc, char **argv) { g_ceph_context->_conf.set_val_or_die("bluestore_debug_randomize_serial_transaction", "10"); + g_ceph_context->_conf.set_val_or_die("bluefs_check_volume_selector_on_umount", "true"); + g_ceph_context->_conf.set_val_or_die("bdev_debug_aio", "true"); // specify device size diff --git a/src/test/pybind/test_rbd.py b/src/test/pybind/test_rbd.py index 7b5f31b577a..0ce3c0dd90c 100644 --- a/src/test/pybind/test_rbd.py +++ b/src/test/pybind/test_rbd.py @@ -415,6 +415,18 @@ def test_remove_canceled(tmp_image): assert_raises(OperationCanceled, RBD().remove, ioctx, image_name, on_progress=progress_cb) +def test_remove_with_progress_except(): + create_image() + d = {'received_callback': False} + def progress_cb(current, total): + d['received_callback'] = True + raise Exception() + + # exception is logged and ignored with a Cython warning: + # Exception ignored in: 'rbd.progress_callback' + RBD().remove(ioctx, image_name, on_progress=progress_cb) + eq(True, d['received_callback']) + def test_rename(tmp_image): rbd = RBD() image_name2 = get_temp_image_name() @@ -1251,6 +1263,16 @@ class TestImage(object): 
assert(comp.get_return_value() < 0) eq(sys.getrefcount(comp), 2) + # test3: except case + def cbex(_, buf): + raise KeyError() + + def test3(): + comp = self.image.aio_read(IMG_SIZE, 20, cbex) + comp.wait_for_complete_and_cb() + + assert_raises(KeyError, test3) + def test_aio_write(self): retval = [None] def cb(comp): diff --git a/src/test/rgw/test_rgw_lc.cc b/src/test/rgw/test_rgw_lc.cc index 83a4cac676d..d10b482cbfc 100644 --- a/src/test/rgw/test_rgw_lc.cc +++ b/src/test/rgw/test_rgw_lc.cc @@ -5,7 +5,6 @@ #include "rgw_lc.h" #include "rgw_lc_s3.h" #include <gtest/gtest.h> -//#include <spawn/spawn.hpp> #include <string> #include <vector> #include <stdexcept> @@ -107,3 +106,239 @@ TEST(TestLCFilterInvalidAnd, XMLDoc3) /* check our flags */ ASSERT_EQ(filter.get_flags(), uint32_t(LCFlagType::none)); } + +struct LCWorkTimeTests : ::testing::Test +{ + CephContext* cct; + std::unique_ptr<RGWLC::LCWorker> worker; + + // expects input in the form of "%m/%d/%y %H:%M:%S"; e.g., "01/15/23 23:59:01" + utime_t get_utime_by_date_time_string(const std::string& date_time_str) + { + struct tm tm{}; + struct timespec ts = {0}; + + strptime(date_time_str.c_str(), "%m/%d/%y %H:%M:%S", &tm); + ts.tv_sec = mktime(&tm); + + return utime_t(ts); + } + + // expects a map from input value (date & time string) to expected result (boolean) + void run_should_work_test(const auto& test_values_to_expectations_map) { + for (const auto& [date_time_str, expected_value] : test_values_to_expectations_map) { + auto ut = get_utime_by_date_time_string(date_time_str); + auto should_work = worker->should_work(ut); + + ASSERT_EQ(should_work, expected_value) + << "input time: " << ut + << " expected: " << expected_value + << " should_work: " << should_work + << " work-time-window: " << cct->_conf->rgw_lifecycle_work_time << std::endl; + } + } + + // expects a map from input value (a tuple of date & time strings) to expected result (seconds) + void run_schedule_next_start_time_test(const auto& test_values_to_expectations_map) { + for (const auto& [date_time_str_tuple, expected_value] : test_values_to_expectations_map) { + auto work_started_at = get_utime_by_date_time_string(std::get<0>(date_time_str_tuple)); + auto work_completed_at = get_utime_by_date_time_string(std::get<1>(date_time_str_tuple)); + auto wait_secs_till_next_start = worker->schedule_next_start_time(work_started_at, work_completed_at); + + ASSERT_EQ(wait_secs_till_next_start, expected_value) + << "work_started_at: " << work_started_at + << " work_completed_at: " << work_completed_at + << " expected: " << expected_value + << " wait_secs_till_next_start: " << wait_secs_till_next_start + << " work-time-window: " << cct->_conf->rgw_lifecycle_work_time << std::endl; + } + } + +protected: + + void SetUp() override { + cct = (new CephContext(CEPH_ENTITY_TYPE_ANY))->get(); + + cct->_conf->set_value("rgw_lc_max_wp_worker", 0, 0); // no need to create a real workpool + worker = std::make_unique<RGWLC::LCWorker>(nullptr, cct, nullptr, 0); + } + + void TearDown() override { + worker.reset(); + cct->put(); + } +}; + +TEST_F(LCWorkTimeTests, ShouldWorkDefaultWorkTime) +{ + std::unordered_map<std::string, bool> test_values_to_expectations = { + {"01/01/23 00:00:00", true}, + {"01/01/24 00:00:00", true}, // date is not relevant, but only the time-window + {"01/01/23 00:00:01", true}, + {"01/01/23 03:00:00", true}, + {"01/01/23 05:59:59", true}, + {"01/01/23 06:00:00", true}, + {"01/01/23 06:00:59", true}, // seconds don't matter, but only hours and minutes + {"01/01/23 06:01:00", 
false}, + {"01/01/23 23:59:59", false}, + {"01/02/23 23:59:59", false}, + {"01/01/23 12:00:00", false}, + {"01/01/23 14:00:00", false} + }; + + run_should_work_test(test_values_to_expectations); +} + +TEST_F(LCWorkTimeTests, ShouldWorkCustomWorkTimeEndTimeInTheSameDay) +{ + cct->_conf->rgw_lifecycle_work_time = "14:00-16:00"; + + std::unordered_map<std::string, bool> test_values_to_expectations = { + {"01/01/23 00:00:00", false}, + {"01/01/23 12:00:00", false}, + {"01/01/24 13:59:59", false}, + {"01/01/23 14:00:00", true}, + {"01/01/23 16:00:00", true}, + {"01/01/23 16:00:59", true}, + {"01/01/23 16:01:00", false}, + {"01/01/23 17:00:00", false}, + {"01/01/23 23:59:59", false}, + }; + + run_should_work_test(test_values_to_expectations); +} + +TEST_F(LCWorkTimeTests, ShouldWorkCustomWorkTimeEndTimeInTheSameDay24Hours) +{ + cct->_conf->rgw_lifecycle_work_time = "00:00-23:59"; + + std::unordered_map<std::string, bool> test_values_to_expectations = { + {"01/01/23 23:59:00", true}, + {"01/01/23 23:59:59", true}, + {"01/01/23 00:00:00", true}, + {"01/01/23 00:00:01", true}, + {"01/01/23 00:01:00", true}, + {"01/01/23 01:00:00", true}, + {"01/01/23 12:00:00", true}, + {"01/01/23 17:00:00", true}, + {"01/01/23 23:00:00", true} + }; + + run_should_work_test(test_values_to_expectations); +} + + +TEST_F(LCWorkTimeTests, ShouldWorkCustomWorkTimeEndTimeInTheNextDay) +{ + cct->_conf->rgw_lifecycle_work_time = "14:00-01:00"; + + std::unordered_map<std::string, bool> test_values_to_expectations = { + {"01/01/23 13:59:00", false}, + {"01/01/23 13:59:59", false}, + {"01/01/24 14:00:00", true}, // used-to-fail + {"01/01/24 17:00:00", true}, // used-to-fail + {"01/01/24 23:59:59", true}, // used-to-fail + {"01/01/23 00:00:00", true}, // used-to-fail + {"01/01/23 00:59:59", true}, // used-to-fail + {"01/01/23 01:00:00", true}, // used-to-fail + {"01/01/23 01:00:59", true}, // used-to-fail + {"01/01/23 01:01:00", false}, + {"01/01/23 05:00:00", false}, + {"01/01/23 12:00:00", false}, + {"01/01/23 13:00:00", false} + }; + + run_should_work_test(test_values_to_expectations); +} + +TEST_F(LCWorkTimeTests, ShouldWorkCustomWorkTimeEndTimeInTheNextDay24Hours) +{ + cct->_conf->rgw_lifecycle_work_time = "14:00-13:59"; + + // all of the below cases used-to-fail + std::unordered_map<std::string, bool> test_values_to_expectations = { + {"01/01/23 00:00:00", true}, + {"01/01/23 00:00:01", true}, + {"01/01/23 00:01:00", true}, + {"01/01/24 01:00:00", true}, + {"01/01/24 12:00:00", true}, + {"01/01/24 13:00:00", true}, + {"01/01/24 13:59:00", true}, + {"01/01/24 13:59:59", true}, + {"01/01/23 14:00:00", true}, + {"01/01/23 14:00:01", true}, + {"01/01/23 14:01:00", true}, + {"01/01/23 16:00:00", true}, + {"01/01/23 23:59:00", true}, + {"01/01/23 23:59:59", true}, + }; + + run_should_work_test(test_values_to_expectations); +} + +TEST_F(LCWorkTimeTests, ShouldWorkCustomWorkTimeEndTimeInTheNextDayIrregularMins) +{ + cct->_conf->rgw_lifecycle_work_time = "22:15-03:33"; + + std::unordered_map<std::string, bool> test_values_to_expectations = { + {"01/01/23 22:14:59", false}, + {"01/01/23 22:15:00", true}, // used-to-fail + {"01/01/24 00:00:00", true}, // used-to-fail + {"01/01/24 01:00:00", true}, // used-to-fail + {"01/01/24 02:00:00", true}, // used-to-fail + {"01/01/23 03:33:00", true}, // used-to-fail + {"01/01/23 03:33:59", true}, // used-to-fail + {"01/01/23 03:34:00", false}, + {"01/01/23 04:00:00", false}, + {"01/01/23 12:00:00", false}, + {"01/01/23 22:00:00", false}, + }; + + 
run_should_work_test(test_values_to_expectations); +} + +TEST_F(LCWorkTimeTests, ShouldWorkCustomWorkTimeStartEndSameHour) +{ + cct->_conf->rgw_lifecycle_work_time = "22:15-22:45"; + + std::unordered_map<std::string, bool> test_values_to_expectations = { + {"01/01/23 22:14:59", false}, + {"01/01/23 22:15:00", true}, + {"01/01/24 22:44:59", true}, + {"01/01/24 22:45:59", true}, + {"01/01/24 22:46:00", false}, + {"01/01/23 23:00:00", false}, + {"01/01/23 00:00:00", false}, + {"01/01/23 12:00:00", false}, + {"01/01/23 21:00:00", false}, + }; + + run_should_work_test(test_values_to_expectations); +} + +TEST_F(LCWorkTimeTests, ScheduleNextStartTime) +{ + cct->_conf->rgw_lifecycle_work_time = "22:15-03:33"; + + // items of the map: [ (work_started_time, work_completed_time), expected_value (seconds) ] + // + // expected_value is the difference between configured start time (i.e, 22:15:00) and + // the second item of the tuple (i.e., work_completed_time). + // + // Note that "seconds" of work completion time is taken into account but date is not relevant. + // e.g., the first testcase: 75713 == 01:13:07 - 22:15:00 (https://tinyurl.com/ydm86752) + std::map<std::tuple<std::string, std::string>, int> test_values_to_expectations = { + {{"01/01/23 22:15:05", "01/01/23 01:13:07"}, 75713}, + {{"01/01/23 22:15:05", "01/02/23 01:13:07"}, 75713}, + {{"01/01/23 22:15:05", "01/01/23 22:17:07"}, 86273}, + {{"01/01/23 22:15:05", "01/02/23 22:17:07"}, 86273}, + {{"01/01/23 22:15:05", "01/01/23 22:14:00"}, 60}, + {{"01/01/23 22:15:05", "01/02/23 22:14:00"}, 60}, + {{"01/01/23 22:15:05", "01/01/23 22:15:00"}, 24 * 60 * 60}, + {{"01/01/23 22:15:05", "01/02/23 22:15:00"}, 24 * 60 * 60}, + {{"01/01/23 22:15:05", "01/01/23 22:15:01"}, 24 * 60 * 60 - 1}, + {{"01/01/23 22:15:05", "01/02/23 22:15:01"}, 24 * 60 * 60 - 1}, + }; + + run_schedule_next_start_time_test(test_values_to_expectations); +} diff --git a/src/test/rgw/test_rgw_lua.cc b/src/test/rgw/test_rgw_lua.cc index 07f8521c231..0485e71ede3 100644 --- a/src/test/rgw/test_rgw_lua.cc +++ b/src/test/rgw/test_rgw_lua.cc @@ -86,7 +86,7 @@ public: return 0; } - virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB *cb) override { + virtual int read_stats_async(const DoutPrefixProvider *dpp, boost::intrusive_ptr<sal::ReadStatsCB> cb) override { return 0; } @@ -635,8 +635,12 @@ TEST(TestRGWLua, Acl) function print_grant(k, g) print("Grant Key: " .. tostring(k)) print("Grant Type: " .. g.Type) - print("Grant Group Type: " .. g.GroupType) - print("Grant Referer: " .. g.Referer) + if (g.GroupType) then + print("Grant Group Type: " .. g.GroupType) + end + if (g.Referer) then + print("Grant Referer: " .. g.Referer) + end if (g.User) then print("Grant User.Tenant: " .. g.User.Tenant) print("Grant User.Id: " .. 
g.User.Id) @@ -662,11 +666,11 @@ TEST(TestRGWLua, Acl) )"; DEFINE_REQ_STATE; - ACLOwner owner; - owner.set_id(rgw_user("jack", "black")); - owner.set_name("jack black"); - s.user_acl.reset(new RGWAccessControlPolicy(g_cct)); - s.user_acl->set_owner(owner); + const ACLOwner owner{ + .id = rgw_user("jack", "black"), + .display_name = "jack black" + }; + s.user_acl.set_owner(owner); ACLGrant grant1, grant2, grant3, grant4, grant5, grant6_1, grant6_2; grant1.set_canon(rgw_user("jane", "doe"), "her grant", 1); grant2.set_group(ACL_GROUP_ALL_USERS ,2); @@ -675,13 +679,13 @@ TEST(TestRGWLua, Acl) grant5.set_group(ACL_GROUP_AUTHENTICATED_USERS, 5); grant6_1.set_canon(rgw_user("kill", "bill"), "his grant", 6); grant6_2.set_canon(rgw_user("kill", "bill"), "her grant", 7); - s.user_acl->get_acl().add_grant(&grant1); - s.user_acl->get_acl().add_grant(&grant2); - s.user_acl->get_acl().add_grant(&grant3); - s.user_acl->get_acl().add_grant(&grant4); - s.user_acl->get_acl().add_grant(&grant5); - s.user_acl->get_acl().add_grant(&grant6_1); - s.user_acl->get_acl().add_grant(&grant6_2); + s.user_acl.get_acl().add_grant(grant1); + s.user_acl.get_acl().add_grant(grant2); + s.user_acl.get_acl().add_grant(grant3); + s.user_acl.get_acl().add_grant(grant4); + s.user_acl.get_acl().add_grant(grant5); + s.user_acl.get_acl().add_grant(grant6_1); + s.user_acl.get_acl().add_grant(grant6_2); const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); ASSERT_EQ(rc, 0); } @@ -730,17 +734,14 @@ TEST(TestRGWLua, UseFunction) )"; DEFINE_REQ_STATE; - s.owner.set_name("user two"); - s.owner.set_id(rgw_user("tenant2", "user2")); - s.user_acl.reset(new RGWAccessControlPolicy()); - s.user_acl->get_owner().set_name("user three"); - s.user_acl->get_owner().set_id(rgw_user("tenant3", "user3")); - s.bucket_acl.reset(new RGWAccessControlPolicy()); - s.bucket_acl->get_owner().set_name("user four"); - s.bucket_acl->get_owner().set_id(rgw_user("tenant4", "user4")); - s.object_acl.reset(new RGWAccessControlPolicy()); - s.object_acl->get_owner().set_name("user five"); - s.object_acl->get_owner().set_id(rgw_user("tenant5", "user5")); + s.owner.display_name = "user two"; + s.owner.id = rgw_user("tenant2", "user2"); + s.user_acl.get_owner().display_name = "user three"; + s.user_acl.get_owner().id = rgw_user("tenant3", "user3"); + s.bucket_acl.get_owner().display_name = "user four"; + s.bucket_acl.get_owner().id = rgw_user("tenant4", "user4"); + s.object_acl.get_owner().display_name = "user five"; + s.object_acl.get_owner().id = rgw_user("tenant5", "user5"); const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); ASSERT_EQ(rc, 0); diff --git a/src/test/test_c2c.cc b/src/test/test_c2c.cc index 5fe1ac2f1fc..1569be305e5 100644 --- a/src/test/test_c2c.cc +++ b/src/test/test_c2c.cc @@ -70,7 +70,7 @@ int main(int argc, const char **argv) while(1) { size_t i; if (sharding) { - i = mempool::pool_t::pick_a_shard_int(); + i = mempool::pick_a_shard_int(); } else { i = 0; } diff --git a/src/test/test_mempool.cc b/src/test/test_mempool.cc index 9dd96682f36..a8e20a41747 100644 --- a/src/test/test_mempool.cc +++ b/src/test/test_mempool.cc @@ -412,7 +412,7 @@ TEST(mempool, check_shard_select) for (size_t i = 0; i < samples; i++) { workers.push_back( std::thread([&](){ - size_t i = mempool::pool_t::pick_a_shard_int(); + size_t i = mempool::pick_a_shard_int(); shards[i]++; })); } diff --git a/src/test/test_rgw_admin_meta.cc b/src/test/test_rgw_admin_meta.cc index b1d5fad0600..00c43d10b54 100644 --- 
a/src/test/test_rgw_admin_meta.cc
+++ b/src/test/test_rgw_admin_meta.cc
@@ -460,7 +460,7 @@ int compare_access_keys(RGWAccessKey& k1, RGWAccessKey& k2)
 int compare_user_info(RGWUserInfo& i1, RGWUserInfo& i2)
 {
   int rv;
-  if ((rv = i1.user_id.compare(i2.user_id)) != 0)
+  if ((rv = i1.user_id.id.compare(i2.user_id.id)) != 0)
     return rv;
   if ((rv = i1.display_name.compare(i2.display_name)) != 0)
     return rv;
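The reworked Spillover* tests above share one pattern: do_bluefs_write() pushes 1 GB into "db" and 1 GB into "db.slow", and l_bluefs_slow_used_bytes then shows how the volume selection policy routed the slow-hinted gigabyte (all of it for "rocksdb_original", none for "use_some_extra" with enough reserve, part of it in SpilloverFixedPartialTest). As a rough model of that routing (assumptions mine, not the actual BlueFS volume selector):

// Toy model only; it mirrors what the asserts check about where
// "db.slow" data lands, not how BlueFS actually decides.
#include <cassert>
#include <cstdint>
#include <string>

enum class Device { fast, slow };

Device select_device(const std::string& dir, const std::string& policy,
                     std::uint64_t fast_free, std::uint64_t len)
{
  if (dir != "db.slow") {
    return Device::fast;   // "db" data targets the fast DB device
  }
  if (policy == "rocksdb_original") {
    return Device::slow;   // hint honored, hence exactly 1 GB of spillover
  }
  // "use_some_extra": spill only what the fast device cannot absorb
  return len <= fast_free ? Device::fast : Device::slow;
}

int main()
{
  const std::uint64_t gb = 1ull << 30;
  assert(select_device("db.slow", "rocksdb_original", 3 * gb, gb) == Device::slow);
  assert(select_device("db.slow", "use_some_extra", 3 * gb, gb) == Device::fast);
}

SpilloverFixedPartialTest corresponds to the in-between case, where the reserved threshold lets the fast device absorb only part of the write, hence the 100 MB lower and 1 GB upper bounds in its asserts.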
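The LCWorkTimeTests pin down RGWLC::LCWorker::should_work() for rgw_lifecycle_work_time windows that wrap past midnight (the cases marked "used-to-fail"). A minimal standalone sketch of that wrap-around check, assuming only hours and minutes are compared and the end minute is inclusive, matching the expectations in the tests (this is illustrative, not the RGWLC implementation):

// Model the window as minutes-of-day; an end below the start wraps midnight.
#include <cassert>

struct hhmm { int hour; int min; };

// True when t falls inside [start, end]; seconds are ignored and the end
// minute is inclusive, as the "seconds don't matter" cases above require.
bool in_work_window(hhmm start, hhmm end, hhmm t)
{
  const int s = start.hour * 60 + start.min;
  const int e = end.hour * 60 + end.min;
  const int n = t.hour * 60 + t.min;
  if (s <= e) {
    return s <= n && n <= e;   // same-day window, e.g. "14:00-16:00"
  }
  return n >= s || n <= e;     // wraps midnight, e.g. "22:15-03:33"
}

int main()
{
  assert(in_work_window({22, 15}, {3, 33}, {23, 0}));   // before midnight
  assert(in_work_window({22, 15}, {3, 33}, {1, 0}));    // after midnight
  assert(!in_work_window({22, 15}, {3, 33}, {12, 0}));  // outside the window
}

Note that the degenerate "14:00-13:59" window from ShouldWorkCustomWorkTimeEndTimeInTheNextDay24Hours falls out of the wrap branch naturally: every minute-of-day satisfies n >= s || n <= e.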
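ScheduleNextStartTime's expectations encode a simple rule: the wait is the time-of-day distance from work completion to the next configured window start (22:15:00 here), the completion date is ignored, and finishing exactly at the start waits a full day. A sketch of that arithmetic (illustrative only, not the RGWLC code):

#include <cstdio>

int secs_until_next_start(int done_h, int done_m, int done_s)
{
  const int day = 24 * 60 * 60;
  const int start = (22 * 60 + 15) * 60;  // 22:15:00, as configured in the test
  const int done = done_h * 3600 + done_m * 60 + done_s;
  int wait = (start - done) % day;
  if (wait <= 0) {
    wait += day;  // finishing at or past the start waits for the next day
  }
  return wait;
}

int main()
{
  std::printf("%d\n", secs_until_next_start(1, 13, 7));   // 75713, first case
  std::printf("%d\n", secs_until_next_start(22, 14, 0));  // 60
  std::printf("%d\n", secs_until_next_start(22, 15, 0));  // 86400, full day
  std::printf("%d\n", secs_until_next_start(22, 15, 1));  // 86399
}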
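Finally, the test_c2c.cc and test_mempool.cc hunks switch callers from mempool::pool_t::pick_a_shard_int() to the free function mempool::pick_a_shard_int(); check_shard_select then spawns threads and verifies they spread across shards. A generic stand-in for the sharded-counter pattern being exercised (hypothetical code; Ceph's mempool uses its own shard count and hash):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

constexpr std::size_t num_shards = 32;  // assumed value, not Ceph's constant

struct sharded_counter {
  std::atomic<long> shards[num_shards] = {};

  // Hash the calling thread onto a shard so concurrent increments usually
  // touch different cache lines; stand-in for mempool::pick_a_shard_int().
  static std::size_t pick_a_shard_int() {
    return std::hash<std::thread::id>{}(std::this_thread::get_id()) % num_shards;
  }

  void inc() { shards[pick_a_shard_int()].fetch_add(1, std::memory_order_relaxed); }

  long read() const {
    long sum = 0;
    for (const auto& s : shards) sum += s.load(std::memory_order_relaxed);
    return sum;
  }
};

int main()
{
  sharded_counter counter;
  std::vector<std::thread> workers;
  for (int t = 0; t < 16; ++t) {
    workers.emplace_back([&counter] {
      for (int i = 0; i < 1000; ++i) counter.inc();
    });
  }
  for (auto& w : workers) w.join();
  assert(counter.read() == 16 * 1000);  // no updates lost across shards
}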