summaryrefslogtreecommitdiffstats
path: root/src/crimson/osd/object_context.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/crimson/osd/object_context.h')
-rw-r--r--src/crimson/osd/object_context.h187
1 files changed, 56 insertions, 131 deletions
diff --git a/src/crimson/osd/object_context.h b/src/crimson/osd/object_context.h
index e17af91e3ad..4195e5dc597 100644
--- a/src/crimson/osd/object_context.h
+++ b/src/crimson/osd/object_context.h
@@ -9,6 +9,7 @@
#include <seastar/core/shared_future.hh>
#include <seastar/core/shared_ptr.hh>
+#include "common/fmt_common.h"
#include "common/intrusive_lru.h"
#include "osd/object_state.h"
#include "crimson/common/exception.h"
@@ -73,6 +74,8 @@ public:
using watch_key_t = std::pair<uint64_t, entity_name_t>;
std::map<watch_key_t, seastar::shared_ptr<crimson::osd::Watch>> watchers;
+ CommonOBCPipeline obc_pipeline;
+
ObjectContext(hobject_t hoid) : lock(hoid),
obs(std::move(hoid)) {}
@@ -128,30 +131,49 @@ public:
}
bool is_valid() const {
- return !invalidated_by_interval_change;
+ return !invalidated;
}
private:
- template <typename Lock, typename Func>
- auto _with_lock(Lock& lock, Func&& func) {
- return lock.lock(
- ).then([&lock, func=std::forward<Func>(func), obc=Ref(this)]() mutable {
- return seastar::futurize_invoke(
- func
- ).finally([&lock, obc=std::move(obc)] {
- /* We chain the finally block here because it's possible for lock.lock()
- * above to fail due to a call to ObjectContext::interrupt, which calls
- * tri_mutex::abort. In the event of such an error, the lock isn't
- * actually taken and calling unlock() would be incorrect. */
- lock.unlock();
- });
- });
- }
-
boost::intrusive::list_member_hook<> obc_accessing_hook;
uint64_t list_link_cnt = 0;
+
+ /**
+ * loading_started
+ *
+ * ObjectContext instances may be used for pipeline stages
+ * prior to actually being loaded.
+ *
+ * ObjectContextLoader::load_and_lock* use loading_started
+ * to determine whether to initiate loading or simply take
+ * the desired lock directly.
+ *
+ * If loading_started is not set, the task must set it and
+ * (synchronously) take an exclusive lock. That exclusive lock
+ * must be held until the loading completes, at which point the
+ * lock may be relaxed or released.
+ *
+ * If loading_started is set, it is safe to directly take
+ * the desired lock; once the lock is obtained, loading may
+ * be assumed to be complete.
+ *
+ * loading_started, once set, remains set for the lifetime
+ * of the object.
+ */
+ bool loading_started = false;
+
+ /// true once set_*_state has been called, used for debugging
bool fully_loaded = false;
- bool invalidated_by_interval_change = false;
+
+ /**
+ * invalidated
+ *
+ * Set to true upon eviction from cache. This happens to all
+ * cached OBCs upon interval change and to the target of
+ * a repop received on a replica, to ensure that the cached
+ * state is refreshed upon subsequent replica read.
+ */
+ bool invalidated = false;
friend class ObjectContextRegistry;
friend class ObjectContextLoader;
@@ -172,122 +194,20 @@ public:
}
}
+ template <typename FormatContext>
+ auto fmt_print_ctx(FormatContext & ctx) const {
+ return fmt::format_to(
+ ctx.out(), "ObjectContext({}, oid={}, refcount={})",
+ (void*)this,
+ get_oid(),
+ get_use_count());
+ }
+
using obc_accessing_option_t = boost::intrusive::member_hook<
ObjectContext,
boost::intrusive::list_member_hook<>,
&ObjectContext::obc_accessing_hook>;
- template<RWState::State Type, typename InterruptCond = void, typename Func>
- auto with_lock(Func&& func) {
- if constexpr (!std::is_void_v<InterruptCond>) {
- auto wrapper = ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(std::forward<Func>(func));
- switch (Type) {
- case RWState::RWWRITE:
- return _with_lock(lock.for_write(), std::move(wrapper));
- case RWState::RWREAD:
- return _with_lock(lock.for_read(), std::move(wrapper));
- case RWState::RWEXCL:
- return _with_lock(lock.for_excl(), std::move(wrapper));
- case RWState::RWNONE:
- return seastar::futurize_invoke(std::move(wrapper));
- default:
- assert(0 == "noop");
- }
- } else {
- switch (Type) {
- case RWState::RWWRITE:
- return _with_lock(lock.for_write(), std::forward<Func>(func));
- case RWState::RWREAD:
- return _with_lock(lock.for_read(), std::forward<Func>(func));
- case RWState::RWEXCL:
- return _with_lock(lock.for_excl(), std::forward<Func>(func));
- case RWState::RWNONE:
- return seastar::futurize_invoke(std::forward<Func>(func));
- default:
- assert(0 == "noop");
- }
- }
- }
-
- /**
- * load_then_with_lock
- *
- * Takes two functions as arguments -- load_func to be invoked
- * with an exclusive lock, and func to be invoked under the
- * lock type specified by the Type template argument.
- *
- * Caller must ensure that *this is not already locked, presumably
- * by invoking load_then_with_lock immediately after construction.
- *
- * @param [in] load_func Function to be invoked under excl lock
- * @param [in] func Function to be invoked after load_func under
- * lock of type Type.
- */
- template<RWState::State Type, typename Func, typename Func2>
- auto load_then_with_lock(Func &&load_func, Func2 &&func) {
- class lock_state_t {
- tri_mutex *lock = nullptr;
- bool excl = false;
-
- public:
- lock_state_t(tri_mutex &lock) : lock(&lock), excl(true) {
- ceph_assert(lock.try_lock_for_excl());
- }
- lock_state_t(lock_state_t &&o) : lock(o.lock), excl(o.excl) {
- o.lock = nullptr;
- o.excl = false;
- }
- lock_state_t() = delete;
- lock_state_t &operator=(lock_state_t &&o) = delete;
- lock_state_t(const lock_state_t &o) = delete;
- lock_state_t &operator=(const lock_state_t &o) = delete;
-
- void demote() {
- ceph_assert(excl);
- ceph_assert(lock);
- if constexpr (Type == RWState::RWWRITE) {
- lock->demote_to_write();
- } else if constexpr (Type == RWState::RWREAD) {
- lock->demote_to_read();
- } else if constexpr (Type == RWState::RWNONE) {
- lock->unlock_for_excl();
- }
- excl = false;
- }
-
- ~lock_state_t() {
- if (!lock)
- return;
-
- if constexpr (Type == RWState::RWEXCL) {
- lock->unlock_for_excl();
- } else {
- if (excl) {
- lock->unlock_for_excl();
- return;
- }
-
- if constexpr (Type == RWState::RWWRITE) {
- lock->unlock_for_write();
- } else if constexpr (Type == RWState::RWREAD) {
- lock->unlock_for_read();
- }
- }
- }
- };
-
- return seastar::do_with(
- lock_state_t{lock},
- [load_func=std::move(load_func), func=std::move(func)](auto &ls) mutable {
- return std::invoke(
- std::move(load_func)
- ).si_then([func=std::move(func), &ls]() mutable {
- ls.demote();
- return std::invoke(std::move(func));
- });
- });
- }
-
bool empty() const {
return !lock.is_acquired();
}
@@ -313,12 +233,14 @@ public:
void clear_range(const hobject_t &from,
const hobject_t &to) {
- obc_lru.clear_range(from, to);
+ obc_lru.clear_range(from, to, [](auto &obc) {
+ obc.invalidated = true;
+ });
}
void invalidate_on_interval_change() {
obc_lru.clear([](auto &obc) {
- obc.invalidated_by_interval_change = true;
+ obc.invalidated = true;
});
}
@@ -336,3 +258,6 @@ std::optional<hobject_t> resolve_oid(const SnapSet &ss,
const hobject_t &oid);
} // namespace crimson::osd
+
+template <>
+struct fmt::formatter<RWState::State> : fmt::ostream_formatter {};