summary | refs | log | tree | commit | diff | stats
path: root/src/rgw/driver
diff options
context:
space:
mode:
author: Adam C. Emerson <aemerson@redhat.com> 2023-04-14 20:59:19 +0200
committer: Adam Emerson <aemerson@redhat.com> 2024-01-24 21:51:46 +0100
commit: 7c4eee61c4ab6e54f9298053eaecde682b19cc2b (patch)
tree: a97dcdff66328c0c0535824bd11235cea5d680e3 /src/rgw/driver
parent: ceph/async: `io_context_pool` constructor/start takes init function (diff)
download: ceph-7c4eee61c4ab6e54f9298053eaecde682b19cc2b.tar.xz
          ceph-7c4eee61c4ab6e54f9298053eaecde682b19cc2b.zip
rgw: SAL drivers take `boost::asio::io_context`
`RadosDriver` needs it, and since SAL generally uses `optional_yield`, other stores are going to need it to implement that sensibly.

Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
Diffstat (limited to 'src/rgw/driver')
-rw-r--r--src/rgw/driver/rados/rgw_rados.cc16
-rw-r--r--src/rgw/driver/rados/rgw_sal_rados.cc22
-rw-r--r--src/rgw/driver/rados/rgw_sal_rados.h13
-rw-r--r--src/rgw/driver/rados/rgw_service.cc40
-rw-r--r--src/rgw/driver/rados/rgw_service.h26
5 files changed, 77 insertions, 40 deletions
diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc
index 207ed5c919c..fe0d14d1029 100644
--- a/src/rgw/driver/rados/rgw_rados.cc
+++ b/src/rgw/driver/rados/rgw_rados.cc
@@ -1365,11 +1365,10 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
int RGWRados::init_svc(bool raw, const DoutPrefixProvider *dpp)
{
if (raw) {
- return svc.init_raw(cct, use_cache, get_rados_handle(), null_yield, dpp);
+ return svc.init_raw(cct, driver, use_cache, null_yield, dpp);
}
- return svc.init(cct, use_cache, run_sync_thread, get_rados_handle(),
- null_yield, dpp);
+ return svc.init(cct, driver, use_cache, run_sync_thread, null_yield, dpp);
}
int RGWRados::init_ctl(const DoutPrefixProvider *dpp)
@@ -1383,9 +1382,16 @@ int RGWRados::init_ctl(const DoutPrefixProvider *dpp)
*/
int RGWRados::init_begin(const DoutPrefixProvider *dpp)
{
- int ret = init_rados();
+ int ret;
+
+ ret = driver->init_neorados(dpp);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to initialize neorados (ret=" << cpp_strerror(-ret) << ")" << dendl;
+ return ret;
+ }
+ ret = init_rados();
if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: failed to init rados (ret=" << cpp_strerror(-ret) << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to initialize librados (ret=" << cpp_strerror(-ret) << ")" << dendl;
return ret;
}
diff --git a/src/rgw/driver/rados/rgw_sal_rados.cc b/src/rgw/driver/rados/rgw_sal_rados.cc
index 32d11a151b5..600e54d97d5 100644
--- a/src/rgw/driver/rados/rgw_sal_rados.cc
+++ b/src/rgw/driver/rados/rgw_sal_rados.cc
@@ -22,6 +22,8 @@
#include <boost/algorithm/string.hpp>
#include <boost/process.hpp>
+#include "common/async/blocked_completion.h"
+
#include "common/Clock.h"
#include "common/errno.h"
@@ -1124,12 +1126,25 @@ int RadosStore::get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_
return rados->get_max_chunk_size(obj.pool, chunk_size, dpp);
}
+int RadosStore::init_neorados(const DoutPrefixProvider* dpp) {
+ if (!neorados) try {
+ neorados = neorados::RADOS::make_with_cct(dpp->get_cct(), io_context,
+ ceph::async::use_blocked);
+ } catch (const boost::system::system_error& e) {
+ ldpp_dout(dpp, 0) << "ERROR: creating neorados handle failed: "
+ << e.what() << dendl;
+ return ceph::from_error_code(e.code());
+ }
+ return 0;
+}
+
int RadosStore::initialize(CephContext *cct, const DoutPrefixProvider *dpp)
{
std::unique_ptr<ZoneGroup> zg =
std::make_unique<RadosZoneGroup>(this, svc()->zone->get_zonegroup());
zone = make_unique<RadosZone>(this, std::move(zg));
- return 0;
+
+ return init_neorados(dpp);
}
int RadosStore::log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info, optional_yield y)
@@ -3718,9 +3733,10 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
extern "C" {
-void* newRadosStore(void)
+void* newRadosStore(void* io_context)
{
- rgw::sal::RadosStore* store = new rgw::sal::RadosStore();
+ rgw::sal::RadosStore* store = new rgw::sal::RadosStore(
+ *static_cast<boost::asio::io_context*>(io_context));
if (store) {
RGWRados* rados = new RGWRados();
diff --git a/src/rgw/driver/rados/rgw_sal_rados.h b/src/rgw/driver/rados/rgw_sal_rados.h
index 7b810504457..d5174f39bc8 100644
--- a/src/rgw/driver/rados/rgw_sal_rados.h
+++ b/src/rgw/driver/rados/rgw_sal_rados.h
@@ -15,6 +15,10 @@
#pragma once
+#include "include/neorados/RADOS.hpp"
+
+#include <boost/asio/io_context.hpp>
+
#include "rgw_sal_store.h"
#include "rgw_rados.h"
#include "rgw_notify.h"
@@ -112,19 +116,22 @@ class RadosZone : public StoreZone {
class RadosStore : public StoreDriver {
private:
+ boost::asio::io_context& io_context;
RGWRados* rados;
RGWUserCtl* user_ctl;
std::unique_ptr<RadosZone> zone;
+ std::optional<neorados::RADOS> neorados;
std::string topics_oid(const std::string& tenant) const;
public:
- RadosStore()
- : rados(nullptr) {
+ RadosStore(boost::asio::io_context& io_context)
+ : io_context(io_context), rados(nullptr) {
}
~RadosStore() {
delete rados;
}
+ int init_neorados(const DoutPrefixProvider* dpp);
virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) override;
virtual const std::string get_name() const override {
return "rados";
@@ -240,6 +247,8 @@ class RadosStore : public StoreDriver {
void setRados(RGWRados * st) { rados = st; }
RGWRados* getRados(void) { return rados; }
+ boost::asio::io_context& get_io_context() { return io_context; }
+ neorados::RADOS& get_neorados() { return *neorados; }
RGWServices* svc() { return &rados->svc; }
const RGWServices* svc() const { return &rados->svc; }
diff --git a/src/rgw/driver/rados/rgw_service.cc b/src/rgw/driver/rados/rgw_service.cc
index 5b78472dfe3..4be0738bae2 100644
--- a/src/rgw/driver/rados/rgw_service.cc
+++ b/src/rgw/driver/rados/rgw_service.cc
@@ -34,6 +34,7 @@
#include "rgw_datalog.h"
#include "rgw_metadata.h"
#include "rgw_otp.h"
+#include "rgw_sal_rados.h"
#include "rgw_user.h"
#include "rgw_role.h"
@@ -48,10 +49,10 @@ RGWServices_Def::~RGWServices_Def()
}
int RGWServices_Def::init(CephContext *cct,
+ rgw::sal::RadosStore* driver,
bool have_cache,
bool raw,
bool run_sync,
- librados::Rados* rados,
optional_yield y,
const DoutPrefixProvider *dpp)
{
@@ -88,7 +89,8 @@ int RGWServices_Def::init(CephContext *cct,
async_processor->start();
finisher->init();
- bi_rados->init(zone.get(), rados, bilog_rados.get(), datalog_rados.get());
+ bi_rados->init(zone.get(), driver->getRados()->get_rados_handle(),
+ bilog_rados.get(), datalog_rados.get());
bilog_rados->init(bi_rados.get());
bucket_sobj->init(zone.get(), sysobj.get(), sysobj_cache.get(),
bi_rados.get(), meta.get(), meta_be_sobj.get(),
@@ -97,27 +99,29 @@ int RGWServices_Def::init(CephContext *cct,
sysobj.get(),
sysobj_cache.get(),
bucket_sobj.get());
- cls->init(zone.get(), rados);
- config_key_rados->init(rados);
- mdlog->init(rados, zone.get(), sysobj.get(), cls.get(),
- async_processor.get());
+ cls->init(zone.get(), driver->getRados()->get_rados_handle());
+ config_key_rados->init(driver->getRados()->get_rados_handle());
+ mdlog->init(driver->getRados()->get_rados_handle(), zone.get(), sysobj.get(),
+ cls.get(), async_processor.get());
meta->init(sysobj.get(), mdlog.get(), meta_bes);
meta_be_sobj->init(sysobj.get(), mdlog.get());
meta_be_otp->init(sysobj.get(), mdlog.get(), cls.get());
- notify->init(zone.get(), rados, finisher.get());
+ notify->init(zone.get(), driver->getRados()->get_rados_handle(),
+ finisher.get());
otp->init(zone.get(), meta.get(), meta_be_otp.get());
- zone->init(sysobj.get(), rados, sync_modules.get(), bucket_sync_sobj.get());
- zone_utils->init(rados, zone.get());
+ zone->init(sysobj.get(), driver->getRados()->get_rados_handle(),
+ sync_modules.get(), bucket_sync_sobj.get());
+ zone_utils->init(driver->getRados()->get_rados_handle(), zone.get());
quota->init(zone.get());
sync_modules->init(zone.get());
- sysobj_core->core_init(rados, zone.get());
+ sysobj_core->core_init(driver->getRados()->get_rados_handle(), zone.get());
if (have_cache) {
- sysobj_cache->init(rados, zone.get(), notify.get());
- sysobj->init(rados, sysobj_cache.get());
+ sysobj_cache->init(driver->getRados()->get_rados_handle(), zone.get(), notify.get());
+ sysobj->init(driver->getRados()->get_rados_handle(), sysobj_cache.get());
} else {
- sysobj->init(rados, sysobj_core.get());
+ sysobj->init(driver->getRados()->get_rados_handle(), sysobj_core.get());
}
- user_rados->init(rados, zone.get(), sysobj.get(), sysobj_cache.get(),
+ user_rados->init(driver->getRados()->get_rados_handle(), zone.get(), sysobj.get(), sysobj_cache.get(),
meta.get(), meta_be_sobj.get(), sync_modules.get());
role_rados->init(zone.get(), meta.get(), meta_be_sobj.get(), sysobj.get());
@@ -146,7 +150,7 @@ int RGWServices_Def::init(CephContext *cct,
r = datalog_rados->start(dpp, &zone->get_zone(),
zone->get_zone_params(),
- rados);
+ driver->getRados()->get_rados_handle());
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start datalog_rados service (" << cpp_strerror(-r) << dendl;
return r;
@@ -301,13 +305,11 @@ void RGWServices_Def::shutdown()
has_shutdown = true;
}
-int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw,
- bool run_sync, librados::Rados* rados,
- optional_yield y, const DoutPrefixProvider *dpp)
+int RGWServices::do_init(CephContext *_cct, rgw::sal::RadosStore* driver, bool have_cache, bool raw, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp)
{
cct = _cct;
- int r = _svc.init(cct, have_cache, raw, run_sync, rados, y, dpp);
+ int r = _svc.init(cct, driver, have_cache, raw, run_sync, y, dpp);
if (r < 0) {
return r;
}
diff --git a/src/rgw/driver/rados/rgw_service.h b/src/rgw/driver/rados/rgw_service.h
index 7c05f043a47..9996b42e251 100644
--- a/src/rgw/driver/rados/rgw_service.h
+++ b/src/rgw/driver/rados/rgw_service.h
@@ -11,6 +11,10 @@
#include "rgw_common.h"
+namespace rgw::sal {
+class RadosStore;
+}
+
struct RGWServices_Def;
class RGWServiceInstance
@@ -108,8 +112,8 @@ struct RGWServices_Def
RGWServices_Def();
~RGWServices_Def();
- int init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync,
- librados::Rados* rados, optional_yield y,
+ int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
+ bool raw_storage, bool run_sync, optional_yield y,
const DoutPrefixProvider *dpp);
void shutdown();
};
@@ -150,19 +154,19 @@ struct RGWServices
RGWSI_Role_RADOS *role{nullptr};
RGWAsyncRadosProcessor* async_processor;
- int do_init(CephContext *cct, bool have_cache, bool raw_storage,
- bool run_sync, librados::Rados* rados, optional_yield y,
+ int do_init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
+ bool raw_storage, bool run_sync, optional_yield y,
const DoutPrefixProvider *dpp);
- int init(CephContext *cct, bool have_cache, bool run_sync,
- librados::Rados* rados, optional_yield y,
- const DoutPrefixProvider *dpp) {
- return do_init(cct, have_cache, false, run_sync, rados, y, dpp);
+ int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
+ bool run_sync, optional_yield y, const DoutPrefixProvider *dpp) {
+ return do_init(cct, store, have_cache, false, run_sync, y, dpp);
}
- int init_raw(CephContext *cct, bool have_cache, librados::Rados* rados,
- optional_yield y, const DoutPrefixProvider *dpp) {
- return do_init(cct, have_cache, true, false, rados, y, dpp);
+ int init_raw(CephContext *cct, rgw::sal::RadosStore* store,
+ bool have_cache, optional_yield y,
+ const DoutPrefixProvider *dpp) {
+ return do_init(cct, store, have_cache, true, false, y, dpp);
}
void shutdown() {
_svc.shutdown();