author    | Adam C. Emerson <aemerson@redhat.com> | 2022-12-02 07:55:47 +0100
committer | Casey Bodley <cbodley@redhat.com>     | 2023-11-29 19:15:27 +0100
commit    | a493b0710a50786ef0374df52831774fe6ca8d51 (patch)
tree      | b6978426f4e9deeb04b78c81ae3bd1004c73ec81 /src/rgw
parent    | rgw: Remove `RGWSI_RADOS` from `RGWSI_ConfigKey_RADOS` (diff)
rgw: Remove `RGWSI_RADOS` from `RGWSI_MDLog`
Simply use the RADOS handle and `rgw_rados_ref` directly.
Also move `async_processor` out from `RGWSI_RADOS` and into
`RGWServices_Def`. This is as good a place as any for it, for now, as
it's reachable by everyone who needs it and exists through the
lifetime of the process.
Eventually it's going to go away due to coroutinization, anyway.
Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
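
The interesting part of the change is ownership: the async processor used to be constructed, started, and stopped inside `RGWSI_RADOS`, and every consumer fetched it through `svc()->rados->get_async_processor()`. After this patch, `RGWServices_Def` owns it for the lifetime of the process and `RGWServices` exposes a raw pointer, so each call site becomes a mechanical substitution to `svc()->async_processor`. A minimal, self-contained C++ sketch of that shape (illustrative names only — `AsyncProcessor`, `ServicesDef`, and `Services` stand in for `RGWAsyncRadosProcessor`, `RGWServices_Def`, and `RGWServices`; this is not RGW code):

    // Toy model of the ownership move in this patch. Illustrative names:
    // AsyncProcessor ~ RGWAsyncRadosProcessor, ServicesDef ~ RGWServices_Def,
    // Services ~ RGWServices. Not RGW code.
    #include <iostream>
    #include <memory>

    class AsyncProcessor {
      int num_threads;
    public:
      explicit AsyncProcessor(int threads) : num_threads(threads) {}
      void start() { std::cout << "started " << num_threads << " worker threads\n"; }
      void stop()  { std::cout << "stopped\n"; }
    };

    // Owns the processor for the whole process lifetime, as RGWServices_Def
    // does after this patch (RGWSI_RADOS owned it before).
    struct ServicesDef {
      std::unique_ptr<AsyncProcessor> async_processor;

      void init(int num_async_threads) {
        async_processor = std::make_unique<AsyncProcessor>(num_async_threads);
        async_processor->start();
      }
      void shutdown() {
        async_processor->stop();  // stopped before the RADOS handle is torn down
      }
    };

    // Non-owning view handed to consumers, like RGWServices::async_processor.
    struct Services {
      AsyncProcessor* async_processor = nullptr;
      void do_init(ServicesDef& def) { async_processor = def.async_processor.get(); }
    };

    int main() {
      ServicesDef def;
      Services svc;
      def.init(8);    // stands in for cct->_conf->rgw_num_async_rados_threads
      svc.do_init(def);
      // Call sites now read svc.async_processor directly instead of calling
      // get_async_processor() on the RADOS service.
      def.shutdown();
    }

Keeping the owner at the top of the service graph means the processor outlives every service that uses it and is stopped before the RADOS handle goes away, which is the ordering `RGWServices_Def::shutdown()` enforces below. The rest of the diff is the per-call-site substitution, plus the new `RGWSI_MDLog::init()` signature that takes the `librados::Rados*` handle and the processor explicitly.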
Diffstat (limited to 'src/rgw')
-rw-r--r-- | src/rgw/driver/rados/rgw_data_sync.cc    |  4
-rw-r--r-- | src/rgw/driver/rados/rgw_period.cc       |  2
-rw-r--r-- | src/rgw/driver/rados/rgw_rados.cc        |  6
-rw-r--r-- | src/rgw/driver/rados/rgw_service.cc      | 10
-rw-r--r-- | src/rgw/driver/rados/rgw_service.h       |  3
-rw-r--r-- | src/rgw/driver/rados/rgw_trim_bilog.cc   | 12
-rw-r--r-- | src/rgw/driver/rados/rgw_trim_datalog.cc |  2
-rw-r--r-- | src/rgw/driver/rados/rgw_trim_mdlog.cc   |  6
-rw-r--r-- | src/rgw/rgw_admin.cc                     | 16
-rw-r--r-- | src/rgw/services/svc_mdlog.cc            | 33
-rw-r--r-- | src/rgw/services/svc_mdlog.h             | 10
-rw-r--r-- | src/rgw/services/svc_rados.cc            | 13
-rw-r--r-- | src/rgw/services/svc_rados.h             |  6
13 files changed, 62 insertions, 61 deletions
diff --git a/src/rgw/driver/rados/rgw_data_sync.cc b/src/rgw/driver/rados/rgw_data_sync.cc
index 02965f9073d..7b8c0b7343d 100644
--- a/src/rgw/driver/rados/rgw_data_sync.cc
+++ b/src/rgw/driver/rados/rgw_data_sync.cc
@@ -6068,7 +6068,7 @@ int RGWBucketPipeSyncStatusManager::do_init(const DoutPrefixProvider *dpp,
   }
 
   sync_module.reset(new RGWDefaultSyncModuleInstance());
-  auto async_rados = driver->svc()->rados->get_async_processor();
+  auto async_rados = driver->svc()->async_processor;
 
   sync_env.init(this, driver->ctx(), driver,
                 driver->svc(), async_rados, &http_manager,
@@ -6680,7 +6680,7 @@ int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp,
   RGWDataSyncEnv env;
   RGWSyncModuleInstanceRef module; // null sync module
-  env.init(dpp, driver->ctx(), driver, driver->svc(), driver->svc()->rados->get_async_processor(),
+  env.init(dpp, driver->ctx(), driver, driver->svc(), driver->svc()->async_processor,
            nullptr, nullptr, nullptr, module, nullptr);
 
   RGWDataSyncCtx sc;
diff --git a/src/rgw/driver/rados/rgw_period.cc b/src/rgw/driver/rados/rgw_period.cc
index 61602b354e2..4a16faccefb 100644
--- a/src/rgw/driver/rados/rgw_period.cc
+++ b/src/rgw/driver/rados/rgw_period.cc
@@ -154,7 +154,7 @@ static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Driver* dri
 {
   rgw::sal::RadosStore* rados_store = static_cast<rgw::sal::RadosStore*>(driver);
   // initialize a sync status manager to read the status
-  RGWMetaSyncStatusManager mgr(rados_store, rados_store->svc()->rados->get_async_processor());
+  RGWMetaSyncStatusManager mgr(rados_store, rados_store->svc()->async_processor);
   int r = mgr.init(dpp);
   if (r < 0) {
     return r;
   }
diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc
index 9cf0831ecaa..5fd174c3aff 100644
--- a/src/rgw/driver/rados/rgw_rados.cc
+++ b/src/rgw/driver/rados/rgw_rados.cc
@@ -1021,7 +1021,7 @@ void RGWRados::finalize()
   /* Before joining any sync threads, drain outstanding requests &
    * mark the async_processor as going_down() */
   if (svc.rados) {
-    svc.rados->stop_processor();
+    svc.async_processor->stop();
   }
 
   if (run_sync_thread) {
@@ -1258,7 +1258,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
                           << pt.second.name << " present in zonegroup" << dendl;
       }
     }
-    auto async_processor = svc.rados->get_async_processor();
+    auto async_processor = svc.async_processor;
     std::lock_guard l{meta_sync_thread_lock};
     meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->driver, async_processor);
     ret = meta_sync_processor_thread->init(dpp);
@@ -1283,7 +1283,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
     std::lock_guard dl{data_sync_thread_lock};
     for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
       ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
-      auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.rados->get_async_processor(), source_zone);
+      auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.async_processor, source_zone);
       ret = thread->init(dpp);
       if (ret < 0) {
         ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl;
diff --git a/src/rgw/driver/rados/rgw_service.cc b/src/rgw/driver/rados/rgw_service.cc
index 9dfa5465dfa..af4043efa7a 100644
--- a/src/rgw/driver/rados/rgw_service.cc
+++ b/src/rgw/driver/rados/rgw_service.cc
@@ -31,6 +31,7 @@
 #include "common/errno.h"
 
 #include "rgw_bucket.h"
+#include "rgw_cr_rados.h"
 #include "rgw_datalog.h"
 #include "rgw_metadata.h"
 #include "rgw_otp.h"
@@ -78,6 +79,8 @@ int RGWServices_Def::init(CephContext *cct,
   sysobj_core = std::make_unique<RGWSI_SysObj_Core>(cct);
   user_rados = std::make_unique<RGWSI_User_RADOS>(cct);
   role_rados = std::make_unique<RGWSI_Role_RADOS>(cct);
+  async_processor = std::make_unique<RGWAsyncRadosProcessor>(
+    cct, cct->_conf->rgw_num_async_rados_threads);
 
   if (have_cache) {
     sysobj_cache = std::make_unique<RGWSI_SysObj_Cache>(dpp, cct);
@@ -85,6 +88,7 @@ int RGWServices_Def::init(CephContext *cct,
   vector<RGWSI_MetaBackend *> meta_bes{meta_be_sobj.get(), meta_be_otp.get()};
 
+  async_processor->start();
   finisher->init();
   bi_rados->init(zone.get(), radoshandle, bilog_rados.get(), datalog_rados.get());
   bilog_rados->init(bi_rados.get());
@@ -97,7 +101,8 @@ int RGWServices_Def::init(CephContext *cct,
                     bucket_sobj.get());
   cls->init(zone.get(), radoshandle);
   config_key_rados->init(radoshandle);
-  mdlog->init(rados.get(), zone.get(), sysobj.get(), cls.get());
+  mdlog->init(radoshandle, zone.get(), sysobj.get(), cls.get(),
+              async_processor.get());
   meta->init(sysobj.get(), mdlog.get(), meta_bes);
   meta_be_sobj->init(sysobj.get(), mdlog.get());
   meta_be_otp->init(sysobj.get(), mdlog.get(), cls.get());
@@ -300,10 +305,10 @@ void RGWServices_Def::shutdown()
   quota->shutdown();
   zone_utils->shutdown();
   zone->shutdown();
+  async_processor->stop();
   rados->shutdown();
 
   has_shutdown = true;
-
 }
 
 int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw,
@@ -345,6 +350,7 @@ int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw,
   core = _svc.sysobj_core.get();
   user = _svc.user_rados.get();
   role = _svc.role_rados.get();
+  async_processor = _svc.async_processor.get();
 
   return 0;
 }
diff --git a/src/rgw/driver/rados/rgw_service.h b/src/rgw/driver/rados/rgw_service.h
index 46ee16417fe..ad80a6c8826 100644
--- a/src/rgw/driver/rados/rgw_service.h
+++ b/src/rgw/driver/rados/rgw_service.h
@@ -74,6 +74,7 @@ class RGWSI_User;
 class RGWSI_User_RADOS;
 class RGWDataChangesLog;
 class RGWSI_Role_RADOS;
+class RGWAsyncRadosProcessor;
 
 struct RGWServices_Def
 {
@@ -104,6 +105,7 @@ struct RGWServices_Def
   std::unique_ptr<RGWSI_User_RADOS> user_rados;
   std::unique_ptr<RGWDataChangesLog> datalog_rados;
   std::unique_ptr<RGWSI_Role_RADOS> role_rados;
+  std::unique_ptr<RGWAsyncRadosProcessor> async_processor;
 
   RGWServices_Def();
   ~RGWServices_Def();
@@ -149,6 +151,7 @@ struct RGWServices
   RGWSI_SysObj_Core *core{nullptr};
   RGWSI_User *user{nullptr};
   RGWSI_Role_RADOS *role{nullptr};
+  RGWAsyncRadosProcessor* async_processor;
 
   int do_init(CephContext *cct, bool have_cache, bool raw_storage,
               bool run_sync, librados::Rados* radoshandle, optional_yield y,
diff --git a/src/rgw/driver/rados/rgw_trim_bilog.cc b/src/rgw/driver/rados/rgw_trim_bilog.cc
index be6d990d962..db998047005 100644
--- a/src/rgw/driver/rados/rgw_trim_bilog.cc
+++ b/src/rgw/driver/rados/rgw_trim_bilog.cc
@@ -617,7 +617,7 @@ int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
     get_policy_params.zone = zone_id;
     get_policy_params.bucket = bucket;
-    yield call(new RGWBucketGetSyncPolicyHandlerCR(store->svc()->rados->get_async_processor(),
+    yield call(new RGWBucketGetSyncPolicyHandlerCR(store->svc()->async_processor,
                                                    store,
                                                    get_policy_params,
                                                    source_policy,
@@ -728,14 +728,14 @@ int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
     }
     while (clean_info && retries < MAX_RETRIES) {
       yield call(new RGWPutBucketInstanceInfoCR(
-                   store->svc()->rados->get_async_processor(),
+                   store->svc()->async_processor,
                    store, clean_info->first, false, {},
                    no_change_attrs(), dpp));
 
       // Raced, try again.
       if (retcode == -ECANCELED) {
        yield call(new RGWGetBucketInstanceInfoCR(
-                    store->svc()->rados->get_async_processor(),
+                    store->svc()->async_processor,
                     store, clean_info->first.bucket,
                     &(clean_info->first), nullptr, dpp));
        if (retcode < 0) {
@@ -1132,7 +1132,7 @@ int BucketTrimCR::operate(const DoutPrefixProvider *dpp)
         return buckets.size() < config.buckets_per_interval;
       };
 
-      call(new MetadataListCR(cct, store->svc()->rados->get_async_processor(),
+      call(new MetadataListCR(cct, store->svc()->async_processor,
                               store->ctl()->meta.mgr,
                               section, status.marker, cb));
     }
@@ -1219,7 +1219,7 @@ int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp)
     // prevent others from trimming for our entire wait interval
     set_status("acquiring trim lock");
-    yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+    yield call(new RGWSimpleRadosLockCR(store->svc()->async_processor, store,
                                         obj, name, cookie,
                                         config.trim_interval_sec));
     if (retcode < 0) {
@@ -1232,7 +1232,7 @@ int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp)
       if (retcode < 0) {
         // on errors, unlock so other gateways can try
         set_status("unlocking");
-        yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
+        yield call(new RGWSimpleRadosUnlockCR(store->svc()->async_processor, store,
                                               obj, name, cookie));
       }
     }
diff --git a/src/rgw/driver/rados/rgw_trim_datalog.cc b/src/rgw/driver/rados/rgw_trim_datalog.cc
index bac0cda8dd6..5dcddb659e1 100644
--- a/src/rgw/driver/rados/rgw_trim_datalog.cc
+++ b/src/rgw/driver/rados/rgw_trim_datalog.cc
@@ -224,7 +224,7 @@ int DataLogTrimPollCR::operate(const DoutPrefixProvider *dpp)
     // prevent other gateways from attempting to trim for the duration
     set_status("acquiring trim lock");
-    yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+    yield call(new RGWSimpleRadosLockCR(store->svc()->async_processor, store,
                                         rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, lock_oid),
                                         "data_trim", lock_cookie,
                                         // interval is a small number and unlikely to overflow
diff --git a/src/rgw/driver/rados/rgw_trim_mdlog.cc b/src/rgw/driver/rados/rgw_trim_mdlog.cc
index 13773ae2877..be513b6e74d 100644
--- a/src/rgw/driver/rados/rgw_trim_mdlog.cc
+++ b/src/rgw/driver/rados/rgw_trim_mdlog.cc
@@ -565,7 +565,7 @@ class MetaPeerTrimShardCollectCR : public RGWShardCollectCR {
       env(env), mdlog(mdlog), period_id(env.current.get_period().get_id()) {
     meta_env.init(env.dpp, cct, env.store, env.store->svc()->zone->get_master_conn(),
-                  env.store->svc()->rados->get_async_processor(), env.http, nullptr,
+                  env.store->svc()->async_processor, env.http, nullptr,
                   env.store->getRados()->get_sync_tracer());
   }
 
@@ -669,7 +669,7 @@ int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp)
     // prevent others from trimming for our entire wait interval
     set_status("acquiring trim lock");
-    yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+    yield call(new RGWSimpleRadosLockCR(store->svc()->async_processor, store,
                                         obj, name, cookie,
                                         // interval is a small number and unlikely to overflow
                                         // coverity[store_truncates_time_t:SUPPRESS]
@@ -685,7 +685,7 @@ int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp)
       if (retcode < 0) {
         // on errors, unlock so other gateways can try
         set_status("unlocking");
-        yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
+        yield call(new RGWSimpleRadosUnlockCR(store->svc()->async_processor, store,
                                               obj, name, cookie));
       }
     }
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index c19d44c9e53..f4a2d1480bd 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -2098,7 +2098,7 @@ stringstream& push_ss(stringstream& ss, list<string>& l, int tab = 0)
 
 static void get_md_sync_status(list<string>& status)
 {
-  RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+  RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
   int ret = sync.init(dpp());
   if (ret < 0) {
@@ -2254,7 +2254,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list<string>& s
     flush_ss(ss, status);
     return;
   }
-  RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
+  RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr);
 
   int ret = sync.init(dpp());
   if (ret < 0) {
@@ -8995,7 +8995,7 @@ next:
   }
 
   if (opt_cmd == OPT::METADATA_SYNC_STATUS) {
-    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9039,7 +9039,7 @@ next:
   }
 
   if (opt_cmd == OPT::METADATA_SYNC_INIT) {
-    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9055,7 +9055,7 @@ next:
 
   if (opt_cmd == OPT::METADATA_SYNC_RUN) {
-    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9075,7 +9075,7 @@ next:
      cerr << "ERROR: source zone not specified" << std::endl;
      return EINVAL;
    }
-    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
+    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9145,7 +9145,7 @@ next:
       return EINVAL;
     }
 
-    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
+    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9174,7 +9174,7 @@ next:
      return ret;
    }
 
-    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module);
+    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr, sync_module);
 
     ret = sync.init(dpp());
     if (ret < 0) {
diff --git a/src/rgw/services/svc_mdlog.cc b/src/rgw/services/svc_mdlog.cc
index da723f3e129..603718dc96e 100644
--- a/src/rgw/services/svc_mdlog.cc
+++ b/src/rgw/services/svc_mdlog.cc
@@ -30,13 +30,16 @@ RGWSI_MDLog::RGWSI_MDLog(CephContext *cct, bool _run_sync) : RGWServiceInstance(
 RGWSI_MDLog::~RGWSI_MDLog() {
 }
 
-int RGWSI_MDLog::init(RGWSI_RADOS *_rados_svc, RGWSI_Zone *_zone_svc, RGWSI_SysObj *_sysobj_svc, RGWSI_Cls *_cls_svc)
+int RGWSI_MDLog::init(librados::Rados* rados_, RGWSI_Zone *_zone_svc,
+                      RGWSI_SysObj *_sysobj_svc, RGWSI_Cls *_cls_svc,
+                      RGWAsyncRadosProcessor* async_processor_)
 {
   svc.zone = _zone_svc;
   svc.sysobj = _sysobj_svc;
   svc.mdlog = this;
-  svc.rados = _rados_svc;
+  rados = rados_;
   svc.cls = _cls_svc;
+  async_processor = async_processor_;
 
   return 0;
 }
@@ -262,11 +265,12 @@ class ReadHistoryCR : public RGWCoroutine {
   ReadHistoryCR(const DoutPrefixProvider *dpp,
                 const Svc& svc,
                 Cursor *cursor,
-                RGWObjVersionTracker *objv_tracker)
+                RGWObjVersionTracker *objv_tracker,
+                RGWAsyncRadosProcessor* async_processor)
     : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
       cursor(cursor),
      objv_tracker(objv_tracker),
-      async_processor(svc.rados->get_async_processor())
+      async_processor(async_processor)
   {}
 
   int operate(const DoutPrefixProvider *dpp) {
@@ -312,10 +316,11 @@ class WriteHistoryCR : public RGWCoroutine {
   WriteHistoryCR(const DoutPrefixProvider *dpp,
                  Svc& svc,
                  const Cursor& cursor,
-                 RGWObjVersionTracker *objv)
+                 RGWObjVersionTracker *objv,
+                 RGWAsyncRadosProcessor* async_processor)
     : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
       cursor(cursor), objv(objv),
-      async_processor(svc.rados->get_async_processor())
+      async_processor(async_processor)
   {}
 
   int operate(const DoutPrefixProvider *dpp) {
@@ -353,18 +358,22 @@ class TrimHistoryCR : public RGWCoroutine {
   RGWObjVersionTracker *objv; //< to prevent racing updates
   Cursor next; //< target cursor for oldest log period
   Cursor existing; //< existing cursor read from disk
+  RGWAsyncRadosProcessor* async_processor;
 
  public:
-  TrimHistoryCR(const DoutPrefixProvider *dpp, const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv)
+  TrimHistoryCR(const DoutPrefixProvider *dpp, const Svc& svc, Cursor cursor,
+                RGWObjVersionTracker *objv,
+                RGWAsyncRadosProcessor* async_processor)
     : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
-      cursor(cursor), objv(objv), next(cursor) {
+      cursor(cursor), objv(objv), next(cursor),
+      async_processor(async_processor) {
    next.next(); // advance past cursor
  }
 
   int operate(const DoutPrefixProvider *dpp) {
     reenter(this) {
       // read an existing history, and write the new history if it's newer
-      yield call(new ReadHistoryCR(dpp, svc, &existing, objv));
+      yield call(new ReadHistoryCR(dpp, svc, &existing, objv, async_processor));
       if (retcode < 0) {
         return set_cr_error(retcode);
       }
@@ -375,7 +384,7 @@ class TrimHistoryCR : public RGWCoroutine {
         return set_cr_error(-ECANCELED);
       }
       // overwrite with updated history
-      yield call(new WriteHistoryCR(dpp, svc, next, objv));
+      yield call(new WriteHistoryCR(dpp, svc, next, objv, async_processor));
       if (retcode < 0) {
         return set_cr_error(retcode);
       }
@@ -512,13 +521,13 @@ Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixPro
 RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(const DoutPrefixProvider *dpp,
                                                      Cursor *period, RGWObjVersionTracker *objv) const
 {
-  return new mdlog::ReadHistoryCR(dpp, svc, period, objv);
+  return new mdlog::ReadHistoryCR(dpp, svc, period, objv, async_processor);
 }
 
 RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(const DoutPrefixProvider *dpp,
                                               Cursor period, RGWObjVersionTracker *objv) const
 {
-  return new mdlog::TrimHistoryCR(dpp, svc, period, objv);
+  return new mdlog::TrimHistoryCR(dpp, svc, period, objv, async_processor);
 }
 
 RGWMetadataLog* RGWSI_MDLog::get_log(const std::string& period)
diff --git a/src/rgw/services/svc_mdlog.h b/src/rgw/services/svc_mdlog.h
index 8b37ba11e56..f169ee88db9 100644
--- a/src/rgw/services/svc_mdlog.h
+++ b/src/rgw/services/svc_mdlog.h
@@ -29,7 +29,6 @@ class RGWCoroutine;
 class RGWSI_Zone;
 class RGWSI_SysObj;
-class RGWSI_RADOS;
 
 namespace mdlog {
   class ReadHistoryCR;
@@ -58,18 +57,21 @@ public:
   RGWSI_MDLog(CephContext *cct, bool run_sync);
   virtual ~RGWSI_MDLog();
 
+  librados::Rados* rados{nullptr};
+  RGWAsyncRadosProcessor* async_processor{nullptr};
+
   struct Svc {
-    RGWSI_RADOS *rados{nullptr};
     RGWSI_Zone *zone{nullptr};
     RGWSI_SysObj *sysobj{nullptr};
     RGWSI_MDLog *mdlog{nullptr};
     RGWSI_Cls *cls{nullptr};
   } svc;
 
-  int init(RGWSI_RADOS *_rados_svc,
+  int init(librados::Rados* rados_,
            RGWSI_Zone *_zone_svc,
            RGWSI_SysObj *_sysobj_svc,
-           RGWSI_Cls *_cls_svc);
+           RGWSI_Cls *_cls_svc,
+           RGWAsyncRadosProcessor* async_processor_);
 
   int do_start(optional_yield y, const DoutPrefixProvider *dpp) override;
diff --git a/src/rgw/services/svc_rados.cc b/src/rgw/services/svc_rados.cc
index d682b87eb2c..34de9857e19 100644
--- a/src/rgw/services/svc_rados.cc
+++ b/src/rgw/services/svc_rados.cc
@@ -34,27 +34,14 @@ int RGWSI_RADOS::do_start(optional_yield, const DoutPrefixProvider *dpp)
     return ret;
   }
 
-  async_processor.reset(new RGWAsyncRadosProcessor(cct, cct->_conf->rgw_num_async_rados_threads));
-  async_processor->start();
-
   return 0;
 }
 
 void RGWSI_RADOS::shutdown()
 {
-  if (async_processor) {
-    async_processor->stop();
-  }
   rados.shutdown();
 }
 
-void RGWSI_RADOS::stop_processor()
-{
-  if (async_processor) {
-    async_processor->stop();
-  }
-}
-
 librados::Rados* RGWSI_RADOS::get_rados_handle()
 {
   return &rados;
diff --git a/src/rgw/services/svc_rados.h b/src/rgw/services/svc_rados.h
index 4e7cf672c7a..2a3940a34d9 100644
--- a/src/rgw/services/svc_rados.h
+++ b/src/rgw/services/svc_rados.h
@@ -28,7 +28,6 @@ struct RGWAccessListFilterPrefix : public RGWAccessListFilter {
 class RGWSI_RADOS : public RGWServiceInstance
 {
   librados::Rados rados;
-  std::unique_ptr<RGWAsyncRadosProcessor> async_processor;
 
   int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
 
@@ -66,16 +65,11 @@ public:
   void init() {}
   void shutdown() override;
 
-  void stop_processor();
 
   std::string cluster_fsid();
   uint64_t instance_id();
   bool check_secure_mon_conn(const DoutPrefixProvider *dpp) const;
 
-  RGWAsyncRadosProcessor *get_async_processor() {
-    return async_processor.get();
-  }
-
   int clog_warn(const std::string& msg);
 
   class Handle;