Diffstat (limited to 'src/rgw')
57 files changed, 1655 insertions, 535 deletions
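Much of the non-logging churn in the diff below introduces a new rgw::sal::Object::list_parts() entry point (implemented for the RADOS driver, stubbed to return -EOPNOTSUPP in the POSIX driver). What follows is only an illustrative caller sketch built from the signatures visible in this diff; the exact definitions of list_parts_each_t and Object::Part live in rgw_sal.h and are assumed here to accept a lambda taking a const Object::Part&.

// Sketch only: enumerate the parts of a (possibly multipart) SAL object.
// Assumptions (not verified against rgw_sal.h): list_parts_each_t can bind
// a capturing lambda, and Part exposes part_number/part_size/cksum as used
// by RadosObject::list_parts() in the diff below.
#include <vector>
#include "rgw_sal.h"

static int collect_parts(const DoutPrefixProvider* dpp, CephContext* cct,
                         rgw::sal::Object* obj, optional_yield y,
                         std::vector<rgw::sal::Object::Part>& parts)
{
  // list_parts() requires a loaded manifest, so load object state first.
  int r = obj->load_obj_state(dpp, y);
  if (r < 0) {
    return r;
  }
  int marker = 0;
  bool truncated = false;
  do {
    truncated = false;   // the driver only sets this when it truncates
    int next_marker = 0;
    r = obj->list_parts(dpp, cct, 100 /* max_parts per call */, marker,
                        &next_marker, &truncated,
                        [&parts](const rgw::sal::Object::Part& p) {
                          parts.push_back(p);
                          return 0;
                        },
                        y);
    if (r < 0) {
      return r;          // e.g. -EOPNOTSUPP from the POSIX driver
    }
    marker = next_marker;
  } while (truncated);
  return 0;
}

A non-multipart object simply yields no parts (the RADOS implementation returns 0 without invoking the callback), so existing single-part callers are unaffected.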
diff --git a/src/rgw/CMakeLists.txt b/src/rgw/CMakeLists.txt index 3727c525ce7..41e473e23f0 100644 --- a/src/rgw/CMakeLists.txt +++ b/src/rgw/CMakeLists.txt @@ -487,9 +487,9 @@ target_link_libraries(radosgw PRIVATE install(TARGETS radosgw DESTINATION bin) set(radosgw_admin_srcs - rgw_admin.cc - rgw_sync_checkpoint.cc - rgw_orphan.cc) + radosgw-admin/radosgw-admin.cc + radosgw-admin/sync_checkpoint.cc + radosgw-admin/orphan.cc) # this is unsatisfying and hopefully temporary; ARROW should not be # part of radosgw_admin diff --git a/src/rgw/driver/daos/rgw_sal_daos.cc b/src/rgw/driver/daos/rgw_sal_daos.cc index a87d88c4b85..92dd7afe2fb 100644 --- a/src/rgw/driver/daos/rgw_sal_daos.cc +++ b/src/rgw/driver/daos/rgw_sal_daos.cc @@ -858,8 +858,6 @@ bool DaosZone::is_writeable() { return true; } bool DaosZone::get_redirect_endpoint(std::string* endpoint) { return false; } -bool DaosZone::has_zonegroup_api(const std::string& api) const { return false; } - const std::string& DaosZone::get_current_period_id() { return current_period->get_id(); } diff --git a/src/rgw/driver/daos/rgw_sal_daos.h b/src/rgw/driver/daos/rgw_sal_daos.h index e382fdb04ae..5515579a441 100644 --- a/src/rgw/driver/daos/rgw_sal_daos.h +++ b/src/rgw/driver/daos/rgw_sal_daos.h @@ -484,7 +484,6 @@ class DaosZone : public StoreZone { virtual const std::string& get_name() const override; virtual bool is_writeable() override; virtual bool get_redirect_endpoint(std::string* endpoint) override; - virtual bool has_zonegroup_api(const std::string& api) const override; virtual const std::string& get_current_period_id() override; virtual const RGWAccessKey& get_system_key() { return zone_params->system_key; diff --git a/src/rgw/driver/motr/rgw_sal_motr.cc b/src/rgw/driver/motr/rgw_sal_motr.cc index b999673ac18..463ea8c5b11 100644 --- a/src/rgw/driver/motr/rgw_sal_motr.cc +++ b/src/rgw/driver/motr/rgw_sal_motr.cc @@ -1111,11 +1111,6 @@ bool MotrZone::get_redirect_endpoint(std::string* endpoint) return false; } -bool MotrZone::has_zonegroup_api(const std::string& api) const -{ - return (zonegroup.group.api_name == api); -} - const std::string& MotrZone::get_current_period_id() { return current_period->get_id(); diff --git a/src/rgw/driver/motr/rgw_sal_motr.h b/src/rgw/driver/motr/rgw_sal_motr.h index f92074b9d94..0f99ae48e86 100644 --- a/src/rgw/driver/motr/rgw_sal_motr.h +++ b/src/rgw/driver/motr/rgw_sal_motr.h @@ -525,7 +525,6 @@ class MotrZone : public StoreZone { virtual const std::string& get_name() const override; virtual bool is_writeable() override; virtual bool get_redirect_endpoint(std::string* endpoint) override; - virtual bool has_zonegroup_api(const std::string& api) const override; virtual const std::string& get_current_period_id() override; virtual const RGWAccessKey& get_system_key() { return zone_params->system_key; } virtual const std::string& get_realm_name() { return realm->get_name(); } diff --git a/src/rgw/driver/posix/rgw_sal_posix.cc b/src/rgw/driver/posix/rgw_sal_posix.cc index 1345468210f..9d76462baa0 100644 --- a/src/rgw/driver/posix/rgw_sal_posix.cc +++ b/src/rgw/driver/posix/rgw_sal_posix.cc @@ -2893,6 +2893,14 @@ int POSIXObject::copy_object(const ACLOwner& owner, return dobj->set_obj_attrs(dpp, &attrs, nullptr, y, rgw::sal::FLAG_LOG_OP); } +int POSIXObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) +{ + return -EOPNOTSUPP; +} + int POSIXObject::load_obj_state(const 
DoutPrefixProvider* dpp, optional_yield y, bool follow_olh) { int ret = stat(dpp); diff --git a/src/rgw/driver/posix/rgw_sal_posix.h b/src/rgw/driver/posix/rgw_sal_posix.h index 8ec72bbc1bc..bf3478ad6ab 100644 --- a/src/rgw/driver/posix/rgw_sal_posix.h +++ b/src/rgw/driver/posix/rgw_sal_posix.h @@ -653,6 +653,13 @@ public: const DoutPrefixProvider* dpp, optional_yield y) override; virtual RGWAccessControlPolicy& get_acl(void) override { return acls; } virtual int set_acl(const RGWAccessControlPolicy& acl) override { acls = acl; return 0; } + + /** If multipart, enumerate (a range [marker..marker+[min(max_parts, parts_count-1)] of) parts of the object */ + virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) override; + virtual int load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh = true) override; virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y, uint32_t flags) override; diff --git a/src/rgw/driver/rados/rgw_data_sync.cc b/src/rgw/driver/rados/rgw_data_sync.cc index 792671579b7..c0a9059a251 100644 --- a/src/rgw/driver/rados/rgw_data_sync.cc +++ b/src/rgw/driver/rados/rgw_data_sync.cc @@ -2617,6 +2617,7 @@ class RGWUserPermHandler { rgw::IAM::Environment env; std::unique_ptr<rgw::auth::Identity> identity; RGWAccessControlPolicy user_acl; + std::vector<rgw::IAM::Policy> user_policies; }; std::shared_ptr<_info> info; @@ -2644,7 +2645,7 @@ class RGWUserPermHandler { } auto result = rgw::auth::transform_old_authinfo( - sync_env->dpp, null_yield, sync_env->driver, user.get()); + sync_env->dpp, null_yield, sync_env->driver, user.get(), &info->user_policies); if (!result) { return result.error(); } @@ -2679,6 +2680,7 @@ public: std::shared_ptr<_info> info; RGWAccessControlPolicy bucket_acl; std::optional<perm_state> ps; + boost::optional<rgw::IAM::Policy> bucket_policy; public: Bucket() {} @@ -2686,9 +2688,7 @@ public: const RGWBucketInfo& bucket_info, const map<string, bufferlist>& bucket_attrs); - bool verify_bucket_permission(int perm); - bool verify_object_permission(const map<string, bufferlist>& obj_attrs, - int perm); + bool verify_bucket_permission(const rgw_obj_key& obj_key, const uint64_t op); }; static int policy_from_attrs(CephContext *cct, @@ -2728,6 +2728,14 @@ int RGWUserPermHandler::Bucket::init(RGWUserPermHandler *handler, return r; } + // load bucket policy + try { + bucket_policy = get_iam_policy_from_attr(sync_env->cct, bucket_attrs, bucket_info.bucket.tenant); + } catch (const std::exception& e) { + ldpp_dout(sync_env->dpp, 0) << "ERROR: reading IAM Policy: " << e.what() << dendl; + return -EACCES; + } + ps.emplace(sync_env->cct, info->env, info->identity.get(), @@ -2740,36 +2748,40 @@ int RGWUserPermHandler::Bucket::init(RGWUserPermHandler *handler, return 0; } -bool RGWUserPermHandler::Bucket::verify_bucket_permission(int perm) -{ - return verify_bucket_permission_no_policy(sync_env->dpp, - &(*ps), - info->user_acl, - bucket_acl, - perm); -} - -bool RGWUserPermHandler::Bucket::verify_object_permission(const map<string, bufferlist>& obj_attrs, - int perm) +bool RGWUserPermHandler::Bucket::verify_bucket_permission(const rgw_obj_key& obj_key, const uint64_t op) { - RGWAccessControlPolicy obj_acl; - - int r = policy_from_attrs(sync_env->cct, obj_attrs, &obj_acl); - if (r < 0) { - return r; - } - - return verify_bucket_permission_no_policy(sync_env->dpp, - 
&(*ps), - bucket_acl, - obj_acl, - perm); + const rgw_obj obj(ps->bucket_info.bucket, obj_key); + const auto arn = rgw::ARN(obj); + + if (ps->identity->get_account()) { + const bool account_root = (ps->identity->get_identity_type() == TYPE_ROOT); + if (!ps->identity->is_owner_of(bucket_acl.get_owner().id)) { + ldpp_dout(sync_env->dpp, 4) << "cross-account request for bucket owner " + << bucket_acl.get_owner().id << " != " << ps->identity->get_aclowner().id << dendl; + // cross-account requests evaluate the identity-based policies separately + // from the resource-based policies and require Allow from both + return ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, account_root, {}, {}, {}, + info->user_policies, {}, op) + && ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, false, info->user_acl, + bucket_acl, bucket_policy, {}, {}, op); + } else { + // don't consult acls for same-account access. require an Allow from + // either identity- or resource-based policy + return ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, account_root, {}, {}, + bucket_policy, info->user_policies, + {}, op); + } + } + constexpr bool account_root = false; + return ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, account_root, + info->user_acl, bucket_acl, + bucket_policy, info->user_policies, + {}, op); } class RGWFetchObjFilter_Sync : public RGWFetchObjFilter_Default { rgw_bucket_sync_pipe sync_pipe; - std::shared_ptr<RGWUserPermHandler::Bucket> bucket_perms; std::optional<rgw_sync_pipe_dest_params> verify_dest_params; std::optional<ceph::real_time> mtime; @@ -2782,10 +2794,8 @@ class RGWFetchObjFilter_Sync : public RGWFetchObjFilter_Default { public: RGWFetchObjFilter_Sync(rgw_bucket_sync_pipe& _sync_pipe, - std::shared_ptr<RGWUserPermHandler::Bucket>& _bucket_perms, std::optional<rgw_sync_pipe_dest_params>&& _verify_dest_params, std::shared_ptr<bool>& _need_retry) : sync_pipe(_sync_pipe), - bucket_perms(_bucket_perms), verify_dest_params(std::move(_verify_dest_params)), need_retry(_need_retry) { *need_retry = false; @@ -2852,12 +2862,6 @@ int RGWFetchObjFilter_Sync::filter(CephContext *cct, *poverride_owner = acl_translation_owner; } } - if (params.mode == rgw_sync_pipe_params::MODE_USER) { - if (!bucket_perms->verify_object_permission(obj_attrs, RGW_PERM_READ)) { - ldout(cct, 0) << "ERROR: " << __func__ << ": permission check failed: user not allowed to fetch object" << dendl; - return -EPERM; - } - } if (!dest_placement_rule && params.dest.storage_class) { @@ -2900,7 +2904,6 @@ class RGWObjFetchCR : public RGWCoroutine { rgw_sync_pipe_params::Mode param_mode; std::optional<RGWUserPermHandler> user_perms; - std::shared_ptr<RGWUserPermHandler::Bucket> source_bucket_perms; RGWUserPermHandler::Bucket dest_bucket_perms; std::optional<rgw_sync_pipe_dest_params> dest_params; @@ -3016,20 +3019,10 @@ public: return set_cr_error(retcode); } - if (!dest_bucket_perms.verify_bucket_permission(RGW_PERM_WRITE)) { + if (!dest_bucket_perms.verify_bucket_permission(dest_key.value_or(key), rgw::IAM::s3PutObject)) { ldout(cct, 0) << "ERROR: " << __func__ << ": permission check failed: user not allowed to write into bucket (bucket=" << sync_pipe.info.dest_bucket.get_key() << ")" << dendl; return -EPERM; } - - /* init source bucket permission structure */ - source_bucket_perms = make_shared<RGWUserPermHandler::Bucket>(); - r = user_perms->init_bucket(sync_pipe.source_bucket_info, - sync_pipe.source_bucket_attrs, - source_bucket_perms.get()); - if (r < 0) { - ldout(cct, 20) << "ERROR: " << __func__ << ": 
failed to init bucket perms manager for uid=" << *param_user << " bucket=" << sync_pipe.source_bucket_info.bucket.get_key() << dendl; - return set_cr_error(retcode); - } } yield { @@ -3037,12 +3030,11 @@ public: need_retry = make_shared<bool>(); } auto filter = make_shared<RGWFetchObjFilter_Sync>(sync_pipe, - source_bucket_perms, std::move(dest_params), need_retry); call(new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->driver, sc->source_zone, - nullopt, + param_user, sync_pipe.source_bucket_info.bucket, std::nullopt, sync_pipe.dest_bucket_info, key, dest_key, versioned_epoch, @@ -4528,7 +4520,7 @@ public: } tn->set_resource_name(SSTR(bucket_str_noinstance(bs.bucket) << "/" << key)); } - if (retcode == -ERR_PRECONDITION_FAILED) { + if (retcode == -ERR_PRECONDITION_FAILED || retcode == -EPERM) { pretty_print(sc->env, "Skipping object s3://{}/{} in sync from zone {}\n", bs.bucket.name, key, zone_name); set_status("Skipping object sync: precondition failed (object contains newer change or policy doesn't allow sync)"); diff --git a/src/rgw/driver/rados/rgw_datalog.cc b/src/rgw/driver/rados/rgw_datalog.cc index 4c9503071ef..d7e57d7e1c1 100644 --- a/src/rgw/driver/rados/rgw_datalog.cc +++ b/src/rgw/driver/rados/rgw_datalog.cc @@ -576,7 +576,7 @@ int RGWDataChangesLog::renew_entries(const DoutPrefixProvider *dpp) if (ret < 0) { /* we don't really need to have a special handling for failed cases here, * as this is just an optimization. */ - ldpp_dout(dpp, -1) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: be->push() returned " << ret << dendl; return ret; } diff --git a/src/rgw/driver/rados/rgw_period.cc b/src/rgw/driver/rados/rgw_period.cc index f18e8e46bc5..aacb9b6a09a 100644 --- a/src/rgw/driver/rados/rgw_period.cc +++ b/src/rgw/driver/rados/rgw_period.cc @@ -68,20 +68,6 @@ int RGWPeriod::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) return ret; } -int RGWPeriod::add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y) -{ - if (zonegroup.realm_id != realm_id) { - return 0; - } - int ret = period_map.update(zonegroup, cct); - if (ret < 0) { - ldpp_dout(dpp, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl; - return ret; - } - - return store_info(dpp, false, y); -} - int RGWPeriod::update(const DoutPrefixProvider *dpp, optional_yield y) { auto zone_svc = sysobj_svc->get_zone_svc(); diff --git a/src/rgw/driver/rados/rgw_pubsub_push.cc b/src/rgw/driver/rados/rgw_pubsub_push.cc index 07d65fa1028..d22c61e9b08 100644 --- a/src/rgw/driver/rados/rgw_pubsub_push.cc +++ b/src/rgw/driver/rados/rgw_pubsub_push.cc @@ -281,7 +281,7 @@ public: conn_id, _endpoint, get_bool(args, "use-ssl", false), get_bool(args, "verify-ssl", true), args.get_optional("ca-location"), args.get_optional("mechanism"), args.get_optional("user-name"), - args.get_optional("password"))) { + args.get_optional("password"), args.get_optional("kafka-brokers"))) { throw configuration_error("Kafka: failed to create connection to: " + _endpoint); } @@ -434,4 +434,3 @@ void RGWPubSubEndpoint::shutdown_all() { #endif shutdown_http_manager(); } - diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc index 2ba3559c006..a183feabe2a 100644 --- a/src/rgw/driver/rados/rgw_rados.cc +++ b/src/rgw/driver/rados/rgw_rados.cc @@ -5485,7 +5485,7 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& ob } /* if the bucket is not synced we can remove the meta file */ - if 
(!svc.zone->is_syncing_bucket_meta(bucket)) { + if (!svc.zone->is_syncing_bucket_meta()) { RGWObjVersionTracker objv_tracker; r = ctl.bucket->remove_bucket_instance_info(bucket, bucket_info, y, dpp); if (r < 0) { @@ -6962,13 +6962,13 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* octx, RGWBu } return 0; -} +} /* RGWRados::set_attrs() */ -static int get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y, - RGWRados* store, RGWBucketInfo& bucket_info, - RGWObjectCtx* rctx, RGWObjManifest* manifest, - int part_num, int* parts_count, bool prefetch, - RGWObjState** pstate, RGWObjManifest** pmanifest) +int RGWRados::get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y, + RGWRados* store, RGWBucketInfo& bucket_info, + RGWObjectCtx* rctx, RGWObjManifest* manifest, + int part_num, int* parts_count, bool prefetch, + RGWObjState** pstate, RGWObjManifest** pmanifest) { if (!manifest) { return -ERR_INVALID_PART; @@ -7047,6 +7047,9 @@ static int get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y, // update the object size sm->state.size = part_manifest.get_obj_size(); + if (!sm->state.attrset.count(RGW_ATTR_COMPRESSION)) { + sm->state.accounted_size = sm->state.size; + } *pmanifest = &part_manifest; return 0; @@ -8948,7 +8951,7 @@ int RGWRados::get_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, return r; } - auto iter = attrset.find(RGW_ATTR_OLH_VER); + auto iter = attrset.find(RGW_ATTR_OLH_INFO); if (iter == attrset.end()) { /* not an olh */ return -EINVAL; } diff --git a/src/rgw/driver/rados/rgw_rados.h b/src/rgw/driver/rados/rgw_rados.h index b24823b60dc..fe79916392f 100644 --- a/src/rgw/driver/rados/rgw_rados.h +++ b/src/rgw/driver/rados/rgw_rados.h @@ -1071,6 +1071,12 @@ public: }; // class RGWRados::Bucket::List }; // class RGWRados::Bucket + static int get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y, + RGWRados* store, RGWBucketInfo& bucket_info, + RGWObjectCtx* rctx, RGWObjManifest* manifest, + int part_num, int* parts_count, bool prefetch, + RGWObjState** pstate, RGWObjManifest** pmanifest); + int on_last_entry_in_listing(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const std::string& obj_prefix, diff --git a/src/rgw/driver/rados/rgw_sal_rados.cc b/src/rgw/driver/rados/rgw_sal_rados.cc index 88da446c3de..4c05421653b 100644 --- a/src/rgw/driver/rados/rgw_sal_rados.cc +++ b/src/rgw/driver/rados/rgw_sal_rados.cc @@ -429,6 +429,10 @@ int RadosBucket::remove(const DoutPrefixProvider* dpp, ldpp_dout(dpp, -1) << "ERROR: unable to remove notifications from bucket. 
ret=" << ps_ret << dendl; } + if (ret = rgw::bucketlogging::bucket_deletion_cleanup(dpp, store, this, y); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: could not cleanup bucket logging configuration and pending objects, ret = " << ret << dendl; + } + ret = store->ctl()->bucket->unlink_bucket(rados, info.owner, info.bucket, y, dpp, false); if (ret < 0) { @@ -1024,15 +1028,15 @@ int RadosBucket::remove_topics(RGWObjVersionTracker* objv_tracker, objv_tracker, y); } -int RadosBucket::get_logging_object_name(std::string& obj_name, - const std::string& prefix, - optional_yield y, +int RadosBucket::get_logging_object_name(std::string& obj_name, + const std::string& prefix, + optional_yield y, const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker) { rgw_pool data_pool; const auto obj_name_oid = bucketlogging::object_name_oid(this, prefix); if (!store->getRados()->get_obj_data_pool(get_placement_rule(), rgw_obj{get_key(), obj_name_oid}, &data_pool)) { - ldpp_dout(dpp, 1) << "failed to get data pool for bucket '" << get_name() << + ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() << "' when getting logging object name" << dendl; return -EIO; } @@ -1048,23 +1052,23 @@ int RadosBucket::get_logging_object_name(std::string& obj_name, nullptr, nullptr); if (ret < 0) { - ldpp_dout(dpp, 1) << "failed to get logging object name from '" << obj_name_oid << "'. ret = " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to get logging object name from '" << obj_name_oid << "'. ret = " << ret << dendl; return ret; } obj_name = bl.to_str(); return 0; } -int RadosBucket::set_logging_object_name(const std::string& obj_name, - const std::string& prefix, - optional_yield y, - const DoutPrefixProvider *dpp, +int RadosBucket::set_logging_object_name(const std::string& obj_name, + const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, bool new_obj, RGWObjVersionTracker* objv_tracker) { rgw_pool data_pool; const auto obj_name_oid = bucketlogging::object_name_oid(this, prefix); if (!store->getRados()->get_obj_data_pool(get_placement_rule(), rgw_obj{get_key(), obj_name_oid}, &data_pool)) { - ldpp_dout(dpp, 1) << "failed to get data pool for bucket '" << get_name() << + ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() << "' when setting logging object name" << dendl; return -EIO; } @@ -1080,28 +1084,65 @@ int RadosBucket::set_logging_object_name(const std::string& obj_name, y, nullptr); if (ret == -EEXIST) { - ldpp_dout(dpp, 20) << "race detected in initializing '" << obj_name_oid << "' with logging object name:'" << obj_name << "'. ret = " << ret << dendl; + ldpp_dout(dpp, 20) << "INFO: race detected in initializing '" << obj_name_oid << "' with logging object name:'" << obj_name << "'. ret = " << ret << dendl; } else if (ret == -ECANCELED) { - ldpp_dout(dpp, 20) << "race detected in updating logging object name '" << obj_name << "' at '" << obj_name_oid << "'. ret = " << ret << dendl; + ldpp_dout(dpp, 20) << "INFO: race detected in updating logging object name '" << obj_name << "' at '" << obj_name_oid << "'. ret = " << ret << dendl; } else if (ret < 0) { - ldpp_dout(dpp, 1) << "failed to set logging object name '" << obj_name << "' at '" << obj_name_oid << "'. ret = " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to set logging object name '" << obj_name << "' at '" << obj_name_oid << "'. 
ret = " << ret << dendl; } return ret; } +int RadosBucket::remove_logging_object_name(const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, + RGWObjVersionTracker* objv_tracker) { + rgw_pool data_pool; + const auto obj_name_oid = bucketlogging::object_name_oid(this, prefix); + if (!store->getRados()->get_obj_data_pool(get_placement_rule(), rgw_obj{get_key(), obj_name_oid}, &data_pool)) { + ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() << + "' when setting logging object name" << dendl; + return -EIO; + } + return rgw_delete_system_obj(dpp, store->svc()->sysobj, + data_pool, + obj_name_oid, + objv_tracker, + y); +} + std::string to_temp_object_name(const rgw::sal::Bucket* bucket, const std::string& obj_name) { return fmt::format("{}__shadow_{}0", bucket->get_bucket_id(), obj_name); } +int RadosBucket::remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) { + rgw_pool data_pool; + const rgw_obj head_obj{get_key(), obj_name}; + const auto placement_rule = get_placement_rule(); + + if (!store->getRados()->get_obj_data_pool(placement_rule, head_obj, &data_pool)) { + ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() << + "' when deleting logging object" << dendl; + return -EIO; + } + + const auto temp_obj_name = to_temp_object_name(this, obj_name); + return rgw_delete_system_obj(dpp, store->svc()->sysobj, + data_pool, + temp_obj_name, + nullptr, + y); +} + int RadosBucket::commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) { rgw_pool data_pool; const rgw_obj head_obj{get_key(), obj_name}; const auto placement_rule = get_placement_rule(); if (!store->getRados()->get_obj_data_pool(placement_rule, head_obj, &data_pool)) { - ldpp_dout(dpp, 1) << "failed to get data pool for bucket '" << get_name() << + ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() << "' when comitting logging object" << dendl; return -EIO; } @@ -1110,7 +1151,6 @@ int RadosBucket::commit_logging_object(const std::string& obj_name, optional_yie std::map<string, bufferlist> obj_attrs; ceph::real_time mtime; bufferlist bl_data; - // TODO: this is needed only for etag calculation if (const auto ret = rgw_get_system_obj(store->svc()->sysobj, data_pool, temp_obj_name, @@ -1120,10 +1160,13 @@ int RadosBucket::commit_logging_object(const std::string& obj_name, optional_yie y, dpp, &obj_attrs, - nullptr); ret < 0) { - ldpp_dout(dpp, 1) << "faild to read logging data when comitting to object '" << temp_obj_name + nullptr); ret < 0 && ret != -ENOENT) { + ldpp_dout(dpp, 1) << "ERROR: failed to read logging data when comitting object '" << temp_obj_name << ". error: " << ret << dendl; return ret; + } else if (ret == -ENOENT) { + ldpp_dout(dpp, 1) << "WARNING: temporary logging object '" << temp_obj_name << "' does not exists" << dendl; + return 0; } uint64_t size = bl_data.length(); @@ -1137,13 +1180,13 @@ int RadosBucket::commit_logging_object(const std::string& obj_name, optional_yie nullptr, // no special placment for tail get_key(), head_obj); ret < 0) { - ldpp_dout(dpp, 1) << "failed to create manifest when comitting logging object. error: " << + ldpp_dout(dpp, 1) << "ERROR: failed to create manifest when comitting logging object. error: " << ret << dendl; return ret; } if (const auto ret = manifest_gen.create_next(size); ret < 0) { - ldpp_dout(dpp, 1) << "failed to add object to manifest when comitting logging object. 
error: " << + ldpp_dout(dpp, 1) << "ERROR: failed to add object to manifest when comitting logging object. error: " << ret << dendl; return ret; } @@ -1151,7 +1194,7 @@ int RadosBucket::commit_logging_object(const std::string& obj_name, optional_yie if (const auto expected_temp_obj = manifest_gen.get_cur_obj(store->getRados()); temp_obj_name != expected_temp_obj.oid) { // TODO: cleanup temporary object, commit would never succeed - ldpp_dout(dpp, 1) << "temporary logging object name mismatch: '" << + ldpp_dout(dpp, 1) << "ERROR: temporary logging object name mismatch: '" << temp_obj_name << "' != '" << expected_temp_obj.oid << "'" << dendl; return -EINVAL; } @@ -1182,11 +1225,11 @@ int RadosBucket::commit_logging_object(const std::string& obj_name, optional_yie const req_context rctx{dpp, y, nullptr}; jspan_context trace{false, false}; if (const auto ret = head_obj_wop.write_meta(0, size, obj_attrs, rctx, trace); ret < 0) { - ldpp_dout(dpp, 1) << "failed to commit logging object '" << temp_obj_name << - "' to bucket id '" << get_bucket_id() <<"'. error: " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to commit logging object '" << temp_obj_name << + "' to bucket id '" << get_info().bucket <<"'. error: " << ret << dendl; return ret; } - ldpp_dout(dpp, 20) << "committed logging object '" << temp_obj_name << + ldpp_dout(dpp, 20) << "INFO: committed logging object '" << temp_obj_name << "' with size of " << size << " bytes, to bucket '" << get_key() << "' as '" << obj_name << "'" << dendl; return 0; @@ -1204,30 +1247,30 @@ void bucket_logging_completion(rados_completion_t completion, void* args) { auto* aio_comp = reinterpret_cast<librados::AioCompletionImpl*>(completion); std::unique_ptr<BucketLoggingCompleteArg> logging_args(reinterpret_cast<BucketLoggingCompleteArg*>(args)); if (aio_comp->get_return_value() < 0) { - ldout(logging_args->cct, 1) << "failed to complete append to logging object '" << logging_args->obj_name << + ldout(logging_args->cct, 1) << "ERROR: failed to complete append to logging object '" << logging_args->obj_name << "'. 
ret = " << aio_comp->get_return_value() << dendl; } else { - ldout(logging_args->cct, 20) << "wrote " << logging_args->size << " bytes to logging object '" << + ldout(logging_args->cct, 20) << "INFO: wrote " << logging_args->size << " bytes to logging object '" << logging_args->obj_name << "'" << dendl; } } -int RadosBucket::write_logging_object(const std::string& obj_name, - const std::string& record, - optional_yield y, +int RadosBucket::write_logging_object(const std::string& obj_name, + const std::string& record, + optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) { const auto temp_obj_name = to_temp_object_name(this, obj_name); rgw_pool data_pool; rgw_obj obj{get_key(), obj_name}; if (!store->getRados()->get_obj_data_pool(get_placement_rule(), obj, &data_pool)) { - ldpp_dout(dpp, 1) << "failed to get data pool for bucket '" << get_name() << + ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() << "' when writing logging object" << dendl; return -EIO; } librados::IoCtx io_ctx; if (const auto ret = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), data_pool, io_ctx); ret < 0) { - ldpp_dout(dpp, 1) << "failed to get IO context for logging object from data pool:" << data_pool.to_str() << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to get IO context for logging object from data pool:" << data_pool.to_str() << dendl; return -EIO; } bufferlist bl; @@ -1242,7 +1285,7 @@ int RadosBucket::write_logging_object(const std::string& obj_name, auto arg = std::make_unique<BucketLoggingCompleteArg>(temp_obj_name, record.length(), store->ctx()); completion->set_complete_callback(arg.get(), bucket_logging_completion); if (const auto ret = io_ctx.aio_operate(temp_obj_name, completion.get(), &op); ret < 0) { - ldpp_dout(dpp, 1) << "failed to append to logging object '" << temp_obj_name << + ldpp_dout(dpp, 1) << "ERROR: failed to append to logging object '" << temp_obj_name << "'. ret = " << ret << dendl; return ret; } @@ -1251,11 +1294,11 @@ int RadosBucket::write_logging_object(const std::string& obj_name, return 0; } if (const auto ret = rgw_rados_operate(dpp, io_ctx, temp_obj_name, &op, y); ret < 0) { - ldpp_dout(dpp, 1) << "failed to append to logging object '" << temp_obj_name << + ldpp_dout(dpp, 1) << "ERROR: failed to append to logging object '" << temp_obj_name << "'. ret = " << ret << dendl; return ret; } - ldpp_dout(dpp, 20) << "wrote " << record.length() << " bytes to logging object '" << + ldpp_dout(dpp, 20) << "INFO: wrote " << record.length() << " bytes to logging object '" << temp_obj_name << "'" << dendl; return 0; } @@ -2471,7 +2514,108 @@ bool RadosObject::is_sync_completed(const DoutPrefixProvider* dpp, const rgw_bi_log_entry& earliest_marker = entries.front(); return earliest_marker.timestamp > obj_mtime; -} +} /* is_sync_completed */ + +int RadosObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) +{ + int ret{0}; + + /* require an object with a manifest, so call to get_obj_state() must precede this */ + if (! manifest) { + return -EINVAL; + } + + RGWObjManifest::obj_iterator end = manifest->obj_end(dpp); + if (end.get_cur_part_id() == 0) { // not multipart + ldpp_dout(dpp, 20) << __func__ << " object does not have a multipart manifest" + << dendl; + return 0; + } + + auto end_part_id = end.get_cur_part_id(); + auto parts_count = (end_part_id == 1) ? 
1 : end_part_id - 1; + if (marker > (parts_count - 1)) { + return 0; + } + + RGWObjManifest::obj_iterator part_iter = manifest->obj_begin(dpp); + + if (marker != 0) { + ldpp_dout_fmt(dpp, 20, + "{} seeking to part #{} in the object manifest", + __func__, marker); + + part_iter = manifest->obj_find_part(dpp, marker + 1); + + if (part_iter == end) { + ldpp_dout_fmt(dpp, 5, + "{} failed to find part #{} in the object manifest", + __func__, marker + 1); + return 0; + } + } + + RGWObjectCtx& obj_ctx = get_ctx(); + RGWBucketInfo& bucket_info = get_bucket()->get_info(); + + Object::Part obj_part{}; + for (; part_iter != manifest->obj_end(dpp); ++part_iter) { + + /* we're only interested in the first object in each logical part */ + auto cur_part_id = part_iter.get_cur_part_id(); + if (cur_part_id == obj_part.part_number) { + continue; + } + + if (max_parts < 1) { + *truncated = true; + break; + } + + /* get_part_obj_state alters the passed manifest** to point to a part + * manifest, which we don't want to leak out here */ + RGWObjManifest* obj_m = manifest; + RGWObjState* astate; + bool part_prefetch = false; + ret = RGWRados::get_part_obj_state(dpp, y, store->getRados(), bucket_info, &obj_ctx, + obj_m, cur_part_id, &parts_count, + part_prefetch, &astate, &obj_m); + + if (ret < 0) { + ldpp_dout_fmt(dpp, 4, + "{} get_part_obj_state() failed ret={}", + __func__, ret); + break; + } + + obj_part.part_number = part_iter.get_cur_part_id(); + obj_part.part_size = astate->accounted_size; + + if (auto iter = astate->attrset.find(RGW_ATTR_CKSUM); + iter != astate->attrset.end()) { + try { + rgw::cksum::Cksum part_cksum; + auto ck_iter = iter->second.cbegin(); + part_cksum.decode(ck_iter); + obj_part.cksum = std::move(part_cksum); + } catch (buffer::error& err) { + ldpp_dout_fmt(dpp, 4, + "WARN: {} could not decode stored cksum, " + "caught buffer::error", + __func__); + } + } + + each_func(obj_part); + *next_marker = ++marker; + --max_parts; + } /* each part */ + + return ret; +} /* RadosObject::list_parts */ int RadosObject::load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh) { @@ -4500,11 +4644,6 @@ bool RadosZone::get_redirect_endpoint(std::string* endpoint) return true; } -bool RadosZone::has_zonegroup_api(const std::string& api) const -{ - return store->svc()->zone->has_zonegroup_api(api); -} - const std::string& RadosZone::get_current_period_id() { return store->svc()->zone->get_current_period_id(); diff --git a/src/rgw/driver/rados/rgw_sal_rados.h b/src/rgw/driver/rados/rgw_sal_rados.h index 23d81a934b0..e65c3c0050e 100644 --- a/src/rgw/driver/rados/rgw_sal_rados.h +++ b/src/rgw/driver/rados/rgw_sal_rados.h @@ -107,7 +107,6 @@ class RadosZone : public StoreZone { virtual const std::string& get_name() const override; virtual bool is_writeable() override; virtual bool get_redirect_endpoint(std::string* endpoint) override; - virtual bool has_zonegroup_api(const std::string& api) const override; virtual const std::string& get_current_period_id() override; virtual const RGWAccessKey& get_system_key() override; virtual const std::string& get_realm_name() override; @@ -593,12 +592,18 @@ class RadosObject : public StoreObject { StoreObject::set_compressed(); } - virtual bool is_sync_completed(const DoutPrefixProvider* dpp, const ceph::real_time& obj_mtime) override; /* For rgw_admin.cc */ RGWObjState& get_state() { return state; } virtual int load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh = true) override; + + /** If multipart, enumerate (a range 
[marker..marker+[min(max_parts, parts_count-1)] of) parts of the object */ + virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) override; + virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y, uint32_t flags) override; virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj = NULL) override; virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp) override; @@ -775,18 +780,23 @@ class RadosBucket : public StoreBucket { optional_yield y, const DoutPrefixProvider *dpp) override; int remove_topics(RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override; - int get_logging_object_name(std::string& obj_name, - const std::string& prefix, - optional_yield y, - const DoutPrefixProvider *dpp, + int get_logging_object_name(std::string& obj_name, + const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, + RGWObjVersionTracker* objv_tracker) override; + int set_logging_object_name(const std::string& obj_name, + const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, + bool new_obj, RGWObjVersionTracker* objv_tracker) override; - int set_logging_object_name(const std::string& obj_name, - const std::string& prefix, - optional_yield y, - const DoutPrefixProvider *dpp, - bool new_obj, + int remove_logging_object_name(const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker) override; int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override; + int remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override; int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) override; private: diff --git a/src/rgw/driver/rados/rgw_tools.cc b/src/rgw/driver/rados/rgw_tools.cc index 79d2be0bcfa..bf7a309e864 100644 --- a/src/rgw/driver/rados/rgw_tools.cc +++ b/src/rgw/driver/rados/rgw_tools.cc @@ -339,21 +339,35 @@ int rgw_list_pool(const DoutPrefixProvider *dpp, ldpp_dout(dpp, 10) << "failed to parse cursor: " << marker << dendl; return -EINVAL; } - - auto iter = ioctx.nobjects_begin(oc); + librados::NObjectIterator iter; + try { + iter = ioctx.nobjects_begin(oc); + } catch (const std::system_error& e) { + ldpp_dout(dpp, 1) << "rgw_list_pool: Failed to begin iteration of pool " + << ioctx.get_pool_name() << " with error " + << e.what() << dendl; + return ceph::from_error_code(e.code()); + } /// Pool_iterate if (iter == ioctx.nobjects_end()) return -ENOENT; - for (; oids->size() < max && iter != ioctx.nobjects_end(); ++iter) { - string oid = iter->get_oid(); - ldpp_dout(dpp, 20) << "RGWRados::pool_iterate: got " << oid << dendl; + try { + for (; oids->size() < max && iter != ioctx.nobjects_end(); ++iter) { + string oid = iter->get_oid(); + ldpp_dout(dpp, 20) << "RGWRados::pool_iterate: got " << oid << dendl; - // fill it in with initial values; we may correct later - if (filter && !filter(oid, oid)) - continue; + // fill it in with initial values; we may correct later + if (filter && !filter(oid, oid)) + continue; - oids->push_back(oid); + oids->push_back(oid); + } + } catch (const 
std::system_error& e) { + ldpp_dout(dpp, 1) << "rgw_list_pool: Failed iterating pool " + << ioctx.get_pool_name() << " with error " + << e.what() << dendl; + return ceph::from_error_code(e.code()); } marker = iter.get_cursor().to_str(); diff --git a/src/rgw/driver/rados/rgw_user.cc b/src/rgw/driver/rados/rgw_user.cc index 894d8e40950..cce593c6bd5 100644 --- a/src/rgw/driver/rados/rgw_user.cc +++ b/src/rgw/driver/rados/rgw_user.cc @@ -189,6 +189,11 @@ static void dump_user_info(Formatter *f, RGWUserInfo &info, } encode_json("type", user_source_type, f); encode_json("mfa_ids", info.mfa_ids, f); + encode_json("account_id", info.account_id, f); + encode_json("path", info.path, f); + encode_json("create_date", info.create_date, f); + encode_json("tags", info.tags, f); + encode_json("group_ids", info.group_ids, f); if (stats) { encode_json("stats", *stats, f); } diff --git a/src/rgw/driver/rados/rgw_user.h b/src/rgw/driver/rados/rgw_user.h index ab157f38e39..4ae7d13eff7 100644 --- a/src/rgw/driver/rados/rgw_user.h +++ b/src/rgw/driver/rados/rgw_user.h @@ -19,11 +19,11 @@ #define RGW_USER_ANON_ID "anonymous" -#define SECRET_KEY_LEN 40 -#define PUBLIC_ID_LEN 20 -#define RAND_SUBUSER_LEN 5 +constexpr auto SECRET_KEY_LEN=40; +constexpr auto PUBLIC_ID_LEN=20; +constexpr auto RAND_SUBUSER_LEN=5; -#define XMLNS_AWS_S3 "http://s3.amazonaws.com/doc/2006-03-01/" +constexpr auto XMLNS_AWS_S3 = "http://s3.amazonaws.com/doc/2006-03-01/"; class RGWUserCtl; class RGWBucketCtl; diff --git a/src/rgw/driver/rados/rgw_zone.h b/src/rgw/driver/rados/rgw_zone.h index c542abc76d6..5fb2b4b8096 100644 --- a/src/rgw/driver/rados/rgw_zone.h +++ b/src/rgw/driver/rados/rgw_zone.h @@ -769,7 +769,6 @@ public: int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true); int delete_obj(const DoutPrefixProvider *dpp, optional_yield y); int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); - int add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y); void fork(); int update(const DoutPrefixProvider *dpp, optional_yield y); diff --git a/src/rgw/rgw_orphan.cc b/src/rgw/radosgw-admin/orphan.cc index b7dc562c721..9fca3b99a7c 100644 --- a/src/rgw/rgw_orphan.cc +++ b/src/rgw/radosgw-admin/orphan.cc @@ -1,6 +1,12 @@ + +/* + * Copyright (C) 2024 IBM +*/ + // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp +#include "radosgw-admin/orphan.h" #include <string> @@ -10,7 +16,6 @@ #include "rgw_op.h" #include "rgw_multi.h" -#include "rgw_orphan.h" #include "rgw_zone.h" #include "rgw_bucket.h" #include "rgw_sal_rados.h" diff --git a/src/rgw/rgw_orphan.h b/src/rgw/radosgw-admin/orphan.h index db811d31d9a..db811d31d9a 100644 --- a/src/rgw/rgw_orphan.h +++ b/src/rgw/radosgw-admin/orphan.h diff --git a/src/rgw/rgw_admin.cc b/src/rgw/radosgw-admin/radosgw-admin.cc index 95a7af6a0fa..13936c87952 100644 --- a/src/rgw/rgw_admin.cc +++ b/src/rgw/radosgw-admin/radosgw-admin.cc @@ -1,12 +1,15 @@ +/* + * Copyright (C) 2025 IBM +*/ + // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp -#include <errno.h> -#include <iostream> -#include <sstream> +#include <cerrno> #include <string> - -#include <boost/optional.hpp> +#include <sstream> +#include <optional> +#include <iostream> extern "C" { #include <liboath/oath.h> @@ -38,6 +41,9 @@ extern "C" { #include "include/utime.h" #include "include/str_list.h" +#include "radosgw-admin/orphan.h" +#include 
"radosgw-admin/sync_checkpoint.h" + #include "rgw_user.h" #include "rgw_otp.h" #include "rgw_rados.h" @@ -48,7 +54,6 @@ extern "C" { #include "rgw_log.h" #include "rgw_formats.h" #include "rgw_usage.h" -#include "rgw_orphan.h" #include "rgw_sync.h" #include "rgw_trim_bilog.h" #include "rgw_trim_datalog.h" @@ -62,7 +67,6 @@ extern "C" { #include "rgw_zone.h" #include "rgw_pubsub.h" #include "rgw_bucket_sync.h" -#include "rgw_sync_checkpoint.h" #include "rgw_lua.h" #include "rgw_sal.h" #include "rgw_sal_config.h" @@ -82,11 +86,6 @@ extern "C" { #define dout_context g_ceph_context -#define SECRET_KEY_LEN 40 -#define PUBLIC_ID_LEN 20 - -using namespace std; - static rgw::sal::Driver* driver = NULL; static constexpr auto dout_subsys = ceph_subsys_rgw; @@ -117,19 +116,13 @@ static const DoutPrefixProvider* dpp() { } \ } while (0) -static inline int posix_errortrans(int r) +using namespace std; + +inline int posix_errortrans(int r) { - switch(r) { - case ERR_NO_SUCH_BUCKET: - r = ENOENT; - break; - default: - break; - } - return r; + return ERR_NO_SUCH_BUCKET == r ? ENOENT : r; } - static const std::string LUA_CONTEXT_LIST("prerequest, postrequest, background, getdata, putdata"); void usage() @@ -178,7 +171,8 @@ void usage() cout << " bucket sync disable disable bucket sync\n"; cout << " bucket sync enable enable bucket sync\n"; cout << " bucket radoslist list rados objects backing bucket's objects\n"; - cout << " bucket logging flush flush pending log records object of source bucket to the log bucket to bucket\n"; + cout << " bucket logging flush flush pending log records object of source bucket to the log bucket\n"; + cout << " bucket logging info get info on bucket logging configuration on source bucket or list of sources in log bucket\n"; cout << " bi get retrieve bucket index object entries\n"; cout << " bi put store bucket index object entries\n"; cout << " bi list list raw bucket index entries\n"; @@ -361,6 +355,7 @@ void usage() cout << " --secret/--secret-key=<key> specify secret key\n"; cout << " --gen-access-key generate random access key (for S3)\n"; cout << " --gen-secret generate random secret key\n"; + cout << " --generate-key create user with or without credentials\n"; cout << " --key-type=<type> key type, options are: swift, s3\n"; cout << " --key-active=<bool> activate or deactivate a key\n"; cout << " --temp-url-key[-2]=<key> temp url key\n"; @@ -707,6 +702,7 @@ enum class OPT { BUCKET_OBJECT_SHARD, BUCKET_RESYNC_ENCRYPTED_MULTIPART, BUCKET_LOGGING_FLUSH, + BUCKET_LOGGING_INFO, POLICY, LOG_LIST, LOG_SHOW, @@ -946,6 +942,7 @@ static SimpleCmd::Commands all_cmds = { { "bucket object shard", OPT::BUCKET_OBJECT_SHARD }, { "bucket resync encrypted multipart", OPT::BUCKET_RESYNC_ENCRYPTED_MULTIPART }, { "bucket logging flush", OPT::BUCKET_LOGGING_FLUSH }, + { "bucket logging info", OPT::BUCKET_LOGGING_INFO }, { "policy", OPT::POLICY }, { "log list", OPT::LOG_LIST }, { "log show", OPT::LOG_SHOW }, @@ -1271,7 +1268,7 @@ static int read_input(const string& infile, bufferlist& bl) } } -#define READ_CHUNK 8196 + constexpr auto READ_CHUNK=8196; int r; int err; @@ -2546,8 +2543,8 @@ static void sync_status(Formatter *formatter) struct indented { int w; // indent width - std::string_view header; - indented(int w, std::string_view header = "") : w(w), header(header) {} + std::string header; + indented(int w, std::string header = "") : w(w), header(header) {} }; std::ostream& operator<<(std::ostream& out, const indented& h) { return out << std::setw(h.w) << h.header << std::setw(1) << ' '; @@ 
-2555,10 +2552,10 @@ std::ostream& operator<<(std::ostream& out, const indented& h) { struct bucket_source_sync_info { const RGWZone& _source; - std::string_view error; + std::string error; std::map<int,std::string> shards_behind; int total_shards; - std::string_view status; + std::string status; rgw_bucket bucket_source; bucket_source_sync_info(const RGWZone& source): _source(source) {} @@ -3078,14 +3075,12 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf } if (pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) { bucket_source_sync_info source_sync_info(z->second); - auto ret = bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), z->second, + bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), z->second, c->second, info, pipe, source_sync_info); - if (ret == 0) { - bucket_sync_info.source_status_info.emplace_back(std::move(source_sync_info)); - } + bucket_sync_info.source_status_info.emplace_back(std::move(source_sync_info)); } } } @@ -3556,6 +3551,13 @@ int main(int argc, const char **argv) OPT opt_cmd = OPT::NO_CMD; int gen_access_key = 0; int gen_secret_key = 0; + enum generate_key_enum { + OPTION_SET_FALSE = 0, + OPTION_SET_TRUE = 1, + OPTION_NOT_SET = 2, + }; + + generate_key_enum generate_key = OPTION_NOT_SET; bool set_perm = false; bool set_temp_url_key = false; map<int, string> temp_url_keys; @@ -3837,6 +3839,17 @@ int main(int argc, const char **argv) cerr << "bad key type: " << key_type_str << std::endl; exit(1); } + } else if (ceph_argparse_witharg(args, i, &val, "--generate-key", (char*)NULL)) { + key_type_str = val; + if (key_type_str.compare("true") == 0) { + generate_key = OPTION_SET_TRUE; + } else if(key_type_str.compare("false") == 0) { + generate_key = OPTION_SET_FALSE; + } else { + cerr << "wrong value for --generate-key: " << key_type_str << " please specify either true or false" << std::endl; + exit(1); + } + // do nothing } else if (ceph_argparse_binary_flag(args, i, &key_active, NULL, "--key-active", (char*)NULL)) { key_active_specified = true; } else if (ceph_argparse_witharg(args, i, &val, "--job-id", (char*)NULL)) { @@ -4600,14 +4613,21 @@ int main(int argc, const char **argv) } /* check key parameter conflict */ - if ((!access_key.empty()) && gen_access_key) { - cerr << "ERROR: key parameter conflict, --access-key & --gen-access-key" << std::endl; + if ((!access_key.empty()) && (gen_access_key || generate_key == OPTION_SET_TRUE)) { + cerr << "ERROR: key parameter conflict, --access-key & --gen-access-key/generate-key" << std::endl; return EINVAL; } - if ((!secret_key.empty()) && gen_secret_key) { - cerr << "ERROR: key parameter conflict, --secret & --gen-secret" << std::endl; + if ((!secret_key.empty()) && (gen_secret_key || generate_key == OPTION_SET_TRUE)) { + cerr << "ERROR: key parameter conflict, --secret & --gen-secret/generate-key" << std::endl; return EINVAL; } + if (generate_key == OPTION_SET_FALSE) { + if ((!access_key.empty()) || gen_access_key || (!secret_key.empty()) || gen_secret_key) { + cerr << "ERROR: key parameter conflict, if --generate-key is not set so no other key parameters can be set" << std::endl; + return EINVAL; + } + } + } // default to pretty json @@ -6772,7 +6792,7 @@ int main(int argc, const char **argv) } break; case OPT::USER_CREATE: - if (!user_op.has_existing_user()) { + if (!user_op.has_existing_user() 
&& (generate_key != OPTION_SET_FALSE)) { user_op.set_generate_key(); // generate a new key by default } ret = ruser.add(dpp(), user_op, null_yield, &err_msg); @@ -7731,6 +7751,47 @@ int main(int argc, const char **argv) return 0; } + if (opt_cmd == OPT::BUCKET_LOGGING_INFO) { + if (bucket_name.empty()) { + cerr << "ERROR: bucket not specified" << std::endl; + return EINVAL; + } + int ret = init_bucket(tenant, bucket_name, bucket_id, &bucket); + if (ret < 0) { + return -ret; + } + const auto& bucket_attrs = bucket->get_attrs(); + auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING); + if (iter != bucket_attrs.end()) { + rgw::bucketlogging::configuration configuration; + try { + configuration.enabled = true; + decode(configuration, iter->second); + } catch (buffer::error& err) { + cerr << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING + << "'. error: " << err.what() << std::endl; + return EINVAL; + } + encode_json("logging", configuration, formatter.get()); + formatter->flush(cout); + } + iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING_SOURCES); + if (iter != bucket_attrs.end()) { + rgw::bucketlogging::source_buckets sources; + try { + decode(sources, iter->second); + } catch (buffer::error& err) { + cerr << "ERROR: failed to decode logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES + << "'. error: " << err.what() << std::endl; + return EINVAL; + } + encode_json("logging_sources", sources, formatter.get()); + formatter->flush(cout); + } + + return 0; + } + if (opt_cmd == OPT::LOG_LIST) { // filter by date? if (date.size() && date.size() != 10) { diff --git a/src/rgw/rgw_sync_checkpoint.cc b/src/rgw/radosgw-admin/sync_checkpoint.cc index 1172e79a48f..0303ed6c747 100644 --- a/src/rgw/rgw_sync_checkpoint.cc +++ b/src/rgw/radosgw-admin/sync_checkpoint.cc @@ -5,6 +5,7 @@ * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat, Inc. + * Copyright (C) 2024 IBM * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -13,9 +14,12 @@ * */ +#include "radosgw-admin/sync_checkpoint.h" + #include <fmt/format.h> + #include "common/errno.h" -#include "rgw_sync_checkpoint.h" + #include "rgw_sal_rados.h" #include "rgw_bucket_sync.h" #include "rgw_data_sync.h" diff --git a/src/rgw/rgw_sync_checkpoint.h b/src/rgw/radosgw-admin/sync_checkpoint.h index 28df68d8860..28df68d8860 100644 --- a/src/rgw/rgw_sync_checkpoint.h +++ b/src/rgw/radosgw-admin/sync_checkpoint.h diff --git a/src/rgw/rgw_auth.cc b/src/rgw/rgw_auth.cc index ec2a2079622..a0b494eb9c5 100644 --- a/src/rgw/rgw_auth.cc +++ b/src/rgw/rgw_auth.cc @@ -188,7 +188,8 @@ int load_account_and_policies(const DoutPrefixProvider* dpp, static auto transform_old_authinfo(const RGWUserInfo& user, std::optional<RGWAccountInfo> account, - std::vector<IAM::Policy> policies) + std::vector<IAM::Policy> policies, + sal::Driver* driver) -> std::unique_ptr<rgw::auth::Identity> { /* This class is not intended for public use. Should be removed altogether @@ -198,6 +199,7 @@ static auto transform_old_authinfo(const RGWUserInfo& user, /* For this particular case it's OK to use rgw_user structure to convey * the identity info as this was the policy for doing that before the * new auth. 
*/ + sal::Driver* driver; const rgw_user id; const std::string display_name; const std::string path; @@ -208,8 +210,10 @@ static auto transform_old_authinfo(const RGWUserInfo& user, public: DummyIdentityApplier(const RGWUserInfo& user, std::optional<RGWAccountInfo> account, - std::vector<IAM::Policy> policies) - : id(user.user_id), + std::vector<IAM::Policy> policies, + sal::Driver* driver) + : driver(driver), + id(user.user_id), display_name(user.display_name), path(user.path), is_admin(user.admin), @@ -294,9 +298,9 @@ static auto transform_old_authinfo(const RGWUserInfo& user, << ", is_admin=" << is_admin << ")"; } - void load_acct_info(const DoutPrefixProvider* dpp, - RGWUserInfo& user_info) const override { + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override { // noop, this user info was passed in on construction + return driver->get_user(id); } void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const { @@ -307,13 +311,14 @@ static auto transform_old_authinfo(const RGWUserInfo& user, }; return std::make_unique<DummyIdentityApplier>( - user, std::move(account), std::move(policies)); + user, std::move(account), std::move(policies), driver); } auto transform_old_authinfo(const DoutPrefixProvider* dpp, optional_yield y, sal::Driver* driver, - sal::User* user) + sal::User* user, + std::vector<IAM::Policy>* policies_) -> tl::expected<std::unique_ptr<Identity>, int> { const RGWUserInfo& info = user->get_info(); @@ -328,7 +333,10 @@ auto transform_old_authinfo(const DoutPrefixProvider* dpp, return tl::unexpected(r); } - return transform_old_authinfo(info, std::move(account), std::move(policies)); + if (policies_) { // return policies to caller if requested + *policies_ = policies; + } + return transform_old_authinfo(info, std::move(account), std::move(policies), driver); } } /* namespace auth */ @@ -523,7 +531,7 @@ rgw::auth::Strategy::apply(const DoutPrefixProvider *dpp, const rgw::auth::Strat /* Account used by a given RGWOp is decoupled from identity employed * in the authorization phase (RGWOp::verify_permissions). */ - applier->load_acct_info(dpp, s->user->get_info()); + s->user = applier->load_acct_info(dpp); s->perm_mask = applier->get_perm_mask(); /* This is the single place where we pass req_state as a pointer @@ -631,36 +639,36 @@ void rgw::auth::WebIdentityApplier::create_account(const DoutPrefixProvider* dpp user_info = user->get_info(); } -void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const { +auto rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> { rgw_user federated_user; federated_user.id = this->sub; federated_user.tenant = role_tenant; federated_user.ns = "oidc"; + std::unique_ptr<rgw::sal::User> user = driver->get_user(federated_user); if (account) { // we don't need shadow users for account roles because bucket ownership, // quota, and stats are tracked by the account instead of the user - user_info.user_id = std::move(federated_user); + RGWUserInfo& user_info = user->get_info(); user_info.display_name = user_name; user_info.type = TYPE_WEB; - return; + // the user_info.user_id is initialized by driver->get_user(...) + return user; } - std::unique_ptr<rgw::sal::User> user = driver->get_user(federated_user); - //Check in oidc namespace if (user->load_user(dpp, null_yield) >= 0) { /* Succeeded. 
*/ - user_info = user->get_info(); - return; + // the user_info in user is initialized by user->load_user(...) + return user; } user->clear_ns(); //Check for old users which wouldn't have been created in oidc namespace if (user->load_user(dpp, null_yield) >= 0) { /* Succeeded. */ - user_info = user->get_info(); - return; + // the user_info in user is initialized by user->load_user(...) + return user; } //Check if user_id.buckets already exists, may have been from the time, when shadow users didnt exist @@ -671,7 +679,7 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp last_synced, last_updated); if (ret < 0 && ret != -ENOENT) { ldpp_dout(dpp, 0) << "ERROR: reading stats for the user returned error " << ret << dendl; - return; + return user; } if (ret == -ENOENT) { /* in case of ENOENT, which means user doesnt have buckets */ //In this case user will be created in oidc namespace @@ -684,7 +692,8 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp } ldpp_dout(dpp, 0) << "NOTICE: couldn't map oidc federated user " << federated_user << dendl; - create_account(dpp, federated_user, this->user_name, user_info); + create_account(dpp, federated_user, this->user_name, user->get_info()); + return user; } void rgw::auth::WebIdentityApplier::modify_request_state(const DoutPrefixProvider *dpp, req_state* s) const @@ -936,7 +945,7 @@ void rgw::auth::RemoteApplier::write_ops_log_entry(rgw_log_entry& entry) const } /* TODO(rzarzynski): we need to handle display_name changes. */ -void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const /* out */ +auto rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> /* out */ { /* It's supposed that RGWRemoteAuthApplier tries to load account info * that belongs to the authenticated identity. Another policy may be @@ -975,9 +984,9 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW (void) load_account_and_policies(dpp, null_yield, driver, user->get_info(), user->get_attrs(), account, policies); - user_info = std::move(user->get_info()); owner_acct_user = std::move(tenanted_uid); - return; + // the user_info in user is initialized by user->load_user(...) + return user; } } @@ -990,15 +999,16 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW (void) load_account_and_policies(dpp, null_yield, driver, user->get_info(), user->get_attrs(), account, policies); - user_info = std::move(user->get_info()); owner_acct_user = acct_user; - return; + // the user_info in user is initialized by user->load_user(...) + return user; } ldpp_dout(dpp, 0) << "NOTICE: couldn't map swift user " << acct_user << dendl; - create_account(dpp, acct_user, implicit_tenant, user_info); + create_account(dpp, acct_user, implicit_tenant, user->get_info()); /* Succeeded if we are here (create_account() hasn't throwed). */ + return user; } void rgw::auth::RemoteApplier::modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const @@ -1098,11 +1108,11 @@ uint32_t rgw::auth::LocalApplier::get_perm_mask(const std::string& subuser_name, } } -void rgw::auth::LocalApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const /* out */ +auto rgw::auth::LocalApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> /* out */ { /* Load the account that belongs to the authenticated identity. 
An extra call * to RADOS may be safely skipped in this case. */ - user_info = this->user_info; + return std::unique_ptr<rgw::sal::User>(user.release()); } void rgw::auth::LocalApplier::modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const @@ -1121,6 +1131,22 @@ void rgw::auth::LocalApplier::write_ops_log_entry(rgw_log_entry& entry) const } } +rgw::auth::LocalApplier::LocalApplier(CephContext* const cct, + std::unique_ptr<rgw::sal::User> user, + std::optional<RGWAccountInfo> account, + std::vector<IAM::Policy> policies, + std::string subuser, + const std::optional<uint32_t>& perm_mask, + const std::string access_key_id) + : user_info(user->get_info()), + user(std::move(user)), + account(std::move(account)), + policies(std::move(policies)), + subuser(std::move(subuser)), + perm_mask(perm_mask.value_or(RGW_PERM_INVALID)), + access_key_id(access_key_id) { +} + ACLOwner rgw::auth::RoleApplier::get_aclowner() const { ACLOwner owner; @@ -1183,10 +1209,11 @@ bool rgw::auth::RoleApplier::is_identity(const Principal& p) const { return false; } -void rgw::auth::RoleApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const /* out */ +auto rgw::auth::RoleApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> /* out */ { /* Load the user id */ - user_info.user_id = this->token_attrs.user_id; + std::unique_ptr<rgw::sal::User> user = driver->get_user(this->token_attrs.user_id); + return user; } void rgw::auth::RoleApplier::write_ops_log_entry(rgw_log_entry& entry) const @@ -1267,9 +1294,10 @@ rgw::auth::AnonymousEngine::authenticate(const DoutPrefixProvider* dpp, const re } else { RGWUserInfo user_info; rgw_get_anon_user(user_info); - + std::unique_ptr<rgw::sal::User> user = s->user->clone(); + user->get_info() = user_info; auto apl = \ - apl_factory->create_apl_local(cct, s, user_info, std::nullopt, {}, + apl_factory->create_apl_local(cct, s, std::move(user), std::nullopt, {}, rgw::auth::LocalApplier::NO_SUBUSER, std::nullopt, rgw::auth::LocalApplier::NO_ACCESS_KEY); return result_t::grant(std::move(apl)); diff --git a/src/rgw/rgw_auth.h b/src/rgw/rgw_auth.h index f3edbbab845..22b0816bac9 100644 --- a/src/rgw/rgw_auth.h +++ b/src/rgw/rgw_auth.h @@ -105,7 +105,8 @@ inline std::ostream& operator<<(std::ostream& out, auto transform_old_authinfo(const DoutPrefixProvider* dpp, optional_yield y, sal::Driver* driver, - sal::User* user) + sal::User* user, + std::vector<IAM::Policy>* policies_ = nullptr) -> tl::expected<std::unique_ptr<Identity>, int>; // Load the user account and all user/group policies. May throw @@ -139,7 +140,7 @@ public: * * XXX: be aware that the "account" term refers to rgw_user. The naming * is legacy. */ - virtual void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const = 0; /* out */ + virtual auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> = 0; /* out */ /* Apply any changes to request state. This method will be most useful for * TempURL of Swift API. 
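The change above inverts the data flow of load_acct_info(): instead of filling a caller-owned RGWUserInfo out-parameter, each applier now builds and returns an owning rgw::sal::User (typically via driver->get_user()), and the auth strategy takes ownership of it (s->user = applier->load_acct_info(dpp)). A minimal standalone sketch of that ownership flow follows; the types here are toy stand-ins for illustration only, not the real RGW classes or part of this patch:

  #include <iostream>
  #include <memory>
  #include <string>
  #include <utility>

  struct User { std::string id; };                    // stand-in for rgw::sal::User
  struct Driver {                                     // stand-in for rgw::sal::Driver
    std::unique_ptr<User> get_user(std::string id) {
      return std::make_unique<User>(User{std::move(id)});
    }
  };

  struct IdentityApplier {                            // stand-in for rgw::auth::IdentityApplier
    virtual ~IdentityApplier() = default;
    // was: void load_acct_info(const DoutPrefixProvider*, RGWUserInfo&) const
    virtual std::unique_ptr<User> load_acct_info() const = 0;
  };

  struct ExampleApplier : IdentityApplier {           // hypothetical applier, not from the patch
    Driver* driver;
    std::string uid;
    ExampleApplier(Driver* d, std::string uid) : driver(d), uid(std::move(uid)) {}
    std::unique_ptr<User> load_acct_info() const override {
      return driver->get_user(uid);                   // the applier materializes the user itself
    }
  };

  int main() {
    Driver driver;
    ExampleApplier applier{&driver, "alice"};
    // mirrors rgw::auth::Strategy::apply(): s->user = applier->load_acct_info(dpp);
    std::unique_ptr<User> s_user = applier.load_acct_info();
    std::cout << s_user->id << std::endl;
  }

The same ownership shift is what allows the real LocalApplier (below) to carry a mutable std::unique_ptr<rgw::sal::User> member and hand it out from its load_acct_info() override.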
*/ @@ -484,7 +485,7 @@ public: bool is_identity(const Principal& p) const override; - void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; uint32_t get_identity_type() const override { return TYPE_WEB; @@ -656,7 +657,7 @@ public: uint32_t get_perm_mask() const override { return info.perm_mask; } void to_str(std::ostream& out) const override; - void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */ + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */ void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override; void write_ops_log_entry(rgw_log_entry& entry) const override; uint32_t get_identity_type() const override { return info.acct_type; } @@ -683,7 +684,7 @@ public: /* rgw::auth::LocalApplier targets those auth engines that base on the data - * enclosed in the RGWUserInfo control structure. As a side effect of doing + * enclosed in the rgw::sal::User->RGWUserInfo control structure. As a side effect of doing * the authentication process, they must have it loaded. Leveraging this is * a way to avoid unnecessary calls to underlying RADOS store. */ class LocalApplier : public IdentityApplier { @@ -691,6 +692,7 @@ class LocalApplier : public IdentityApplier { protected: const RGWUserInfo user_info; + mutable std::unique_ptr<rgw::sal::User> user; const std::optional<RGWAccountInfo> account; const std::vector<IAM::Policy> policies; const std::string subuser; @@ -705,19 +707,12 @@ public: static const std::string NO_ACCESS_KEY; LocalApplier(CephContext* const cct, - const RGWUserInfo& user_info, + std::unique_ptr<rgw::sal::User> user, std::optional<RGWAccountInfo> account, std::vector<IAM::Policy> policies, std::string subuser, const std::optional<uint32_t>& perm_mask, - const std::string access_key_id) - : user_info(user_info), - account(std::move(account)), - policies(std::move(policies)), - subuser(std::move(subuser)), - perm_mask(perm_mask.value_or(RGW_PERM_INVALID)), - access_key_id(access_key_id) { - } + const std::string access_key_id); ACLOwner get_aclowner() const override; uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override; @@ -732,7 +727,7 @@ public: } } void to_str(std::ostream& out) const override; - void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */ + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */ void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override; uint32_t get_identity_type() const override { return user_info.type; } std::string get_acct_name() const override { return {}; } @@ -750,7 +745,7 @@ public: virtual ~Factory() {} virtual aplptr_t create_apl_local(CephContext* cct, const req_state* s, - const RGWUserInfo& user_info, + std::unique_ptr<rgw::sal::User> user, std::optional<RGWAccountInfo> account, std::vector<IAM::Policy> policies, const std::string& subuser, @@ -779,15 +774,20 @@ public: std::vector<std::pair<std::string, std::string>> principal_tags; }; protected: + CephContext* const cct; + rgw::sal::Driver* driver; Role role; TokenAttrs token_attrs; public: RoleApplier(CephContext* const cct, + rgw::sal::Driver* driver, const Role& role, const TokenAttrs& token_attrs) - : role(role), + : cct(cct), + 
driver(driver), + role(role), token_attrs(token_attrs) {} ACLOwner get_aclowner() const override; @@ -803,7 +803,7 @@ public: return RGW_PERM_NONE; } void to_str(std::ostream& out) const override; - void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */ + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */ uint32_t get_identity_type() const override { return TYPE_ROLE; } std::string get_acct_name() const override { return {}; } std::string get_subuser() const override { return {}; } diff --git a/src/rgw/rgw_auth_filters.h b/src/rgw/rgw_auth_filters.h index a93641e8b8e..7d264197c52 100644 --- a/src/rgw/rgw_auth_filters.h +++ b/src/rgw/rgw_auth_filters.h @@ -117,8 +117,8 @@ public: return get_decoratee().get_account(); } - void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override { /* out */ - return get_decoratee().load_acct_info(dpp, user_info); + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override { /* out */ + return get_decoratee().load_acct_info(dpp); } void modify_request_state(const DoutPrefixProvider* dpp, req_state * s) const override { /* in/out */ @@ -152,7 +152,7 @@ public: } void to_str(std::ostream& out) const override; - void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */ + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */ }; /* static declaration: UNKNOWN_ACCT will be an empty rgw_user that is a result @@ -169,23 +169,25 @@ void ThirdPartyAccountApplier<T>::to_str(std::ostream& out) const } template <typename T> -void ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const +auto ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> { + std::unique_ptr<rgw::sal::User> luser; if (UNKNOWN_ACCT == acct_user_override) { /* There is no override specified by the upper layer. This means that we'll * load the account owned by the authenticated identity (aka auth_user). */ - DecoratedApplier<T>::load_acct_info(dpp, user_info); + luser = DecoratedApplier<T>::load_acct_info(dpp); } else if (DecoratedApplier<T>::is_owner_of(acct_user_override)) { /* The override has been specified but the account belongs to the authenticated * identity. We may safely forward the call to a next stage. */ - DecoratedApplier<T>::load_acct_info(dpp, user_info); + luser = DecoratedApplier<T>::load_acct_info(dpp); } else if (this->is_anonymous()) { /* If the user was authed by the anonymous engine then scope the ANON user * to the correct tenant */ + luser = driver->get_user(rgw_user(RGW_USER_ANON_ID)); if (acct_user_override.tenant.empty()) - user_info.user_id = rgw_user(acct_user_override.id, RGW_USER_ANON_ID); + luser->get_info().user_id = rgw_user(acct_user_override.id, RGW_USER_ANON_ID); else - user_info.user_id = rgw_user(acct_user_override.tenant, RGW_USER_ANON_ID); + luser->get_info().user_id = rgw_user(acct_user_override.tenant, RGW_USER_ANON_ID); } else { /* Compatibility mechanism for multi-tenancy. For more details refer to * load_acct_info method of rgw::auth::RemoteApplier. 
*/ @@ -196,9 +198,10 @@ void ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, user = driver->get_user(tenanted_uid); if (user->load_user(dpp, null_yield) >= 0) { - user_info = user->get_info(); + // the user_info in luser is initialized by user->load_user(...) + luser = user->clone(); /* Succeeded. */ - return; + return luser; } } @@ -213,8 +216,10 @@ void ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, throw ret; } } - user_info = user->get_info(); + // the user_info in luser is initialized by user->load_user(...) + luser = user->clone(); } + return luser; } template <typename T> static inline @@ -248,7 +253,7 @@ public: } void to_str(std::ostream& out) const override; - void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */ + auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */ void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override; /* in/out */ ACLOwner get_aclowner() const override { @@ -271,10 +276,10 @@ void SysReqApplier<T>::to_str(std::ostream& out) const } template <typename T> -void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const +auto SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> { - DecoratedApplier<T>::load_acct_info(dpp, user_info); - is_system = user_info.system; + std::unique_ptr<rgw::sal::User> user = DecoratedApplier<T>::load_acct_info(dpp); + is_system = user->get_info().system; if (is_system) { //ldpp_dout(dpp, 20) << "system request" << dendl; @@ -285,7 +290,7 @@ void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo effective_owner->id = parse_owner(str); if (const auto* uid = std::get_if<rgw_user>(&effective_owner->id); uid) { - std::unique_ptr<rgw::sal::User> user = driver->get_user(*uid); + user = driver->get_user(*uid); if (user->load_user(dpp, null_yield) < 0) { //ldpp_dout(dpp, 0) << "User lookup failed!" 
<< dendl; throw -EACCES; @@ -294,14 +299,14 @@ void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo } } } + return user; } template <typename T> void SysReqApplier<T>::modify_request_state(const DoutPrefixProvider* dpp, req_state* const s) const { if (boost::logic::indeterminate(is_system)) { - RGWUserInfo unused_info; - load_acct_info(dpp, unused_info); + std::unique_ptr<rgw::sal::User> unused_user{ load_acct_info(dpp) }; } if (is_system) { diff --git a/src/rgw/rgw_auth_s3.h b/src/rgw/rgw_auth_s3.h index 2f7fd2d7598..5815a520e02 100644 --- a/src/rgw/rgw_auth_s3.h +++ b/src/rgw/rgw_auth_s3.h @@ -55,14 +55,14 @@ class STSAuthStrategy : public rgw::auth::Strategy, aplptr_t create_apl_local(CephContext* const cct, const req_state* const s, - const RGWUserInfo& user_info, + std::unique_ptr<rgw::sal::User> user, std::optional<RGWAccountInfo> account, std::vector<IAM::Policy> policies, const std::string& subuser, const std::optional<uint32_t>& perm_mask, const std::string& access_key_id) const override { auto apl = rgw::auth::add_sysreq(cct, driver, s, - LocalApplier(cct, user_info, std::move(account), std::move(policies), + LocalApplier(cct, std::move(user), std::move(account), std::move(policies), subuser, perm_mask, access_key_id)); return aplptr_t(new decltype(apl)(std::move(apl))); } @@ -72,7 +72,7 @@ class STSAuthStrategy : public rgw::auth::Strategy, RoleApplier::Role role, RoleApplier::TokenAttrs token_attrs) const override { auto apl = rgw::auth::add_sysreq(cct, driver, s, - rgw::auth::RoleApplier(cct, std::move(role), std::move(token_attrs))); + rgw::auth::RoleApplier(cct, driver, std::move(role), std::move(token_attrs))); return aplptr_t(new decltype(apl)(std::move(apl))); } @@ -176,14 +176,14 @@ class AWSAuthStrategy : public rgw::auth::Strategy, aplptr_t create_apl_local(CephContext* const cct, const req_state* const s, - const RGWUserInfo& user_info, + std::unique_ptr<rgw::sal::User> user, std::optional<RGWAccountInfo> account, std::vector<IAM::Policy> policies, const std::string& subuser, const std::optional<uint32_t>& perm_mask, const std::string& access_key_id) const override { auto apl = rgw::auth::add_sysreq(cct, driver, s, - LocalApplier(cct, user_info, std::move(account), std::move(policies), + LocalApplier(cct, std::move(user), std::move(account), std::move(policies), subuser, perm_mask, access_key_id)); /* TODO(rzarzynski): replace with static_ptr. 
*/ return aplptr_t(new decltype(apl)(std::move(apl))); diff --git a/src/rgw/rgw_bucket_logging.cc b/src/rgw/rgw_bucket_logging.cc index d24a53024f1..dd407f26e8c 100644 --- a/src/rgw/rgw_bucket_logging.cc +++ b/src/rgw/rgw_bucket_logging.cc @@ -192,7 +192,7 @@ ceph::coarse_real_time time_from_name(const std::string& obj_name, const DoutPre ldpp_dout(dpp, 1) << "ERROR: logging object name too short: " << obj_name << dendl; return extracted_time; } - const auto time_start_pos = obj_name_length - (time_format_length + UniqueStringLength + 1); + const auto time_start_pos = obj_name_length - (time_format_length + UniqueStringLength + 1); // note: +1 is for the dash between the timestamp and the unique string std::string time_str = obj_name.substr(time_start_pos, time_format_length); @@ -206,6 +206,13 @@ ceph::coarse_real_time time_from_name(const std::string& obj_name, const DoutPre return extracted_time; } +std::string full_bucket_name(const std::unique_ptr<rgw::sal::Bucket>& bucket) { + if (bucket->get_tenant().empty()) { + return bucket->get_name(); + } + return fmt::format("{}:{}", bucket->get_tenant(), bucket->get_name()); +} + int new_logging_object(const configuration& conf, const std::unique_ptr<rgw::sal::Bucket>& bucket, std::string& obj_name, @@ -235,23 +242,22 @@ int new_logging_object(const configuration& conf, conf.target_prefix, to_string(bucket->get_owner()), source_region, - bucket->get_name(), + full_bucket_name(bucket), t, t, unique); } break; } - int ret = bucket->set_logging_object_name(obj_name, conf.target_prefix, y, dpp, init_obj, objv_tracker); if (ret == -EEXIST || ret == -ECANCELED) { if (ret = bucket->get_logging_object_name(obj_name, conf.target_prefix, y, dpp, nullptr); ret < 0) { ldpp_dout(dpp, 1) << "ERROR: failed to get name of logging object of bucket '" << - conf.target_bucket << "'. ret = " << ret << dendl; + conf.target_bucket << "' and prefix '" << conf.target_prefix << "', ret = " << ret << dendl; return ret; } ldpp_dout(dpp, 20) << "INFO: name already set. 
got name of logging object '" << obj_name << "' of bucket '" << - conf.target_bucket << "'" << dendl; + conf.target_bucket << "' and prefix '" << conf.target_prefix << "'" << dendl; return -ECANCELED; } else if (ret < 0) { ldpp_dout(dpp, 1) << "ERROR: failed to write name of logging object '" << obj_name << "' of bucket '" << @@ -263,6 +269,44 @@ int new_logging_object(const configuration& conf, return 0; } +int commit_logging_object(const configuration& conf, + const DoutPrefixProvider *dpp, + rgw::sal::Driver* driver, + const std::string& tenant_name, + optional_yield y) { + std::string target_bucket_name; + std::string target_tenant_name; + auto ret = rgw_parse_url_bucket(conf.target_bucket, tenant_name, target_tenant_name, target_bucket_name); + if (ret < 0) { + ldpp_dout(dpp, 1) << "ERROR: failed to parse target bucket '" << conf.target_bucket << "' when commiting logging object, ret = " + << ret << dendl; + return ret; + } + const rgw_bucket target_bucket_id(target_tenant_name, target_bucket_name); + std::unique_ptr<rgw::sal::Bucket> target_bucket; + ret = driver->load_bucket(dpp, target_bucket_id, + &target_bucket, y); + if (ret < 0) { + ldpp_dout(dpp, 1) << "ERROR: failed to get target logging bucket '" << target_bucket_id << "' when commiting logging object, ret = " + << ret << dendl; + return ret; + } + return commit_logging_object(conf, target_bucket, dpp, y); +} + +int commit_logging_object(const configuration& conf, + const std::unique_ptr<rgw::sal::Bucket>& target_bucket, + const DoutPrefixProvider *dpp, + optional_yield y) { + std::string obj_name; + if (const auto ret = target_bucket->get_logging_object_name(obj_name, conf.target_prefix, y, dpp, nullptr); ret < 0) { + ldpp_dout(dpp, 1) << "ERROR: failed to get name of logging object of bucket '" << + target_bucket->get_info().bucket << "'. ret = " << ret << dendl; + return ret; + } + return target_bucket->commit_logging_object(obj_name, y, dpp); +} + int rollover_logging_object(const configuration& conf, const std::unique_ptr<rgw::sal::Bucket>& bucket, std::string& obj_name, @@ -270,12 +314,16 @@ int rollover_logging_object(const configuration& conf, optional_yield y, bool must_commit, RGWObjVersionTracker* objv_tracker) { - if (conf.target_bucket != bucket->get_name()) { - ldpp_dout(dpp, 1) << "ERROR: bucket name mismatch: '" << conf.target_bucket << "' != '" << bucket->get_name() << "'" << dendl; + std::string target_bucket_name; + std::string target_tenant_name; + std::ignore = rgw_parse_url_bucket(conf.target_bucket, bucket->get_tenant(), target_tenant_name, target_bucket_name); + if (target_bucket_name != bucket->get_name() || target_tenant_name != bucket->get_tenant()) { + ldpp_dout(dpp, 1) << "ERROR: bucket name mismatch. conf= '" << conf.target_bucket << + "', bucket= '" << bucket->get_info().bucket << "'" << dendl; return -EINVAL; } const auto old_obj = obj_name; - const auto ret = new_logging_object(conf, bucket, obj_name, dpp, y, false, objv_tracker); + const auto ret = new_logging_object(conf, bucket, obj_name, dpp, y, false, objv_tracker); if (ret == -ECANCELED) { ldpp_dout(dpp, 20) << "INFO: rollover already performed for '" << old_obj << "' to bucket '" << conf.target_bucket << "'. 
ret = " << ret << dendl; @@ -342,14 +390,14 @@ S3 bucket short (ceph) log record - eTag };*/ -int log_record(rgw::sal::Driver* driver, +int log_record(rgw::sal::Driver* driver, const sal::Object* obj, - const req_state* s, - const std::string& op_name, - const std::string& etag, + const req_state* s, + const std::string& op_name, + const std::string& etag, size_t size, const configuration& conf, - const DoutPrefixProvider *dpp, + const DoutPrefixProvider *dpp, optional_yield y, bool async_completion, bool log_source_bucket) { @@ -357,11 +405,19 @@ int log_record(rgw::sal::Driver* driver, ldpp_dout(dpp, 1) << "ERROR: only bucket operations are logged" << dendl; return -EINVAL; } + std::string target_bucket_name; + std::string target_tenant_name; + auto ret = rgw_parse_url_bucket(conf.target_bucket, s->bucket_tenant, target_tenant_name, target_bucket_name); + if (ret < 0) { + ldpp_dout(dpp, 1) << "ERROR: failed to parse target bucket '" << conf.target_bucket << "', ret = " << ret << dendl; + return ret; + } + const rgw_bucket target_bucket_id(target_tenant_name, target_bucket_name); std::unique_ptr<rgw::sal::Bucket> target_bucket; - auto ret = driver->load_bucket(dpp, rgw_bucket(s->bucket_tenant, conf.target_bucket), + ret = driver->load_bucket(dpp, target_bucket_id, &target_bucket, y); if (ret < 0) { - ldpp_dout(dpp, 1) << "ERROR: failed to get target logging bucket '" << conf.target_bucket << "'. ret = " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to get target logging bucket '" << target_bucket_id << "'. ret = " << ret << dendl; return ret; } std::string obj_name; @@ -382,12 +438,14 @@ int log_record(rgw::sal::Driver* driver, // try to create the temporary log object for the first time ret = new_logging_object(conf, target_bucket, obj_name, dpp, y, true, nullptr); if (ret == 0) { - ldpp_dout(dpp, 20) << "INFO: first time logging for bucket '" << conf.target_bucket << "'" << dendl; + ldpp_dout(dpp, 20) << "INFO: first time logging for bucket '" << conf.target_bucket << "' and prefix '" << + conf.target_prefix << "'" << dendl; } else if (ret == -ECANCELED) { - ldpp_dout(dpp, 20) << "INFO: logging object '" << obj_name << "' already exists for bucket '" << conf.target_bucket << "', will be used" << dendl; + ldpp_dout(dpp, 20) << "INFO: logging object '" << obj_name << "' already exists for bucket '" << conf.target_bucket << "' and prefix" << + conf.target_prefix << "'" << dendl; } else { ldpp_dout(dpp, 1) << "ERROR: failed to create logging object of bucket '" << - conf.target_bucket << "' for the first time. ret = " << ret << dendl; + conf.target_bucket << "' and prefix '" << conf.target_prefix << "' for the first time. 
ret = " << ret << dendl; return ret; } } else { @@ -420,7 +478,7 @@ int log_record(rgw::sal::Driver* driver, bucket_name = s->src_bucket_name; } else { bucket_owner = to_string( s->bucket->get_owner()); - bucket_name = s->bucket->get_name(); + bucket_name = full_bucket_name(s->bucket); } switch (conf.logging_type) { @@ -459,7 +517,7 @@ int log_record(rgw::sal::Driver* driver, case LoggingType::Journal: record = fmt::format("{} {} [{:%d/%b/%Y:%H:%M:%S %z}] {} {} {} {} {}", dash_if_empty(to_string(s->bucket->get_owner())), - dash_if_empty(s->bucket->get_name()), + dash_if_empty(full_bucket_name(s->bucket)), t, op_name, dash_if_empty_or_null(obj, obj->get_name()), @@ -512,12 +570,12 @@ std::string object_name_oid(const rgw::sal::Bucket* bucket, const std::string& p int log_record(rgw::sal::Driver* driver, LoggingType type, const sal::Object* obj, - const req_state* s, - const std::string& op_name, - const std::string& etag, - size_t size, - const DoutPrefixProvider *dpp, - optional_yield y, + const req_state* s, + const std::string& op_name, + const std::string& etag, + size_t size, + const DoutPrefixProvider *dpp, + optional_yield y, bool async_completion, bool log_source_bucket) { if (!s->bucket) { @@ -534,7 +592,7 @@ int log_record(rgw::sal::Driver* driver, try { configuration.enabled = true; auto bl_iter = iter->second.cbegin(); - decode(configuration, bl_iter); + decode(configuration, bl_iter); if (type != LoggingType::Any && configuration.logging_type != type) { return 0; } @@ -543,20 +601,199 @@ int log_record(rgw::sal::Driver* driver, return 0; } } - ldpp_dout(dpp, 20) << "INFO: found matching logging configuration of bucket '" << s->bucket->get_name() << + ldpp_dout(dpp, 20) << "INFO: found matching logging configuration of bucket '" << s->bucket->get_info().bucket << "' configuration: " << configuration.to_json_str() << dendl; - if (auto ret = log_record(driver, obj, s, op_name, etag, size, configuration, dpp, y, async_completion, log_source_bucket); ret < 0) { - ldpp_dout(dpp, 1) << "ERROR: failed to perform logging for bucket '" << s->bucket->get_name() << + if (auto ret = log_record(driver, obj, s, op_name, etag, size, configuration, dpp, y, async_completion, log_source_bucket); ret < 0) { + ldpp_dout(dpp, 1) << "ERROR: failed to perform logging for bucket '" << s->bucket->get_info().bucket << "'. ret=" << ret << dendl; return ret; } } catch (buffer::error& err) { - ldpp_dout(dpp, 1) << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING + ldpp_dout(dpp, 1) << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING << "'. 
error: " << err.what() << dendl; return -EINVAL; } return 0; } +int get_bucket_id(const std::string& bucket_name, const std::string& tenant_name, rgw_bucket& bucket_id) { + std::string parsed_bucket_name; + std::string parsed_tenant_name; + if (const auto ret = rgw_parse_url_bucket(bucket_name, tenant_name, parsed_tenant_name, parsed_bucket_name); ret < 0) { + return ret; + } + bucket_id = rgw_bucket{parsed_tenant_name, parsed_bucket_name}; + return 0; +} + +int update_bucket_logging_sources(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, const rgw_bucket& target_bucket_id, const rgw_bucket& src_bucket_id, bool add, optional_yield y) { + std::unique_ptr<rgw::sal::Bucket> target_bucket; + const auto ret = driver->load_bucket(dpp, target_bucket_id, &target_bucket, y); + if (ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: failed to get target bucket '" << target_bucket_id << "', ret = " << ret << dendl; + return ret; + } + return update_bucket_logging_sources(dpp, target_bucket, src_bucket_id, add, y); +} + +int update_bucket_logging_sources(const DoutPrefixProvider* dpp, std::unique_ptr<rgw::sal::Bucket>& bucket, const rgw_bucket& src_bucket_id, bool add, optional_yield y) { + return retry_raced_bucket_write(dpp, bucket.get(), [dpp, &bucket, &src_bucket_id, add, y] { + auto& attrs = bucket->get_attrs(); + auto iter = attrs.find(RGW_ATTR_BUCKET_LOGGING_SOURCES); + if (iter == attrs.end()) { + if (!add) { + ldpp_dout(dpp, 20) << "INFO: no logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES + << "' for bucket '" << bucket->get_info().bucket << "', nothing to remove" << dendl; + return 0; + } + source_buckets sources{src_bucket_id}; + bufferlist bl; + ceph::encode(sources, bl); + attrs.insert(std::make_pair(RGW_ATTR_BUCKET_LOGGING_SOURCES, std::move(bl))); + return bucket->merge_and_store_attrs(dpp, attrs, y); + } + try { + source_buckets sources; + ceph::decode(sources, iter->second); + if ((add && sources.insert(src_bucket_id).second) || + (!add && sources.erase(src_bucket_id) > 0)) { + bufferlist bl; + ceph::encode(sources, bl); + iter->second = std::move(bl); + return bucket->merge_and_store_attrs(dpp, attrs, y); + } + } catch (buffer::error& err) { + ldpp_dout(dpp, 1) << "WARNING: failed to decode logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES + << "' for bucket '" << bucket->get_info().bucket << "', error: " << err.what() << dendl; + } + ldpp_dout(dpp, 20) << "INFO: logging source '" << src_bucket_id << "' already " << + (add ? 
"added to" : "removed from") << " bucket '" << bucket->get_info().bucket << "'" << dendl; + return 0; + }, y); +} + + +int bucket_deletion_cleanup(const DoutPrefixProvider* dpp, + sal::Driver* driver, + sal::Bucket* bucket, + optional_yield y) { + // if the bucket is used a log bucket, we should delete all pending log objects + // and also delete the object holding the pending object name + auto& attrs = bucket->get_attrs(); + if (const auto iter = attrs.find(RGW_ATTR_BUCKET_LOGGING_SOURCES); iter != attrs.end()) { + try { + source_buckets sources; + ceph::decode(sources, iter->second); + for (const auto& source : sources) { + std::unique_ptr<rgw::sal::Bucket> src_bucket; + if (const auto ret = driver->load_bucket(dpp, source, &src_bucket, y); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: failed to get logging source bucket '" << source << "' for log bucket '" << + bucket->get_info().bucket << "', ret = " << ret << dendl; + continue; + } + auto& src_attrs = src_bucket->get_attrs(); + if (const auto iter = src_attrs.find(RGW_ATTR_BUCKET_LOGGING); iter != src_attrs.end()) { + configuration conf; + try { + auto bl_iter = iter->second.cbegin(); + decode(conf, bl_iter); + std::string obj_name; + RGWObjVersionTracker objv; + if (const auto ret = bucket->get_logging_object_name(obj_name, conf.target_prefix, y, dpp, &objv); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: failed to get logging object name for log bucket '" << bucket->get_info().bucket << + "', ret = " << ret << dendl; + continue; + } + if (const auto ret = bucket->remove_logging_object(obj_name, y, dpp); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: failed to delete pending logging object '" << obj_name << "' for log bucket '" << + bucket->get_info().bucket << "', ret = " << ret << dendl; + continue; + } + ldpp_dout(dpp, 20) << "INFO: successfully deleted pending logging object '" << obj_name << "' from deleted log bucket '" << + bucket->get_info().bucket << "'" << dendl; + if (const auto ret = bucket->remove_logging_object_name(conf.target_prefix, y, dpp, &objv); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: failed to delete object holding bucket logging object name for log bucket '" << + bucket->get_info().bucket << "', ret = " << ret << dendl; + continue; + } + ldpp_dout(dpp, 20) << "INFO: successfully deleted object holding bucket logging object name from deleted log bucket '" << + bucket->get_info().bucket << "'" << dendl; + } catch (buffer::error& err) { + ldpp_dout(dpp, 1) << "WARNING: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING + << "' of bucket '" << src_bucket->get_info().bucket << "', error: " << err.what() << dendl; + } + } + } + } catch (buffer::error& err) { + ldpp_dout(dpp, 1) << "WARNING: failed to decode logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES + << "' for bucket '" << bucket->get_info().bucket << "', error: " << err.what() << dendl; + return -EIO; + } + } + + return source_bucket_cleanup(dpp, driver, bucket, false, y); +} + +int source_bucket_cleanup(const DoutPrefixProvider* dpp, + sal::Driver* driver, + sal::Bucket* bucket, + bool remove_attr, + optional_yield y) { + std::optional<configuration> conf; + const auto& info = bucket->get_info(); + if (const auto ret = retry_raced_bucket_write(dpp, bucket, [dpp, bucket, &conf, &info, remove_attr, y] { + auto& attrs = bucket->get_attrs(); + if (auto iter = attrs.find(RGW_ATTR_BUCKET_LOGGING); iter != attrs.end()) { + try { + auto bl_iter = iter->second.cbegin(); + configuration tmp_conf; + tmp_conf.enabled = true; + decode(tmp_conf, 
bl_iter); + conf = std::move(tmp_conf); + } catch (buffer::error& err) { + ldpp_dout(dpp, 1) << "WARNING: failed to decode existing logging attribute '" << RGW_ATTR_BUCKET_LOGGING + << "' of bucket '" << info.bucket << "', error: " << err.what() << dendl; + return -EIO; + } + if (remove_attr) { + attrs.erase(iter); + return bucket->merge_and_store_attrs(dpp, attrs, y); + } + } + // nothing to remove or no need to remove + return 0; + }, y); ret < 0) { + if (remove_attr) { + ldpp_dout(dpp, 1) << "ERROR: failed to remove logging attribute '" << RGW_ATTR_BUCKET_LOGGING << "' from bucket '" << + info.bucket << "', ret = " << ret << dendl; + } + return ret; + } + if (!conf) { + // no logging attribute found + return 0; + } + if (const auto ret = commit_logging_object(*conf, dpp, driver, info.bucket.tenant, y); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: could not commit pending logging object of bucket '" << + info.bucket << "', ret = " << ret << dendl; + } else { + ldpp_dout(dpp, 20) << "INFO: successfully committed pending logging object of bucket '" << info.bucket << "'" << dendl; + } + rgw_bucket target_bucket_id; + rgw_bucket src_bucket_id{info.bucket.tenant, info.bucket.name}; + if (const auto ret = get_bucket_id(conf->target_bucket, info.bucket.tenant, target_bucket_id); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: failed to parse target bucket '" << conf->target_bucket << "', ret = " << ret << dendl; + return 0; + } + if (const auto ret = update_bucket_logging_sources(dpp, driver, target_bucket_id, src_bucket_id, false, y); ret < 0) { + ldpp_dout(dpp, 1) << "WARNING: could not update bucket logging source '" << + info.bucket << "', ret = " << ret << dendl; + return 0; + } + ldpp_dout(dpp, 20) << "INFO: successfully updated bucket logging source '" << + info.bucket << "'"<< dendl; + return 0; +} + } // namespace rgw::bucketlogging diff --git a/src/rgw/rgw_bucket_logging.h b/src/rgw/rgw_bucket_logging.h index d4877bafb0f..cbdb8b55f88 100644 --- a/src/rgw/rgw_bucket_logging.h +++ b/src/rgw/rgw_bucket_logging.h @@ -4,7 +4,6 @@ #pragma once #include <string> -#include <optional> #include <cstdint> #include "rgw_sal_fwd.h" #include "include/buffer.h" @@ -16,7 +15,7 @@ class XMLObj; namespace ceph { class Formatter; } class DoutPrefixProvider; struct req_state; -class RGWObjVersionTracker; +struct RGWObjVersionTracker; class RGWOp; namespace rgw::bucketlogging { @@ -66,6 +65,17 @@ enum class LoggingType {Standard, Journal, Any}; enum class PartitionDateSource {DeliveryTime, EventTime}; struct configuration { + bool operator==(const configuration& rhs) const { + return enabled == rhs.enabled && + target_bucket == rhs.target_bucket && + obj_key_format == rhs.obj_key_format && + target_prefix == rhs.target_prefix && + obj_roll_time == rhs.obj_roll_time && + logging_type == rhs.logging_type && + records_batch_size == rhs.records_batch_size && + date_source == rhs.date_source && + key_filter == rhs.key_filter; + } uint32_t default_obj_roll_time = 300; bool enabled = false; std::string target_bucket; @@ -129,6 +139,8 @@ struct configuration { }; WRITE_CLASS_ENCODER(configuration) +using source_buckets = std::set<rgw_bucket>; + constexpr unsigned MAX_BUCKET_LOGGING_BUFFER = 1000; using bucket_logging_records = std::array<std::string, MAX_BUCKET_LOGGING_BUFFER>; @@ -155,7 +167,7 @@ int log_record(rgw::sal::Driver* driver, bool async_completion, bool log_source_bucket); -// commit the pending log objec tto the log bucket +// commit the pending log objec to the log bucket // and create a new pending log 
object // if "must_commit" is "false" the function will return success even if the pending log object was not committed int rollover_logging_object(const configuration& conf, @@ -166,6 +178,23 @@ int rollover_logging_object(const configuration& conf, bool must_commit, RGWObjVersionTracker* objv_tracker); +// commit the pending log object to the log bucket +// use this for cleanup, when new pending object is not needed +// and target bucket is known +int commit_logging_object(const configuration& conf, + const std::unique_ptr<rgw::sal::Bucket>& target_bucket, + const DoutPrefixProvider *dpp, + optional_yield y); + +// commit the pending log object to the log bucket +// use this for cleanup, when new pending object is not needed +// and target bucket shoud be loaded based on the configuration +int commit_logging_object(const configuration& conf, + const DoutPrefixProvider *dpp, + rgw::sal::Driver* driver, + const std::string& tenant_name, + optional_yield y); + // return the oid of the object holding the name of the temporary logging object // bucket - log bucket // prefix - logging prefix from configuration. should be used when multiple buckets log into the same log bucket @@ -185,5 +214,37 @@ int log_record(rgw::sal::Driver* driver, optional_yield y, bool async_completion, bool log_source_bucket); + +// return (by ref) an rgw_bucket object with the bucket name and tenant name +// fails if the bucket name is not in the format: [tenant name:]<bucket name> +int get_bucket_id(const std::string& bucket_name, const std::string& tenant_name, rgw_bucket& bucket_id); + +// update (add or remove) a source bucket from the list of source buckets in the target bucket +// use this function when the target bucket is already loaded +int update_bucket_logging_sources(const DoutPrefixProvider* dpp, std::unique_ptr<rgw::sal::Bucket>& bucket, + const rgw_bucket& src_bucket, bool add, optional_yield y); + +// update (add or remove) a source bucket from the list of source buckets in the target bucket +// use this function when the target bucket is not known and needs to be loaded +int update_bucket_logging_sources(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, const rgw_bucket& target_bucket_id, + const rgw_bucket& src_bucket_id, bool add, optional_yield y); + +// when source bucket is deleted, all pending log objects should be comitted to the log bucket +// when the target bucket is deleted, all pending log objects should be deleted, as well as the object holding the pending log object name +int bucket_deletion_cleanup(const DoutPrefixProvider* dpp, + sal::Driver* driver, + sal::Bucket* bucket, + optional_yield y); + +// if bucket has bucket logging configuration associated with it then: +// if "remove_attr" is true, the bucket logging configuration should be removed from the bucket +// in addition: +// any pending log objects should be comitted to the log bucket +// and the log bucket should be updated to remove the bucket as a source +int source_bucket_cleanup(const DoutPrefixProvider* dpp, + sal::Driver* driver, + sal::Bucket* bucket, + bool remove_attr, + optional_yield y); } // namespace rgw::bucketlogging diff --git a/src/rgw/rgw_cksum_pipe.cc b/src/rgw/rgw_cksum_pipe.cc index e06957e2715..0bec8d341af 100644 --- a/src/rgw/rgw_cksum_pipe.cc +++ b/src/rgw/rgw_cksum_pipe.cc @@ -18,6 +18,7 @@ #include <string> #include <fmt/format.h> #include <boost/algorithm/string.hpp> +#include "rgw_cksum.h" #include "rgw_common.h" #include "common/dout.h" #include "rgw_client_io.h" @@ -34,7 +35,8 @@ namespace 
rgw::putobj { {} std::unique_ptr<RGWPutObj_Cksum> RGWPutObj_Cksum::Factory( - rgw::sal::DataProcessor* next, const RGWEnv& env) + rgw::sal::DataProcessor* next, const RGWEnv& env, + rgw::cksum::Type override_type) { /* look for matching headers */ auto algo_header = cksum_algorithm_hdr(env); @@ -49,6 +51,13 @@ namespace rgw::putobj { throw rgw::io::Exception(EINVAL, std::system_category()); } /* no checksum header */ + if (override_type != rgw::cksum::Type::none) { + /* XXXX safe? do we need to fixup env as well? */ + auto algo_header = cksum_algorithm_hdr(override_type); + return + std::make_unique<RGWPutObj_Cksum>( + next, override_type, std::move(algo_header)); + } return std::unique_ptr<RGWPutObj_Cksum>(); } diff --git a/src/rgw/rgw_cksum_pipe.h b/src/rgw/rgw_cksum_pipe.h index fddcd283c84..c459d156335 100644 --- a/src/rgw/rgw_cksum_pipe.h +++ b/src/rgw/rgw_cksum_pipe.h @@ -20,6 +20,7 @@ #include <tuple> #include <cstring> #include <boost/algorithm/string/case_conv.hpp> +#include "rgw_cksum.h" #include "rgw_cksum_digest.h" #include "rgw_common.h" #include "rgw_putobj.h" @@ -29,6 +30,38 @@ namespace rgw::putobj { namespace cksum = rgw::cksum; using cksum_hdr_t = std::pair<const char*, const char*>; + static inline const cksum_hdr_t cksum_algorithm_hdr(rgw::cksum::Type t) { + static constexpr std::string_view hdr = + "HTTP_X_AMZ_SDK_CHECKSUM_ALGORITHM"; + using rgw::cksum::Type; + switch (t) { + case Type::sha256: + return cksum_hdr_t(hdr.data(), "SHA256"); + break; + case Type::crc32: + return cksum_hdr_t(hdr.data(), "CRC32"); + break; + case Type::crc32c: + return cksum_hdr_t(hdr.data(), "CRC32C"); + break; + case Type::xxh3: + return cksum_hdr_t(hdr.data(), "XX3"); + break; + case Type::sha1: + return cksum_hdr_t(hdr.data(), "SHA1"); + break; + case Type::sha512: + return cksum_hdr_t(hdr.data(), "SHA512"); + break; + case Type::blake3: + return cksum_hdr_t(hdr.data(), "BLAKE3"); + break; + default: + break; + }; + return cksum_hdr_t(nullptr, nullptr);; + } + static inline const cksum_hdr_t cksum_algorithm_hdr(const RGWEnv& env) { /* If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm @@ -102,7 +135,8 @@ namespace rgw::putobj { using VerifyResult = std::tuple<bool, const cksum::Cksum&>; static std::unique_ptr<RGWPutObj_Cksum> Factory( - rgw::sal::DataProcessor* next, const RGWEnv&); + rgw::sal::DataProcessor* next, const RGWEnv&, + rgw::cksum::Type override_type); RGWPutObj_Cksum(rgw::sal::DataProcessor* next, rgw::cksum::Type _type, cksum_hdr_t&& _hdr); diff --git a/src/rgw/rgw_common.cc b/src/rgw/rgw_common.cc index 1a59ba02999..6610538542c 100644 --- a/src/rgw/rgw_common.cc +++ b/src/rgw/rgw_common.cc @@ -63,6 +63,7 @@ rgw_http_errors rgw_http_s3_errors({ { ERR_INVALID_DIGEST, {400, "InvalidDigest" }}, { ERR_BAD_DIGEST, {400, "BadDigest" }}, { ERR_INVALID_LOCATION_CONSTRAINT, {400, "InvalidLocationConstraint" }}, + { ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION, {400, "IllegalLocationConstraintException" }}, { ERR_ZONEGROUP_DEFAULT_PLACEMENT_MISCONFIGURATION, {400, "ZonegroupDefaultPlacementMisconfiguration" }}, { ERR_INVALID_BUCKET_NAME, {400, "InvalidBucketName" }}, { ERR_INVALID_OBJECT_NAME, {400, "InvalidObjectName" }}, @@ -3206,3 +3207,14 @@ void RGWObjVersionTracker::generate_new_write_ver(CephContext *cct) append_rand_alpha(cct, write_version.tag, write_version.tag, TAG_LEN); } +boost::optional<rgw::IAM::Policy> +get_iam_policy_from_attr(CephContext* cct, + const std::map<std::string, bufferlist>& attrs, + const 
std::string& tenant) +{ + if (auto i = attrs.find(RGW_ATTR_IAM_POLICY); i != attrs.end()) { + return Policy(cct, &tenant, i->second.to_str(), false); + } else { + return boost::none; + } +} diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h index d7b0819d356..88f5f7a9c52 100644 --- a/src/rgw/rgw_common.h +++ b/src/rgw/rgw_common.h @@ -108,6 +108,7 @@ using ceph::crypto::MD5; #define RGW_ATTR_X_ROBOTS_TAG RGW_ATTR_PREFIX "x-robots-tag" #define RGW_ATTR_STORAGE_CLASS RGW_ATTR_PREFIX "storage_class" #define RGW_ATTR_BUCKET_LOGGING RGW_ATTR_PREFIX "logging" +#define RGW_ATTR_BUCKET_LOGGING_SOURCES RGW_ATTR_PREFIX "logging-sources" /* S3 Object Lock*/ #define RGW_ATTR_OBJECT_LOCK RGW_ATTR_PREFIX "object-lock" @@ -337,6 +338,7 @@ inline constexpr const char* RGW_REST_STS_XMLNS = #define ERR_PRESIGNED_URL_EXPIRED 2223 #define ERR_PRESIGNED_URL_DISABLED 2224 #define ERR_AUTHORIZATION 2225 // SNS 403 AuthorizationError +#define ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION 2226 #define ERR_BUSY_RESHARDING 2300 // also in cls_rgw_types.h, don't change! #define ERR_NO_SUCH_ENTITY 2301 @@ -1748,24 +1750,22 @@ rgw::IAM::Effect evaluate_iam_policies( bool verify_user_permission(const DoutPrefixProvider* dpp, req_state * const s, - const RGWAccessControlPolicy& user_acl, - const std::vector<rgw::IAM::Policy>& user_policies, - const std::vector<rgw::IAM::Policy>& session_policies, - const rgw::ARN& res, - const uint64_t op, - bool mandatory_policy=true); -bool verify_user_permission_no_policy(const DoutPrefixProvider* dpp, - req_state * const s, - const RGWAccessControlPolicy& user_acl, - const int perm); -bool verify_user_permission(const DoutPrefixProvider* dpp, - req_state * const s, const rgw::ARN& res, const uint64_t op, bool mandatory_policy=true); bool verify_user_permission_no_policy(const DoutPrefixProvider* dpp, req_state * const s, int perm); +bool verify_bucket_permission(const DoutPrefixProvider* dpp, + struct perm_state_base * const s, + const rgw::ARN& arn, + bool account_root, + const RGWAccessControlPolicy& user_acl, + const RGWAccessControlPolicy& bucket_acl, + const boost::optional<rgw::IAM::Policy>& bucket_policy, + const std::vector<rgw::IAM::Policy>& identity_policies, + const std::vector<rgw::IAM::Policy>& session_policies, + const uint64_t op); bool verify_bucket_permission( const DoutPrefixProvider* dpp, req_state * const s, @@ -2013,3 +2013,8 @@ struct AioCompletionDeleter { void operator()(librados::AioCompletion* c) { c->release(); } }; using aio_completion_ptr = std::unique_ptr<librados::AioCompletion, AioCompletionDeleter>; + +extern boost::optional<rgw::IAM::Policy> +get_iam_policy_from_attr(CephContext* cct, + const std::map<std::string, bufferlist>& attrs, + const std::string& tenant); diff --git a/src/rgw/rgw_iam_policy.cc b/src/rgw/rgw_iam_policy.cc index 2a5c9cd313e..ef6761d4222 100644 --- a/src/rgw/rgw_iam_policy.cc +++ b/src/rgw/rgw_iam_policy.cc @@ -94,6 +94,8 @@ static const actpair actpairs[] = { "s3:GetPublicAccessBlock", s3GetPublicAccessBlock }, { "s3:GetObjectAcl", s3GetObjectAcl }, { "s3:GetObject", s3GetObject }, + { "s3:GetObjectAttributes", s3GetObjectAttributes }, + { "s3:GetObjectVersionAttributes", s3GetObjectVersionAttributes }, { "s3:GetObjectTorrent", s3GetObjectTorrent }, { "s3:GetObjectVersionAcl", s3GetObjectVersionAcl }, { "s3:GetObjectVersion", s3GetObjectVersion }, @@ -1335,6 +1337,7 @@ const char* action_bit_string(uint64_t action) { case s3ListBucketVersions: return "s3:ListBucketVersions"; + case s3ListAllMyBuckets: return 
"s3:ListAllMyBuckets"; @@ -1479,6 +1482,12 @@ const char* action_bit_string(uint64_t action) { case s3BypassGovernanceRetention: return "s3:BypassGovernanceRetention"; + case s3GetObjectAttributes: + return "s3:GetObjectAttributes"; + + case s3GetObjectVersionAttributes: + return "s3:GetObjectVersionAttributes"; + case s3DescribeJob: return "s3:DescribeJob"; diff --git a/src/rgw/rgw_iam_policy.h b/src/rgw/rgw_iam_policy.h index 0476926143f..dd323ee4b9c 100644 --- a/src/rgw/rgw_iam_policy.h +++ b/src/rgw/rgw_iam_policy.h @@ -115,6 +115,8 @@ enum { s3GetBucketEncryption, s3PutBucketEncryption, s3DescribeJob, + s3GetObjectAttributes, + s3GetObjectVersionAttributes, s3All, s3objectlambdaGetObject, @@ -247,6 +249,8 @@ inline int op_to_perm(std::uint64_t op) { case s3GetObjectVersionTagging: case s3GetObjectRetention: case s3GetObjectLegalHold: + case s3GetObjectAttributes: + case s3GetObjectVersionAttributes: case s3ListAllMyBuckets: case s3ListBucket: case s3ListBucketMultipartUploads: diff --git a/src/rgw/rgw_kafka.cc b/src/rgw/rgw_kafka.cc index 0807993338d..b38b1a78ec4 100644 --- a/src/rgw/rgw_kafka.cc +++ b/src/rgw/rgw_kafka.cc @@ -13,6 +13,7 @@ #include <thread> #include <atomic> #include <mutex> +#include <boost/algorithm/string.hpp> #include <boost/functional/hash.hpp> #include <boost/lockfree/queue.hpp> #include "common/dout.h" @@ -595,7 +596,8 @@ public: boost::optional<const std::string&> ca_location, boost::optional<const std::string&> mechanism, boost::optional<const std::string&> topic_user_name, - boost::optional<const std::string&> topic_password) { + boost::optional<const std::string&> topic_password, + boost::optional<const std::string&> brokers) { if (stopped) { ldout(cct, 1) << "Kafka connect: manager is stopped" << dendl; return false; @@ -603,8 +605,8 @@ public: std::string user; std::string password; - std::string broker; - if (!parse_url_authority(url, broker, user, password)) { + std::string broker_list; + if (!parse_url_authority(url, broker_list, user, password)) { // TODO: increment counter ldout(cct, 1) << "Kafka connect: URL parsing failed" << dendl; return false; @@ -632,7 +634,13 @@ public: ldout(cct, 1) << "Kafka connect: user/password are only allowed over secure connection" << dendl; return false; } - connection_id_t tmp_id(broker, user, password, ca_location, mechanism, + + if (brokers.has_value()) { + broker_list.append(","); + broker_list.append(brokers.get()); + } + + connection_id_t tmp_id(broker_list, user, password, ca_location, mechanism, use_ssl); std::lock_guard lock(connections_lock); const auto it = connections.find(tmp_id); @@ -652,7 +660,7 @@ public: return false; } - auto conn = std::make_unique<connection_t>(cct, broker, use_ssl, verify_ssl, ca_location, user, password, mechanism); + auto conn = std::make_unique<connection_t>(cct, broker_list, use_ssl, verify_ssl, ca_location, user, password, mechanism); if (!new_producer(conn.get())) { ldout(cct, 10) << "Kafka connect: producer creation failed in new connection" << dendl; return false; @@ -770,11 +778,12 @@ bool connect(connection_id_t& conn_id, boost::optional<const std::string&> ca_location, boost::optional<const std::string&> mechanism, boost::optional<const std::string&> user_name, - boost::optional<const std::string&> password) { + boost::optional<const std::string&> password, + boost::optional<const std::string&> brokers) { std::shared_lock lock(s_manager_mutex); if (!s_manager) return false; return s_manager->connect(conn_id, url, use_ssl, verify_ssl, ca_location, - mechanism, 
user_name, password); + mechanism, user_name, password, brokers); } int publish(const connection_id_t& conn_id, diff --git a/src/rgw/rgw_kafka.h b/src/rgw/rgw_kafka.h index b7aa0d15759..858b185219f 100644 --- a/src/rgw/rgw_kafka.h +++ b/src/rgw/rgw_kafka.h @@ -48,7 +48,8 @@ bool connect(connection_id_t& conn_id, boost::optional<const std::string&> ca_location, boost::optional<const std::string&> mechanism, boost::optional<const std::string&> user_name, - boost::optional<const std::string&> password); + boost::optional<const std::string&> password, + boost::optional<const std::string&> brokers); // publish a message over a connection that was already created int publish(const connection_id_t& conn_id, diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index ee42ab647a1..1793c0b8065 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -25,8 +25,10 @@ #include "common/ceph_json.h" #include "common/static_ptr.h" #include "common/perf_counters_key.h" +#include "rgw_cksum.h" #include "rgw_cksum_digest.h" #include "rgw_common.h" +#include "common/split.h" #include "rgw_tracer.h" #include "rgw_rados.h" @@ -331,19 +333,6 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, return ret; } - -static boost::optional<Policy> -get_iam_policy_from_attr(CephContext* cct, - const map<string, bufferlist>& attrs, - const string& tenant) -{ - if (auto i = attrs.find(RGW_ATTR_IAM_POLICY); i != attrs.end()) { - return Policy(cct, &tenant, i->second.to_str(), false); - } else { - return none; - } -} - static boost::optional<PublicAccessBlockConfiguration> get_public_access_conf_from_attr(const map<string, bufferlist>& attrs) { @@ -3571,54 +3560,62 @@ void RGWCreateBucket::execute(optional_yield y) const rgw::SiteConfig& site = *s->penv.site; const std::optional<RGWPeriod>& period = site.get_period(); const RGWZoneGroup& my_zonegroup = site.get_zonegroup(); - - if (s->system_request) { - // allow system requests to override the target zonegroup. for forwarded - // requests, we'll create the bucket for the originating zonegroup - createparams.zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup"); - } - + const std::string rgwx_zonegroup = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup"); const RGWZoneGroup* bucket_zonegroup = &my_zonegroup; - if (createparams.zonegroup_id.empty()) { - // default to the local zonegroup - createparams.zonegroup_id = my_zonegroup.id; - } else if (period) { - auto z = period->period_map.zonegroups.find(createparams.zonegroup_id); - if (z == period->period_map.zonegroups.end()) { - ldpp_dout(this, 0) << "could not find zonegroup " - << createparams.zonegroup_id << " in current period" << dendl; - op_ret = -ENOENT; - return; - } - bucket_zonegroup = &z->second; - } else if (createparams.zonegroup_id != my_zonegroup.id) { - ldpp_dout(this, 0) << "zonegroup does not match current zonegroup " - << createparams.zonegroup_id << dendl; - op_ret = -ENOENT; - return; - } - // validate the LocationConstraint + // Validate LocationConstraint if it's provided and enforcement is strict if (!location_constraint.empty() && !relaxed_region_enforcement) { - // on the master zonegroup, allow any valid api_name. 
otherwise it has to - // match the bucket's zonegroup - if (period && my_zonegroup.is_master) { - if (!period->period_map.zonegroups_by_api.count(location_constraint)) { + if (period) { + auto location_iter = period->period_map.zonegroups_by_api.find(location_constraint); + if (location_iter == period->period_map.zonegroups_by_api.end()) { ldpp_dout(this, 0) << "location constraint (" << location_constraint << ") can't be found." << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; - s->err.message = "The specified location-constraint is not valid"; + s->err.message = fmt::format("The {} location constraint is not valid.", + location_constraint); return; } - } else if (bucket_zonegroup->api_name != location_constraint) { + bucket_zonegroup = &location_iter->second; + } else if (location_constraint != my_zonegroup.api_name) { // if we don't have a period, we can only use the current zonegroup - so check if the location matches by api name here ldpp_dout(this, 0) << "location constraint (" << location_constraint - << ") doesn't match zonegroup (" << bucket_zonegroup->api_name - << ')' << dendl; - op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; - s->err.message = "The specified location-constraint is not valid"; + << ") doesn't match zonegroup (" << my_zonegroup.api_name << ")" << dendl; + op_ret = -ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION; + s->err.message = fmt::format("The {} location constraint is incompatible " + "for the region specific endpoint this request was sent to.", + location_constraint); return; } } + // If it's a system request, use the provided zonegroup if available + else if (s->system_request && !rgwx_zonegroup.empty()) { + if (period) { + auto zonegroup_iter = period->period_map.zonegroups.find(rgwx_zonegroup); + if (zonegroup_iter == period->period_map.zonegroups.end()) { + ldpp_dout(this, 0) << "could not find zonegroup " << rgwx_zonegroup + << " in current period" << dendl; + op_ret = -ENOENT; + return; + } + bucket_zonegroup = &zonegroup_iter->second; + } + } + + const bool enforce_location_match = + !period || // No period: no multisite, so no need to enforce location match. + !s->system_request || // All user requests are enforced to match zonegroup's location. + !my_zonegroup.is_master; // but if it's a system request (forwarded) only allow remote creation on master zonegroup. 
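To keep the reordered checks in this hunk readable, here is a compact standalone sketch of the decision flow for picking the bucket's zonegroup: an explicit LocationConstraint (when enforcement is strict) is resolved against the period's api_name map, a system request may instead override the zonegroup via the rgwx parameter, and creation in a foreign zonegroup is rejected unless the request is a forwarded system request handled on the master zonegroup. The ZoneGroup/Period structs and the error enum are simplified stand-ins for illustration, not the real RGWZoneGroup/RGWPeriod interfaces:

  #include <map>
  #include <optional>
  #include <string>
  #include <utility>

  struct ZoneGroup { std::string id; std::string api_name; bool is_master = false; };
  struct Period {
    std::map<std::string, ZoneGroup> by_api;   // period_map.zonegroups_by_api
    std::map<std::string, ZoneGroup> by_id;    // period_map.zonegroups
  };
  enum class CreateErr { none, invalid_location_constraint, illegal_location_constraint, not_found };

  static std::pair<const ZoneGroup*, CreateErr>
  resolve_bucket_zonegroup(const ZoneGroup& my_zg,
                           const std::optional<Period>& period,
                           const std::string& location_constraint,  // from the request body
                           const std::string& rgwx_zonegroup,       // system-request override
                           bool system_request,
                           bool relaxed_region_enforcement)
  {
    const ZoneGroup* target = &my_zg;
    if (!location_constraint.empty() && !relaxed_region_enforcement) {
      if (period) {
        auto it = period->by_api.find(location_constraint);
        if (it == period->by_api.end())
          return {nullptr, CreateErr::invalid_location_constraint};  // unknown api_name
        target = &it->second;
      } else if (location_constraint != my_zg.api_name) {
        return {nullptr, CreateErr::illegal_location_constraint};    // no period: local zonegroup only
      }
    } else if (system_request && !rgwx_zonegroup.empty() && period) {
      auto it = period->by_id.find(rgwx_zonegroup);
      if (it == period->by_id.end())
        return {nullptr, CreateErr::not_found};                      // -ENOENT in the hunk
      target = &it->second;
    }
    // only a forwarded system request on the master zonegroup may create remotely
    const bool enforce = !period || !system_request || !my_zg.is_master;
    if (enforce && target->id != my_zg.id)
      return {nullptr, CreateErr::illegal_location_constraint};
    return {target, CreateErr::none};
  }

Under this flow, a user request whose LocationConstraint names a different zonegroup now surfaces the S3-style IllegalLocationConstraintException (HTTP 400) registered in rgw_common.cc further down, rather than the generic InvalidLocationConstraint.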
+ if (enforce_location_match && !my_zonegroup.equals(bucket_zonegroup->get_id())) { + ldpp_dout(this, 0) << "location constraint (" << bucket_zonegroup->api_name + << ") doesn't match zonegroup (" << my_zonegroup.api_name << ")" << dendl; + op_ret = -ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION; + s->err.message = fmt::format("The {} location constraint is incompatible " + "for the region specific endpoint this request was sent to.", + bucket_zonegroup->api_name); + return; + } + + // Set the final zonegroup ID + createparams.zonegroup_id = bucket_zonegroup->id; // select and validate the placement target op_ret = select_bucket_placement(this, *bucket_zonegroup, s->user->get_info(), @@ -3627,7 +3624,7 @@ void RGWCreateBucket::execute(optional_yield y) return; } - if (bucket_zonegroup == &my_zonegroup) { + if (my_zonegroup.equals(bucket_zonegroup->get_id())) { // look up the zone placement pool createparams.zone_placement = rgw::find_zone_placement( this, site.get_zone_params(), createparams.placement_rule); @@ -3716,7 +3713,6 @@ void RGWCreateBucket::execute(optional_yield y) if (!driver->is_meta_master()) { // apply bucket creation on the master zone first - bufferlist in_data; JSONParser jp; op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id, &in_data, &jp, s->info, y); @@ -3793,7 +3789,10 @@ void RGWCreateBucket::execute(optional_yield y) s->bucket->get_info().has_website = !s->bucket->get_info().website_conf.is_empty(); /* This will also set the quota on the bucket. */ - op_ret = s->bucket->merge_and_store_attrs(this, createparams.attrs, y); + s->bucket->set_attrs(std::move(createparams.attrs)); + constexpr bool exclusive = false; // overwrite + constexpr ceph::real_time no_set_mtime{}; + op_ret = s->bucket->put_info(this, exclusive, no_set_mtime, y); } while (op_ret == -ECANCELED && tries++ < 20); /* Restore the proper return code. */ @@ -4344,6 +4343,9 @@ void RGWPutObj::execute(optional_yield y) } return; } + + multipart_cksum_type = upload->cksum_type; + /* upload will go out of scope, so copy the dest placement for later use */ s->dest_placement = *pdest_placement; pdest_placement = &s->dest_placement; @@ -4474,11 +4476,12 @@ void RGWPutObj::execute(optional_yield y) /* optional streaming checksum */ try { cksum_filter = - rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env); + rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env, multipart_cksum_type); } catch (const rgw::io::Exception& e) { op_ret = -e.code().value(); return; } + if (cksum_filter) { filter = &*cksum_filter; } @@ -4625,10 +4628,12 @@ void RGWPutObj::execute(optional_yield y) if (cksum_filter) { const auto& hdr = cksum_filter->header(); + auto expected_ck = cksum_filter->expected(*s->info.env); auto cksum_verify = cksum_filter->verify(*s->info.env); // valid or no supplied cksum cksum = get<1>(cksum_verify); - if (std::get<0>(cksum_verify)) { + if ((!expected_ck) || + std::get<0>(cksum_verify)) { buffer::list cksum_bl; ldpp_dout_fmt(this, 16, @@ -4636,14 +4641,13 @@ void RGWPutObj::execute(optional_yield y) "\n\tcomputed={} == \n\texpected={}", hdr.second, cksum->to_armor(), - cksum_filter->expected(*s->info.env)); + (!!expected_ck) ? 
expected_ck : "(checksum unavailable)"); cksum->encode(cksum_bl); emplace_attr(RGW_ATTR_CKSUM, std::move(cksum_bl)); } else { /* content checksum mismatch */ auto computed_ck = cksum->to_armor(); - auto expected_ck = cksum_filter->expected(*s->info.env); ldpp_dout_fmt(this, 4, "{} content checksum mismatch" @@ -4846,7 +4850,8 @@ void RGWPostObj::execute(optional_yield y) /* optional streaming checksum */ try { cksum_filter = - rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env); + rgw::putobj::RGWPutObj_Cksum::Factory( + filter, *s->info.env, rgw::cksum::Type::none /* no override */); } catch (const rgw::io::Exception& e) { op_ret = -e.code().value(); return; @@ -5194,7 +5199,10 @@ void RGWPutMetadataBucket::execute(optional_yield y) /* Setting attributes also stores the provided bucket info. Due * to this fact, the new quota settings can be serialized with * the same call. */ - op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); + s->bucket->set_attrs(attrs); + constexpr bool exclusive = false; // overwrite + constexpr ceph::real_time no_set_mtime{}; + op_ret = s->bucket->put_info(this, exclusive, no_set_mtime, s->yield); return op_ret; }, y); } @@ -5982,8 +5990,6 @@ void RGWGetACLs::execute(optional_yield y) acls = ss.str(); } - - int RGWPutACLs::verify_permission(optional_yield y) { bool perm; @@ -6005,6 +6011,74 @@ int RGWPutACLs::verify_permission(optional_yield y) return 0; } +uint16_t RGWGetObjAttrs::recognize_attrs(const std::string& hdr, uint16_t deflt) +{ + auto attrs{deflt}; + auto sa = ceph::split(hdr, ","); + for (auto& k : sa) { + if (boost::iequals(k, "etag")) { + attrs |= as_flag(ReqAttributes::Etag); + } + if (boost::iequals(k, "checksum")) { + attrs |= as_flag(ReqAttributes::Checksum); + } + if (boost::iequals(k, "objectparts")) { + attrs |= as_flag(ReqAttributes::ObjectParts); + } + if (boost::iequals(k, "objectsize")) { + attrs |= as_flag(ReqAttributes::ObjectSize); + } + if (boost::iequals(k, "storageclass")) { + attrs |= as_flag(ReqAttributes::StorageClass); + } + } + return attrs; +} /* RGWGetObjAttrs::recognize_attrs */ + +int RGWGetObjAttrs::verify_permission(optional_yield y) +{ + bool perm = false; + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + + if (! rgw::sal::Object::empty(s->object.get())) { + + auto iam_action1 = s->object->get_instance().empty() ? + rgw::IAM::s3GetObject : + rgw::IAM::s3GetObjectVersion; + + auto iam_action2 = s->object->get_instance().empty() ? + rgw::IAM::s3GetObjectAttributes : + rgw::IAM::s3GetObjectVersionAttributes; + + if (has_s3_existing_tag || has_s3_resource_tag) { + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); + } + + /* XXXX the following conjunction should be &&--but iam_action2 is currently not + * hooked up and always fails (but should succeed if the requestor has READ + * acess to the object) */ + perm = (verify_object_permission(this, s, iam_action1) || /* && */ + verify_object_permission(this, s, iam_action2)); + } + + if (! 
perm) { + return -EACCES; + } + + return 0; +} + +void RGWGetObjAttrs::pre_exec() +{ + rgw_bucket_object_pre_exec(s); +} + +void RGWGetObjAttrs::execute(optional_yield y) +{ + RGWGetObj::execute(y); +} /* RGWGetObjAttrs::execute */ + int RGWGetLC::verify_permission(optional_yield y) { auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); @@ -6672,6 +6746,14 @@ try_sum_part_cksums(const DoutPrefixProvider *dpp, ++parts_ix; auto& part_cksum = part.second->get_cksum(); + if (! part_cksum) { + ldpp_dout_fmt(dpp, 0, + "ERROR: multipart part checksum not present (ix=={})", + parts_ix); + op_ret = -ERR_INVALID_REQUEST; + return op_ret; + } + ldpp_dout_fmt(dpp, 16, "INFO: {} iterate part: {} {} {}", __func__, parts_ix, part_cksum->type_string(), @@ -8500,6 +8582,10 @@ void RGWGetBucketPolicy::execute(optional_yield y) void RGWDeleteBucketPolicy::send_response() { + if (!op_ret) { + /* A successful Delete Bucket Policy should return a 204 on success */ + op_ret = STATUS_NO_CONTENT; + } if (op_ret) { set_req_state_err(s, op_ret); } @@ -9175,4 +9261,3 @@ void rgw_slo_entry::decode_json(JSONObj *obj) JSONDecoder::decode_json("etag", etag, obj); JSONDecoder::decode_json("size_bytes", size_bytes, obj); }; - diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h index 9f747501729..dcf64c31572 100644 --- a/src/rgw/rgw_op.h +++ b/src/rgw/rgw_op.h @@ -12,6 +12,7 @@ #pragma once +#include <cstdint> #include <limits.h> #include <array> @@ -1111,6 +1112,7 @@ class RGWCreateBucket : public RGWOp { bool relaxed_region_enforcement = false; RGWCORSConfiguration cors_config; std::set<std::string> rmattr_names; + bufferlist in_data; virtual bool need_metadata_upload() const { return false; } @@ -1237,6 +1239,7 @@ protected: std::string multipart_upload_id; std::string multipart_part_str; int multipart_part_num = 0; + rgw::cksum::Type multipart_cksum_type{rgw::cksum::Type::none}; jspan_ptr multipart_trace; boost::optional<ceph::real_time> delete_at; @@ -1644,6 +1647,50 @@ public: uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; } }; +class RGWGetObjAttrs : public RGWGetObj { +protected: + std::string version_id; + std::string expected_bucket_owner; + std::optional<int> marker; + std::optional<int> max_parts; + uint16_t requested_attributes{0}; +#if 0 + /* used to decrypt attributes for objects stored with SSE-C */ + x-amz-server-side-encryption-customer-algorithm + x-amz-server-side-encryption-customer-key + x-amz-server-side-encryption-customer-key-MD5 +#endif +public: + + enum class ReqAttributes : uint16_t { + None = 0, + Etag, + Checksum, + ObjectParts, + StorageClass, + ObjectSize + }; + + static uint16_t as_flag(ReqAttributes attr) { + return 1 << (uint16_t(attr) ? 
uint16_t(attr) - 1 : 0); + } + + static uint16_t recognize_attrs(const std::string& hdr, uint16_t deflt = 0); + + RGWGetObjAttrs() : RGWGetObj() + { + RGWGetObj::get_data = false; // it's extra false + } + + int verify_permission(optional_yield y) override; + void pre_exec() override; + void execute(optional_yield y) override; + void send_response() override = 0; + const char* name() const override { return "get_obj_attrs"; } + RGWOpType get_type() override { return RGW_OP_GET_OBJ_ATTRS; } + uint32_t op_mask() override { return RGW_OP_TYPE_READ; } +}; /* RGWGetObjAttrs */ + class RGWGetLC : public RGWOp { protected: diff --git a/src/rgw/rgw_op_type.h b/src/rgw/rgw_op_type.h index 49faea6403d..2c8225d289e 100644 --- a/src/rgw/rgw_op_type.h +++ b/src/rgw/rgw_op_type.h @@ -30,6 +30,7 @@ enum RGWOpType { RGW_OP_COPY_OBJ, RGW_OP_GET_ACLS, RGW_OP_PUT_ACLS, + RGW_OP_GET_OBJ_ATTRS, RGW_OP_GET_CORS, RGW_OP_PUT_CORS, RGW_OP_DELETE_CORS, diff --git a/src/rgw/rgw_rest.h b/src/rgw/rgw_rest.h index aa33080af56..9111696453e 100644 --- a/src/rgw/rgw_rest.h +++ b/src/rgw/rgw_rest.h @@ -403,6 +403,17 @@ public: virtual std::string canonical_name() const override { return fmt::format("REST.{}.ACL", s->info.method); } }; +class RGWGetObjAttrs_ObjStore : public RGWGetObjAttrs { +public: + RGWGetObjAttrs_ObjStore() {} + ~RGWGetObjAttrs_ObjStore() override {} + + int get_params(optional_yield y) = 0; + /* not actually used */ + int send_response_data_error(optional_yield y) override { return 0; }; + int send_response_data(bufferlist& bl, off_t ofs, off_t len) override { return 0; }; +}; + class RGWGetLC_ObjStore : public RGWGetLC { public: RGWGetLC_ObjStore() {} diff --git a/src/rgw/rgw_rest_bucket_logging.cc b/src/rgw/rgw_rest_bucket_logging.cc index ed12ce855a9..afd79b0a548 100644 --- a/src/rgw/rgw_rest_bucket_logging.cc +++ b/src/rgw/rgw_rest_bucket_logging.cc @@ -58,30 +58,29 @@ public: return; } - std::unique_ptr<rgw::sal::Bucket> bucket; - op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name), - &bucket, y); + const rgw_bucket src_bucket_id(s->bucket_tenant, s->bucket_name); + std::unique_ptr<rgw::sal::Bucket> src_bucket; + op_ret = driver->load_bucket(this, src_bucket_id, + &src_bucket, y); if (op_ret < 0) { - ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << - (s->bucket_tenant.empty() ? s->bucket_name : s->bucket_tenant + ":" + s->bucket_name) << - "' info, ret = " << op_ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << src_bucket_id << "', ret = " << op_ret << dendl; return; } - if (auto iter = bucket->get_attrs().find(RGW_ATTR_BUCKET_LOGGING); iter != bucket->get_attrs().end()) { + if (auto iter = src_bucket->get_attrs().find(RGW_ATTR_BUCKET_LOGGING); iter != src_bucket->get_attrs().end()) { try { configuration.enabled = true; decode(configuration, iter->second); } catch (buffer::error& err) { - ldpp_dout(this, 1) << "ERROR: failed to decode attribute '" << RGW_ATTR_BUCKET_LOGGING - << "'. 
error: " << err.what() << dendl; + ldpp_dout(this, 1) << "WARNING: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING + << "' for bucket '" << src_bucket_id << "', error: " << err.what() << dendl; op_ret = -EIO; return; } } else { - ldpp_dout(this, 5) << "WARNING: no logging configuration on bucket '" << bucket->get_name() << "'" << dendl; + ldpp_dout(this, 5) << "WARNING: no logging configuration on bucket '" << src_bucket_id << "'" << dendl; return; } - ldpp_dout(this, 20) << "INFO: found logging configuration on bucket '" << bucket->get_name() << "'" + ldpp_dout(this, 20) << "INFO: found logging configuration on bucket '" << src_bucket_id << "'" << "'. configuration: " << configuration.to_json_str() << dendl; } @@ -159,58 +158,125 @@ class RGWPutBucketLoggingOp : public RGWDefaultResponseOp { return; } - std::unique_ptr<rgw::sal::Bucket> bucket; - op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name), - &bucket, y); + const rgw_bucket src_bucket_id(s->bucket_tenant, s->bucket_name); + std::unique_ptr<rgw::sal::Bucket> src_bucket; + op_ret = driver->load_bucket(this, src_bucket_id, + &src_bucket, y); if (op_ret < 0) { - ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << s->bucket_name << "', ret = " << op_ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << src_bucket_id << "', ret = " << op_ret << dendl; return; } - - auto& attrs = bucket->get_attrs(); if (!configuration.enabled) { - if (auto iter = attrs.find(RGW_ATTR_BUCKET_LOGGING); iter != attrs.end()) { - attrs.erase(iter); - } - } else { - std::unique_ptr<rgw::sal::Bucket> target_bucket; - op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, configuration.target_bucket), - &target_bucket, y); - if (op_ret < 0) { - ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl; - return; - } - const auto& target_attrs = target_bucket->get_attrs(); - if (target_attrs.find(RGW_ATTR_BUCKET_LOGGING) != target_attrs.end()) { - // target bucket must not have logging set on it - ldpp_dout(this, 1) << "ERROR: logging target bucket '" << configuration.target_bucket << "', is configured with bucket logging" << dendl; - op_ret = -EINVAL; - return; - } - // TODO: verify target bucket does not have encryption - bufferlist conf_bl; - encode(configuration, conf_bl); - attrs[RGW_ATTR_BUCKET_LOGGING] = conf_bl; - // TODO: should we add attribute to target bucket indicating it is target to bucket logging? - // if we do, how do we maintain it when bucket logging changes? 
+ op_ret = rgw::bucketlogging::source_bucket_cleanup(this, driver, src_bucket.get(), true, y); + return; + } + + // set logging configuration + rgw_bucket target_bucket_id; + if (op_ret = rgw::bucketlogging::get_bucket_id(configuration.target_bucket, s->bucket_tenant, target_bucket_id); op_ret < 0) { + ldpp_dout(this, 1) << "ERROR: failed to parse target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl; + return; + } + + if (target_bucket_id == src_bucket_id) { + ldpp_dout(this, 1) << "ERROR: target bucket '" << target_bucket_id << "' must be different from source bucket" << dendl; + op_ret = -EINVAL; + return; + } + std::unique_ptr<rgw::sal::Bucket> target_bucket; + op_ret = driver->load_bucket(this, target_bucket_id, + &target_bucket, y); + if (op_ret < 0) { + ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << target_bucket_id << "', ret = " << op_ret << dendl; + return; + } + auto& target_attrs = target_bucket->get_attrs(); + if (target_attrs.find(RGW_ATTR_BUCKET_LOGGING) != target_attrs.end()) { + // target bucket must not have logging set on it + ldpp_dout(this, 1) << "ERROR: logging target bucket '" << target_bucket_id << "', is configured with bucket logging" << dendl; + op_ret = -EINVAL; + return; } - // TODO: use retry_raced_bucket_write from rgw_op.cc - op_ret = bucket->merge_and_store_attrs(this, attrs, y); + // verify target bucket does not have encryption + if (target_attrs.find(RGW_ATTR_BUCKET_ENCRYPTION_POLICY) != target_attrs.end()) { + ldpp_dout(this, 1) << "ERROR: logging target bucket '" << target_bucket_id << "', is configured with encryption" << dendl; + op_ret = -EINVAL; + return; + } + std::optional<rgw::bucketlogging::configuration> old_conf; + bufferlist conf_bl; + encode(configuration, conf_bl); + op_ret = retry_raced_bucket_write(this, src_bucket.get(), [this, &conf_bl, &src_bucket, &old_conf, &configuration, y] { + auto& attrs = src_bucket->get_attrs(); + auto it = attrs.find(RGW_ATTR_BUCKET_LOGGING); + if (it != attrs.end()) { + try { + rgw::bucketlogging::configuration tmp_conf; + tmp_conf.enabled = true; + decode(tmp_conf, it->second); + old_conf = std::move(tmp_conf); + } catch (buffer::error& err) { + ldpp_dout(this, 1) << "WARNING: failed to decode existing logging attribute '" << RGW_ATTR_BUCKET_LOGGING + << "' for bucket '" << src_bucket->get_info().bucket << "', error: " << err.what() << dendl; + } + if (!old_conf || (old_conf && *old_conf != configuration)) { + // conf changed (or was unknown) - update + it->second = conf_bl; + return src_bucket->merge_and_store_attrs(this, attrs, y); + } + // nothing to update + return 0; + } + // conf was added + attrs.insert(std::make_pair(RGW_ATTR_BUCKET_LOGGING, conf_bl)); + return src_bucket->merge_and_store_attrs(this, attrs, y); + }, y); if (op_ret < 0) { ldpp_dout(this, 1) << "ERROR: failed to set logging attribute '" << RGW_ATTR_BUCKET_LOGGING << "' to bucket '" << - bucket->get_name() << "', ret = " << op_ret << dendl; + src_bucket_id << "', ret = " << op_ret << dendl; return; } - - ldpp_dout(this, 20) << "INFO: " << (configuration.enabled ? "wrote" : "removed") - << " logging configuration. bucket '" << bucket->get_name() << "'. configuration: " << - configuration.to_json_str() << dendl; + if (!old_conf) { + ldpp_dout(this, 20) << "INFO: new logging configuration added to bucket '" << src_bucket_id << "'. 
configuration: " << + configuration.to_json_str() << dendl; + if (const auto ret = rgw::bucketlogging::update_bucket_logging_sources(this, target_bucket, src_bucket_id, true, y); ret < 0) { + ldpp_dout(this, 1) << "WARNING: failed to add source bucket '" << src_bucket_id << "' to logging sources of target bucket '" << + target_bucket_id << "', ret = " << ret << dendl; + } + } else if (*old_conf != configuration) { + // conf changed - do cleanup + if (const auto ret = commit_logging_object(*old_conf, target_bucket, this, y); ret < 0) { + ldpp_dout(this, 1) << "WARNING: could not commit pending logging object when updating logging configuration of bucket '" << + src_bucket->get_info().bucket << "', ret = " << ret << dendl; + } else { + ldpp_dout(this, 20) << "INFO: committed pending logging object when updating logging configuration of bucket '" << + src_bucket->get_info().bucket << "'" << dendl; + } + if (old_conf->target_bucket != configuration.target_bucket) { + rgw_bucket old_target_bucket_id; + if (const auto ret = rgw::bucketlogging::get_bucket_id(old_conf->target_bucket, s->bucket_tenant, old_target_bucket_id); ret < 0) { + ldpp_dout(this, 1) << "ERROR: failed to parse target bucket '" << old_conf->target_bucket << "', ret = " << ret << dendl; + return; + } + if (const auto ret = rgw::bucketlogging::update_bucket_logging_sources(this, driver, old_target_bucket_id, src_bucket_id, false, y); ret < 0) { + ldpp_dout(this, 1) << "WARNING: failed to remove source bucket '" << src_bucket_id << "' from logging sources of original target bucket '" << + old_target_bucket_id << "', ret = " << ret << dendl; + } + if (const auto ret = rgw::bucketlogging::update_bucket_logging_sources(this, target_bucket, src_bucket_id, true, y); ret < 0) { + ldpp_dout(this, 1) << "WARNING: failed to add source bucket '" << src_bucket_id << "' to logging sources of target bucket '" << + target_bucket_id << "', ret = " << ret << dendl; + } + } + ldpp_dout(this, 20) << "INFO: wrote logging configuration to bucket '" << src_bucket_id << "'. 
configuration: " << + configuration.to_json_str() << dendl; + } else { + ldpp_dout(this, 20) << "INFO: logging configuration of bucket '" << src_bucket_id << "' did not change" << dendl; + } } }; // Post /<bucket name>/?logging -// actual configuration is XML encoded in the body of the message class RGWPostBucketLoggingOp : public RGWDefaultResponseOp { int verify_permission(optional_yield y) override { auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); @@ -234,17 +300,18 @@ class RGWPostBucketLoggingOp : public RGWDefaultResponseOp { return; } - std::unique_ptr<rgw::sal::Bucket> bucket; - op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name), - &bucket, y); + const rgw_bucket src_bucket_id(s->bucket_tenant, s->bucket_name); + std::unique_ptr<rgw::sal::Bucket> src_bucket; + op_ret = driver->load_bucket(this, src_bucket_id, + &src_bucket, y); if (op_ret < 0) { - ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << s->bucket_name << "', ret = " << op_ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << src_bucket_id << "', ret = " << op_ret << dendl; return; } - const auto& bucket_attrs = bucket->get_attrs(); + const auto& bucket_attrs = src_bucket->get_attrs(); auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING); if (iter == bucket_attrs.end()) { - ldpp_dout(this, 1) << "WARNING: no logging configured on bucket" << dendl; + ldpp_dout(this, 1) << "WARNING: no logging configured on bucket '" << src_bucket_id << "'" << dendl; return; } rgw::bucketlogging::configuration configuration; @@ -252,33 +319,38 @@ class RGWPostBucketLoggingOp : public RGWDefaultResponseOp { configuration.enabled = true; decode(configuration, iter->second); } catch (buffer::error& err) { - ldpp_dout(this, 1) << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING - << "'. 
error: " << err.what() << dendl; + ldpp_dout(this, 1) << "WARNING: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING + << "' for bucket '" << src_bucket_id << "', error: " << err.what() << dendl; op_ret = -EINVAL; return; } + rgw_bucket target_bucket_id; + if (op_ret = rgw::bucketlogging::get_bucket_id(configuration.target_bucket, s->bucket_tenant, target_bucket_id); op_ret < 0) { + ldpp_dout(this, 1) << "ERROR: failed to parse target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl; + return; + } std::unique_ptr<rgw::sal::Bucket> target_bucket; - op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, configuration.target_bucket), + op_ret = driver->load_bucket(this, target_bucket_id, &target_bucket, y); if (op_ret < 0) { - ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << target_bucket_id << "', ret = " << op_ret << dendl; return; } std::string obj_name; RGWObjVersionTracker objv_tracker; op_ret = target_bucket->get_logging_object_name(obj_name, configuration.target_prefix, null_yield, this, &objv_tracker); if (op_ret < 0) { - ldpp_dout(this, 1) << "ERROR: failed to get pending logging object name from target bucket '" << configuration.target_bucket << "'" << dendl; + ldpp_dout(this, 1) << "ERROR: failed to get pending logging object name from target bucket '" << target_bucket_id << "'" << dendl; return; } op_ret = rgw::bucketlogging::rollover_logging_object(configuration, target_bucket, obj_name, this, null_yield, true, &objv_tracker); if (op_ret < 0) { ldpp_dout(this, 1) << "ERROR: failed to flush pending logging object '" << obj_name - << "' to target bucket '" << configuration.target_bucket << "'" << dendl; + << "' to target bucket '" << target_bucket_id << "'" << dendl; return; } - ldpp_dout(this, 20) << "flushed pending logging object '" << obj_name + ldpp_dout(this, 20) << "INFO: flushed pending logging object '" << obj_name << "' to target bucket '" << configuration.target_bucket << "'" << dendl; } }; diff --git a/src/rgw/rgw_rest_pubsub.cc b/src/rgw/rgw_rest_pubsub.cc index adfc86d87cb..f1ffe09cf25 100644 --- a/src/rgw/rgw_rest_pubsub.cc +++ b/src/rgw/rgw_rest_pubsub.cc @@ -234,7 +234,13 @@ bool verify_topic_permission(const DoutPrefixProvider* dpp, req_state* s, return verify_topic_permission(dpp, s, topic.owner, arn, policy, op); } -// command (AWS compliant): +bool should_forward_request_to_master(req_state* s, rgw::sal::Driver* driver) { + return (!driver->is_meta_master() && + rgw::all_zonegroups_support(*s->penv.site, + rgw::zone_features::notification_v2)); +} + +// command (AWS compliant): // POST // Action=CreateTopic&Name=<topic-name>[&OpaqueData=data][&push-endpoint=<endpoint>[&persistent][&<arg1>=<value1>]] class RGWPSCreateTopicOp : public RGWOp { @@ -273,7 +279,7 @@ class RGWPSCreateTopicOp : public RGWOp { // Remove the args that are parsed, so the push_endpoint_args only contains // necessary one's which is parsed after this if. but only if master zone, // else we do not remove as request is forwarded to master. - if (driver->is_meta_master()) { + if (!should_forward_request_to_master(s, driver)) { s->info.args.remove("OpaqueData"); s->info.args.remove("push-endpoint"); s->info.args.remove("persistent"); @@ -396,7 +402,7 @@ class RGWPSCreateTopicOp : public RGWOp { void RGWPSCreateTopicOp::execute(optional_yield y) { // master request will replicate the topic creation. 
- if (!driver->is_meta_master()) { + if (should_forward_request_to_master(s, driver)) { op_ret = rgw_forward_request_to_master( this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y); if (op_ret < 0) { @@ -863,7 +869,7 @@ class RGWPSSetTopicAttributesOp : public RGWOp { }; void RGWPSSetTopicAttributesOp::execute(optional_yield y) { - if (!driver->is_meta_master()) { + if (should_forward_request_to_master(s, driver)) { op_ret = rgw_forward_request_to_master( this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y); if (op_ret < 0) { @@ -1008,9 +1014,10 @@ class RGWPSDeleteTopicOp : public RGWOp { }; void RGWPSDeleteTopicOp::execute(optional_yield y) { - if (!driver->is_meta_master()) { + if (should_forward_request_to_master(s, driver)) { op_ret = rgw_forward_request_to_master( this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y); + if (op_ret < 0) { ldpp_dout(this, 1) << "DeleteTopic forward_request_to_master returned ret = " << op_ret @@ -1260,7 +1267,7 @@ int RGWPSCreateNotifOp::verify_permission(optional_yield y) { } void RGWPSCreateNotifOp::execute(optional_yield y) { - if (!driver->is_meta_master()) { + if (should_forward_request_to_master(s, driver)) { op_ret = rgw_forward_request_to_master( this, *s->penv.site, s->owner.id, &data, nullptr, s->info, y); if (op_ret < 0) { @@ -1462,7 +1469,7 @@ int RGWPSDeleteNotifOp::verify_permission(optional_yield y) { } void RGWPSDeleteNotifOp::execute(optional_yield y) { - if (!driver->is_meta_master()) { + if (should_forward_request_to_master(s, driver)) { bufferlist indata; op_ret = rgw_forward_request_to_master( this, *s->penv.site, s->owner.id, &indata, nullptr, s->info, y); diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc index 30ebe8e8965..885991244a6 100644 --- a/src/rgw/rgw_rest_s3.cc +++ b/src/rgw/rgw_rest_s3.cc @@ -9,6 +9,7 @@ #include <string_view> #include "common/ceph_crypto.h" +#include "common/dout.h" #include "common/split.h" #include "common/Formatter.h" #include "common/utf8.h" @@ -807,7 +808,6 @@ void RGWGetObjTags_ObjStore_S3::send_response_data(bufferlist& bl) } } - int RGWPutObjTags_ObjStore_S3::get_params(optional_yield y) { RGWXMLParser parser; @@ -2533,6 +2533,10 @@ int RGWCreateBucket_ObjStore_S3::get_params(optional_yield y) if ((op_ret < 0) && (op_ret != -ERR_LENGTH_REQUIRED)) return op_ret; + if (!driver->is_meta_master()) { + in_data.append(data); + } + if (data.length()) { RGWCreateBucketParser parser; @@ -3815,6 +3819,196 @@ void RGWPutACLs_ObjStore_S3::send_response() dump_start(s); } +int RGWGetObjAttrs_ObjStore_S3::get_params(optional_yield y) +{ + string err; + auto& env = s->info.env; + version_id = s->info.args.get("versionId"); + + auto hdr = env->get_optional("HTTP_X_AMZ_EXPECTED_BUCKET_OWNER"); + if (hdr) { + expected_bucket_owner = *hdr; + } + + hdr = env->get_optional("HTTP_X_AMZ_MAX_PARTS"); + if (hdr) { + max_parts = strict_strtol(hdr->c_str(), 10, &err); + if (!err.empty()) { + s->err.message = "Invalid value for MaxParts: " + err; + ldpp_dout(s, 10) << "Invalid value for MaxParts " << *hdr << ": " + << err << dendl; + return -ERR_INVALID_PART; + } + max_parts = std::min(*max_parts, 1000); + } + + hdr = env->get_optional("HTTP_X_AMZ_PART_NUMBER_MARKER"); + if (hdr) { + marker = strict_strtol(hdr->c_str(), 10, &err); + if (!err.empty()) { + s->err.message = "Invalid value for PartNumberMarker: " + err; + ldpp_dout(s, 10) << "Invalid value for PartNumberMarker " << *hdr << ": " + << err << dendl; + return -ERR_INVALID_PART; + } + } + + hdr = 
env->get_optional("HTTP_X_AMZ_OBJECT_ATTRIBUTES"); + if (hdr) { + requested_attributes = recognize_attrs(*hdr); + } + + /* XXX skipping SSE-C params for now */ + + return 0; +} /* RGWGetObjAttrs_ObjStore_S3::get_params(...) */ + +int RGWGetObjAttrs_ObjStore_S3::get_decrypt_filter( + std::unique_ptr<RGWGetObj_Filter> *filter, + RGWGetObj_Filter* cb, bufferlist* manifest_bl) +{ + // we aren't actually decrypting the data, but for objects encrypted with + // SSE-C we do need to verify that required headers are present and valid + // + // in the SSE-KMS and SSE-S3 cases, this unfortunately causes us to fetch + // decryption keys which we don't need :( + std::unique_ptr<BlockCrypt> block_crypt; // ignored + std::map<std::string, std::string> crypt_http_responses; // ignored + return rgw_s3_prepare_decrypt(s, s->yield, attrs, &block_crypt, + crypt_http_responses); +} + +void RGWGetObjAttrs_ObjStore_S3::send_response() +{ + if (op_ret) + set_req_state_err(s, op_ret); + dump_errno(s); + + if (op_ret == 0) { + version_id = s->object->get_instance(); + + // x-amz-delete-marker: DeleteMarker // not sure we can plausibly do this? + dump_last_modified(s, lastmod); + dump_header_if_nonempty(s, "x-amz-version-id", version_id); + // x-amz-request-charged: RequestCharged + } + + end_header(s, this, to_mime_type(s->format)); + dump_start(s); + + if (op_ret == 0) { + s->formatter->open_object_section("GetObjectAttributes"); + if (requested_attributes & as_flag(ReqAttributes::Etag)) { + if (lo_etag.empty()) { + auto iter = attrs.find(RGW_ATTR_ETAG); + if (iter != attrs.end()) { + lo_etag = iter->second.to_str(); + } + } + s->formatter->dump_string("ETag", lo_etag); + } + + if (requested_attributes & as_flag(ReqAttributes::Checksum)) { + s->formatter->open_object_section("Checksum"); + auto iter = attrs.find(RGW_ATTR_CKSUM); + if (iter != attrs.end()) { + try { + rgw::cksum::Cksum cksum; + auto bliter = iter->second.cbegin(); + cksum.decode(bliter); + if (multipart_parts_count && multipart_parts_count > 0) { + s->formatter->dump_string(cksum.element_name(), + fmt::format("{}-{}", cksum.to_armor(), *multipart_parts_count)); + } else { + s->formatter->dump_string(cksum.element_name(), cksum.to_armor()); + } + } catch (buffer::error& err) { + ldpp_dout(this, 0) + << "ERROR: could not decode stored cksum, caught buffer::error" << dendl; + } + } + s->formatter->close_section(); /* Checksum */ + } /* Checksum */ + + if (requested_attributes & as_flag(ReqAttributes::ObjectParts)) { + if (multipart_parts_count && multipart_parts_count > 0) { + + /* XXX the following was needed to see a manifest at list_parts()! */ + op_ret = s->object->load_obj_state(s, s->yield); + if (op_ret < 0) { + ldpp_dout_fmt(this, 0, + "ERROR: {} load_obj_state() failed ret={}", __func__, + op_ret); + } + + ldpp_dout_fmt(this, 16, + "{} attr flags={} parts_count={}", + __func__, requested_attributes, *multipart_parts_count); + + s->formatter->open_object_section("ObjectParts"); + + bool truncated = false; + int next_marker; + + using namespace rgw::sal; + + int ret = + s->object->list_parts( + this, s->cct, + max_parts ? *max_parts : 1000, + marker ? 
*marker : 0, + &next_marker, &truncated, + [&](const Object::Part& part) -> int { + s->formatter->open_object_section("Part"); + s->formatter->dump_int("PartNumber", part.part_number); + s->formatter->dump_unsigned("Size", part.part_size); + if (part.cksum.type != rgw::cksum::Type::none) { + s->formatter->dump_string(part.cksum.element_name(), part.cksum.to_armor()); + } + s->formatter->close_section(); /* Part */ + return 0; + }, s->yield); + + if (ret < 0) { + ldpp_dout_fmt(this, 0, + "ERROR: {} list-parts failed for {}", + __func__, s->object->get_name()); + } + /* AWS docs disagree on the name of this element */ + s->formatter->dump_int("PartsCount", *multipart_parts_count); + s->formatter->dump_int("TotalPartsCount", *multipart_parts_count); + s->formatter->dump_bool("IsTruncated", truncated); + if (max_parts) { + s->formatter->dump_int("MaxParts", *max_parts); + } + if(truncated) { + s->formatter->dump_int("NextPartNumberMarker", next_marker); + } + if (marker) { + s->formatter->dump_int("PartNumberMarker", *marker); + } + s->formatter->close_section(); + } /* multipart_parts_count positive */ + } /* ObjectParts */ + + if (requested_attributes & as_flag(ReqAttributes::ObjectSize)) { + s->formatter->dump_int("ObjectSize", s->obj_size); + } + + if (requested_attributes & as_flag(ReqAttributes::StorageClass)) { + auto iter = attrs.find(RGW_ATTR_STORAGE_CLASS); + if (iter != attrs.end()) { + s->formatter->dump_string("StorageClass", iter->second.to_str()); + } else { + s->formatter->dump_string("StorageClass", "STANDARD"); + } + } + s->formatter->close_section(); + } /* op_ret == 0 */ + + rgw_flush_formatter_and_reset(s, s->formatter); +} /* RGWGetObjAttrs_ObjStore_S3::send_response */ + void RGWGetLC_ObjStore_S3::execute(optional_yield y) { config.set_ctx(s->cct); @@ -4794,6 +4988,7 @@ RGWOp *RGWHandler_REST_Bucket_S3::get_obj_op(bool get_data) const RGWOp *RGWHandler_REST_Bucket_S3::op_get() { + /* XXX maybe we could replace this with an indexing operation */ if (s->info.args.sub_resource_exists("encryption")) return nullptr; @@ -4990,6 +5185,8 @@ RGWOp *RGWHandler_REST_Obj_S3::op_get() return new RGWGetObjLayout_ObjStore_S3; } else if (is_tagging_op()) { return new RGWGetObjTags_ObjStore_S3; + } else if (is_attributes_op()) { + return new RGWGetObjAttrs_ObjStore_S3; } else if (is_obj_retention_op()) { return new RGWGetObjRetention_ObjStore_S3; } else if (is_obj_legal_hold_op()) { @@ -6535,7 +6732,7 @@ rgw::auth::s3::LocalEngine::authenticate( /* Ignore signature for HTTP OPTIONS */ if (s->op_type == RGW_OP_OPTIONS_CORS) { auto apl = apl_factory->create_apl_local( - cct, s, user->get_info(), std::move(account), std::move(policies), + cct, s, std::move(user), std::move(account), std::move(policies), k.subuser, std::nullopt, access_key_id); return result_t::grant(std::move(apl), completer_factory(k.key)); } @@ -6556,7 +6753,7 @@ rgw::auth::s3::LocalEngine::authenticate( } auto apl = apl_factory->create_apl_local( - cct, s, user->get_info(), std::move(account), std::move(policies), + cct, s, std::move(user), std::move(account), std::move(policies), k.subuser, std::nullopt, access_key_id); return result_t::grant(std::move(apl), completer_factory(k.key)); } @@ -6765,7 +6962,7 @@ rgw::auth::s3::STSEngine::authenticate( string subuser; auto apl = local_apl_factory->create_apl_local( - cct, s, user->get_info(), std::move(account), std::move(policies), + cct, s, std::move(user), std::move(account), std::move(policies), subuser, token.perm_mask, std::string(_access_key_id)); return 
result_t::grant(std::move(apl), completer_factory(token.secret_access_key)); } diff --git a/src/rgw/rgw_rest_s3.h b/src/rgw/rgw_rest_s3.h index 50160d79a42..e8fdc69751c 100644 --- a/src/rgw/rgw_rest_s3.h +++ b/src/rgw/rgw_rest_s3.h @@ -374,6 +374,18 @@ public: int get_params(optional_yield y) override; }; +class RGWGetObjAttrs_ObjStore_S3 : public RGWGetObjAttrs_ObjStore { +public: + RGWGetObjAttrs_ObjStore_S3() {} + ~RGWGetObjAttrs_ObjStore_S3() override {} + + int get_params(optional_yield y) override; + int get_decrypt_filter(std::unique_ptr<RGWGetObj_Filter>* filter, + RGWGetObj_Filter* cb, + bufferlist* manifest_bl) override; + void send_response() override; +}; + class RGWGetLC_ObjStore_S3 : public RGWGetLC_ObjStore { protected: RGWLifecycleConfiguration_S3 config; @@ -701,6 +713,9 @@ protected: bool is_acl_op() const { return s->info.args.exists("acl"); } + bool is_attributes_op() const { + return s->info.args.exists("attributes"); + } bool is_cors_op() const { return s->info.args.exists("cors"); } @@ -759,6 +774,9 @@ protected: bool is_acl_op() const { return s->info.args.exists("acl"); } + bool is_attributes_op() const { + return s->info.args.exists("attributes"); + } bool is_tagging_op() const { return s->info.args.exists("tagging"); } diff --git a/src/rgw/rgw_s3_filter.h b/src/rgw/rgw_s3_filter.h index 9bbc4ef0088..0273da9a364 100644 --- a/src/rgw/rgw_s3_filter.h +++ b/src/rgw/rgw_s3_filter.h @@ -9,6 +9,7 @@ class XMLObj; struct rgw_s3_key_filter { + bool operator==(const rgw_s3_key_filter& rhs) const = default; std::string prefix_rule; std::string suffix_rule; std::string regex_rule; diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h index e098c4decf7..97e25179fc9 100644 --- a/src/rgw/rgw_sal.h +++ b/src/rgw/rgw_sal.h @@ -15,6 +15,7 @@ #pragma once +#include <cstdint> #include <optional> #include <boost/intrusive_ptr.hpp> #include <boost/smart_ptr/intrusive_ref_counter.hpp> @@ -26,6 +27,7 @@ #include "rgw_notify_event_type.h" #include "rgw_req_context.h" #include "include/random.h" +#include "include/function2.hpp" // FIXME: following subclass dependencies #include "driver/rados/rgw_user.h" @@ -1004,20 +1006,27 @@ class Bucket { optional_yield y, const DoutPrefixProvider *dpp) = 0; /** Read the name of the pending bucket logging object name */ - virtual int get_logging_object_name(std::string& obj_name, - const std::string& prefix, - optional_yield y, + virtual int get_logging_object_name(std::string& obj_name, + const std::string& prefix, + optional_yield y, const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker) = 0; /** Update the name of the pending bucket logging object name */ - virtual int set_logging_object_name(const std::string& obj_name, - const std::string& prefix, - optional_yield y, - const DoutPrefixProvider *dpp, + virtual int set_logging_object_name(const std::string& obj_name, + const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, bool new_obj, RGWObjVersionTracker* objv_tracker) = 0; + /** Remove the object holding the name of the pending bucket logging object */ + virtual int remove_logging_object_name(const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, + RGWObjVersionTracker* objv_tracker) = 0; /** Move the pending bucket logging object into the bucket */ virtual int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) = 0; + //** Remove the pending bucket logging object */ + virtual int remove_logging_object(const std::string& obj_name, 
optional_yield y, const DoutPrefixProvider *dpp) = 0; /** Write a record to the pending bucket logging object */ virtual int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) = 0; @@ -1169,6 +1178,9 @@ class Object { std::string* version_id, std::string* tag, std::string* etag, void (*progress_cb)(off_t, void *), void* progress_data, const DoutPrefixProvider* dpp, optional_yield y) = 0; + + /** return logging subsystem */ + virtual unsigned get_subsys() { return ceph_subsys_rgw; }; /** Get the ACL for this object */ virtual RGWAccessControlPolicy& get_acl(void) = 0; /** Set the ACL for this object */ @@ -1249,6 +1261,28 @@ class Object { /** Dump driver-specific object layout info in JSON */ virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) = 0; + /* A transfer data type describing metadata specific to one part of a + * completed multipart upload object, following the GetObjectAttributes + * response syntax for Object::Parts here: + * https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html */ + class Part + { + public: + int part_number; + uint32_t part_size; + rgw::cksum::Cksum cksum; + }; /* Part */ + + /* callback function/object used by list_parts */ + using list_parts_each_t = + const fu2::unique_function<int(const Part&) const>; + + /** If multipart, enumerate (a range [marker..marker+[min(max_parts, parts_count-1)] of) parts of the object */ + virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) = 0; + /** Get the cached attributes for this object */ virtual Attrs& get_attrs(void) = 0; /** Get the (const) cached attributes for this object */ @@ -1447,7 +1481,7 @@ public: virtual int init(const DoutPrefixProvider* dpp, optional_yield y, ACLOwner& owner, rgw_placement_rule& dest_placement, rgw::sal::Attrs& attrs) = 0; /** List all the parts of this upload, filling the parts cache */ virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct, - int num_parts, int marker, + int max_parts, int marker, int* next_marker, bool* truncated, optional_yield y, bool assume_unsorted = false) = 0; /** Abort this upload */ @@ -1751,8 +1785,6 @@ class Zone { virtual bool is_writeable() = 0; /** Get the URL for the endpoint for redirecting to this zone */ virtual bool get_redirect_endpoint(std::string* endpoint) = 0; - /** Check to see if the given API is supported in this zone */ - virtual bool has_zonegroup_api(const std::string& api) const = 0; /** Get the current period ID for this zone */ virtual const std::string& get_current_period_id() = 0; /** Get thes system access key for this zone */ diff --git a/src/rgw/rgw_sal_dbstore.cc b/src/rgw/rgw_sal_dbstore.cc index 0e4f95846d1..02fd7a49cda 100644 --- a/src/rgw/rgw_sal_dbstore.cc +++ b/src/rgw/rgw_sal_dbstore.cc @@ -458,14 +458,6 @@ namespace rgw::sal { return false; } - bool DBZone::has_zonegroup_api(const std::string& api) const - { - if (api == "default") - return true; - - return false; - } - const std::string& DBZone::get_current_period_id() { return current_period->get_id(); @@ -496,6 +488,14 @@ namespace rgw::sal { return std::make_unique<DBLuaManager>(this); } + int DBObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + 
optional_yield y) + { + return -EOPNOTSUPP; + } + int DBObject::load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh) { RGWObjState* astate; diff --git a/src/rgw/rgw_sal_dbstore.h b/src/rgw/rgw_sal_dbstore.h index b54249df031..4df10d1dce1 100644 --- a/src/rgw/rgw_sal_dbstore.h +++ b/src/rgw/rgw_sal_dbstore.h @@ -303,7 +303,6 @@ protected: virtual const std::string& get_name() const override; virtual bool is_writeable() override; virtual bool get_redirect_endpoint(std::string* endpoint) override; - virtual bool has_zonegroup_api(const std::string& api) const override; virtual const std::string& get_current_period_id() override; virtual const RGWAccessKey& get_system_key() override; virtual const std::string& get_realm_name() override; @@ -529,6 +528,7 @@ protected: DBObject(DBObject& _o) = default; + virtual unsigned get_subsys() { return ceph_subsys_rgw_dbstore; }; virtual int delete_object(const DoutPrefixProvider* dpp, optional_yield y, uint32_t flags, @@ -554,6 +554,13 @@ protected: virtual int set_acl(const RGWAccessControlPolicy& acl) override { acls = acl; return 0; } virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y, uint32_t flags) override; + + /** If multipart, enumerate (a range [marker..marker+[min(max_parts, parts_count-1)] of) parts of the object */ + virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) override; + virtual int load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh = true) override; virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj = NULL) override; virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp) override; diff --git a/src/rgw/rgw_sal_filter.cc b/src/rgw/rgw_sal_filter.cc index 733bfa39ee2..15da580988e 100644 --- a/src/rgw/rgw_sal_filter.cc +++ b/src/rgw/rgw_sal_filter.cc @@ -1046,6 +1046,17 @@ RGWAccessControlPolicy& FilterObject::get_acl() return next->get_acl(); } +int FilterObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) +{ + return next->list_parts(dpp, cct, max_parts, marker, next_marker, + truncated, + sal::Object::list_parts_each_t(each_func), + y); +} + int FilterObject::load_obj_state(const DoutPrefixProvider *dpp, optional_yield y, bool follow_olh) { return next->load_obj_state(dpp, y, follow_olh); diff --git a/src/rgw/rgw_sal_filter.h b/src/rgw/rgw_sal_filter.h index 43a440e8b10..b6b6ed42b8f 100644 --- a/src/rgw/rgw_sal_filter.h +++ b/src/rgw/rgw_sal_filter.h @@ -108,9 +108,6 @@ public: virtual bool get_redirect_endpoint(std::string* endpoint) override { return next->get_redirect_endpoint(endpoint); } - virtual bool has_zonegroup_api(const std::string& api) const override { - return next->has_zonegroup_api(api); - } virtual const std::string& get_current_period_id() override { return next->get_current_period_id(); } @@ -669,24 +666,33 @@ public: optional_yield y, const DoutPrefixProvider *dpp) override { return next->remove_topics(objv_tracker, y, dpp); } - int get_logging_object_name(std::string& obj_name, - const std::string& prefix, - optional_yield y, + int get_logging_object_name(std::string& obj_name, + const std::string& prefix, + optional_yield y, 
const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker) override { return next->get_logging_object_name(obj_name, prefix, y, dpp, objv_tracker); } - int set_logging_object_name(const std::string& obj_name, - const std::string& prefix, - optional_yield y, - const DoutPrefixProvider *dpp, + int set_logging_object_name(const std::string& obj_name, + const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, bool new_obj, RGWObjVersionTracker* objv_track) override { - return next->set_logging_object_name(obj_name, prefix, y, dpp, new_obj, objv_track); + return next->set_logging_object_name(obj_name, prefix, y, dpp, new_obj, objv_track); + } + int remove_logging_object_name(const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, + RGWObjVersionTracker* objv_tracker) override { + return next->remove_logging_object_name(prefix, y, dpp, objv_tracker); } int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp)override { return next->commit_logging_object(obj_name, y, dpp); } + int remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override { + return next->remove_logging_object(obj_name, y, dpp); + } int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) override { return next->write_logging_object(obj_name, record, y, dpp, async_completion); } @@ -781,6 +787,12 @@ public: virtual bool empty() const override { return next->empty(); } virtual const std::string &get_name() const override { return next->get_name(); } + /** If multipart, enumerate (a range [marker..marker+[min(max_parts, parts_count-1)] of) parts of the object */ + virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct, + int max_parts, int marker, int* next_marker, + bool* truncated, list_parts_each_t each_func, + optional_yield y) override; + virtual int load_obj_state(const DoutPrefixProvider *dpp, optional_yield y, bool follow_olh = true) override; virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, diff --git a/src/rgw/rgw_sal_store.h b/src/rgw/rgw_sal_store.h index 5cb98d23158..99b90564997 100644 --- a/src/rgw/rgw_sal_store.h +++ b/src/rgw/rgw_sal_store.h @@ -253,18 +253,23 @@ class StoreBucket : public Bucket { optional_yield y, const DoutPrefixProvider *dpp) override {return 0;} int remove_topics(RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {return 0;} - int get_logging_object_name(std::string& obj_name, - const std::string& prefix, - optional_yield y, + int get_logging_object_name(std::string& obj_name, + const std::string& prefix, + optional_yield y, const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker) override { return 0; } - int set_logging_object_name(const std::string& obj_name, - const std::string& prefix, - optional_yield y, - const DoutPrefixProvider *dpp, + int set_logging_object_name(const std::string& obj_name, + const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, bool new_obj, RGWObjVersionTracker* objv_tracker) override { return 0; } + int remove_logging_object_name(const std::string& prefix, + optional_yield y, + const DoutPrefixProvider *dpp, + RGWObjVersionTracker* objv_tracker) override { return 0; } int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override { return 0; } + int 
remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override { return 0; } int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) override { return 0; } diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc index 032b3734bf9..937f74601b3 100644 --- a/src/rgw/rgw_swift_auth.cc +++ b/src/rgw/rgw_swift_auth.cc @@ -522,7 +522,7 @@ ExternalTokenEngine::authenticate(const DoutPrefixProvider* dpp, } auto apl = apl_factory->create_apl_local( - cct, s, user->get_info(), std::move(account), + cct, s, std::move(user), std::move(account), std::move(policies), extract_swift_subuser(swift_user), std::nullopt, LocalApplier::NO_ACCESS_KEY); return result_t::grant(std::move(apl)); @@ -685,7 +685,7 @@ SignedTokenEngine::authenticate(const DoutPrefixProvider* dpp, } auto apl = apl_factory->create_apl_local( - cct, s, user->get_info(), std::move(account), + cct, s, std::move(user), std::move(account), std::move(policies), extract_swift_subuser(swift_user), std::nullopt, LocalApplier::NO_ACCESS_KEY); return result_t::grant(std::move(apl)); diff --git a/src/rgw/rgw_swift_auth.h b/src/rgw/rgw_swift_auth.h index 9049c54f5ca..c27a24a2619 100644 --- a/src/rgw/rgw_swift_auth.h +++ b/src/rgw/rgw_swift_auth.h @@ -23,8 +23,8 @@ namespace swift { class TempURLApplier : public rgw::auth::LocalApplier { public: TempURLApplier(CephContext* const cct, - const RGWUserInfo& user_info) - : LocalApplier(cct, user_info, std::nullopt, {}, LocalApplier::NO_SUBUSER, + std::unique_ptr<rgw::sal::User> user) + : LocalApplier(cct, std::move(user), std::nullopt, {}, LocalApplier::NO_SUBUSER, std::nullopt, LocalApplier::NO_ACCESS_KEY) {} @@ -155,8 +155,8 @@ public: class SwiftAnonymousApplier : public rgw::auth::LocalApplier { public: SwiftAnonymousApplier(CephContext* const cct, - const RGWUserInfo& user_info) - : LocalApplier(cct, user_info, std::nullopt, {}, LocalApplier::NO_SUBUSER, + std::unique_ptr<rgw::sal::User> user) + : LocalApplier(cct, std::move(user), std::nullopt, {}, LocalApplier::NO_SUBUSER, std::nullopt, LocalApplier::NO_ACCESS_KEY) { } bool is_admin_of(const rgw_owner& o) const {return false;} @@ -238,7 +238,7 @@ class DefaultStrategy : public rgw::auth::Strategy, aplptr_t create_apl_local(CephContext* const cct, const req_state* const s, - const RGWUserInfo& user_info, + std::unique_ptr<rgw::sal::User> user, std::optional<RGWAccountInfo> account, std::vector<IAM::Policy> policies, const std::string& subuser, @@ -247,7 +247,7 @@ class DefaultStrategy : public rgw::auth::Strategy, auto apl = \ rgw::auth::add_3rdparty(driver, rgw_user(s->account_name), rgw::auth::add_sysreq(cct, driver, s, - LocalApplier(cct, user_info, std::move(account), std::move(policies), + LocalApplier(cct, std::move(user), std::move(account), std::move(policies), subuser, perm_mask, access_key_id))); /* TODO(rzarzynski): replace with static_ptr. */ return aplptr_t(new decltype(apl)(std::move(apl))); @@ -259,7 +259,9 @@ class DefaultStrategy : public rgw::auth::Strategy, /* TempURL doesn't need any user account override. It's a Swift-specific * mechanism that requires account name internally, so there is no * business with delegating the responsibility outside. 
*/ - return aplptr_t(new rgw::auth::swift::TempURLApplier(cct, user_info)); + std::unique_ptr<rgw::sal::User> user = s->user->clone(); + user->get_info() = user_info; + return aplptr_t(new rgw::auth::swift::TempURLApplier(cct, std::move(user))); } public: diff --git a/src/rgw/services/svc_zone.cc b/src/rgw/services/svc_zone.cc index 70cf40eb6cb..97d81550058 100644 --- a/src/rgw/services/svc_zone.cc +++ b/src/rgw/services/svc_zone.cc @@ -657,18 +657,6 @@ const string& RGWSI_Zone::get_current_period_id() const return current_period->get_id(); } -bool RGWSI_Zone::has_zonegroup_api(const std::string& api) const -{ - if (!current_period->get_id().empty()) { - const auto& zonegroups_by_api = current_period->get_map().zonegroups_by_api; - if (zonegroups_by_api.find(api) != zonegroups_by_api.end()) - return true; - } else if (zonegroup->api_name == api) { - return true; - } - return false; -} - bool RGWSI_Zone::zone_is_writeable() { return writeable_zone && !get_zone().is_read_only(); @@ -743,8 +731,7 @@ bool RGWSI_Zone::is_meta_master() const bool RGWSI_Zone::need_to_log_metadata() const { - return is_meta_master() && - (zonegroup->zones.size() > 1 || current_period->is_multi_zonegroups_with_zones()); + return is_meta_master() && is_syncing_bucket_meta(); } bool RGWSI_Zone::can_reshard() const @@ -761,33 +748,16 @@ bool RGWSI_Zone::can_reshard() const /** * Check to see if the bucket metadata could be synced - * bucket: the bucket to check * Returns false is the bucket is not synced */ -bool RGWSI_Zone::is_syncing_bucket_meta(const rgw_bucket& bucket) +bool RGWSI_Zone::is_syncing_bucket_meta() const { - /* no current period */ if (current_period->get_id().empty()) { return false; } - /* zonegroup is not master zonegroup */ - if (!zonegroup->is_master_zonegroup()) { - return false; - } - - /* single zonegroup and a single zone */ - if (current_period->is_single_zonegroup() && zonegroup->zones.size() == 1) { - return false; - } - - /* zone is not master */ - if (zonegroup->master_zone != zone_public_config->id) { - return false; - } - - return true; + return zonegroup->zones.size() > 1 || current_period->is_multi_zonegroups_with_zones(); } diff --git a/src/rgw/services/svc_zone.h b/src/rgw/services/svc_zone.h index c4a3a28f0d7..719546eb8db 100644 --- a/src/rgw/services/svc_zone.h +++ b/src/rgw/services/svc_zone.h @@ -96,7 +96,6 @@ public: uint32_t get_zone_short_id() const; const std::string& get_current_period_id() const; - bool has_zonegroup_api(const std::string& api) const; bool zone_is_writeable(); bool zone_syncs_from(const RGWZone& target_zone, const RGWZone& source_zone) const; @@ -146,7 +145,7 @@ public: bool need_to_log_data() const; bool need_to_log_metadata() const; bool can_reshard() const; - bool is_syncing_bucket_meta(const rgw_bucket& bucket); + bool is_syncing_bucket_meta() const; int list_zonegroups(const DoutPrefixProvider *dpp, std::list<std::string>& zonegroups); int list_regions(const DoutPrefixProvider *dpp, std::list<std::string>& regions); |
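With the simplification above, need_to_log_metadata() becomes "this zone is the metadata master" combined with the parameter-less is_syncing_bucket_meta(), which now only asks whether the current period has anyone to sync to. A condensed sketch of the combined condition, with a plain struct standing in for the RGWSI_Zone state (member names are illustrative):

struct ZoneSyncView {
  bool has_current_period = false;          // current_period->get_id() is non-empty
  unsigned zones_in_zonegroup = 1;          // zonegroup->zones.size()
  bool multi_zonegroups_with_zones = false; // current_period->is_multi_zonegroups_with_zones()
  bool is_meta_master = false;
};

// Mirrors the simplified is_syncing_bucket_meta(): no period means no multisite;
// otherwise bucket metadata syncs when more than one zone, or more than one
// populated zonegroup, can consume the metadata log.
bool is_syncing_bucket_meta(const ZoneSyncView& z)
{
  if (!z.has_current_period) {
    return false;
  }
  return z.zones_in_zonegroup > 1 || z.multi_zonegroups_with_zones;
}

bool need_to_log_metadata(const ZoneSyncView& z)
{
  return z.is_meta_master && is_syncing_bucket_meta(z);
}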