Diffstat (limited to 'src/rgw/rgw_op.cc')
-rw-r--r--  src/rgw/rgw_op.cc | 647
1 file changed, 489 insertions(+), 158 deletions(-)
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 52b407cc9bd..7b0ca3134a3 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -25,8 +25,10 @@
#include "common/ceph_json.h"
#include "common/static_ptr.h"
#include "common/perf_counters_key.h"
+#include "rgw_cksum.h"
#include "rgw_cksum_digest.h"
#include "rgw_common.h"
+#include "common/split.h"
#include "rgw_tracer.h"
#include "rgw_rados.h"
@@ -64,6 +66,7 @@
#include "rgw_lua.h"
#include "rgw_iam_managed_policy.h"
#include "rgw_bucket_sync.h"
+#include "rgw_bucket_logging.h"
#include "services/svc_zone.h"
#include "services/svc_quota.h"
@@ -148,7 +151,7 @@ int rgw_forward_request_to_master(const DoutPrefixProvider* dpp,
// use the master zone's endpoints
auto conn = RGWRESTConn{dpp->get_cct(), z->second.id, z->second.endpoints,
- creds, zg->second.id, zg->second.api_name};
+ creds, site.get_zonegroup().id, zg->second.api_name};
bufferlist outdata;
constexpr size_t max_response_size = 128 * 1024; // we expect a very small response
int ret = conn.forward(dpp, effective_owner, req, nullptr,
@@ -330,21 +333,6 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp,
return ret;
}
-
-static boost::optional<Policy>
-get_iam_policy_from_attr(CephContext* cct,
- const map<string, bufferlist>& attrs)
-{
- if (auto i = attrs.find(RGW_ATTR_IAM_POLICY); i != attrs.end()) {
- // resource policy is not restricted to the current tenant
- const std::string* policy_tenant = nullptr;
-
- return Policy(cct, policy_tenant, i->second.to_str(), false);
- } else {
- return none;
- }
-}
-
static boost::optional<PublicAccessBlockConfiguration>
get_public_access_conf_from_attr(const map<string, bufferlist>& attrs)
{
@@ -424,7 +412,7 @@ static int read_obj_policy(const DoutPrefixProvider *dpp,
mpobj->set_in_extra_data(true);
object = mpobj.get();
}
- policy = get_iam_policy_from_attr(s->cct, bucket_attrs);
+ policy = get_iam_policy_from_attr(s->cct, bucket_attrs, s->bucket_tenant);
int ret = get_obj_policy_from_attr(dpp, s->cct, driver, s->bucket_owner,
acl, storage_class, object, s->yield);
@@ -602,7 +590,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d
}
try {
- s->iam_policy = get_iam_policy_from_attr(s->cct, s->bucket_attrs);
+ s->iam_policy = get_iam_policy_from_attr(s->cct, s->bucket_attrs, s->bucket_tenant);
} catch (const std::exception& e) {
ldpp_dout(dpp, 0) << "Error reading IAM Policy: " << e.what() << dendl;
@@ -759,7 +747,7 @@ static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s, r
return 0;
}
-static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s) {
+int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s) {
return rgw_iam_add_buckettags(dpp, s, s->bucket.get());
}
@@ -832,7 +820,7 @@ static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvide
return make_tuple(has_existing_obj_tag, has_resource_tag);
}
-static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp, req_state* s, bool check_obj_exist_tag=true) {
+std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp, req_state* s, bool check_obj_exist_tag) {
return rgw_check_policy_condition(dpp, s->iam_policy, s->iam_identity_policies, s->session_policies, check_obj_exist_tag);
}
@@ -943,37 +931,147 @@ void handle_replication_status_header(
}
/*
- * GET on CloudTiered objects is processed only when sent from the sync client.
- * In all other cases, fail with `ERR_INVALID_OBJECT_STATE`.
+ * GET on a CloudTiered object is either synced to other zones (when the
+ * request comes from the sync client) or, in all other cases, served by
+ * restoring the object from the remote cloud endpoint.
+ *
+ * @return:
+ * Note - the return status may differ based on whether this is a RESTORE op
+ * or a READTHROUGH/GET op. For example, ERR_INVALID_OBJECT_STATE is sent for
+ * non cloud-transitioned objects in case of a restore op, while
+ * ERR_REQUEST_TIMEOUT applies only to read-through.
+ * `<0` : failed to process; s->err.message & op_ret set accordingly
+ * `0`  : restore request initiated
+ * `1`  : restore is already in progress
+ * `2`  : already restored
*/
-int handle_cloudtier_obj(rgw::sal::Attrs& attrs, bool sync_cloudtiered) {
+int handle_cloudtier_obj(req_state* s, const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
+ rgw::sal::Attrs& attrs, bool sync_cloudtiered, std::optional<uint64_t> days,
+ bool restore_op, optional_yield y)
+{
int op_ret = 0;
+ ldpp_dout(dpp, 20) << "reached handle cloud tier " << dendl;
auto attr_iter = attrs.find(RGW_ATTR_MANIFEST);
- if (attr_iter != attrs.end()) {
- RGWObjManifest m;
- try {
- decode(m, attr_iter->second);
- if (m.get_tier_type() == "cloud-s3") {
- if (!sync_cloudtiered) {
- /* XXX: Instead send presigned redirect or read-through */
+ if (attr_iter == attrs.end()) {
+ if (restore_op) {
+ op_ret = -ERR_INVALID_OBJECT_STATE;
+ s->err.message = "only cloud tier object can be restored";
+ return op_ret;
+ } else { //ignore for read-through
+ return 0;
+ }
+ }
+ RGWObjManifest m;
+ try {
+ decode(m, attr_iter->second);
+ if (m.get_tier_type() != "cloud-s3") {
+ ldpp_dout(dpp, 20) << "not a cloud tier object " << s->object->get_key().name << dendl;
+ if (restore_op) {
+ op_ret = -ERR_INVALID_OBJECT_STATE;
+ s->err.message = "only cloud tier object can be restored";
+ return op_ret;
+ } else { //ignore for read-through
+ return 0;
+ }
+ }
+ RGWObjTier tier_config;
+ m.get_tier_config(&tier_config);
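+    // sync client request: expose the cloud-tier type and config as object
+    // attrs so they replicate along with the object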
+ if (sync_cloudtiered) {
+ bufferlist t, t_tier;
+ t.append("cloud-s3");
+ attrs[RGW_ATTR_CLOUD_TIER_TYPE] = t;
+ encode(tier_config, t_tier);
+ attrs[RGW_ATTR_CLOUD_TIER_CONFIG] = t_tier;
+ return op_ret;
+ }
+ attr_iter = attrs.find(RGW_ATTR_RESTORE_STATUS);
+ rgw::sal::RGWRestoreStatus restore_status = rgw::sal::RGWRestoreStatus::None;
+ if (attr_iter != attrs.end()) {
+ bufferlist bl = attr_iter->second;
+ auto iter = bl.cbegin();
+ decode(restore_status, iter);
+ }
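+  // three cases follow: no prior restore (or a failed one) -> initiate it;
+  // restore in progress -> report/retry; otherwise the object is already restored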
+ if (attr_iter == attrs.end() || restore_status == rgw::sal::RGWRestoreStatus::RestoreFailed) {
+ // first time restore or previous restore failed
+      rgw::sal::Bucket* pbucket = s->bucket.get();
+
+ std::unique_ptr<rgw::sal::PlacementTier> tier;
+ rgw_placement_rule target_placement;
+ target_placement.inherit_from(pbucket->get_placement_rule());
+ attr_iter = attrs.find(RGW_ATTR_STORAGE_CLASS);
+ if (attr_iter != attrs.end()) {
+ target_placement.storage_class = attr_iter->second.to_str();
+ }
+ op_ret = driver->get_zone()->get_zonegroup().get_placement_tier(target_placement, &tier);
+ ldpp_dout(dpp, 20) << "getting tier placement handle cloud tier" << op_ret <<
+ " storage class " << target_placement.storage_class << dendl;
+ if (op_ret < 0) {
+ s->err.message = "failed to restore object";
+ return op_ret;
+ }
+ rgw::sal::RadosPlacementTier* rtier = static_cast<rgw::sal::RadosPlacementTier*>(tier.get());
+ tier_config.tier_placement = rtier->get_rt();
+ if (!restore_op) {
+ if (tier_config.tier_placement.allow_read_through) {
+ days = tier_config.tier_placement.read_through_restore_days;
+ } else { //read-through is not enabled
op_ret = -ERR_INVALID_OBJECT_STATE;
- } else { // fetch object for sync and set cloud_tier attrs
- bufferlist t, t_tier;
- RGWObjTier tier_config;
- m.get_tier_config(&tier_config);
-
- t.append("cloud-s3");
- attrs[RGW_ATTR_CLOUD_TIER_TYPE] = t;
- encode(tier_config, t_tier);
- attrs[RGW_ATTR_CLOUD_TIER_CONFIG] = t_tier;
+ s->err.message = "Read through is not enabled for this config";
+ return op_ret;
}
}
- } catch (const buffer::end_of_buffer&) {
- // ignore empty manifest; it's not cloud-tiered
- } catch (const std::exception& e) {
+ // fill in the entry. XXX: Maybe we can avoid it by passing only necessary params
+ rgw_bucket_dir_entry ent;
+ ent.key.name = s->object->get_key().name;
+ ent.meta.accounted_size = ent.meta.size = s->obj_size;
+    ent.meta.etag = "";
+ ceph::real_time mtime = s->object->get_mtime();
+ uint64_t epoch = 0;
+ op_ret = get_system_versioning_params(s, &epoch, NULL);
+ ldpp_dout(dpp, 20) << "getting versioning params tier placement handle cloud tier" << op_ret << dendl;
+ if (op_ret < 0) {
+ ldpp_dout(dpp, 20) << "failed to get versioning params, op_ret = " << op_ret << dendl;
+ s->err.message = "failed to restore object";
+ return op_ret;
+ }
+ op_ret = s->object->restore_obj_from_cloud(pbucket, tier.get(), target_placement, ent, s->cct, tier_config,
+ mtime, epoch, days, dpp, y, s->bucket->get_info().flags);
+ if (op_ret < 0) {
+ ldpp_dout(dpp, 0) << "object " << ent.key.name << " fetching failed" << op_ret << dendl;
+ s->err.message = "failed to restore object";
+ return op_ret;
+ }
+ ldpp_dout(dpp, 20) << "object " << ent.key.name << " fetching succeed" << dendl;
+    /* Even if the restore completes, the first read-through request returns
+     * an error, since the object is actually downloaded asynchronously.
+     */
+ if (!restore_op) { //read-through
+ op_ret = -ERR_REQUEST_TIMEOUT;
+ ldpp_dout(dpp, 5) << "restore is still in progress, please check restore status and retry" << dendl;
+ s->err.message = "restore is still in progress";
+ }
+ return op_ret;
+ } else if (restore_status == rgw::sal::RGWRestoreStatus::RestoreAlreadyInProgress) {
+ if (!restore_op) {
+ op_ret = -ERR_REQUEST_TIMEOUT;
+ ldpp_dout(dpp, 5) << "restore is still in progress, please check restore status and retry" << dendl;
+ s->err.message = "restore is still in progress";
+ return op_ret;
+ } else {
+ return 1; // for restore-op, corresponds to RESTORE_ALREADY_IN_PROGRESS
+ }
+ } else {
+ return 2; // corresponds to CLOUD_RESTORED
}
+ } catch (const buffer::end_of_buffer&) {
+    // empty manifest; it's not cloud-tiered
+ if (restore_op) {
+ op_ret = -ERR_INVALID_OBJECT_STATE;
+ s->err.message = "only cloud tier object can be restored";
+ }
+ } catch (const std::exception& e) {
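+    // XXX: other decode errors are silently swallowed here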
}
-
return op_ret;
}
@@ -1241,9 +1339,9 @@ void RGWDeleteBucketTags::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
- rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_TAGS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ op_ret = s->bucket->put_info(this, false, real_time(), y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
<< s->bucket->get_name()
@@ -1971,7 +2069,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y)
ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
return r;
}
- _bucket_policy = get_iam_policy_from_attr(s->cct, bucket_attrs);
+ _bucket_policy = get_iam_policy_from_attr(s->cct, bucket_attrs, auth_tenant);
bucket_policy = &_bucket_policy;
pbucket = ubucket.get();
} else {
@@ -2106,7 +2204,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
return r;
}
auto _bucket_policy = get_iam_policy_from_attr(
- s->cct, tmp_bucket->get_attrs());
+ s->cct, tmp_bucket->get_attrs(), auth_tenant);
bucket_policy = _bucket_policy.get_ptr();
buckets[bucket_name].swap(tmp_bucket);
policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
@@ -2246,6 +2344,7 @@ void RGWGetObj::execute(optional_yield y)
rgw::op_counters::inc(counters, l_rgw_op_get_obj, 1);
std::unique_ptr<rgw::sal::Object::ReadOp> read_op(s->object->get_read_op());
+ std::string etag;
op_ret = get_params(y);
if (op_ret < 0)
@@ -2368,15 +2467,12 @@ void RGWGetObj::execute(optional_yield y)
} catch (const buffer::error&) {}
}
-
if (get_type() == RGW_OP_GET_OBJ && get_data) {
- op_ret = handle_cloudtier_obj(attrs, sync_cloudtiered);
+ std::optional<uint64_t> days;
+ op_ret = handle_cloudtier_obj(s, this, driver, attrs, sync_cloudtiered, days, false, y);
if (op_ret < 0) {
ldpp_dout(this, 4) << "Cannot get cloud tiered object: " << *s->object
- <<". Failing with " << op_ret << dendl;
- if (op_ret == -ERR_INVALID_OBJECT_STATE) {
- s->err.message = "This object was transitioned to cloud-s3";
- }
+ <<". Failing with " << op_ret << dendl;
goto done_err;
}
}
@@ -3028,17 +3124,19 @@ static int load_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y,
void RGWStatBucket::execute(optional_yield y)
{
- if (!s->bucket_exists) {
- op_ret = -ERR_NO_SUCH_BUCKET;
+ op_ret = get_params(y);
+ if (op_ret < 0) {
return;
}
- op_ret = driver->load_bucket(this, s->bucket->get_key(), &bucket, y);
- if (op_ret) {
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
return;
}
- op_ret = load_bucket_stats(this, y, *s->bucket, stats);
+ if (report_stats) {
+ op_ret = load_bucket_stats(this, y, *s->bucket, stats);
+ }
}
int RGWListBucket::verify_permission(optional_yield y)
@@ -3131,19 +3229,6 @@ void RGWListBucket::execute(optional_yield y)
rgw::op_counters::tinc(counters, l_rgw_op_list_obj_lat, s->time_elapsed());
}
-int RGWGetBucketLogging::verify_permission(optional_yield y)
-{
- auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
- if (has_s3_resource_tag)
- rgw_iam_add_buckettags(this, s);
-
- if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketLogging)) {
- return -EACCES;
- }
-
- return 0;
-}
-
int RGWGetBucketLocation::verify_permission(optional_yield y)
{
auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
@@ -3475,54 +3560,62 @@ void RGWCreateBucket::execute(optional_yield y)
const rgw::SiteConfig& site = *s->penv.site;
const std::optional<RGWPeriod>& period = site.get_period();
const RGWZoneGroup& my_zonegroup = site.get_zonegroup();
-
- if (s->system_request) {
- // allow system requests to override the target zonegroup. for forwarded
- // requests, we'll create the bucket for the originating zonegroup
- createparams.zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
- }
-
+ const std::string rgwx_zonegroup = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
const RGWZoneGroup* bucket_zonegroup = &my_zonegroup;
- if (createparams.zonegroup_id.empty()) {
- // default to the local zonegroup
- createparams.zonegroup_id = my_zonegroup.id;
- } else if (period) {
- auto z = period->period_map.zonegroups.find(createparams.zonegroup_id);
- if (z == period->period_map.zonegroups.end()) {
- ldpp_dout(this, 0) << "could not find zonegroup "
- << createparams.zonegroup_id << " in current period" << dendl;
- op_ret = -ENOENT;
- return;
- }
- bucket_zonegroup = &z->second;
- } else if (createparams.zonegroup_id != my_zonegroup.id) {
- ldpp_dout(this, 0) << "zonegroup does not match current zonegroup "
- << createparams.zonegroup_id << dendl;
- op_ret = -ENOENT;
- return;
- }
- // validate the LocationConstraint
+ // Validate LocationConstraint if it's provided and enforcement is strict
if (!location_constraint.empty() && !relaxed_region_enforcement) {
- // on the master zonegroup, allow any valid api_name. otherwise it has to
- // match the bucket's zonegroup
- if (period && my_zonegroup.is_master) {
- if (!period->period_map.zonegroups_by_api.count(location_constraint)) {
+ if (period) {
+ auto location_iter = period->period_map.zonegroups_by_api.find(location_constraint);
+ if (location_iter == period->period_map.zonegroups_by_api.end()) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint
<< ") can't be found." << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
- s->err.message = "The specified location-constraint is not valid";
+ s->err.message = fmt::format("The {} location constraint is not valid.",
+ location_constraint);
return;
}
- } else if (bucket_zonegroup->api_name != location_constraint) {
+ bucket_zonegroup = &location_iter->second;
+    } else if (location_constraint != my_zonegroup.api_name) {
+      // without a period there is no multisite map, so the only usable
+      // zonegroup is the current one - match the location by api_name
ldpp_dout(this, 0) << "location constraint (" << location_constraint
- << ") doesn't match zonegroup (" << bucket_zonegroup->api_name
- << ')' << dendl;
- op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
- s->err.message = "The specified location-constraint is not valid";
+ << ") doesn't match zonegroup (" << my_zonegroup.api_name << ")" << dendl;
+ op_ret = -ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION;
+ s->err.message = fmt::format("The {} location constraint is incompatible "
+ "for the region specific endpoint this request was sent to.",
+ location_constraint);
return;
}
}
+ // If it's a system request, use the provided zonegroup if available
+ else if (s->system_request && !rgwx_zonegroup.empty()) {
+ if (period) {
+ auto zonegroup_iter = period->period_map.zonegroups.find(rgwx_zonegroup);
+ if (zonegroup_iter == period->period_map.zonegroups.end()) {
+ ldpp_dout(this, 0) << "could not find zonegroup " << rgwx_zonegroup
+ << " in current period" << dendl;
+ op_ret = -ENOENT;
+ return;
+ }
+ bucket_zonegroup = &zonegroup_iter->second;
+ }
+ }
+
+  const bool enforce_location_match =
+    !period ||               // no period: no multisite, so nothing to enforce
+    !s->system_request ||    // all user requests must match the local zonegroup
+    !my_zonegroup.is_master; // a forwarded system request may create remotely only on the master zonegroup
+ if (enforce_location_match && !my_zonegroup.equals(bucket_zonegroup->get_id())) {
+ ldpp_dout(this, 0) << "location constraint (" << bucket_zonegroup->api_name
+ << ") doesn't match zonegroup (" << my_zonegroup.api_name << ")" << dendl;
+ op_ret = -ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION;
+ s->err.message = fmt::format("The {} location constraint is incompatible "
+ "for the region specific endpoint this request was sent to.",
+ bucket_zonegroup->api_name);
+ return;
+ }
+
+ // Set the final zonegroup ID
+ createparams.zonegroup_id = bucket_zonegroup->id;
// select and validate the placement target
op_ret = select_bucket_placement(this, *bucket_zonegroup, s->user->get_info(),
@@ -3531,7 +3624,7 @@ void RGWCreateBucket::execute(optional_yield y)
return;
}
- if (bucket_zonegroup == &my_zonegroup) {
+ if (my_zonegroup.equals(bucket_zonegroup->get_id())) {
// look up the zone placement pool
createparams.zone_placement = rgw::find_zone_placement(
this, site.get_zone_params(), createparams.placement_rule);
@@ -3620,7 +3713,6 @@ void RGWCreateBucket::execute(optional_yield y)
if (!driver->is_meta_master()) {
// apply bucket creation on the master zone first
- bufferlist in_data;
JSONParser jp;
op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, &jp, s->info, y);
@@ -3697,7 +3789,10 @@ void RGWCreateBucket::execute(optional_yield y)
s->bucket->get_info().has_website = !s->bucket->get_info().website_conf.is_empty();
/* This will also set the quota on the bucket. */
- op_ret = s->bucket->merge_and_store_attrs(this, createparams.attrs, y);
+ s->bucket->set_attrs(std::move(createparams.attrs));
+ constexpr bool exclusive = false; // overwrite
+ constexpr ceph::real_time no_set_mtime{};
+ op_ret = s->bucket->put_info(this, exclusive, no_set_mtime, y);
} while (op_ret == -ECANCELED && tries++ < 20);
/* Restore the proper return code. */
@@ -3853,6 +3948,7 @@ int RGWPutObj::init_processing(optional_yield y) {
return ret;
}
copy_source_bucket_info = bucket->get_info();
+ copy_source_bucket_attrs = bucket->get_attrs();
/* handle x-amz-copy-source-range */
if (copy_source_range) {
@@ -3912,7 +4008,6 @@ int RGWPutObj::verify_permission(optional_yield y)
RGWAccessControlPolicy cs_acl;
boost::optional<Policy> policy;
- map<string, bufferlist> cs_attrs;
auto cs_bucket = driver->get_bucket(copy_source_bucket_info);
auto cs_object = cs_bucket->get_object(rgw_obj_key(copy_source_object_name,
copy_source_version_id));
@@ -3920,7 +4015,7 @@ int RGWPutObj::verify_permission(optional_yield y)
cs_object->set_prefetch_data();
/* check source object permissions */
- int ret = read_obj_policy(this, driver, s, copy_source_bucket_info, cs_attrs, cs_acl, nullptr,
+ int ret = read_obj_policy(this, driver, s, copy_source_bucket_info, copy_source_bucket_attrs, cs_acl, nullptr,
policy, cs_bucket.get(), cs_object.get(), y, true);
if (ret < 0) {
return ret;
@@ -3929,7 +4024,7 @@ int RGWPutObj::verify_permission(optional_yield y)
RGWAccessControlPolicy cs_bucket_acl;
ret = rgw_op_get_bucket_policy_from_attr(this, s->cct, driver,
copy_source_bucket_info.owner,
- cs_attrs, cs_bucket_acl, y);
+ copy_source_bucket_attrs, cs_bucket_acl, y);
if (ret < 0) {
return ret;
}
@@ -4248,6 +4343,9 @@ void RGWPutObj::execute(optional_yield y)
}
return;
}
+
+ multipart_cksum_type = upload->cksum_type;
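+  // stash the upload's checksum type; it is passed to the checksum filter
+  // factory below so every part uses the same algorithm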
+
/* upload will go out of scope, so copy the dest placement for later use */
s->dest_placement = *pdest_placement;
pdest_placement = &s->dest_placement;
@@ -4378,11 +4476,12 @@ void RGWPutObj::execute(optional_yield y)
/* optional streaming checksum */
try {
cksum_filter =
- rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env);
+ rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env, multipart_cksum_type);
} catch (const rgw::io::Exception& e) {
op_ret = -e.code().value();
return;
}
+
if (cksum_filter) {
filter = &*cksum_filter;
}
@@ -4529,10 +4628,12 @@ void RGWPutObj::execute(optional_yield y)
if (cksum_filter) {
const auto& hdr = cksum_filter->header();
+ auto expected_ck = cksum_filter->expected(*s->info.env);
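+    // a missing client checksum is not an error: with no expected value,
+    // the computed checksum is simply stored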
auto cksum_verify =
cksum_filter->verify(*s->info.env); // valid or no supplied cksum
cksum = get<1>(cksum_verify);
- if (std::get<0>(cksum_verify)) {
+ if ((!expected_ck) ||
+ std::get<0>(cksum_verify)) {
buffer::list cksum_bl;
ldpp_dout_fmt(this, 16,
@@ -4540,14 +4641,13 @@ void RGWPutObj::execute(optional_yield y)
"\n\tcomputed={} == \n\texpected={}",
hdr.second,
cksum->to_armor(),
- cksum_filter->expected(*s->info.env));
+ (!!expected_ck) ? expected_ck : "(checksum unavailable)");
cksum->encode(cksum_bl);
emplace_attr(RGW_ATTR_CKSUM, std::move(cksum_bl));
} else {
/* content checksum mismatch */
auto computed_ck = cksum->to_armor();
- auto expected_ck = cksum_filter->expected(*s->info.env);
ldpp_dout_fmt(this, 4,
"{} content checksum mismatch"
@@ -4588,6 +4688,13 @@ void RGWPutObj::execute(optional_yield y)
obj_retention->encode(obj_retention_bl);
emplace_attr(RGW_ATTR_OBJECT_RETENTION, std::move(obj_retention_bl));
}
+
+ if (!multipart) {
+ op_ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), etag, s->object->get_size(), this, y, false, false);
+ if (op_ret < 0) {
+ return;
+ }
+ }
// don't track the individual parts of multipart uploads. they replicate in
// full after CompleteMultipart
@@ -4743,7 +4850,8 @@ void RGWPostObj::execute(optional_yield y)
/* optional streaming checksum */
try {
cksum_filter =
- rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env);
+ rgw::putobj::RGWPutObj_Cksum::Factory(
+ filter, *s->info.env, rgw::cksum::Type::none /* no override */);
} catch (const rgw::io::Exception& e) {
op_ret = -e.code().value();
return;
@@ -5091,7 +5199,10 @@ void RGWPutMetadataBucket::execute(optional_yield y)
/* Setting attributes also stores the provided bucket info. Due
* to this fact, the new quota settings can be serialized with
* the same call. */
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ s->bucket->set_attrs(attrs);
+ constexpr bool exclusive = false; // overwrite
+ constexpr ceph::real_time no_set_mtime{};
+ op_ret = s->bucket->put_info(this, exclusive, no_set_mtime, s->yield);
return op_ret;
}, y);
}
@@ -5157,6 +5268,54 @@ void RGWPutMetadataObject::execute(optional_yield y)
op_ret = s->object->set_obj_attrs(this, &attrs, &rmattrs, s->yield, rgw::sal::FLAG_LOG_OP);
}
+int RGWRestoreObj::init_processing(optional_yield y)
+{
+ int op_ret = get_params(y);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ return RGWOp::init_processing(y);
+}
+
+int RGWRestoreObj::verify_permission(optional_yield y)
+{
+ if (!verify_bucket_permission(this, s, ARN(s->object->get_obj()),
+ rgw::IAM::s3RestoreObject)) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void RGWRestoreObj::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWRestoreObj::execute(optional_yield y)
+{
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
+ return;
+ }
+
+ s->object->set_atomic();
+ int op_ret = s->object->get_obj_attrs(y, this);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "failed to fetch get_obj_attrs op ret = " << op_ret << dendl;
+ restore_ret = op_ret;
+ return;
+ }
+  rgw::sal::Attrs attrs = s->object->get_attrs();
+ op_ret = handle_cloudtier_obj(s, this, driver, attrs, false, expiry_days, true, y);
+ restore_ret = op_ret;
+ ldpp_dout(this, 20) << "Restore completed of object: " << *s->object << "with op ret: " << restore_ret <<dendl;
+
+ return;
+}
+
int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
{
RGWSLOInfo slo_info;
@@ -5383,6 +5542,13 @@ void RGWDeleteObj::execute(optional_yield y)
}
}
+ if (op_ret == 0) {
+ if (auto ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), etag, obj_size, this, y, false, false); ret < 0) {
+ // don't reply with an error in case of failed delete logging
+ ldpp_dout(this, 5) << "WARNING: DELETE operation ignores bucket logging failure: " << ret << dendl;
+ }
+ }
+
if (op_ret == -ECANCELED) {
op_ret = 0;
}
@@ -5553,7 +5719,7 @@ int RGWCopyObj::verify_permission(optional_yield y)
if (op_ret < 0) {
return op_ret;
}
- auto dest_iam_policy = get_iam_policy_from_attr(s->cct, s->bucket->get_attrs());
+ auto dest_iam_policy = get_iam_policy_from_attr(s->cct, s->bucket->get_attrs(), s->bucket_tenant);
//Add destination bucket tags for authorization
auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, dest_iam_policy, s->iam_identity_policies, s->session_policies);
@@ -5727,6 +5893,12 @@ void RGWCopyObj::execute(optional_yield y)
return;
}
+ etag = s->src_object->get_attrs()[RGW_ATTR_ETAG].to_str();
+ op_ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), etag, obj_size, this, y, false, false);
+ if (op_ret < 0) {
+ return;
+ }
+
op_ret = s->src_object->copy_object(s->owner,
s->user->get_id(),
&s->info,
@@ -5755,12 +5927,17 @@ void RGWCopyObj::execute(optional_yield y)
this,
s->yield);
+ int ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Standard, s->src_object.get(), s, "REST.COPY.OBJECT_GET", etag, obj_size, this, y, true, true);
+ if (ret < 0) {
+ ldpp_dout(this, 5) << "WARNING: COPY operation ignores bucket logging failure of the GET part: " << ret << dendl;
+ }
+
if (op_ret < 0) {
return;
}
// send request to notification manager
- int ret = res->publish_commit(this, obj_size, mtime, etag, s->object->get_instance());
+ ret = res->publish_commit(this, obj_size, mtime, etag, s->object->get_instance());
if (ret < 0) {
ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
// too late to rollback operation, hence op_ret is not set here
@@ -5813,8 +5990,6 @@ void RGWGetACLs::execute(optional_yield y)
acls = ss.str();
}
-
-
int RGWPutACLs::verify_permission(optional_yield y)
{
bool perm;
@@ -5836,6 +6011,74 @@ int RGWPutACLs::verify_permission(optional_yield y)
return 0;
}
+uint16_t RGWGetObjAttrs::recognize_attrs(const std::string& hdr, uint16_t deflt)
+{
+ auto attrs{deflt};
+ auto sa = ceph::split(hdr, ",");
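+  // e.g., hdr is the S3 X-Amz-Object-Attributes header value:
+  // "ETag,Checksum,ObjectParts,StorageClass,ObjectSize"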
+ for (auto& k : sa) {
+ if (boost::iequals(k, "etag")) {
+ attrs |= as_flag(ReqAttributes::Etag);
+ }
+ if (boost::iequals(k, "checksum")) {
+ attrs |= as_flag(ReqAttributes::Checksum);
+ }
+ if (boost::iequals(k, "objectparts")) {
+ attrs |= as_flag(ReqAttributes::ObjectParts);
+ }
+ if (boost::iequals(k, "objectsize")) {
+ attrs |= as_flag(ReqAttributes::ObjectSize);
+ }
+ if (boost::iequals(k, "storageclass")) {
+ attrs |= as_flag(ReqAttributes::StorageClass);
+ }
+ }
+ return attrs;
+} /* RGWGetObjAttrs::recognize_attrs */
+
+int RGWGetObjAttrs::verify_permission(optional_yield y)
+{
+ bool perm = false;
+ auto [has_s3_existing_tag, has_s3_resource_tag] =
+ rgw_check_policy_condition(this, s);
+
+ if (! rgw::sal::Object::empty(s->object.get())) {
+
+ auto iam_action1 = s->object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion;
+
+ auto iam_action2 = s->object->get_instance().empty() ?
+ rgw::IAM::s3GetObjectAttributes :
+ rgw::IAM::s3GetObjectVersionAttributes;
+
+ if (has_s3_existing_tag || has_s3_resource_tag) {
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+ }
+
+  /* XXX the following conjunction should be &&, but iam_action2 is currently
+   * not hooked up and always fails (it should succeed if the requester has
+   * READ access to the object) */
+ perm = (verify_object_permission(this, s, iam_action1) || /* && */
+ verify_object_permission(this, s, iam_action2));
+ }
+
+ if (! perm) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void RGWGetObjAttrs::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWGetObjAttrs::execute(optional_yield y)
+{
+ RGWGetObj::execute(y);
+} /* RGWGetObjAttrs::execute */
+
int RGWGetLC::verify_permission(optional_yield y)
{
auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
@@ -6097,7 +6340,8 @@ void RGWPutLC::execute(optional_yield y)
return;
}
- op_ret = driver->get_rgwlc()->set_bucket_config(s->bucket.get(), s->bucket_attrs, &new_config);
+ op_ret = driver->get_rgwlc()->set_bucket_config(this, y, s->bucket.get(),
+ s->bucket_attrs, &new_config);
if (op_ret < 0) {
return;
}
@@ -6113,7 +6357,8 @@ void RGWDeleteLC::execute(optional_yield y)
return;
}
- op_ret = driver->get_rgwlc()->remove_bucket_config(s->bucket.get(), s->bucket_attrs);
+ op_ret = driver->get_rgwlc()->remove_bucket_config(this, y, s->bucket.get(),
+ s->bucket_attrs);
if (op_ret < 0) {
return;
}
@@ -6215,9 +6460,9 @@ void RGWDeleteCORS::execute(optional_yield y)
return op_ret;
}
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_CORS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
if (op_ret < 0) {
ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket->get_name()
<< " returned err=" << op_ret << dendl;
@@ -6501,6 +6746,14 @@ try_sum_part_cksums(const DoutPrefixProvider *dpp,
++parts_ix;
auto& part_cksum = part.second->get_cksum();
+ if (! part_cksum) {
+ ldpp_dout_fmt(dpp, 0,
+ "ERROR: multipart part checksum not present (ix=={})",
+ parts_ix);
+ op_ret = -ERR_INVALID_REQUEST;
+ return op_ret;
+ }
+
ldpp_dout_fmt(dpp, 16,
"INFO: {} iterate part: {} {} {}",
__func__, parts_ix, part_cksum->type_string(),
@@ -6653,6 +6906,8 @@ void RGWCompleteMultipart::execute(optional_yield y)
if (upload->cksum_type != rgw::cksum::Type::none) {
op_ret = try_sum_part_cksums(this, s->cct, upload.get(), parts, cksum, y);
if (op_ret < 0) {
+ ldpp_dout(this, 16) << "ERROR: try_sum_part_cksums failed, obj="
+ << meta_obj << " ret=" << op_ret << dendl;
return;
}
}
@@ -6677,13 +6932,23 @@ void RGWCompleteMultipart::execute(optional_yield y)
rgw::putobj::find_hdr_cksum(*(s->info.env));
ldpp_dout_fmt(this, 10,
- "INFO: client supplied checksum {}: {}",
+ "INFO: client supplied checksum {}: {} ",
hdr_cksum.header_name(), supplied_cksum);
if (! (supplied_cksum.empty()) &&
(supplied_cksum != armored_cksum)) {
- op_ret = -ERR_INVALID_REQUEST;
- return;
+ /* some minio SDK clients assert a checksum that is cryptographically
+ * valid but omits the part count */
+ auto parts_suffix = fmt::format("-{}", parts->parts.size());
+ auto suffix_len = armored_cksum->size() - parts_suffix.size();
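+      // accept a supplied checksum that matches the computed one minus the
+      // trailing "-<parts>" suffix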
+ if (armored_cksum->compare(0, suffix_len, supplied_cksum) != 0) {
+ ldpp_dout_fmt(this, 4,
+ "{} content checksum mismatch"
+ "\n\tcalculated={} != \n\texpected={}",
+ hdr_cksum.header_name(), armored_cksum, supplied_cksum);
+ op_ret = -ERR_INVALID_REQUEST;
+ return;
+ }
}
buffer::list cksum_bl;
@@ -6703,15 +6968,67 @@ void RGWCompleteMultipart::execute(optional_yield y)
return;
}
+ RGWObjVersionTracker& objv_tracker = meta_obj->get_version_tracker();
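+  // capture the meta object's version; delete_object() below checks it to
+  // detect part uploads that raced with completion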
+
+ using prefix_map_t = rgw::sal::MultipartUpload::prefix_map_t;
+ prefix_map_t processed_prefixes;
+
+ // no etag and size before completion
+ op_ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), "", 0, this, y, false, false);
+ if (op_ret < 0) {
+ return;
+ }
+
op_ret =
upload->complete(this, y, s->cct, parts->parts, remove_objs, accounted_size,
- compressed, cs_info, ofs, s->req_id, s->owner, olh_epoch,
- s->object.get());
+ compressed, cs_info, ofs, s->req_id, s->owner, olh_epoch,
+ s->object.get(), processed_prefixes);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: upload complete failed ret=" << op_ret << dendl;
return;
}
+ remove_objs.clear();
+
+ // use cls_version_check() when deleting the meta object to detect part uploads that raced
+ // with upload->complete(). any parts that finish after that won't be part of the final
+ // upload, so they need to be gc'd and removed from the bucket index before retrying
+ // deletion of the multipart meta object
+ static constexpr auto MAX_DELETE_RETRIES = 15u;
+ for (auto i = 0u; i < MAX_DELETE_RETRIES; i++) {
+    // remove the upload meta object; the meta object is not versioned
+ // when the bucket is, as that would add an unneeded delete marker
+ int ret = meta_obj->delete_object(this, y, rgw::sal::FLAG_PREVENT_VERSIONING, &remove_objs, &objv_tracker);
+ if (ret != -ECANCELED || i == MAX_DELETE_RETRIES - 1) {
+ if (ret >= 0) {
+ /* serializer's exclusive lock is released */
+ serializer->clear_locked();
+ } else {
+ ldpp_dout(this, 1) << "ERROR: failed to remove object " << meta_obj << ", ret: " << ret << dendl;
+ }
+ break;
+ }
+
+ ldpp_dout(this, 20) << "deleting meta_obj is cancelled due to mismatch cls_version: " << objv_tracker << dendl;
+ objv_tracker.clear();
+
+ ret = meta_obj->get_obj_attrs(s->yield, this);
+ if (ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get obj attrs, obj=" << meta_obj
+ << " ret=" << ret << dendl;
+
+ if (ret != -ENOENT) {
+ ldpp_dout(this, 0) << "ERROR: failed to remove object " << meta_obj << dendl;
+ }
+ break;
+ }
+
+ ret = upload->cleanup_orphaned_parts(this, s->cct, y, meta_obj->get_obj(), remove_objs, processed_prefixes);
+ if (ret < 0) {
+ ldpp_dout(this, 0) << "ERROR: failed to clenup orphaned parts. ret=" << ret << dendl;
+ }
+ }
+
const ceph::real_time upload_time = upload->get_mtime();
etag = s->object->get_attrs()[RGW_ATTR_ETAG].to_str();
@@ -6721,17 +7038,6 @@ void RGWCompleteMultipart::execute(optional_yield y)
ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
// too late to rollback operation, hence op_ret is not set here
}
-
- // remove the upload meta object ; the meta object is not versioned
- // when the bucket is, as that would add an unneeded delete marker
- ret = meta_obj->delete_object(this, y, rgw::sal::FLAG_PREVENT_VERSIONING);
- if (ret >= 0) {
- /* serializer's exclusive lock is released */
- serializer->clear_locked();
- } else {
- ldpp_dout(this, 4) << "WARNING: failed to remove object " << meta_obj << ", ret: " << ret << dendl;
- }
-
} // RGWCompleteMultipart::execute
bool RGWCompleteMultipart::check_previously_completed(const RGWMultiCompleteUpload* parts)
@@ -6818,17 +7124,30 @@ void RGWAbortMultipart::execute(optional_yield y)
return;
upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id);
+ meta_obj = upload->get_meta_obj();
+ meta_obj->set_in_extra_data(true);
+ meta_obj->get_obj_attrs(s->yield, this);
+
jspan_context trace_ctx(false, false);
if (tracing::rgw::tracer.is_enabled()) {
// read meta object attributes for trace info
- meta_obj = upload->get_meta_obj();
- meta_obj->set_in_extra_data(true);
- meta_obj->get_obj_attrs(s->yield, this);
extract_span_context(meta_obj->get_attrs(), trace_ctx);
}
multipart_trace = tracing::rgw::tracer.add_span(name(), trace_ctx);
+ int max_lock_secs_mp =
+ s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
+ utime_t dur(max_lock_secs_mp, 0);
+ auto serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart");
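+  // same lock name as CompleteMultipart, presumably so that abort and
+  // complete serialize against each other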
+ op_ret = serializer->try_lock(this, dur, y);
+ if (op_ret < 0) {
+ if (op_ret == -ENOENT) {
+ op_ret = -ERR_NO_SUCH_UPLOAD;
+ }
+ return;
+ }
op_ret = upload->abort(this, s->cct, y);
+ serializer->unlock();
}
int RGWListMultipart::verify_permission(optional_yield y)
@@ -7087,6 +7406,12 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_
if (op_ret == -ENOENT) {
op_ret = 0;
}
+
+ if (auto ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Any, obj.get(), s, canonical_name(), etag, obj_size, this, y, true, false); ret < 0) {
+ // don't reply with an error in case of failed delete logging
+ ldpp_dout(this, 5) << "WARNING: multi DELETE operation ignores bucket logging failure: " << ret << dendl;
+ }
+
if (op_ret == 0) {
// send request to notification manager
int ret = res->publish_commit(dpp, obj_size, ceph::real_clock::now(), etag, version_id);
@@ -7126,6 +7451,12 @@ void RGWDeleteMultiObj::execute(optional_yield y)
return;
}
+ if (multi_delete->objects.empty()) {
+ s->err.message = "Missing required element Object";
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+
constexpr int DEFAULT_MAX_NUM = 1000;
int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
if (max_num < 0) {
@@ -7196,7 +7527,7 @@ bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
return false;
}
- auto policy = get_iam_policy_from_attr(s->cct, battrs);
+ auto policy = get_iam_policy_from_attr(s->cct, battrs, binfo.bucket.tenant);
bucket_owner = bacl.get_owner();
@@ -7536,7 +7867,7 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
return false;
}
- auto policy = get_iam_policy_from_attr(s->cct, battrs);
+ auto policy = get_iam_policy_from_attr(s->cct, battrs, binfo.bucket.tenant);
return verify_bucket_permission(this, s, ARN(obj), s->user_acl, bacl, policy,
s->iam_identity_policies, s->session_policies,
@@ -8180,7 +8511,7 @@ void RGWPutBucketPolicy::execute(optional_yield y)
try {
const Policy p(
- s->cct, nullptr, data.to_str(),
+ s->cct, &s->bucket_tenant, data.to_str(),
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
rgw::sal::Attrs attrs(s->bucket_attrs);
if (s->bucket_access_conf &&
@@ -8231,7 +8562,7 @@ void RGWGetBucketPolicy::execute(optional_yield y)
rgw::sal::Attrs attrs(s->bucket_attrs);
auto aiter = attrs.find(RGW_ATTR_IAM_POLICY);
if (aiter == attrs.end()) {
- ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
+ ldpp_dout(this, 20) << "can't find bucket IAM POLICY attr bucket_name = "
<< s->bucket_name << dendl;
op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
s->err.message = "The bucket policy does not exist";
@@ -8281,9 +8612,9 @@ void RGWDeleteBucketPolicy::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_IAM_POLICY);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
return op_ret;
}, y);
}
@@ -8746,7 +9077,7 @@ void RGWGetBucketPublicAccessBlock::execute(optional_yield y)
auto attrs = s->bucket_attrs;
if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS);
aiter == attrs.end()) {
- ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
+ ldpp_dout(this, 20) << "can't find bucket IAM POLICY attr bucket_name = "
<< s->bucket_name << dendl;
op_ret = -ERR_NO_SUCH_PUBLIC_ACCESS_BLOCK_CONFIGURATION;
@@ -8801,9 +9132,9 @@ void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_PUBLIC_ACCESS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
return op_ret;
}, y);
}
@@ -8878,7 +9209,7 @@ void RGWGetBucketEncryption::execute(optional_yield y)
const auto& attrs = s->bucket_attrs;
if (auto aiter = attrs.find(RGW_ATTR_BUCKET_ENCRYPTION_POLICY);
aiter == attrs.end()) {
- ldpp_dout(this, 0) << "can't find BUCKET ENCRYPTION attr for bucket_name = " << s->bucket_name << dendl;
+ ldpp_dout(this, 20) << "can't find BUCKET ENCRYPTION attr for bucket_name = " << s->bucket_name << dendl;
op_ret = -ENOENT;
s->err.message = "The server side encryption configuration was not found";
return;
@@ -8912,10 +9243,10 @@ void RGWDeleteBucketEncryption::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
- rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_POLICY);
attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_KEY_ID);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ op_ret = s->bucket->put_info(this, false, real_time(), y);
return op_ret;
}, y);
}