Diffstat (limited to 'src/rgw')
-rw-r--r--  src/rgw/CMakeLists.txt | 9
-rw-r--r--  src/rgw/driver/daos/rgw_sal_daos.cc | 2
-rw-r--r--  src/rgw/driver/daos/rgw_sal_daos.h | 1
-rw-r--r--  src/rgw/driver/dbstore/README.md | 21
-rw-r--r--  src/rgw/driver/dbstore/tests/dbstore_tests.cc | 31
-rw-r--r--  src/rgw/driver/motr/rgw_sal_motr.cc | 5
-rw-r--r--  src/rgw/driver/motr/rgw_sal_motr.h | 1
-rw-r--r--  src/rgw/driver/posix/README.md | 12
-rw-r--r--  src/rgw/driver/posix/notify.h | 2
-rw-r--r--  src/rgw/driver/posix/rgw_sal_posix.cc | 8
-rw-r--r--  src/rgw/driver/posix/rgw_sal_posix.h | 7
-rw-r--r--  src/rgw/driver/rados/rgw_bucket.cc | 56
-rw-r--r--  src/rgw/driver/rados/rgw_bucket.h | 1
-rw-r--r--  src/rgw/driver/rados/rgw_d3n_datacache.cc | 2
-rw-r--r--  src/rgw/driver/rados/rgw_data_sync.cc | 97
-rw-r--r--  src/rgw/driver/rados/rgw_datalog.cc | 2
-rw-r--r--  src/rgw/driver/rados/rgw_notify.cc | 11
-rw-r--r--  src/rgw/driver/rados/rgw_period.cc | 14
-rw-r--r--  src/rgw/driver/rados/rgw_pubsub_push.cc | 3
-rw-r--r--  src/rgw/driver/rados/rgw_putobj_processor.cc | 10
-rw-r--r--  src/rgw/driver/rados/rgw_rados.cc | 105
-rw-r--r--  src/rgw/driver/rados/rgw_rados.h | 6
-rw-r--r--  src/rgw/driver/rados/rgw_rest_bucket.cc | 2
-rw-r--r--  src/rgw/driver/rados/rgw_sal_rados.cc | 412
-rw-r--r--  src/rgw/driver/rados/rgw_sal_rados.h | 27
-rw-r--r--  src/rgw/driver/rados/rgw_tools.cc | 37
-rw-r--r--  src/rgw/driver/rados/rgw_tools.h | 4
-rw-r--r--  src/rgw/driver/rados/rgw_user.cc | 6
-rw-r--r--  src/rgw/driver/rados/rgw_user.h | 8
-rw-r--r--  src/rgw/driver/rados/rgw_zone.h | 1
-rw-r--r--  src/rgw/radosgw-admin/orphan.cc (renamed from src/rgw/rgw_orphan.cc) | 7
-rw-r--r--  src/rgw/radosgw-admin/orphan.h (renamed from src/rgw/rgw_orphan.h) | 0
-rw-r--r--  src/rgw/radosgw-admin/radosgw-admin.cc (renamed from src/rgw/rgw_admin.cc) | 408
-rw-r--r--  src/rgw/radosgw-admin/sync_checkpoint.cc (renamed from src/rgw/rgw_sync_checkpoint.cc) | 6
-rw-r--r--  src/rgw/radosgw-admin/sync_checkpoint.h (renamed from src/rgw/rgw_sync_checkpoint.h) | 0
-rw-r--r--  src/rgw/rgw_amqp.cc | 9
-rw-r--r--  src/rgw/rgw_asio_frontend.cc | 7
-rw-r--r--  src/rgw/rgw_auth.cc | 94
-rw-r--r--  src/rgw/rgw_auth.h | 36
-rw-r--r--  src/rgw/rgw_auth_filters.h | 41
-rw-r--r--  src/rgw/rgw_auth_s3.h | 10
-rw-r--r--  src/rgw/rgw_bucket_layout.cc | 2
-rw-r--r--  src/rgw/rgw_bucket_logging.cc | 799
-rw-r--r--  src/rgw/rgw_bucket_logging.h | 250
-rw-r--r--  src/rgw/rgw_cksum_pipe.cc | 11
-rw-r--r--  src/rgw/rgw_cksum_pipe.h | 36
-rw-r--r--  src/rgw/rgw_common.cc | 16
-rw-r--r--  src/rgw/rgw_common.h | 30
-rw-r--r--  src/rgw/rgw_file_int.h | 24
-rw-r--r--  src/rgw/rgw_iam_policy.cc | 13
-rw-r--r--  src/rgw/rgw_iam_policy.h | 6
-rw-r--r--  src/rgw/rgw_kafka.cc | 30
-rw-r--r--  src/rgw/rgw_kafka.h | 3
-rw-r--r--  src/rgw/rgw_lc.cc | 2
-rw-r--r--  src/rgw/rgw_lua_background.cc | 6
-rw-r--r--  src/rgw/rgw_op.cc | 398
-rw-r--r--  src/rgw/rgw_op.h | 68
-rw-r--r--  src/rgw/rgw_op_type.h | 3
-rw-r--r--  src/rgw/rgw_process.cc | 15
-rw-r--r--  src/rgw/rgw_pubsub.cc | 208
-rw-r--r--  src/rgw/rgw_pubsub.h | 86
-rw-r--r--  src/rgw/rgw_ratelimit.h | 4
-rw-r--r--  src/rgw/rgw_rest.cc | 9
-rw-r--r--  src/rgw/rgw_rest.h | 76
-rw-r--r--  src/rgw/rgw_rest_bucket_logging.cc | 369
-rw-r--r--  src/rgw/rgw_rest_bucket_logging.h | 19
-rw-r--r--  src/rgw/rgw_rest_pubsub.cc | 27
-rw-r--r--  src/rgw/rgw_rest_s3.cc | 359
-rw-r--r--  src/rgw/rgw_rest_s3.h | 32
-rw-r--r--  src/rgw/rgw_rest_sts.cc | 3
-rw-r--r--  src/rgw/rgw_rest_swift.cc | 8
-rw-r--r--  src/rgw/rgw_rest_swift.h | 1
-rw-r--r--  src/rgw/rgw_s3_filter.cc | 269
-rw-r--r--  src/rgw/rgw_s3_filter.h | 103
-rw-r--r--  src/rgw/rgw_s3select.cc | 2
-rw-r--r--  src/rgw/rgw_sal.h | 58
-rw-r--r--  src/rgw/rgw_sal_dbstore.cc | 18
-rw-r--r--  src/rgw/rgw_sal_dbstore.h | 37
-rw-r--r--  src/rgw/rgw_sal_filter.cc | 11
-rw-r--r--  src/rgw/rgw_sal_filter.h | 39
-rw-r--r--  src/rgw/rgw_sal_store.h | 20
-rw-r--r--  src/rgw/rgw_swift_auth.cc | 4
-rw-r--r--  src/rgw/rgw_swift_auth.h | 16
-rw-r--r--  src/rgw/services/svc_zone.cc | 36
-rw-r--r--  src/rgw/services/svc_zone.h | 3
85 files changed, 4042 insertions, 1008 deletions
diff --git a/src/rgw/CMakeLists.txt b/src/rgw/CMakeLists.txt
index 329b01d2cac..41e473e23f0 100644
--- a/src/rgw/CMakeLists.txt
+++ b/src/rgw/CMakeLists.txt
@@ -90,6 +90,7 @@ set(librgw_common_srcs
rgw_notify_event_type.cc
rgw_period_history.cc
rgw_period_puller.cc
+ rgw_s3_filter.cc
rgw_pubsub.cc
rgw_coroutine.cc
rgw_cr_rest.cc
@@ -151,6 +152,8 @@ set(librgw_common_srcs
rgw_data_access.cc
driver/rados/account.cc
driver/rados/buckets.cc
+ rgw_bucket_logging.cc
+ rgw_rest_bucket_logging.cc
driver/rados/cls_fifo_legacy.cc
driver/rados/group.cc
driver/rados/groups.cc
@@ -484,9 +487,9 @@ target_link_libraries(radosgw PRIVATE
install(TARGETS radosgw DESTINATION bin)
set(radosgw_admin_srcs
- rgw_admin.cc
- rgw_sync_checkpoint.cc
- rgw_orphan.cc)
+ radosgw-admin/radosgw-admin.cc
+ radosgw-admin/sync_checkpoint.cc
+ radosgw-admin/orphan.cc)
# this is unsatisfying and hopefully temporary; ARROW should not be
# part of radosgw_admin
diff --git a/src/rgw/driver/daos/rgw_sal_daos.cc b/src/rgw/driver/daos/rgw_sal_daos.cc
index a87d88c4b85..92dd7afe2fb 100644
--- a/src/rgw/driver/daos/rgw_sal_daos.cc
+++ b/src/rgw/driver/daos/rgw_sal_daos.cc
@@ -858,8 +858,6 @@ bool DaosZone::is_writeable() { return true; }
bool DaosZone::get_redirect_endpoint(std::string* endpoint) { return false; }
-bool DaosZone::has_zonegroup_api(const std::string& api) const { return false; }
-
const std::string& DaosZone::get_current_period_id() {
return current_period->get_id();
}
diff --git a/src/rgw/driver/daos/rgw_sal_daos.h b/src/rgw/driver/daos/rgw_sal_daos.h
index e382fdb04ae..5515579a441 100644
--- a/src/rgw/driver/daos/rgw_sal_daos.h
+++ b/src/rgw/driver/daos/rgw_sal_daos.h
@@ -484,7 +484,6 @@ class DaosZone : public StoreZone {
virtual const std::string& get_name() const override;
virtual bool is_writeable() override;
virtual bool get_redirect_endpoint(std::string* endpoint) override;
- virtual bool has_zonegroup_api(const std::string& api) const override;
virtual const std::string& get_current_period_id() override;
virtual const RGWAccessKey& get_system_key() {
return zone_params->system_key;
diff --git a/src/rgw/driver/dbstore/README.md b/src/rgw/driver/dbstore/README.md
index f7e5df331cc..bcde79a2891 100644
--- a/src/rgw/driver/dbstore/README.md
+++ b/src/rgw/driver/dbstore/README.md
@@ -5,7 +5,7 @@ Standalone Rados Gateway (RGW) on DBStore (Experimental)
## CMake Option
Add below cmake option (enabled by default)
- -DWITH_RADOSGW_DBSTORE=ON
+ -DWITH_RADOSGW_DBSTORE=ON
## Build
@@ -15,23 +15,21 @@ Add below cmake option (enabled by default)
## Running Test cluster
-Edit ceph.conf to add below option
+Edit ceph.conf to add below options
[client]
rgw backend store = dbstore
rgw config store = dbstore
-Start vstart cluster
+To start the `vstart` cluster, run the following command:
- MON=1 RGW=1 ../src/vstart.sh -o rgw_backend_store=dbstore -o rgw_config_store=dbstore -n -d
+ MON=0 OSD=0 MDS=0 MGR=0 RGW=1 ../src/vstart.sh -n -d --rgw_store dbstore
-The above vstart command brings up RGW server on dbstore. It creates default zonegroup, zone and few default users (eg., testid) to be used for s3 operations.
+The above `vstart` command brings up the RGW server on DBStore without the need for MONs or OSDs. It creates a default zonegroup, a zone, and a few default users (e.g., `testid`) to be used for S3 operations, and by default generates the database files that store them in the `dev` subdirectory.
-`radosgw-admin` can be used to create and remove other users, zonegroups and zones.
-
-
-By default, dbstore creates .db file *'/var/lib/ceph/radosgw/dbstore-default_ns.db'* to store the data and *'/var/lib/ceph/radosgw/dbstore-config.db'* file to store the configuration. This can be configured using below options in ceph.conf
+The `radosgw-admin` command can be used to create and remove other users, zonegroups, and zones.
+The location and prefix for the database files can be configured using the following options:
[client]
dbstore db dir = <path for the directory for storing the db backend store data>
dbstore db name prefix = <prefix to the file names created by db backend store>
@@ -42,8 +40,8 @@ By default, dbstore creates .db file *'/var/lib/ceph/radosgw/dbstore-default_ns.
To execute DBStore unit test cases (using Gtest framework), from build directory
ninja unittest_dbstore_tests
- ./bin/unittest_dbstore_tests [logfile] [loglevel]
- (default logfile: rgw_dbstore_tests.log, loglevel: 20)
+ ./bin/unittest_dbstore_tests [logfile] [loglevel] [tenantname]
+ (defaults: logfile: rgw_dbstore_tests.log, loglevel: 20, tenantname: default_ns_<timestamp_at_time_of_run>)
ninja unittest_dbstore_mgr_tests
./bin/unittest_dbstore_mgr_tests
@@ -52,4 +50,3 @@ To execute Sample test file
ninja src/rgw/driver/dbstore/install
./bin/dbstore-bin [logfile] [loglevel]
(default logfile: rgw_dbstore_bin.log, loglevel: 20)
-
diff --git a/src/rgw/driver/dbstore/tests/dbstore_tests.cc b/src/rgw/driver/dbstore/tests/dbstore_tests.cc
index 2ceed7218d8..554c4d29382 100644
--- a/src/rgw/driver/dbstore/tests/dbstore_tests.cc
+++ b/src/rgw/driver/dbstore/tests/dbstore_tests.cc
@@ -21,7 +21,7 @@ namespace gtest {
Environment(): tenant("default_ns"), db(nullptr),
db_type("SQLite"), ret(-1) {}
- Environment(string tenantname, string db_typename):
+ Environment(string tenantname, string db_typename):
tenant(tenantname), db(nullptr),
db_type(db_typename), ret(-1) {}
@@ -153,8 +153,8 @@ TEST_F(DBStoreTest, InsertUser) {
RGWAccessKey k2("id2", "key2");
params.op.user.uinfo.access_keys["id1"] = k1;
params.op.user.uinfo.access_keys["id2"] = k2;
- params.op.user.user_version.ver = 1;
- params.op.user.user_version.tag = "UserTAG";
+ params.op.user.user_version.ver = 1;
+ params.op.user.user_version.tag = "UserTAG";
ret = db->ProcessOp(dpp, "InsertUser", &params);
ASSERT_EQ(ret, 0);
@@ -841,7 +841,7 @@ TEST_F(DBStoreTest, IterateObject) {
TEST_F(DBStoreTest, ListBucketObjects) {
struct DBOpParams params = GlobalParams;
int ret = -1;
-
+
int max = 2;
bool is_truncated = false;
rgw_obj_key marker1;
@@ -1032,7 +1032,7 @@ TEST_F(DBStoreTest, DeleteVersionedObject) {
true, &s);
ASSERT_EQ(ret, -ENOENT);
- /* Delete delete marker..should be able to read object now */
+ /* Delete delete marker..should be able to read object now */
params.op.obj.state.obj.key.instance = dm_instance;
DB::Object op_target3(db, params.op.bucket.info, params.op.obj.state.obj);
DB::Object::Delete delete_op2(&op_target3);
@@ -1307,13 +1307,13 @@ TEST_F(DBStoreTest, LCEntry) {
ASSERT_EQ(ret, 0);
// get entry index1, entry1
- ret = db->get_entry(index1, ents[0], entry);
+ ret = db->get_entry(index1, ents[0], entry);
ASSERT_EQ(ret, 0);
ASSERT_EQ(entry.status, lc_uninitial);
ASSERT_EQ(entry.start_time, lc_time);
// get next entry index1, entry2
- ret = db->get_next_entry(index1, ents[1], entry);
+ ret = db->get_next_entry(index1, ents[1], entry);
ASSERT_EQ(ret, 0);
ASSERT_EQ(entry.bucket, ents[2]);
ASSERT_EQ(entry.status, lc_uninitial);
@@ -1323,7 +1323,7 @@ TEST_F(DBStoreTest, LCEntry) {
entry4.status = lc_complete;
ret = db->set_entry(index2, entry4);
ASSERT_EQ(ret, 0);
- ret = db->get_entry(index2, ents[3], entry);
+ ret = db->get_entry(index2, ents[3], entry);
ASSERT_EQ(ret, 0);
ASSERT_EQ(entry.status, lc_complete);
@@ -1337,7 +1337,7 @@ TEST_F(DBStoreTest, LCEntry) {
}
// remove index1, entry3
- ret = db->rm_entry(index1, entry3);
+ ret = db->rm_entry(index1, entry3);
ASSERT_EQ(ret, 0);
// get next entry index1, entry2.. should be empty
@@ -1373,8 +1373,8 @@ TEST_F(DBStoreTest, InsertTestIDUser) {
params.op.user.uinfo.user_email = "tester@ceph.com";
RGWAccessKey k1("0555b35654ad1656d804", "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==");
params.op.user.uinfo.access_keys["0555b35654ad1656d804"] = k1;
- params.op.user.user_version.ver = 1;
- params.op.user.user_version.tag = "UserTAG";
+ params.op.user.user_version.ver = 1;
+ params.op.user.user_version.tag = "UserTAG";
ret = db->ProcessOp(dpp, "InsertUser", &params);
ASSERT_EQ(ret, 0);
@@ -1385,12 +1385,14 @@ int main(int argc, char **argv)
int ret = -1;
string c_logfile = "rgw_dbstore_tests.log";
int c_loglevel = 20;
+ string c_tenant = "default_ns_" + std::to_string(time(NULL));
- // format: ./dbstore-tests logfile loglevel
- if (argc == 3) {
+ // format: ./dbstore-tests logfile loglevel tenantname
+ if (argc == 4) {
c_logfile = argv[1];
c_loglevel = (atoi)(argv[2]);
- cout << "logfile:" << c_logfile << ", loglevel set to " << c_loglevel << "\n";
+ c_tenant = argv[3];
+ cout << "logfile:" << c_logfile << ", loglevel set to " << c_loglevel << ", db is " << c_tenant << "\n";
}
::testing::InitGoogleTest(&argc, argv);
@@ -1398,6 +1400,7 @@ int main(int argc, char **argv)
gtest::env = new gtest::Environment();
gtest::env->logfile = c_logfile;
gtest::env->loglevel = c_loglevel;
+ gtest::env->tenant = c_tenant;
::testing::AddGlobalTestEnvironment(gtest::env);
ret = RUN_ALL_TESTS();
diff --git a/src/rgw/driver/motr/rgw_sal_motr.cc b/src/rgw/driver/motr/rgw_sal_motr.cc
index b999673ac18..463ea8c5b11 100644
--- a/src/rgw/driver/motr/rgw_sal_motr.cc
+++ b/src/rgw/driver/motr/rgw_sal_motr.cc
@@ -1111,11 +1111,6 @@ bool MotrZone::get_redirect_endpoint(std::string* endpoint)
return false;
}
-bool MotrZone::has_zonegroup_api(const std::string& api) const
-{
- return (zonegroup.group.api_name == api);
-}
-
const std::string& MotrZone::get_current_period_id()
{
return current_period->get_id();
diff --git a/src/rgw/driver/motr/rgw_sal_motr.h b/src/rgw/driver/motr/rgw_sal_motr.h
index f92074b9d94..0f99ae48e86 100644
--- a/src/rgw/driver/motr/rgw_sal_motr.h
+++ b/src/rgw/driver/motr/rgw_sal_motr.h
@@ -525,7 +525,6 @@ class MotrZone : public StoreZone {
virtual const std::string& get_name() const override;
virtual bool is_writeable() override;
virtual bool get_redirect_endpoint(std::string* endpoint) override;
- virtual bool has_zonegroup_api(const std::string& api) const override;
virtual const std::string& get_current_period_id() override;
virtual const RGWAccessKey& get_system_key() { return zone_params->system_key; }
virtual const std::string& get_realm_name() { return realm->get_name(); }
diff --git a/src/rgw/driver/posix/README.md b/src/rgw/driver/posix/README.md
index 02dc8dfbe85..73971edc86f 100644
--- a/src/rgw/driver/posix/README.md
+++ b/src/rgw/driver/posix/README.md
@@ -23,15 +23,15 @@ Edit ceph.conf to add below option
rgw config store = dbstore
rgw filter = posix
-Start vstart cluster
+To start the `vstart` cluster, run the following command:
- MON=0 OSD=0 MDS=0 MGR=0 RGW=1 ../src/vstart.sh -o rgw_backend_store=dbstore -o rgw_config_store=dbstore -o rgw_filter=posix -n -d
+ MON=0 OSD=0 MDS=0 MGR=0 RGW=1 ../src/vstart.sh -n -d --rgw_store posix
-The above vstart command brings up RGW server on POSIXDriver. It creates default zonegroup, zone and few default users (eg., testid) to be used for s3 operations.
+The above `vstart` command brings up the RGW server on the POSIXDriver. It creates a default zonegroup, a zone, and a few default users (e.g., `testid`) to be used for S3 operations.
-`radosgw-admin` can be used to create and remove other users, zonegroups and zones.
+The `radosgw-admin` command can be used to create and remove other users, zonegroups, and zones.
-By default, the directory exported is *'/tmp/rgw_posix_driver'*. This can be changed with the `rgw_posix_base_path` option, either in ceph.conf or on the vstart command line above.
+By default, the directory exported, *'rgw_posix_driver'*, is created in the `dev` subdirectory. This can be changed with the `rgw_posix_base_path` option.
-The POSIXDriver keeps a LMDB based cache of directories, so that it can provide ordered listings. This directory lives in `rgw_posix_database_root`, which by default is in *'/var/lib/ceph/radosgw'*
+The POSIXDriver keeps an LMDB-based cache of directories, so that it can provide ordered listings. This directory lives in `rgw_posix_database_root`, which by default is created in the `dev` subdirectory.
diff --git a/src/rgw/driver/posix/notify.h b/src/rgw/driver/posix/notify.h
index 9f6088a893a..4463abc57c2 100644
--- a/src/rgw/driver/posix/notify.h
+++ b/src/rgw/driver/posix/notify.h
@@ -212,7 +212,7 @@ namespace file::listing {
void signal_shutdown() {
uint64_t msg{sig_shutdown};
- (void) write(efd, &msg, sizeof(uint64_t));
+ std::ignore = write(efd, &msg, sizeof(uint64_t));
}
friend class Notify;
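The change above replaces a plain void-cast of write(2)'s return value with an assignment to std::ignore, presumably to make the discard explicit and quiet warn_unused_result diagnostics. A minimal standalone sketch of the same idiom (plain POSIX, not RGW code):

    // discard_write_result.cc -- illustration only, outside RGW
    #include <tuple>      // std::ignore
    #include <cstdint>
    #include <unistd.h>   // write()

    void signal_eventfd(int efd) {
      std::uint64_t msg = 1;
      // eventfd writes are exactly 8 bytes; the result is deliberately discarded,
      // and std::ignore makes that explicit to both readers and the compiler
      std::ignore = write(efd, &msg, sizeof(msg));
    }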
diff --git a/src/rgw/driver/posix/rgw_sal_posix.cc b/src/rgw/driver/posix/rgw_sal_posix.cc
index 1345468210f..9d76462baa0 100644
--- a/src/rgw/driver/posix/rgw_sal_posix.cc
+++ b/src/rgw/driver/posix/rgw_sal_posix.cc
@@ -2893,6 +2893,14 @@ int POSIXObject::copy_object(const ACLOwner& owner,
return dobj->set_obj_attrs(dpp, &attrs, nullptr, y, rgw::sal::FLAG_LOG_OP);
}
+int POSIXObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y)
+{
+ return -EOPNOTSUPP;
+}
+
int POSIXObject::load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh)
{
int ret = stat(dpp);
diff --git a/src/rgw/driver/posix/rgw_sal_posix.h b/src/rgw/driver/posix/rgw_sal_posix.h
index 8ec72bbc1bc..bf3478ad6ab 100644
--- a/src/rgw/driver/posix/rgw_sal_posix.h
+++ b/src/rgw/driver/posix/rgw_sal_posix.h
@@ -653,6 +653,13 @@ public:
const DoutPrefixProvider* dpp, optional_yield y) override;
virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
virtual int set_acl(const RGWAccessControlPolicy& acl) override { acls = acl; return 0; }
+
+ /** If multipart, enumerate (a range [marker..marker+[min(max_parts, parts_count-1)] of) parts of the object */
+ virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y) override;
+
virtual int load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh = true) override;
virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs,
Attrs* delattrs, optional_yield y, uint32_t flags) override;
diff --git a/src/rgw/driver/rados/rgw_bucket.cc b/src/rgw/driver/rados/rgw_bucket.cc
index 21d238d3341..d043aea0783 100644
--- a/src/rgw/driver/rados/rgw_bucket.cc
+++ b/src/rgw/driver/rados/rgw_bucket.cc
@@ -169,7 +169,8 @@ int RGWBucket::init(rgw::sal::Driver* _driver, RGWBucketAdminOpState& op_state,
driver = _driver;
- std::string bucket_name = op_state.get_bucket_name();
+ auto bucket_name = op_state.get_bucket_name();
+ auto bucket_id = op_state.get_bucket_id();
if (bucket_name.empty() && op_state.get_user_id().empty())
return -EINVAL;
@@ -184,7 +185,7 @@ int RGWBucket::init(rgw::sal::Driver* _driver, RGWBucketAdminOpState& op_state,
bucket_name = bucket_name.substr(pos + 1);
}
- int r = driver->load_bucket(dpp, rgw_bucket(tenant, bucket_name),
+ int r = driver->load_bucket(dpp, rgw_bucket(tenant, bucket_name, bucket_id),
&bucket, y);
if (r < 0) {
set_err_msg(err_msg, "failed to fetch bucket info for bucket=" + bucket_name);
@@ -1140,6 +1141,16 @@ int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpS
int RGWBucketAdminOp::unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, string *err)
{
+ rgw_owner owner;
+ if (op_state.is_account_op()) {
+ owner = op_state.get_account_id();
+ } else if (op_state.is_user_op()) {
+ owner = op_state.get_user_id();
+ } else {
+ set_err_msg(err, "requires user or account id");
+ return -EINVAL;
+ }
+
auto radosdriver = dynamic_cast<rgw::sal::RadosStore*>(driver);
if (!radosdriver) {
set_err_msg(err, "rados store only");
@@ -1152,13 +1163,18 @@ int RGWBucketAdminOp::unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op
return ret;
auto* rados = radosdriver->getRados()->get_rados_handle();
- return radosdriver->ctl()->bucket->unlink_bucket(*rados, op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, y, dpp, true);
+ return radosdriver->ctl()->bucket->unlink_bucket(*rados, owner, op_state.get_bucket()->get_info().bucket, y, dpp, true);
}
int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, string *err)
{
- if (!op_state.is_user_op()) {
- set_err_msg(err, "empty user id");
+ rgw_owner new_owner;
+ if (op_state.is_account_op()) {
+ new_owner = op_state.get_account_id();
+ } else if (op_state.is_user_op()) {
+ new_owner = op_state.get_user_id();
+ } else {
+ set_err_msg(err, "requires user or account id");
return -EINVAL;
}
auto radosdriver = dynamic_cast<rgw::sal::RadosStore*>(driver);
@@ -1172,8 +1188,26 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
if (ret < 0)
return ret;
+ std::string display_name;
+ if (op_state.is_account_op()) {
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ ret = driver->load_account_by_id(dpp, y, op_state.get_account_id(),
+ info, attrs, objv);
+ if (ret < 0) {
+ set_err_msg(err, "failed to load account");
+ return ret;
+ }
+ display_name = std::move(info.name);
+ } else if (!bucket.get_user()->get_info().account_id.empty()) {
+ set_err_msg(err, "account users cannot own buckets. use --account-id instead");
+ return -EINVAL;
+ } else {
+ display_name = bucket.get_user()->get_display_name();
+ }
+
string bucket_id = op_state.get_bucket_id();
- std::string display_name = op_state.get_user_display_name();
std::unique_ptr<rgw::sal::Bucket> loc_bucket;
std::unique_ptr<rgw::sal::Bucket> old_bucket;
@@ -1187,7 +1221,7 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
old_bucket = loc_bucket->clone();
- loc_bucket->get_key().tenant = op_state.get_user_id().tenant;
+ loc_bucket->get_key().tenant = op_state.get_tenant();
if (!op_state.new_bucket_name.empty()) {
auto pos = op_state.new_bucket_name.find('/');
@@ -1236,14 +1270,14 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
}
RGWAccessControlPolicy policy_instance;
- policy_instance.create_default(op_state.get_user_id(), display_name);
+ policy_instance.create_default(new_owner, display_name);
owner = policy_instance.get_owner();
aclbl.clear();
policy_instance.encode(aclbl);
bool exclusive = false;
- loc_bucket->get_info().owner = op_state.get_user_id();
+ loc_bucket->get_info().owner = new_owner;
if (*loc_bucket != *old_bucket) {
loc_bucket->get_info().bucket = loc_bucket->get_key();
loc_bucket->get_info().objv_tracker.version_for_read()->ver = 0;
@@ -1259,13 +1293,13 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
/* link to user */
RGWBucketEntryPoint ep;
ep.bucket = loc_bucket->get_info().bucket;
- ep.owner = op_state.get_user_id();
+ ep.owner = new_owner;
ep.creation_time = loc_bucket->get_info().creation_time;
ep.linked = true;
rgw::sal::Attrs ep_attrs;
rgw_ep_info ep_data{ep, ep_attrs};
- r = radosdriver->ctl()->bucket->link_bucket(*rados, op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, y, dpp, true, &ep_data);
+ r = radosdriver->ctl()->bucket->link_bucket(*rados, new_owner, loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, y, dpp, true, &ep_data);
if (r < 0) {
set_err_msg(err, "failed to relink bucket");
return r;
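The link/unlink changes above let the admin op resolve its owner from either a user id or an account id, with account taking precedence and neither being an error. A simplified sketch of that selection, using stand-in types (UserId, AccountId, and Owner here are illustrative; the real code uses rgw_user, rgw_account_id, and the rgw_owner variant):

    // owner_selection.cc -- simplified stand-ins, not the RGW definitions
    #include <optional>
    #include <string>
    #include <variant>

    struct UserId    { std::string id; };
    struct AccountId { std::string id; };
    using Owner = std::variant<UserId, AccountId>;

    // Mirrors the precedence in RGWBucketAdminOp::link/unlink: an account op
    // wins over a user op, and having neither is rejected (-EINVAL there).
    std::optional<Owner> resolve_owner(const std::optional<AccountId>& account,
                                       const std::optional<UserId>& user) {
      if (account) return Owner{*account};
      if (user)    return Owner{*user};
      return std::nullopt;  // caller reports "requires user or account id"
    }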
diff --git a/src/rgw/driver/rados/rgw_bucket.h b/src/rgw/driver/rados/rgw_bucket.h
index 85434ba7299..9ee31c8814e 100644
--- a/src/rgw/driver/rados/rgw_bucket.h
+++ b/src/rgw/driver/rados/rgw_bucket.h
@@ -361,6 +361,7 @@ public:
void clear_failure() { failure = false; }
const RGWBucketInfo& get_bucket_info() const { return bucket->get_info(); }
+ rgw::sal::User* get_user() { return user.get(); }
};
class RGWBucketAdminOp {
diff --git a/src/rgw/driver/rados/rgw_d3n_datacache.cc b/src/rgw/driver/rados/rgw_d3n_datacache.cc
index c81954fce1c..be1a4468696 100644
--- a/src/rgw/driver/rados/rgw_d3n_datacache.cc
+++ b/src/rgw/driver/rados/rgw_d3n_datacache.cc
@@ -86,6 +86,8 @@ void D3nDataCache::init(CephContext *_cct) {
// create the cache storage directory
lsubdout(g_ceph_context, rgw, 5) << "D3nDataCache: init: creating the persistent storage directory on start" << dendl;
efs::create_directories(cache_location);
+ efs::permissions(cache_location,
+ efs::perms::owner_all | efs::perms::group_all | efs::perms::others_read);
}
} catch (const efs::filesystem_error& e) {
lderr(g_ceph_context) << "D3nDataCache: init: ERROR initializing the cache storage directory '" << cache_location <<
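The D3N change above adds an explicit permissions() call after creating the cache directory. A self-contained sketch of the same std::filesystem calls (the path is hypothetical; RGW uses its configured cache_location):

    // cache_dir_perms.cc -- standalone illustration of create_directories + permissions
    #include <filesystem>
    #include <iostream>

    namespace fs = std::filesystem;

    int main() {
      const fs::path cache_location{"/tmp/d3n_cache_example"};  // hypothetical path
      std::error_code ec;
      fs::create_directories(cache_location, ec);
      if (!ec) {
        // rwx for owner and group, read-only for others, as in the diff above
        fs::permissions(cache_location,
                        fs::perms::owner_all | fs::perms::group_all | fs::perms::others_read,
                        ec);
      }
      if (ec) {
        std::cerr << "failed to prepare cache dir: " << ec.message() << "\n";
        return 1;
      }
      return 0;
    }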
diff --git a/src/rgw/driver/rados/rgw_data_sync.cc b/src/rgw/driver/rados/rgw_data_sync.cc
index d5437f548c1..c0a9059a251 100644
--- a/src/rgw/driver/rados/rgw_data_sync.cc
+++ b/src/rgw/driver/rados/rgw_data_sync.cc
@@ -2617,6 +2617,7 @@ class RGWUserPermHandler {
rgw::IAM::Environment env;
std::unique_ptr<rgw::auth::Identity> identity;
RGWAccessControlPolicy user_acl;
+ std::vector<rgw::IAM::Policy> user_policies;
};
std::shared_ptr<_info> info;
@@ -2644,7 +2645,7 @@ class RGWUserPermHandler {
}
auto result = rgw::auth::transform_old_authinfo(
- sync_env->dpp, null_yield, sync_env->driver, user.get());
+ sync_env->dpp, null_yield, sync_env->driver, user.get(), &info->user_policies);
if (!result) {
return result.error();
}
@@ -2679,6 +2680,7 @@ public:
std::shared_ptr<_info> info;
RGWAccessControlPolicy bucket_acl;
std::optional<perm_state> ps;
+ boost::optional<rgw::IAM::Policy> bucket_policy;
public:
Bucket() {}
@@ -2686,9 +2688,7 @@ public:
const RGWBucketInfo& bucket_info,
const map<string, bufferlist>& bucket_attrs);
- bool verify_bucket_permission(int perm);
- bool verify_object_permission(const map<string, bufferlist>& obj_attrs,
- int perm);
+ bool verify_bucket_permission(const rgw_obj_key& obj_key, const uint64_t op);
};
static int policy_from_attrs(CephContext *cct,
@@ -2728,6 +2728,14 @@ int RGWUserPermHandler::Bucket::init(RGWUserPermHandler *handler,
return r;
}
+ // load bucket policy
+ try {
+ bucket_policy = get_iam_policy_from_attr(sync_env->cct, bucket_attrs, bucket_info.bucket.tenant);
+ } catch (const std::exception& e) {
+ ldpp_dout(sync_env->dpp, 0) << "ERROR: reading IAM Policy: " << e.what() << dendl;
+ return -EACCES;
+ }
+
ps.emplace(sync_env->cct,
info->env,
info->identity.get(),
@@ -2740,36 +2748,40 @@ int RGWUserPermHandler::Bucket::init(RGWUserPermHandler *handler,
return 0;
}
-bool RGWUserPermHandler::Bucket::verify_bucket_permission(int perm)
-{
- return verify_bucket_permission_no_policy(sync_env->dpp,
- &(*ps),
- info->user_acl,
- bucket_acl,
- perm);
-}
-
-bool RGWUserPermHandler::Bucket::verify_object_permission(const map<string, bufferlist>& obj_attrs,
- int perm)
+bool RGWUserPermHandler::Bucket::verify_bucket_permission(const rgw_obj_key& obj_key, const uint64_t op)
{
- RGWAccessControlPolicy obj_acl;
-
- int r = policy_from_attrs(sync_env->cct, obj_attrs, &obj_acl);
- if (r < 0) {
- return r;
- }
-
- return verify_bucket_permission_no_policy(sync_env->dpp,
- &(*ps),
- bucket_acl,
- obj_acl,
- perm);
+ const rgw_obj obj(ps->bucket_info.bucket, obj_key);
+ const auto arn = rgw::ARN(obj);
+
+ if (ps->identity->get_account()) {
+ const bool account_root = (ps->identity->get_identity_type() == TYPE_ROOT);
+ if (!ps->identity->is_owner_of(bucket_acl.get_owner().id)) {
+ ldpp_dout(sync_env->dpp, 4) << "cross-account request for bucket owner "
+ << bucket_acl.get_owner().id << " != " << ps->identity->get_aclowner().id << dendl;
+ // cross-account requests evaluate the identity-based policies separately
+ // from the resource-based policies and require Allow from both
+ return ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, account_root, {}, {}, {},
+ info->user_policies, {}, op)
+ && ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, false, info->user_acl,
+ bucket_acl, bucket_policy, {}, {}, op);
+ } else {
+ // don't consult acls for same-account access. require an Allow from
+ // either identity- or resource-based policy
+ return ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, account_root, {}, {},
+ bucket_policy, info->user_policies,
+ {}, op);
+ }
+ }
+ constexpr bool account_root = false;
+ return ::verify_bucket_permission(sync_env->dpp, &(*ps), arn, account_root,
+ info->user_acl, bucket_acl,
+ bucket_policy, info->user_policies,
+ {}, op);
}
class RGWFetchObjFilter_Sync : public RGWFetchObjFilter_Default {
rgw_bucket_sync_pipe sync_pipe;
- std::shared_ptr<RGWUserPermHandler::Bucket> bucket_perms;
std::optional<rgw_sync_pipe_dest_params> verify_dest_params;
std::optional<ceph::real_time> mtime;
@@ -2782,10 +2794,8 @@ class RGWFetchObjFilter_Sync : public RGWFetchObjFilter_Default {
public:
RGWFetchObjFilter_Sync(rgw_bucket_sync_pipe& _sync_pipe,
- std::shared_ptr<RGWUserPermHandler::Bucket>& _bucket_perms,
std::optional<rgw_sync_pipe_dest_params>&& _verify_dest_params,
std::shared_ptr<bool>& _need_retry) : sync_pipe(_sync_pipe),
- bucket_perms(_bucket_perms),
verify_dest_params(std::move(_verify_dest_params)),
need_retry(_need_retry) {
*need_retry = false;
@@ -2852,12 +2862,6 @@ int RGWFetchObjFilter_Sync::filter(CephContext *cct,
*poverride_owner = acl_translation_owner;
}
}
- if (params.mode == rgw_sync_pipe_params::MODE_USER) {
- if (!bucket_perms->verify_object_permission(obj_attrs, RGW_PERM_READ)) {
- ldout(cct, 0) << "ERROR: " << __func__ << ": permission check failed: user not allowed to fetch object" << dendl;
- return -EPERM;
- }
- }
if (!dest_placement_rule &&
params.dest.storage_class) {
@@ -2900,7 +2904,6 @@ class RGWObjFetchCR : public RGWCoroutine {
rgw_sync_pipe_params::Mode param_mode;
std::optional<RGWUserPermHandler> user_perms;
- std::shared_ptr<RGWUserPermHandler::Bucket> source_bucket_perms;
RGWUserPermHandler::Bucket dest_bucket_perms;
std::optional<rgw_sync_pipe_dest_params> dest_params;
@@ -3016,20 +3019,10 @@ public:
return set_cr_error(retcode);
}
- if (!dest_bucket_perms.verify_bucket_permission(RGW_PERM_WRITE)) {
+ if (!dest_bucket_perms.verify_bucket_permission(dest_key.value_or(key), rgw::IAM::s3PutObject)) {
ldout(cct, 0) << "ERROR: " << __func__ << ": permission check failed: user not allowed to write into bucket (bucket=" << sync_pipe.info.dest_bucket.get_key() << ")" << dendl;
return -EPERM;
}
-
- /* init source bucket permission structure */
- source_bucket_perms = make_shared<RGWUserPermHandler::Bucket>();
- r = user_perms->init_bucket(sync_pipe.source_bucket_info,
- sync_pipe.source_bucket_attrs,
- source_bucket_perms.get());
- if (r < 0) {
- ldout(cct, 20) << "ERROR: " << __func__ << ": failed to init bucket perms manager for uid=" << *param_user << " bucket=" << sync_pipe.source_bucket_info.bucket.get_key() << dendl;
- return set_cr_error(retcode);
- }
}
yield {
@@ -3037,12 +3030,11 @@ public:
need_retry = make_shared<bool>();
}
auto filter = make_shared<RGWFetchObjFilter_Sync>(sync_pipe,
- source_bucket_perms,
std::move(dest_params),
need_retry);
call(new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->driver, sc->source_zone,
- nullopt,
+ param_user,
sync_pipe.source_bucket_info.bucket,
std::nullopt, sync_pipe.dest_bucket_info,
key, dest_key, versioned_epoch,
@@ -4528,7 +4520,7 @@ public:
}
tn->set_resource_name(SSTR(bucket_str_noinstance(bs.bucket) << "/" << key));
}
- if (retcode == -ERR_PRECONDITION_FAILED) {
+ if (retcode == -ERR_PRECONDITION_FAILED || retcode == -EPERM) {
pretty_print(sc->env, "Skipping object s3://{}/{} in sync from zone {}\n",
bs.bucket.name, key, zone_name);
set_status("Skipping object sync: precondition failed (object contains newer change or policy doesn't allow sync)");
@@ -6052,13 +6044,12 @@ int RGWSyncBucketCR::operate(const DoutPrefixProvider *dpp)
} else {
tn->log(20, SSTR("logged prev gen entry (bucket=" << source_bs.bucket << ", shard_id=" << source_bs.shard_id << ", gen=" << current_gen << " in error repo: retcode=" << retcode));
}
- } else {
+ }
retcode = -EAGAIN;
tn->log(10, SSTR("ERROR: requested sync of future generation "
<< *gen << " > " << current_gen
<< ", returning " << retcode << " for later retry"));
return set_cr_error(retcode);
- }
} else if (*gen < current_gen) {
tn->log(10, SSTR("WARNING: requested sync of past generation "
<< *gen << " < " << current_gen
diff --git a/src/rgw/driver/rados/rgw_datalog.cc b/src/rgw/driver/rados/rgw_datalog.cc
index 4c9503071ef..d7e57d7e1c1 100644
--- a/src/rgw/driver/rados/rgw_datalog.cc
+++ b/src/rgw/driver/rados/rgw_datalog.cc
@@ -576,7 +576,7 @@ int RGWDataChangesLog::renew_entries(const DoutPrefixProvider *dpp)
if (ret < 0) {
/* we don't really need to have a special handling for failed cases here,
* as this is just an optimization. */
- ldpp_dout(dpp, -1) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: be->push() returned " << ret << dendl;
return ret;
}
diff --git a/src/rgw/driver/rados/rgw_notify.cc b/src/rgw/driver/rados/rgw_notify.cc
index 7b31fd72bd4..5734284d1a3 100644
--- a/src/rgw/driver/rados/rgw_notify.cc
+++ b/src/rgw/driver/rados/rgw_notify.cc
@@ -21,6 +21,7 @@
#include "common/dout.h"
#include "rgw_url.h"
#include <chrono>
+#include <fmt/format.h>
#define dout_subsys ceph_subsys_rgw_notification
@@ -769,9 +770,10 @@ public:
});
// start the worker threads to do the actual queue processing
- const std::string WORKER_THREAD_NAME = "notif-worker";
for (auto worker_id = 0U; worker_id < worker_count; ++worker_id) {
- workers.emplace_back([this]() {
+ workers.emplace_back([this,worker_id]() {
+ const auto thread_name = fmt::format("notif-worker-{}", worker_id);
+ ceph_pthread_setname(thread_name.c_str());
try {
io_context.run();
} catch (const std::exception& err) {
@@ -779,11 +781,6 @@ public:
throw err;
}
});
- const auto thread_name = WORKER_THREAD_NAME+std::to_string(worker_id);
- if (const auto rc = ceph_pthread_setname(workers.back().native_handle(), thread_name.c_str()); rc != 0) {
- ldpp_dout(this, 1) << "ERROR: failed to set notification manager thread name to: " << thread_name
- << ". error: " << rc << dendl;
- }
}
ldpp_dout(this, 10) << "INfO: started notification manager with: " << worker_count << " workers" << dendl;
}
diff --git a/src/rgw/driver/rados/rgw_period.cc b/src/rgw/driver/rados/rgw_period.cc
index f18e8e46bc5..aacb9b6a09a 100644
--- a/src/rgw/driver/rados/rgw_period.cc
+++ b/src/rgw/driver/rados/rgw_period.cc
@@ -68,20 +68,6 @@ int RGWPeriod::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
return ret;
}
-int RGWPeriod::add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y)
-{
- if (zonegroup.realm_id != realm_id) {
- return 0;
- }
- int ret = period_map.update(zonegroup, cct);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl;
- return ret;
- }
-
- return store_info(dpp, false, y);
-}
-
int RGWPeriod::update(const DoutPrefixProvider *dpp, optional_yield y)
{
auto zone_svc = sysobj_svc->get_zone_svc();
diff --git a/src/rgw/driver/rados/rgw_pubsub_push.cc b/src/rgw/driver/rados/rgw_pubsub_push.cc
index 07d65fa1028..d22c61e9b08 100644
--- a/src/rgw/driver/rados/rgw_pubsub_push.cc
+++ b/src/rgw/driver/rados/rgw_pubsub_push.cc
@@ -281,7 +281,7 @@ public:
conn_id, _endpoint, get_bool(args, "use-ssl", false),
get_bool(args, "verify-ssl", true), args.get_optional("ca-location"),
args.get_optional("mechanism"), args.get_optional("user-name"),
- args.get_optional("password"))) {
+ args.get_optional("password"), args.get_optional("kafka-brokers"))) {
throw configuration_error("Kafka: failed to create connection to: " +
_endpoint);
}
@@ -434,4 +434,3 @@ void RGWPubSubEndpoint::shutdown_all() {
#endif
shutdown_http_manager();
}
-
diff --git a/src/rgw/driver/rados/rgw_putobj_processor.cc b/src/rgw/driver/rados/rgw_putobj_processor.cc
index f04ed1db8d4..9e27c5adbc9 100644
--- a/src/rgw/driver/rados/rgw_putobj_processor.cc
+++ b/src/rgw/driver/rados/rgw_putobj_processor.cc
@@ -597,6 +597,11 @@ int MultipartObjectProcessor::complete(
}
if (r < 0) {
+ if (r == -ETIMEDOUT) {
+ // The meta_obj_ref write may eventually succeed, so clear the set of objects scheduled for deletion. If it
+ // never succeeds, we'll orphan any tail objects, as if we'd crashed before that write
+ writer.clear_written();
+ }
return r == -ENOENT ? -ERR_NO_SUCH_UPLOAD : r;
}
@@ -783,6 +788,11 @@ int AppendObjectProcessor::complete(
attrs, rctx, writer.get_trace(),
flags & rgw::sal::FLAG_LOG_OP);
if (r < 0) {
+ if (r == -ETIMEDOUT) {
+ // The head object write may eventually succeed, so clear the set of objects scheduled for deletion. If it
+ // never succeeds, we'll orphan any tail objects, as if we'd crashed before that write
+ writer.clear_written();
+ }
return r;
}
if (!obj_op.meta.canceled) {
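Both hunks above apply the same rule: on -ETIMEDOUT the outcome of the head/meta write is unknown, so the processor must not delete tail objects that a late success would still reference. A generic sketch of that rule (Writer is a hypothetical stand-in, not the RGW writer):

    // timeout_rollback.cc -- illustrative pattern only
    #include <cerrno>
    #include <string>
    #include <vector>

    struct Writer {                      // hypothetical stand-in type
      std::vector<std::string> written;  // tail objects that would be deleted on failure
      void clear_written() { written.clear(); }
    };

    int finish_write(int r, Writer& writer) {
      if (r < 0) {
        if (r == -ETIMEDOUT) {
          // outcome unknown: drop the deletion list so the tails survive
          // (worst case they are orphaned, as if we had crashed before the write)
          writer.clear_written();
        }
        return r;
      }
      return 0;
    }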
diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc
index a133b54dc59..69075c506f1 100644
--- a/src/rgw/driver/rados/rgw_rados.cc
+++ b/src/rgw/driver/rados/rgw_rados.cc
@@ -1930,11 +1930,58 @@ int RGWRados::Bucket::List::list_objects_ordered(
": finished due to getting past requested namespace \"" <<
params.ns << "\"" << dendl;
goto done;
- }
+ } else if (!obj.ns.empty()) {
+ // We're in the namespace range and we're enforcing an empty
+ // namespace, therefore we can skip past a contiguous chunk
+ // of namespaced entries. Namespaces are demarcated in the
+ // index key by underscores before and after the namespace
+ // name (e.g., "_somenamespace_somekey"). Also, regular
+ // entries might begin with an underscore, in which case
+ // they're escaped with another underscore (e.g., "_foobar"
+ // is encoded as "__foobar"). We also have to account for
+ // the fact that in lexical ordering there are characters
+ // both before underscore (e.g., uppercase letters) and
+ // after (e.g., lowercase letters). So that means there can
+ // be five distinct and meaningful regions in the lexical
+ // ordering of entries, which we'll use examples to help
+ // illustrate:
+
+ // 1. FOOBAR (regular pre-underscore)
+ // 2. _BAZ_foobar (namespaced, with namespace pre-underscore)
+ // 3. __foobar (regular with escaped underscore)
+ // 4. _baz_foobar (namespaced, with namespace post-underscore)
+ // 5. foobar (regular, post-underscore)
+
+ // So if we're skipping namespaces and recognize we're in
+ // region 2, we must skip to region 3. And if we recognize
+ // we're in region 4, we skip to region 5.
+ rgw_obj_index_key potential_marker;
+ if (obj.ns[0] < '_') {
+ // We're in region 2, so need to skip to region 3. The
+ // caret (^) is the ASCII character that precedes
+ // underscore, so we'll set the marker to the
+ // caret/circumflex followed by 0xFF, so the key after can
+ // be in the double underscore range.
+ potential_marker = rgw_obj_index_key("_^\xFF");
+ } else {
+ // we're past the escaped underscore region (i.e.,
+ // starting with two underscores), so we can skip past the
+ // underscore region
+ potential_marker = rgw_obj_index_key("_\xFF");
+ }
+
+ if (cur_marker < potential_marker) {
+ ldpp_dout(dpp, 20) << __func__ <<
+ ": skipping past region of namespaced entries, starting with \"" <<
+ entry.key << "\"" << dendl;
+ cur_marker = potential_marker;
+ break; // leave inner loop (for) and allow another cls call
+ }
+ }
- /* we're skipping past namespaced objects */
+ // we're skipping past namespaced objects
ldpp_dout(dpp, 20) << __func__ <<
- ": skipping past namespaced objects, including \"" << entry.key <<
+ ": skipping past individual namespaced entry \"" << entry.key <<
"\"" << dendl;
continue;
}
@@ -1955,7 +2002,7 @@ int RGWRados::Bucket::List::list_objects_ordered(
if (params.access_list_filter &&
!params.access_list_filter(obj.name, index_key.name)) {
ldpp_dout(dpp, 20) << __func__ <<
- ": skipping past namespaced objects, including \"" << entry.key <<
+ ": skipping past filtered out entry \"" << entry.key <<
"\"" << dendl;
continue;
}
@@ -3343,12 +3390,17 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si
return 0;
done_cancel:
- int ret = index_op->cancel(rctx.dpp, meta.remove_objs, rctx.y, log_op);
- if (ret < 0) {
- ldpp_dout(rctx.dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
- }
+ // if r == -ETIMEDOUT, rgw can't determine whether or not the rados op succeeded
+ // we shouldn't be calling index_op->cancel() in this case
+ // Instead, we should leave that pending entry in the index so that bucket listing can recover with check_disk_state() and cls_rgw_suggest_changes()
+ if (r != -ETIMEDOUT) {
+ int ret = index_op->cancel(rctx.dpp, meta.remove_objs, rctx.y, log_op);
+ if (ret < 0) {
+ ldpp_dout(rctx.dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
+ }
- meta.canceled = true;
+ meta.canceled = true;
+ }
/* we lost in a race. There are a few options:
* - existing object was rewritten (ECANCELED)
@@ -5252,13 +5304,7 @@ int RGWRados::restore_obj_from_cloud(RGWLCCloudTierCtx& tier_ctx,
ceph::real_time restore_time = real_clock::now();
{
- char buf[32];
- utime_t ut(restore_time);
- snprintf(buf, sizeof(buf), "%lld.%09lld",
- (long long)ut.sec(),
- (long long)ut.nsec());
bufferlist bl;
- bl.append(buf, 32);
encode(restore_time, bl);
attrs[RGW_ATTR_RESTORE_TIME] = std::move(bl);
}
@@ -5278,13 +5324,7 @@ int RGWRados::restore_obj_from_cloud(RGWLCCloudTierCtx& tier_ctx,
delete_at = expiration_date;
{
- char buf[32];
- utime_t ut(expiration_date);
- snprintf(buf, sizeof(buf), "%lld.%09lld",
- (long long)ut.sec(),
- (long long)ut.nsec());
bufferlist bl;
- bl.append(buf, 32);
encode(expiration_date, bl);
attrs[RGW_ATTR_RESTORE_EXPIRY_DATE] = std::move(bl);
}
@@ -5445,7 +5485,7 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& ob
}
/* if the bucket is not synced we can remove the meta file */
- if (!svc.zone->is_syncing_bucket_meta(bucket)) {
+ if (!svc.zone->is_syncing_bucket_meta()) {
RGWObjVersionTracker objv_tracker;
r = ctl.bucket->remove_bucket_instance_info(bucket, bucket_info, y, dpp);
if (r < 0) {
@@ -6105,7 +6145,11 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi
const bool need_invalidate = (r == -ECANCELED);
int64_t poolid = ioctx.get_id();
- if (r >= 0) {
+ if (r == -ETIMEDOUT) {
+ // rgw can't determine whether or not the delete succeeded, shouldn't be calling either of complete_del() or cancel()
+ // leaving that pending entry in the index so that bucket listing can recover with check_disk_state() and cls_rgw_suggest_changes()
+ ldpp_dout(dpp, 0) << "ERROR: rgw_rados_operate returned r=" << r << dendl;
+ } else if (r >= 0 || r == -ENOENT) {
tombstone_cache_t *obj_tombstone_cache = store->get_tombstone_cache();
if (obj_tombstone_cache) {
tombstone_entry entry{*state};
@@ -6918,13 +6962,13 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* octx, RGWBu
}
return 0;
-}
+} /* RGWRados::set_attrs() */
-static int get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y,
- RGWRados* store, RGWBucketInfo& bucket_info,
- RGWObjectCtx* rctx, RGWObjManifest* manifest,
- int part_num, int* parts_count, bool prefetch,
- RGWObjState** pstate, RGWObjManifest** pmanifest)
+int RGWRados::get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y,
+ RGWRados* store, RGWBucketInfo& bucket_info,
+ RGWObjectCtx* rctx, RGWObjManifest* manifest,
+ int part_num, int* parts_count, bool prefetch,
+ RGWObjState** pstate, RGWObjManifest** pmanifest)
{
if (!manifest) {
return -ERR_INVALID_PART;
@@ -7003,6 +7047,9 @@ static int get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y,
// update the object size
sm->state.size = part_manifest.get_obj_size();
+ if (!sm->state.attrset.count(RGW_ATTR_COMPRESSION)) {
+ sm->state.accounted_size = sm->state.size;
+ }
*pmanifest = &part_manifest;
return 0;
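The namespace-skip logic added to list_objects_ordered() above picks a marker that jumps over a whole lexical region of namespaced index keys, following the five regions described in the comment. A toy sketch of just that marker choice (plain std::string stands in for rgw_obj_index_key):

    // ns_skip_marker.cc -- toy model of the region-skip marker choice above
    #include <cassert>
    #include <string>

    // ns is the namespace parsed out of an index key such as "_BAZ_foobar";
    // it never contains the surrounding underscores.
    std::string skip_marker_for_namespace(const std::string& ns) {
      assert(!ns.empty());
      if (ns[0] < '_') {
        // region 2 (namespace sorts before '_'): skip to just before the
        // escaped double-underscore region
        return std::string("_^\xFF");
      }
      // region 4 (namespace sorts after '_'): skip past the whole '_' region
      return std::string("_\xFF");
    }

    int main() {
      assert(skip_marker_for_namespace("BAZ") == "_^\xFF");
      assert(skip_marker_for_namespace("baz") == "_\xFF");
      return 0;
    }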
diff --git a/src/rgw/driver/rados/rgw_rados.h b/src/rgw/driver/rados/rgw_rados.h
index b24823b60dc..fe79916392f 100644
--- a/src/rgw/driver/rados/rgw_rados.h
+++ b/src/rgw/driver/rados/rgw_rados.h
@@ -1071,6 +1071,12 @@ public:
}; // class RGWRados::Bucket::List
}; // class RGWRados::Bucket
+ static int get_part_obj_state(const DoutPrefixProvider* dpp, optional_yield y,
+ RGWRados* store, RGWBucketInfo& bucket_info,
+ RGWObjectCtx* rctx, RGWObjManifest* manifest,
+ int part_num, int* parts_count, bool prefetch,
+ RGWObjState** pstate, RGWObjManifest** pmanifest);
+
int on_last_entry_in_listing(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const std::string& obj_prefix,
diff --git a/src/rgw/driver/rados/rgw_rest_bucket.cc b/src/rgw/driver/rados/rgw_rest_bucket.cc
index dc71e40335f..0c3f7029604 100644
--- a/src/rgw/driver/rados/rgw_rest_bucket.cc
+++ b/src/rgw/driver/rados/rgw_rest_bucket.cc
@@ -141,6 +141,7 @@ void RGWOp_Bucket_Link::execute(optional_yield y)
RGWBucketAdminOpState op_state;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
+ RESTArgs::get_string(s, "account-id", op_state.account_id, &op_state.account_id);
RESTArgs::get_string(s, "bucket", bucket, &bucket);
RESTArgs::get_string(s, "bucket-id", bucket_id, &bucket_id);
RESTArgs::get_string(s, "new-bucket-name", new_bucket_name, &new_bucket_name);
@@ -184,6 +185,7 @@ void RGWOp_Bucket_Unlink::execute(optional_yield y)
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
+ RESTArgs::get_string(s, "account-id", op_state.account_id, &op_state.account_id);
RESTArgs::get_string(s, "bucket", bucket, &bucket);
op_state.set_user_id(uid);
diff --git a/src/rgw/driver/rados/rgw_sal_rados.cc b/src/rgw/driver/rados/rgw_sal_rados.cc
index 11b86a25841..4c05421653b 100644
--- a/src/rgw/driver/rados/rgw_sal_rados.cc
+++ b/src/rgw/driver/rados/rgw_sal_rados.cc
@@ -13,8 +13,11 @@
*
*/
+#include <asm-generic/errno-base.h>
#include <errno.h>
+#include <fmt/core.h>
#include <stdlib.h>
+#include <string>
#include <system_error>
#include <filesystem>
#include <unistd.h>
@@ -26,9 +29,12 @@
#include "include/function2.hpp"
#include "common/Clock.h"
+#include "common/ceph_time.h"
#include "common/errno.h"
#include "role.h"
+#include "rgw_obj_types.h"
+#include "rgw_rados.h"
#include "rgw_sal.h"
#include "rgw_sal_rados.h"
#include "rgw_bucket.h"
@@ -56,6 +62,7 @@
#include "rgw_rest_realm.h"
#include "rgw_rest_user.h"
#include "rgw_lc_tier.h"
+#include "rgw_bucket_logging.h"
#include "services/svc_sys_obj.h"
#include "services/svc_mdlog.h"
#include "services/svc_cls.h"
@@ -422,6 +429,10 @@ int RadosBucket::remove(const DoutPrefixProvider* dpp,
ldpp_dout(dpp, -1) << "ERROR: unable to remove notifications from bucket. ret=" << ps_ret << dendl;
}
+ if (ret = rgw::bucketlogging::bucket_deletion_cleanup(dpp, store, this, y); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: could not cleanup bucket logging configuration and pending objects, ret = " << ret << dendl;
+ }
+
ret = store->ctl()->bucket->unlink_bucket(rados, info.owner,
info.bucket, y, dpp, false);
if (ret < 0) {
@@ -716,7 +727,7 @@ int RadosBucket::merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new
attrs[it.first] = it.second;
}
return store->ctl()->bucket->set_bucket_instance_attrs(get_info(),
- new_attrs, &get_info().objv_tracker, y, dpp);
+ attrs, &get_info().objv_tracker, y, dpp);
}
int RadosBucket::try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime, optional_yield y)
@@ -1017,6 +1028,281 @@ int RadosBucket::remove_topics(RGWObjVersionTracker* objv_tracker,
objv_tracker, y);
}
+int RadosBucket::get_logging_object_name(std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) {
+ rgw_pool data_pool;
+ const auto obj_name_oid = bucketlogging::object_name_oid(this, prefix);
+ if (!store->getRados()->get_obj_data_pool(get_placement_rule(), rgw_obj{get_key(), obj_name_oid}, &data_pool)) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() <<
+ "' when getting logging object name" << dendl;
+ return -EIO;
+ }
+ bufferlist bl;
+ const int ret = rgw_get_system_obj(store->svc()->sysobj,
+ data_pool,
+ obj_name_oid,
+ bl,
+ objv_tracker,
+ nullptr,
+ y,
+ dpp,
+ nullptr,
+ nullptr);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get logging object name from '" << obj_name_oid << "'. ret = " << ret << dendl;
+ return ret;
+ }
+ obj_name = bl.to_str();
+ return 0;
+}
+
+int RadosBucket::set_logging_object_name(const std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ bool new_obj,
+ RGWObjVersionTracker* objv_tracker) {
+ rgw_pool data_pool;
+ const auto obj_name_oid = bucketlogging::object_name_oid(this, prefix);
+ if (!store->getRados()->get_obj_data_pool(get_placement_rule(), rgw_obj{get_key(), obj_name_oid}, &data_pool)) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() <<
+ "' when setting logging object name" << dendl;
+ return -EIO;
+ }
+ bufferlist bl;
+ bl.append(obj_name);
+ const int ret = rgw_put_system_obj(dpp, store->svc()->sysobj,
+ data_pool,
+ obj_name_oid,
+ bl,
+ new_obj,
+ objv_tracker,
+ ceph::real_time::clock::now(),
+ y,
+ nullptr);
+ if (ret == -EEXIST) {
+ ldpp_dout(dpp, 20) << "INFO: race detected in initializing '" << obj_name_oid << "' with logging object name:'" << obj_name << "'. ret = " << ret << dendl;
+ } else if (ret == -ECANCELED) {
+ ldpp_dout(dpp, 20) << "INFO: race detected in updating logging object name '" << obj_name << "' at '" << obj_name_oid << "'. ret = " << ret << dendl;
+ } else if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to set logging object name '" << obj_name << "' at '" << obj_name_oid << "'. ret = " << ret << dendl;
+ }
+ return ret;
+}
+
+int RadosBucket::remove_logging_object_name(const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) {
+ rgw_pool data_pool;
+ const auto obj_name_oid = bucketlogging::object_name_oid(this, prefix);
+ if (!store->getRados()->get_obj_data_pool(get_placement_rule(), rgw_obj{get_key(), obj_name_oid}, &data_pool)) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() <<
+ "' when removing logging object name" << dendl;
+ return -EIO;
+ }
+ return rgw_delete_system_obj(dpp, store->svc()->sysobj,
+ data_pool,
+ obj_name_oid,
+ objv_tracker,
+ y);
+}
+
+std::string to_temp_object_name(const rgw::sal::Bucket* bucket, const std::string& obj_name) {
+ return fmt::format("{}__shadow_{}0",
+ bucket->get_bucket_id(),
+ obj_name);
+}
+
+int RadosBucket::remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) {
+ rgw_pool data_pool;
+ const rgw_obj head_obj{get_key(), obj_name};
+ const auto placement_rule = get_placement_rule();
+
+ if (!store->getRados()->get_obj_data_pool(placement_rule, head_obj, &data_pool)) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() <<
+ "' when deleting logging object" << dendl;
+ return -EIO;
+ }
+
+ const auto temp_obj_name = to_temp_object_name(this, obj_name);
+ return rgw_delete_system_obj(dpp, store->svc()->sysobj,
+ data_pool,
+ temp_obj_name,
+ nullptr,
+ y);
+}
+
+int RadosBucket::commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) {
+ rgw_pool data_pool;
+ const rgw_obj head_obj{get_key(), obj_name};
+ const auto placement_rule = get_placement_rule();
+
+ if (!store->getRados()->get_obj_data_pool(placement_rule, head_obj, &data_pool)) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() <<
+ "' when committing logging object" << dendl;
+ return -EIO;
+ }
+
+ const auto temp_obj_name = to_temp_object_name(this, obj_name);
+ std::map<string, bufferlist> obj_attrs;
+ ceph::real_time mtime;
+ bufferlist bl_data;
+ if (const auto ret = rgw_get_system_obj(store->svc()->sysobj,
+ data_pool,
+ temp_obj_name,
+ bl_data,
+ nullptr,
+ &mtime,
+ y,
+ dpp,
+ &obj_attrs,
+ nullptr); ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to read logging data when committing object '" << temp_obj_name
+ << ". error: " << ret << dendl;
+ return ret;
+ } else if (ret == -ENOENT) {
+ ldpp_dout(dpp, 1) << "WARNING: temporary logging object '" << temp_obj_name << "' does not exist" << dendl;
+ return 0;
+ }
+
+ uint64_t size = bl_data.length();
+ const uint64_t max_obj_size = store->ctx()->_conf->osd_max_object_size;
+ RGWObjManifest manifest;
+ manifest.set_prefix(obj_name);
+ manifest.set_trivial_rule(0, max_obj_size);
+ RGWObjManifest::generator manifest_gen;
+ if (const auto ret = manifest_gen.create_begin(store->ctx(), &manifest,
+ placement_rule,
+ nullptr, // no special placement for tail
+ get_key(),
+ head_obj); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to create manifest when committing logging object. error: " <<
+ ret << dendl;
+ return ret;
+ }
+
+ if (const auto ret = manifest_gen.create_next(size); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to add object to manifest when committing logging object. error: " <<
+ ret << dendl;
+ return ret;
+ }
+
+ if (const auto expected_temp_obj = manifest_gen.get_cur_obj(store->getRados());
+ temp_obj_name != expected_temp_obj.oid) {
+ // TODO: cleanup temporary object, commit would never succeed
+ ldpp_dout(dpp, 1) << "ERROR: temporary logging object name mismatch: '" <<
+ temp_obj_name << "' != '" << expected_temp_obj.oid << "'" << dendl;
+ return -EINVAL;
+ }
+
+ RGWObjectCtx obj_ctx(store);
+ obj_ctx.set_atomic(head_obj);
+ const auto& bucket_info = get_info();
+ RGWRados::Object rgw_head_obj(store->getRados(),
+ bucket_info,
+ obj_ctx,
+ head_obj);
+ // disable versioning on the logging objects
+ rgw_head_obj.set_versioning_disabled(true);
+ RGWRados::Object::Write head_obj_wop(&rgw_head_obj);
+ head_obj_wop.meta.manifest = &manifest;
+ head_obj_wop.meta.bucket_owner = bucket_info.owner;
+ head_obj_wop.meta.flags = PUT_OBJ_CREATE;
+ head_obj_wop.meta.mtime = &mtime;
+ // TODO: head_obj_wop.meta.ptag
+ // the owner of the logging object is the bucket owner
+ // not the user that wrote the log that triggered the commit
+ const ACLOwner owner{bucket_info.owner, ""}; // TODO: missing display name
+ head_obj_wop.meta.owner = owner;
+ const auto etag = TOPNSPC::crypto::digest<TOPNSPC::crypto::MD5>(bl_data).to_str();
+ bufferlist bl_etag;
+ bl_etag.append(etag.c_str());
+ obj_attrs.emplace(RGW_ATTR_ETAG, std::move(bl_etag));
+ const req_context rctx{dpp, y, nullptr};
+ jspan_context trace{false, false};
+ if (const auto ret = head_obj_wop.write_meta(0, size, obj_attrs, rctx, trace); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to commit logging object '" << temp_obj_name <<
+ "' to bucket id '" << get_info().bucket <<"'. error: " << ret << dendl;
+ return ret;
+ }
+ ldpp_dout(dpp, 20) << "INFO: committed logging object '" << temp_obj_name <<
+ "' with size of " << size << " bytes, to bucket '" << get_key() << "' as '" <<
+ obj_name << "'" << dendl;
+ return 0;
+}
+
+struct BucketLoggingCompleteArg {
+ BucketLoggingCompleteArg(const std::string& _obj_name, size_t _size, CephContext* _cct)
+ : obj_name{_obj_name}, size{_size}, cct{_cct} {}
+ const std::string obj_name;
+ const size_t size;
+ CephContext* cct;
+};
+
+void bucket_logging_completion(rados_completion_t completion, void* args) {
+ auto* aio_comp = reinterpret_cast<librados::AioCompletionImpl*>(completion);
+ std::unique_ptr<BucketLoggingCompleteArg> logging_args(reinterpret_cast<BucketLoggingCompleteArg*>(args));
+ if (aio_comp->get_return_value() < 0) {
+ ldout(logging_args->cct, 1) << "ERROR: failed to complete append to logging object '" << logging_args->obj_name <<
+ "'. ret = " << aio_comp->get_return_value() << dendl;
+ } else {
+ ldout(logging_args->cct, 20) << "INFO: wrote " << logging_args->size << " bytes to logging object '" <<
+ logging_args->obj_name << "'" << dendl;
+ }
+}
+
+int RadosBucket::write_logging_object(const std::string& obj_name,
+ const std::string& record,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ bool async_completion) {
+ const auto temp_obj_name = to_temp_object_name(this, obj_name);
+ rgw_pool data_pool;
+ rgw_obj obj{get_key(), obj_name};
+ if (!store->getRados()->get_obj_data_pool(get_placement_rule(), obj, &data_pool)) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get data pool for bucket '" << get_name() <<
+ "' when writing logging object" << dendl;
+ return -EIO;
+ }
+ librados::IoCtx io_ctx;
+ if (const auto ret = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), data_pool, io_ctx); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get IO context for logging object from data pool:" << data_pool.to_str() << dendl;
+ return -EIO;
+ }
+ bufferlist bl;
+ bl.append(record);
+ bl.append("\n");
+ // append the record to the temporary object
+ // if this is the first record, the object will be created
+ librados::ObjectWriteOperation op;
+ op.append(bl);
+ if (async_completion) {
+ aio_completion_ptr completion{librados::Rados::aio_create_completion()};
+ auto arg = std::make_unique<BucketLoggingCompleteArg>(temp_obj_name, record.length(), store->ctx());
+ completion->set_complete_callback(arg.get(), bucket_logging_completion);
+ if (const auto ret = io_ctx.aio_operate(temp_obj_name, completion.get(), &op); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to append to logging object '" << temp_obj_name <<
+ "'. ret = " << ret << dendl;
+ return ret;
+ }
+ std::ignore = arg.release();
+ std::ignore = completion.release();
+ return 0;
+ }
+ if (const auto ret = rgw_rados_operate(dpp, io_ctx, temp_obj_name, &op, y); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to append to logging object '" << temp_obj_name <<
+ "'. ret = " << ret << dendl;
+ return ret;
+ }
+ ldpp_dout(dpp, 20) << "INFO: wrote " << record.length() << " bytes to logging object '" <<
+ temp_obj_name << "'" << dendl;
+ return 0;
+}
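
The new logging-object methods above are meant to be driven in three steps: look up the pending object name, append records to it, and finally commit it under its final name in the log bucket. A minimal caller-side sketch under that assumption (the bucket, prefix, record, y and dpp variables are placeholders, not taken from this patch):

  // sketch only: drive the RadosBucket logging-object API added above
  std::string obj_name;
  RGWObjVersionTracker objv;
  if (bucket->get_logging_object_name(obj_name, prefix, y, dpp, &objv) == 0) {
    // append one record; async_completion=false waits for the RADOS write
    bucket->write_logging_object(obj_name, record, y, dpp, false);
    // later, seal the temporary object under its final name in the log bucket
    bucket->commit_logging_object(obj_name, y, dpp);
  }
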
+
std::unique_ptr<User> RadosStore::get_user(const rgw_user &u)
{
return std::make_unique<RadosUser>(this, u);
@@ -1652,7 +1938,7 @@ int RadosStore::read_topics(const std::string& tenant, rgw_pubsub_topics& topics
}
int RadosStore::stat_topics_v1(const std::string& tenant, optional_yield y, const DoutPrefixProvider *dpp) {
- return rgw_stat_system_obj(dpp, svc()->sysobj, svc()->zone->get_zone_params().log_pool, topics_oid(tenant), nullptr, nullptr, y, nullptr);
+ return rgw_stat_system_obj(dpp, svc()->sysobj, svc()->zone->get_zone_params().log_pool, topics_oid(tenant), nullptr, nullptr, nullptr, y, nullptr);
}
int RadosStore::write_topics(const std::string& tenant, const rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker,
@@ -2228,7 +2514,108 @@ bool RadosObject::is_sync_completed(const DoutPrefixProvider* dpp,
const rgw_bi_log_entry& earliest_marker = entries.front();
return earliest_marker.timestamp > obj_mtime;
-}
+} /* is_sync_completed */
+
+int RadosObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y)
+{
+ int ret{0};
+
+ /* require an object with a manifest, so call to get_obj_state() must precede this */
+ if (! manifest) {
+ return -EINVAL;
+ }
+
+ RGWObjManifest::obj_iterator end = manifest->obj_end(dpp);
+ if (end.get_cur_part_id() == 0) { // not multipart
+ ldpp_dout(dpp, 20) << __func__ << " object does not have a multipart manifest"
+ << dendl;
+ return 0;
+ }
+
+ auto end_part_id = end.get_cur_part_id();
+ auto parts_count = (end_part_id == 1) ? 1 : end_part_id - 1;
+ if (marker > (parts_count - 1)) {
+ return 0;
+ }
+
+ RGWObjManifest::obj_iterator part_iter = manifest->obj_begin(dpp);
+
+ if (marker != 0) {
+ ldpp_dout_fmt(dpp, 20,
+ "{} seeking to part #{} in the object manifest",
+ __func__, marker);
+
+ part_iter = manifest->obj_find_part(dpp, marker + 1);
+
+ if (part_iter == end) {
+ ldpp_dout_fmt(dpp, 5,
+ "{} failed to find part #{} in the object manifest",
+ __func__, marker + 1);
+ return 0;
+ }
+ }
+
+ RGWObjectCtx& obj_ctx = get_ctx();
+ RGWBucketInfo& bucket_info = get_bucket()->get_info();
+
+ Object::Part obj_part{};
+ for (; part_iter != manifest->obj_end(dpp); ++part_iter) {
+
+ /* we're only interested in the first object in each logical part */
+ auto cur_part_id = part_iter.get_cur_part_id();
+ if (cur_part_id == obj_part.part_number) {
+ continue;
+ }
+
+ if (max_parts < 1) {
+ *truncated = true;
+ break;
+ }
+
+ /* get_part_obj_state alters the passed manifest** to point to a part
+ * manifest, which we don't want to leak out here */
+ RGWObjManifest* obj_m = manifest;
+ RGWObjState* astate;
+ bool part_prefetch = false;
+ ret = RGWRados::get_part_obj_state(dpp, y, store->getRados(), bucket_info, &obj_ctx,
+ obj_m, cur_part_id, &parts_count,
+ part_prefetch, &astate, &obj_m);
+
+ if (ret < 0) {
+ ldpp_dout_fmt(dpp, 4,
+ "{} get_part_obj_state() failed ret={}",
+ __func__, ret);
+ break;
+ }
+
+ obj_part.part_number = part_iter.get_cur_part_id();
+ obj_part.part_size = astate->accounted_size;
+
+ if (auto iter = astate->attrset.find(RGW_ATTR_CKSUM);
+ iter != astate->attrset.end()) {
+ try {
+ rgw::cksum::Cksum part_cksum;
+ auto ck_iter = iter->second.cbegin();
+ part_cksum.decode(ck_iter);
+ obj_part.cksum = std::move(part_cksum);
+ } catch (buffer::error& err) {
+ ldpp_dout_fmt(dpp, 4,
+ "WARN: {} could not decode stored cksum, "
+ "caught buffer::error",
+ __func__);
+ }
+ }
+
+ each_func(obj_part);
+ *next_marker = ++marker;
+ --max_parts;
+ } /* each part */
+
+ return ret;
+} /* RadosObject::list_parts */
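
A caller is expected to pass a callable that receives each Object::Part; the exact signature of list_parts_each_t is an assumption here, as is the lambda below. A minimal sketch:

  // sketch only: enumerate parts of a completed multipart object 'obj'
  int next_marker = 0;
  bool truncated = false;
  int r = obj->list_parts(dpp, cct, 100 /*max_parts*/, 0 /*marker*/,
                          &next_marker, &truncated,
                          [&](const rgw::sal::Object::Part& part) {
                            ldpp_dout(dpp, 20) << "part " << part.part_number
                                               << " size " << part.part_size << dendl;
                            return 0; // assumed callable signature
                          },
                          y);
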
int RadosObject::load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh)
{
@@ -3412,7 +3799,7 @@ int RadosMultipartUpload::init(const DoutPrefixProvider *dpp, optional_yield y,
multipart_upload_info upload_info;
upload_info.dest_placement = dest_placement;
upload_info.cksum_type = cksum_type;
-
+
if (obj_legal_hold) {
upload_info.obj_legal_hold_exist = true;
upload_info.obj_legal_hold = (*obj_legal_hold);
@@ -4257,11 +4644,6 @@ bool RadosZone::get_redirect_endpoint(std::string* endpoint)
return true;
}
-bool RadosZone::has_zonegroup_api(const std::string& api) const
-{
- return store->svc()->zone->has_zonegroup_api(api);
-}
-
const std::string& RadosZone::get_current_period_id()
{
return store->svc()->zone->get_current_period_id();
@@ -4508,8 +4890,8 @@ void RadosLuaManager::handle_reload_notify(const DoutPrefixProvider* dpp, option
#ifdef WITH_RADOSGW_LUA_PACKAGES
rgw::lua::packages_t failed_packages;
std::string install_dir;
- auto r = rgw::lua::install_packages(dpp, store,
- y, store->ctx()->_conf.get_val<std::string>("rgw_luarocks_location"),
+ auto r = rgw::lua::install_packages(dpp, store,
+ y, store->ctx()->_conf.get_val<std::string>("rgw_luarocks_location"),
failed_packages, install_dir);
if (r < 0) {
ldpp_dout(dpp, 1) << "WARNING: failed to install Lua packages from allowlist. error code: " << r
@@ -4520,9 +4902,9 @@ void RadosLuaManager::handle_reload_notify(const DoutPrefixProvider* dpp, option
ldpp_dout(dpp, 5) << "WARNING: failed to install Lua package: " << p
<< " from allowlist" << dendl;
}
-#else
+#else
const int r = 0;
-#endif
+#endif
ack_reload(dpp, notify_id, cookie, r);
}
@@ -4544,7 +4926,7 @@ int RadosLuaManager::reload_packages(const DoutPrefixProvider *dpp, optional_yie
<< ". error: " << cpp_strerror(r) << dendl;
return r;
}
-
+
std::vector<librados::notify_ack_t> acks;
std::vector<librados::notify_timeout_t> timeouts;
ioctx.decode_notify_response(reply_bl, &acks, &timeouts);
@@ -4558,7 +4940,7 @@ int RadosLuaManager::reload_packages(const DoutPrefixProvider *dpp, optional_yie
auto iter = ack.payload_bl.cbegin();
ceph::decode(r, iter);
} catch (buffer::error& err) {
- ldpp_dout(dpp, 1) << "ERROR: couldn't decode Lua packages reload status. error: " <<
+ ldpp_dout(dpp, 1) << "ERROR: couldn't decode Lua packages reload status. error: " <<
err.what() << dendl;
return -EINVAL;
}
diff --git a/src/rgw/driver/rados/rgw_sal_rados.h b/src/rgw/driver/rados/rgw_sal_rados.h
index be681c9f975..e65c3c0050e 100644
--- a/src/rgw/driver/rados/rgw_sal_rados.h
+++ b/src/rgw/driver/rados/rgw_sal_rados.h
@@ -107,7 +107,6 @@ class RadosZone : public StoreZone {
virtual const std::string& get_name() const override;
virtual bool is_writeable() override;
virtual bool get_redirect_endpoint(std::string* endpoint) override;
- virtual bool has_zonegroup_api(const std::string& api) const override;
virtual const std::string& get_current_period_id() override;
virtual const RGWAccessKey& get_system_key() override;
virtual const std::string& get_realm_name() override;
@@ -593,12 +592,18 @@ class RadosObject : public StoreObject {
StoreObject::set_compressed();
}
-
virtual bool is_sync_completed(const DoutPrefixProvider* dpp,
const ceph::real_time& obj_mtime) override;
/* For rgw_admin.cc */
RGWObjState& get_state() { return state; }
virtual int load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh = true) override;
+
+  /** If multipart, enumerate (a range [marker..marker+min(max_parts, parts_count-1)] of) parts of the object */
+ virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y) override;
+
virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y, uint32_t flags) override;
virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj = NULL) override;
virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp) override;
@@ -775,6 +780,24 @@ class RadosBucket : public StoreBucket {
optional_yield y, const DoutPrefixProvider *dpp) override;
int remove_topics(RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override;
+ int get_logging_object_name(std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) override;
+ int set_logging_object_name(const std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ bool new_obj,
+ RGWObjVersionTracker* objv_tracker) override;
+ int remove_logging_object_name(const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) override;
+ int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override;
+ int remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override;
+ int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) override;
private:
int link(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y, bool update_entrypoint = true, RGWObjVersionTracker* objv = nullptr);
diff --git a/src/rgw/driver/rados/rgw_tools.cc b/src/rgw/driver/rados/rgw_tools.cc
index f5cd193d815..bf7a309e864 100644
--- a/src/rgw/driver/rados/rgw_tools.cc
+++ b/src/rgw/driver/rados/rgw_tools.cc
@@ -155,7 +155,7 @@ int rgw_put_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
int rgw_stat_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
const rgw_pool& pool, const std::string& key,
RGWObjVersionTracker *objv_tracker,
- real_time *pmtime, optional_yield y,
+ real_time *pmtime, uint64_t *psize, optional_yield y,
std::map<std::string, bufferlist> *pattrs)
{
rgw_raw_obj obj(pool, key);
@@ -163,6 +163,7 @@ int rgw_stat_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
return sysobj.rop()
.set_attrs(pattrs)
.set_last_mod(pmtime)
+ .set_obj_size(psize)
.stat(y, dpp);
}
@@ -185,7 +186,7 @@ int rgw_get_system_obj(RGWSI_SysObj* svc_sysobj, const rgw_pool& pool, const str
.read(dpp, &bl, y);
}
-int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
+int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid,
RGWObjVersionTracker *objv_tracker, optional_yield y)
{
@@ -338,21 +339,35 @@ int rgw_list_pool(const DoutPrefixProvider *dpp,
ldpp_dout(dpp, 10) << "failed to parse cursor: " << marker << dendl;
return -EINVAL;
}
-
- auto iter = ioctx.nobjects_begin(oc);
+ librados::NObjectIterator iter;
+ try {
+ iter = ioctx.nobjects_begin(oc);
+ } catch (const std::system_error& e) {
+ ldpp_dout(dpp, 1) << "rgw_list_pool: Failed to begin iteration of pool "
+ << ioctx.get_pool_name() << " with error "
+ << e.what() << dendl;
+ return ceph::from_error_code(e.code());
+ }
/// Pool_iterate
if (iter == ioctx.nobjects_end())
return -ENOENT;
- for (; oids->size() < max && iter != ioctx.nobjects_end(); ++iter) {
- string oid = iter->get_oid();
- ldpp_dout(dpp, 20) << "RGWRados::pool_iterate: got " << oid << dendl;
+ try {
+ for (; oids->size() < max && iter != ioctx.nobjects_end(); ++iter) {
+ string oid = iter->get_oid();
+ ldpp_dout(dpp, 20) << "RGWRados::pool_iterate: got " << oid << dendl;
- // fill it in with initial values; we may correct later
- if (filter && !filter(oid, oid))
- continue;
+ // fill it in with initial values; we may correct later
+ if (filter && !filter(oid, oid))
+ continue;
- oids->push_back(oid);
+ oids->push_back(oid);
+ }
+ } catch (const std::system_error& e) {
+ ldpp_dout(dpp, 1) << "rgw_list_pool: Failed iterating pool "
+ << ioctx.get_pool_name() << " with error "
+ << e.what() << dendl;
+ return ceph::from_error_code(e.code());
}
marker = iter.get_cursor().to_str();
diff --git a/src/rgw/driver/rados/rgw_tools.h b/src/rgw/driver/rados/rgw_tools.h
index 016da256263..b86d280a4a3 100644
--- a/src/rgw/driver/rados/rgw_tools.h
+++ b/src/rgw/driver/rados/rgw_tools.h
@@ -76,13 +76,13 @@ int rgw_get_system_obj(RGWSI_SysObj* svc_sysobj, const rgw_pool& pool,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none,
bool raw_attrs=false);
-int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
+int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const std::string& oid,
RGWObjVersionTracker *objv_tracker, optional_yield y);
int rgw_stat_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
const rgw_pool& pool, const std::string& key,
RGWObjVersionTracker *objv_tracker,
- real_time *pmtime, optional_yield y,
+ real_time *pmtime, uint64_t *psize, optional_yield y,
std::map<std::string, bufferlist> *pattrs = nullptr);
const char *rgw_find_mime_by_ext(std::string& ext);
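
With the added out-parameter, existing callers pass nullptr for the size; a caller that wants it would look roughly like this sketch (svc_sysobj, pool, oid, y and dpp are placeholders):

  // sketch only: stat a system object, fetching both mtime and size
  real_time mtime;
  uint64_t size = 0;
  int r = rgw_stat_system_obj(dpp, svc_sysobj, pool, oid,
                              nullptr /*objv_tracker*/, &mtime, &size, y,
                              nullptr /*pattrs*/);
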
diff --git a/src/rgw/driver/rados/rgw_user.cc b/src/rgw/driver/rados/rgw_user.cc
index 94a18ffcbab..894d8e40950 100644
--- a/src/rgw/driver/rados/rgw_user.cc
+++ b/src/rgw/driver/rados/rgw_user.cc
@@ -1755,7 +1755,11 @@ int RGWUser::execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_
user_info.display_name = display_name;
user_info.type = TYPE_RGW;
- // tenant must not look like a valid account id
+ // user/tenant must not look like a valid account id
+ if (rgw::account::validate_id(uid.id)) {
+ set_err_msg(err_msg, "uid must not be formatted as an account id");
+ return -EINVAL;
+ }
if (rgw::account::validate_id(uid.tenant)) {
set_err_msg(err_msg, "tenant must not be formatted as an account id");
return -EINVAL;
diff --git a/src/rgw/driver/rados/rgw_user.h b/src/rgw/driver/rados/rgw_user.h
index ab157f38e39..4ae7d13eff7 100644
--- a/src/rgw/driver/rados/rgw_user.h
+++ b/src/rgw/driver/rados/rgw_user.h
@@ -19,11 +19,11 @@
#define RGW_USER_ANON_ID "anonymous"
-#define SECRET_KEY_LEN 40
-#define PUBLIC_ID_LEN 20
-#define RAND_SUBUSER_LEN 5
+constexpr auto SECRET_KEY_LEN = 40;
+constexpr auto PUBLIC_ID_LEN = 20;
+constexpr auto RAND_SUBUSER_LEN = 5;
-#define XMLNS_AWS_S3 "http://s3.amazonaws.com/doc/2006-03-01/"
+constexpr auto XMLNS_AWS_S3 = "http://s3.amazonaws.com/doc/2006-03-01/";
class RGWUserCtl;
class RGWBucketCtl;
diff --git a/src/rgw/driver/rados/rgw_zone.h b/src/rgw/driver/rados/rgw_zone.h
index c542abc76d6..5fb2b4b8096 100644
--- a/src/rgw/driver/rados/rgw_zone.h
+++ b/src/rgw/driver/rados/rgw_zone.h
@@ -769,7 +769,6 @@ public:
int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true);
int delete_obj(const DoutPrefixProvider *dpp, optional_yield y);
int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
- int add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y);
void fork();
int update(const DoutPrefixProvider *dpp, optional_yield y);
diff --git a/src/rgw/rgw_orphan.cc b/src/rgw/radosgw-admin/orphan.cc
index b7dc562c721..9fca3b99a7c 100644
--- a/src/rgw/rgw_orphan.cc
+++ b/src/rgw/radosgw-admin/orphan.cc
@@ -1,6 +1,12 @@
+
+/*
+ * Copyright (C) 2024 IBM
+ */
+
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
+#include "radosgw-admin/orphan.h"
#include <string>
@@ -10,7 +16,6 @@
#include "rgw_op.h"
#include "rgw_multi.h"
-#include "rgw_orphan.h"
#include "rgw_zone.h"
#include "rgw_bucket.h"
#include "rgw_sal_rados.h"
diff --git a/src/rgw/rgw_orphan.h b/src/rgw/radosgw-admin/orphan.h
index db811d31d9a..db811d31d9a 100644
--- a/src/rgw/rgw_orphan.h
+++ b/src/rgw/radosgw-admin/orphan.h
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/radosgw-admin/radosgw-admin.cc
index b00dfaa1ec5..47b68d3f902 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/radosgw-admin/radosgw-admin.cc
@@ -1,12 +1,15 @@
+/*
+ * Copyright (C) 2025 IBM
+ */
+
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
-#include <errno.h>
-#include <iostream>
-#include <sstream>
+#include <cerrno>
#include <string>
-
-#include <boost/optional.hpp>
+#include <sstream>
+#include <optional>
+#include <iostream>
extern "C" {
#include <liboath/oath.h>
@@ -38,6 +41,9 @@ extern "C" {
#include "include/utime.h"
#include "include/str_list.h"
+#include "radosgw-admin/orphan.h"
+#include "radosgw-admin/sync_checkpoint.h"
+
#include "rgw_user.h"
#include "rgw_otp.h"
#include "rgw_rados.h"
@@ -48,7 +54,6 @@ extern "C" {
#include "rgw_log.h"
#include "rgw_formats.h"
#include "rgw_usage.h"
-#include "rgw_orphan.h"
#include "rgw_sync.h"
#include "rgw_trim_bilog.h"
#include "rgw_trim_datalog.h"
@@ -62,12 +67,12 @@ extern "C" {
#include "rgw_zone.h"
#include "rgw_pubsub.h"
#include "rgw_bucket_sync.h"
-#include "rgw_sync_checkpoint.h"
#include "rgw_lua.h"
#include "rgw_sal.h"
#include "rgw_sal_config.h"
#include "rgw_data_access.h"
#include "rgw_account.h"
+#include "rgw_bucket_logging.h"
#include "services/svc_sync_modules.h"
#include "services/svc_cls.h"
@@ -81,11 +86,6 @@ extern "C" {
#define dout_context g_ceph_context
-#define SECRET_KEY_LEN 40
-#define PUBLIC_ID_LEN 20
-
-using namespace std;
-
static rgw::sal::Driver* driver = NULL;
static constexpr auto dout_subsys = ceph_subsys_rgw;
@@ -116,19 +116,13 @@ static const DoutPrefixProvider* dpp() {
} \
} while (0)
-static inline int posix_errortrans(int r)
+using namespace std;
+
+inline int posix_errortrans(int r)
{
- switch(r) {
- case ERR_NO_SUCH_BUCKET:
- r = ENOENT;
- break;
- default:
- break;
- }
- return r;
+ return ERR_NO_SUCH_BUCKET == r ? ENOENT : r;
}
-
static const std::string LUA_CONTEXT_LIST("prerequest, postrequest, background, getdata, putdata");
void usage()
@@ -177,6 +171,8 @@ void usage()
cout << " bucket sync disable disable bucket sync\n";
cout << " bucket sync enable enable bucket sync\n";
cout << " bucket radoslist list rados objects backing bucket's objects\n";
+  cout << "  bucket logging flush             flush the pending log records object of the source bucket to the log bucket\n";
+  cout << "  bucket logging info              get the bucket logging configuration of a source bucket, or the list of source buckets of a log bucket\n";
cout << " bi get retrieve bucket index object entries\n";
cout << " bi put store bucket index object entries\n";
cout << " bi list list raw bucket index entries\n";
@@ -359,6 +355,7 @@ void usage()
cout << " --secret/--secret-key=<key> specify secret key\n";
cout << " --gen-access-key generate random access key (for S3)\n";
cout << " --gen-secret generate random secret key\n";
+  cout << "   --generate-key=<bool>             create the user with or without credentials (default: true)\n";
cout << " --key-type=<type> key type, options are: swift, s3\n";
cout << " --key-active=<bool> activate or deactivate a key\n";
cout << " --temp-url-key[-2]=<key> temp url key\n";
@@ -704,6 +701,8 @@ enum class OPT {
BUCKET_SHARD_OBJECTS,
BUCKET_OBJECT_SHARD,
BUCKET_RESYNC_ENCRYPTED_MULTIPART,
+ BUCKET_LOGGING_FLUSH,
+ BUCKET_LOGGING_INFO,
POLICY,
LOG_LIST,
LOG_SHOW,
@@ -942,6 +941,8 @@ static SimpleCmd::Commands all_cmds = {
{ "bucket shard object", OPT::BUCKET_SHARD_OBJECTS },
{ "bucket object shard", OPT::BUCKET_OBJECT_SHARD },
{ "bucket resync encrypted multipart", OPT::BUCKET_RESYNC_ENCRYPTED_MULTIPART },
+ { "bucket logging flush", OPT::BUCKET_LOGGING_FLUSH },
+ { "bucket logging info", OPT::BUCKET_LOGGING_INFO },
{ "policy", OPT::POLICY },
{ "log list", OPT::LOG_LIST },
{ "log show", OPT::LOG_SHOW },
@@ -1267,7 +1268,7 @@ static int read_input(const string& infile, bufferlist& bl)
}
}
-#define READ_CHUNK 8196
+  constexpr auto READ_CHUNK = 8196;
int r;
int err;
@@ -2549,35 +2550,104 @@ std::ostream& operator<<(std::ostream& out, const indented& h) {
return out << std::setw(h.w) << h.header << std::setw(1) << ' ';
}
-static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* driver, const RGWZone& zone,
+struct bucket_source_sync_info {
+ const RGWZone& _source;
+ std::string_view error;
+ std::map<int,std::string> shards_behind;
+ int total_shards;
+ std::string_view status;
+ rgw_bucket bucket_source;
+
+ bucket_source_sync_info(const RGWZone& source): _source(source) {}
+
+ void _print_plaintext(std::ostream& out, int width) const {
+ out << indented{width, "source zone"} << _source.id << " (" << _source.name << ")" << std::endl;
+ if (!error.empty()) {
+ out << indented{width} << error << std::endl;
+ return;
+ }
+ out << indented{width, "source bucket"} << bucket_source << std::endl;
+ if (!status.empty()) {
+ out << indented{width} << status << std::endl;
+ return;
+ }
+ out << indented{width} << "incremental sync on " << total_shards << " shards\n";
+ if (!shards_behind.empty()) {
+ out << indented{width} << "bucket is behind on " << shards_behind.size() << " shards\n";
+ set<int> shard_ids;
+ for (auto const& [shard_id, _] : shards_behind) {
+ shard_ids.insert(shard_id);
+ }
+ out << indented{width} << "behind shards: [" << shard_ids << "]\n";
+ } else {
+ out << indented{width} << "bucket is caught up with source\n";
+ }
+ }
+
+ void _print_formatter(std::ostream& out, Formatter* formatter) const {
+ formatter->open_object_section("source");
+ formatter->dump_string("source_zone", _source.id);
+ formatter->dump_string("source_name", _source.name);
+
+ if (!error.empty()) {
+ formatter->dump_string("error", error);
+ formatter->close_section();
+ formatter->flush(out);
+ return;
+ }
+
+ formatter->dump_string("source_bucket", bucket_source.name);
+ formatter->dump_string("source_bucket_id", bucket_source.bucket_id);
+
+ if (!status.empty()) {
+ formatter->dump_string("status", status);
+ formatter->close_section();
+ formatter->flush(out);
+ return;
+ }
+
+ formatter->dump_int("total_shards", total_shards);
+ formatter->open_array_section("behind_shards");
+ for (auto const& [id, marker] : shards_behind) {
+ formatter->open_object_section("shard");
+ formatter->dump_int("shard_id", id);
+ formatter->dump_string("shard_marker", marker);
+ formatter->close_section();
+ }
+ formatter->close_section();
+ formatter->close_section();
+ formatter->flush(out);
+ }
+};
+
+static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* driver,
+ const RGWZone& zone,
const RGWZone& source, RGWRESTConn *conn,
const RGWBucketInfo& bucket_info,
rgw_sync_bucket_pipe pipe,
- int width, std::ostream& out)
+ bucket_source_sync_info& source_sync_info)
{
- out << indented{width, "source zone"} << source.id << " (" << source.name << ")" << std::endl;
-
// syncing from this zone?
if (!driver->svc()->zone->zone_syncs_from(zone, source)) {
- out << indented{width} << "does not sync from zone\n";
+ source_sync_info.error = "does not sync from zone";
return 0;
}
if (!pipe.source.bucket) {
- ldpp_dout(dpp, -1) << __func__ << "(): missing source bucket" << dendl;
+ source_sync_info.error = fmt::format("{} (): missing source bucket", __func__);
return -EINVAL;
}
std::unique_ptr<rgw::sal::Bucket> source_bucket;
int r = init_bucket(*pipe.source.bucket, &source_bucket);
if (r < 0) {
- ldpp_dout(dpp, -1) << "failed to read source bucket info: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read source bucket info: {}", cpp_strerror(r));
return r;
}
- out << indented{width, "source bucket"} << source_bucket->get_key() << std::endl;
- pipe.source.bucket = source_bucket->get_key();
+ source_sync_info.bucket_source = source_bucket->get_key();
+ pipe.source.bucket = source_bucket->get_key();
pipe.dest.bucket = bucket_info.bucket;
uint64_t gen = 0;
@@ -2588,15 +2658,15 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
r = rgw_read_bucket_full_sync_status(dpp, driver, pipe, &full_status, null_yield);
if (r >= 0) {
if (full_status.state == BucketSyncState::Init) {
- out << indented{width} << "init: bucket sync has not started\n";
+ source_sync_info.status = "init: bucket sync has not started";
return 0;
}
if (full_status.state == BucketSyncState::Stopped) {
- out << indented{width} << "stopped: bucket sync is disabled\n";
+ source_sync_info.status = "stopped: bucket sync is disabled";
return 0;
}
if (full_status.state == BucketSyncState::Full) {
- out << indented{width} << "full sync: " << full_status.full.count << " objects completed\n";
+ source_sync_info.status = fmt::format("full sync: {} objects completed", full_status.full.count);
return 0;
}
gen = full_status.incremental_gen;
@@ -2605,46 +2675,45 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
// no full status, but there may be per-shard status from before upgrade
const auto& logs = source_bucket->get_info().layout.logs;
if (logs.empty()) {
- out << indented{width} << "init: bucket sync has not started\n";
+ source_sync_info.status = "init: bucket sync has not started";
return 0;
}
const auto& log = logs.front();
if (log.gen > 0) {
// this isn't the backward-compatible case, so we just haven't started yet
- out << indented{width} << "init: bucket sync has not started\n";
+ source_sync_info.status = "init: bucket sync has not started";
return 0;
}
if (log.layout.type != rgw::BucketLogType::InIndex) {
- ldpp_dout(dpp, -1) << "unrecognized log layout type " << log.layout.type << dendl;
+ source_sync_info.error = fmt::format("unrecognized log layout type {}", to_string(log.layout.type));
return -EINVAL;
}
// use shard count from our log gen=0
shard_status.resize(rgw::num_shards(log.layout.in_index));
} else {
- lderr(driver->ctx()) << "failed to read bucket full sync status: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read bucket full sync status: {}", cpp_strerror(r));
return r;
}
r = rgw_read_bucket_inc_sync_status(dpp, driver, pipe, gen, &shard_status);
if (r < 0) {
- lderr(driver->ctx()) << "failed to read bucket incremental sync status: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read bucket incremental sync status: {}", cpp_strerror(r));
return r;
}
const int total_shards = shard_status.size();
-
- out << indented{width} << "incremental sync on " << total_shards << " shards\n";
+ source_sync_info.total_shards = total_shards;
rgw_bucket_index_marker_info remote_info;
BucketIndexShardsManager remote_markers;
r = rgw_read_remote_bilog_info(dpp, conn, source_bucket->get_key(),
remote_info, remote_markers, null_yield);
if (r < 0) {
- ldpp_dout(dpp, -1) << "failed to read remote log: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read remote log: {}", cpp_strerror(r));
return r;
}
- std::set<int> shards_behind;
+ std::map<int, std::string> shards_behind;
for (const auto& r : remote_markers.get()) {
auto shard_id = r.first;
if (r.second.empty()) {
@@ -2652,21 +2721,17 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
}
if (shard_id >= total_shards) {
// unexpected shard id. we don't have status for it, so we're behind
- shards_behind.insert(shard_id);
+ shards_behind[shard_id] = r.second;
continue;
}
auto& m = shard_status[shard_id];
const auto pos = BucketIndexShardsManager::get_shard_marker(m.inc_marker.position);
if (pos < r.second) {
- shards_behind.insert(shard_id);
+ shards_behind[shard_id] = r.second;
}
}
- if (!shards_behind.empty()) {
- out << indented{width} << "bucket is behind on " << shards_behind.size() << " shards\n";
- out << indented{width} << "behind shards: [" << shards_behind << "]\n";
- } else {
- out << indented{width} << "bucket is caught up with source\n";
- }
+
+ source_sync_info.shards_behind = std::move(shards_behind);
return 0;
}
@@ -2877,25 +2942,82 @@ static int bucket_sync_info(rgw::sal::Driver* driver, const RGWBucketInfo& info,
return 0;
}
+struct bucket_sync_status_info {
+ std::vector<bucket_source_sync_info> source_status_info;
+ rgw::sal::Zone* _zone;
+ const rgw::sal::ZoneGroup* _zonegroup;
+ const RGWBucketInfo& _bucket_info;
+ const int width = 15;
+ std::string error;
+
+ bucket_sync_status_info(const RGWBucketInfo& bucket_info): _bucket_info(bucket_info) {}
+
+ void print(std::ostream& out, bool use_formatter, Formatter* formatter) {
+ if (use_formatter) {
+ _print_formatter(out, formatter);
+ } else {
+ _print_plaintext(out);
+ }
+ }
+
+ void _print_plaintext(std::ostream& out) {
+ out << indented{width, "realm"} << _zone->get_realm_id() << " (" << _zone->get_realm_name() << ")" << std::endl;
+ out << indented{width, "zonegroup"} << _zonegroup->get_id() << " (" << _zonegroup->get_name() << ")" << std::endl;
+ out << indented{width, "zone"} << _zone->get_id() << " (" << _zone->get_name() << ")" << std::endl;
+ out << indented{width, "bucket"} << _bucket_info.bucket << std::endl;
+ out << indented{width, "current time"}
+ << to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms) << "\n\n";
+
+    if (!error.empty()) {
+ out << error << std::endl;
+ }
+
+ for (const auto &info : source_status_info) {
+ info._print_plaintext(out, width);
+ }
+ }
+
+ void _print_formatter(std::ostream& out, Formatter* formatter) {
+ formatter->open_object_section("test");
+ formatter->dump_string("realm", _zone->get_realm_id());
+ formatter->dump_string("realm_name", _zone->get_realm_name());
+ formatter->dump_string("zonegroup", _zonegroup->get_id());
+ formatter->dump_string("zonegroup_name", _zonegroup->get_name());
+ formatter->dump_string("zone", _zone->get_id());
+ formatter->dump_string("zone_name", _zone->get_name());
+ formatter->dump_string("bucket", _bucket_info.bucket.name);
+ formatter->dump_string("bucket_instance_id", _bucket_info.bucket.bucket_id);
+ formatter->dump_string("current_time", to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms));
+
+ if (!error.empty()) {
+ formatter->dump_string("error", error);
+ }
+
+ formatter->open_array_section("sources");
+ for (const auto &info : source_status_info) {
+ info._print_formatter(out, formatter);
+ }
+ formatter->close_section();
+
+ formatter->close_section();
+ formatter->flush(out);
+ }
+
+};
+
static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& info,
const rgw_zone_id& source_zone_id,
std::optional<rgw_bucket>& opt_source_bucket,
- std::ostream& out)
+ bucket_sync_status_info& bucket_sync_info)
{
const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup();
rgw::sal::Zone* zone = driver->get_zone();
- constexpr int width = 15;
-
- out << indented{width, "realm"} << zone->get_realm_id() << " (" << zone->get_realm_name() << ")\n";
- out << indented{width, "zonegroup"} << zonegroup.get_id() << " (" << zonegroup.get_name() << ")\n";
- out << indented{width, "zone"} << zone->get_id() << " (" << zone->get_name() << ")\n";
- out << indented{width, "bucket"} << info.bucket << "\n";
- out << indented{width, "current time"}
- << to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms) << "\n\n";
+ bucket_sync_info._zone = zone;
+ bucket_sync_info._zonegroup = &zonegroup;
if (!static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) {
- out << "Sync is disabled for bucket " << info.bucket.name << " or bucket has no sync sources" << std::endl;
+ bucket_sync_info.error = fmt::format("Sync is disabled for bucket {} or bucket has no sync sources", info.bucket.name);
return 0;
}
@@ -2903,7 +3025,7 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf
int r = driver->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
if (r < 0) {
- ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
+ bucket_sync_info.error = fmt::format("ERROR: failed to get policy handler for bucket ({}): r={}: {}", info.bucket.name, r, cpp_strerror(-r));
return r;
}
@@ -2916,13 +3038,12 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf
std::unique_ptr<rgw::sal::Zone> zone;
int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(source_zone_id.id, &zone);
if (ret < 0) {
- ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup "
- << zonegroup.get_name() << dendl;
+ bucket_sync_info.error = fmt::format("Source zone not found in zonegroup {}", zonegroup.get_name());
return -EINVAL;
}
auto c = zone_conn_map.find(source_zone_id);
if (c == zone_conn_map.end()) {
- ldpp_dout(dpp(), -1) << "No connection to zone " << zone->get_name() << dendl;
+ bucket_sync_info.error = fmt::format("No connection to zone {}", zone->get_name());
return -EINVAL;
}
zone_ids.insert(source_zone_id);
@@ -2953,10 +3074,15 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf
continue;
}
if (pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) {
- bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), z->second,
+ bucket_source_sync_info source_sync_info(z->second);
+ auto ret = bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), z->second,
c->second,
info, pipe,
- width, out);
+ source_sync_info);
+
+ if (ret == 0) {
+ bucket_sync_info.source_status_info.emplace_back(std::move(source_sync_info));
+ }
}
}
}
@@ -3427,6 +3553,13 @@ int main(int argc, const char **argv)
OPT opt_cmd = OPT::NO_CMD;
int gen_access_key = 0;
int gen_secret_key = 0;
+ enum generate_key_enum {
+ OPTION_SET_FALSE = 0,
+ OPTION_SET_TRUE = 1,
+ OPTION_NOT_SET = 2,
+ };
+
+ generate_key_enum generate_key = OPTION_NOT_SET;
bool set_perm = false;
bool set_temp_url_key = false;
map<int, string> temp_url_keys;
@@ -3484,6 +3617,7 @@ int main(int argc, const char **argv)
list<string> tags_rm;
int placement_inline_data = true;
bool placement_inline_data_specified = false;
+ bool format_arg_passed = false;
int64_t max_objects = -1;
int64_t max_size = -1;
@@ -3707,6 +3841,17 @@ int main(int argc, const char **argv)
cerr << "bad key type: " << key_type_str << std::endl;
exit(1);
}
+ } else if (ceph_argparse_witharg(args, i, &val, "--generate-key", (char*)NULL)) {
+ key_type_str = val;
+ if (key_type_str.compare("true") == 0) {
+ generate_key = OPTION_SET_TRUE;
+ } else if(key_type_str.compare("false") == 0) {
+ generate_key = OPTION_SET_FALSE;
+ } else {
+        cerr << "wrong value for --generate-key: '" << key_type_str << "'. please specify either true or false" << std::endl;
+ exit(1);
+ }
} else if (ceph_argparse_binary_flag(args, i, &key_active, NULL, "--key-active", (char*)NULL)) {
key_active_specified = true;
} else if (ceph_argparse_witharg(args, i, &val, "--job-id", (char*)NULL)) {
@@ -3863,6 +4008,7 @@ int main(int argc, const char **argv)
new_bucket_name = val;
} else if (ceph_argparse_witharg(args, i, &val, "--format", (char*)NULL)) {
format = val;
+ format_arg_passed = true;
} else if (ceph_argparse_witharg(args, i, &val, "--categories", (char*)NULL)) {
string cat_str = val;
list<string> cat_list;
@@ -4469,14 +4615,21 @@ int main(int argc, const char **argv)
}
/* check key parameter conflict */
- if ((!access_key.empty()) && gen_access_key) {
- cerr << "ERROR: key parameter conflict, --access-key & --gen-access-key" << std::endl;
+ if ((!access_key.empty()) && (gen_access_key || generate_key == OPTION_SET_TRUE)) {
+    cerr << "ERROR: key parameter conflict, --access-key & --gen-access-key/--generate-key" << std::endl;
+ return EINVAL;
+ }
+ if ((!secret_key.empty()) && (gen_secret_key || generate_key == OPTION_SET_TRUE)) {
+    cerr << "ERROR: key parameter conflict, --secret & --gen-secret/--generate-key" << std::endl;
return EINVAL;
}
- if ((!secret_key.empty()) && gen_secret_key) {
- cerr << "ERROR: key parameter conflict, --secret & --gen-secret" << std::endl;
+ if (generate_key == OPTION_SET_FALSE) {
+ if ((!access_key.empty()) || gen_access_key || (!secret_key.empty()) || gen_secret_key) {
+      cerr << "ERROR: key parameter conflict, when --generate-key is false no other key parameters can be set" << std::endl;
return EINVAL;
+ }
}
+
}
// default to pretty json
@@ -6641,7 +6794,7 @@ int main(int argc, const char **argv)
}
break;
case OPT::USER_CREATE:
- if (!user_op.has_existing_user()) {
+ if (!user_op.has_existing_user() && (generate_key != OPTION_SET_FALSE)) {
user_op.set_generate_key(); // generate a new key by default
}
ret = ruser.add(dpp(), user_op, null_yield, &err_msg);
@@ -7552,6 +7705,95 @@ int main(int argc, const char **argv)
}
}
+ if (opt_cmd == OPT::BUCKET_LOGGING_FLUSH) {
+ if (bucket_name.empty()) {
+ cerr << "ERROR: bucket not specified" << std::endl;
+ return EINVAL;
+ }
+ int ret = init_bucket(tenant, bucket_name, bucket_id, &bucket);
+ if (ret < 0) {
+ return -ret;
+ }
+ const auto& bucket_attrs = bucket->get_attrs();
+ auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING);
+ if (iter == bucket_attrs.end()) {
+ cerr << "WARNING: no logging configured on bucket" << std::endl;
+ return 0;
+ }
+ rgw::bucketlogging::configuration configuration;
+ try {
+ configuration.enabled = true;
+ decode(configuration, iter->second);
+ } catch (buffer::error& err) {
+ cerr << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "'. error: " << err.what() << std::endl;
+ return EINVAL;
+ }
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ ret = init_bucket(tenant, configuration.target_bucket, "", &target_bucket);
+ if (ret < 0) {
+ cerr << "ERROR: failed to get target logging bucket '" << configuration.target_bucket << "'" << std::endl;
+ return -ret;
+ }
+ std::string obj_name;
+ RGWObjVersionTracker objv_tracker;
+ ret = target_bucket->get_logging_object_name(obj_name, configuration.target_prefix, null_yield, dpp(), &objv_tracker);
+ if (ret < 0) {
+ cerr << "ERROR: failed to get pending logging object name from target bucket '" << configuration.target_bucket << "'" << std::endl;
+ return -ret;
+ }
+ ret = rgw::bucketlogging::rollover_logging_object(configuration, target_bucket, obj_name, dpp(), null_yield, true, &objv_tracker);
+ if (ret < 0) {
+ cerr << "ERROR: failed to flush pending logging object '" << obj_name
+ << "' to target bucket '" << configuration.target_bucket << "'" << std::endl;
+ return -ret;
+ }
+ cout << "flushed pending logging object '" << obj_name
+ << "' to target bucket '" << configuration.target_bucket << "'" << std::endl;
+ return 0;
+ }
+
+ if (opt_cmd == OPT::BUCKET_LOGGING_INFO) {
+ if (bucket_name.empty()) {
+ cerr << "ERROR: bucket not specified" << std::endl;
+ return EINVAL;
+ }
+ int ret = init_bucket(tenant, bucket_name, bucket_id, &bucket);
+ if (ret < 0) {
+ return -ret;
+ }
+ const auto& bucket_attrs = bucket->get_attrs();
+ auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING);
+ if (iter != bucket_attrs.end()) {
+ rgw::bucketlogging::configuration configuration;
+ try {
+ configuration.enabled = true;
+ decode(configuration, iter->second);
+ } catch (buffer::error& err) {
+ cerr << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "'. error: " << err.what() << std::endl;
+ return EINVAL;
+ }
+ encode_json("logging", configuration, formatter.get());
+ formatter->flush(cout);
+ }
+ iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING_SOURCES);
+ if (iter != bucket_attrs.end()) {
+ rgw::bucketlogging::source_buckets sources;
+ try {
+ decode(sources, iter->second);
+ } catch (buffer::error& err) {
+ cerr << "ERROR: failed to decode logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES
+ << "'. error: " << err.what() << std::endl;
+ return EINVAL;
+ }
+ encode_json("logging_sources", sources, formatter.get());
+ formatter->flush(cout);
+ }
+
+ return 0;
+ }
+
if (opt_cmd == OPT::LOG_LIST) {
// filter by date?
if (date.size() && date.size() != 10) {
@@ -8623,6 +8865,10 @@ next:
handled = decode_dump<uint64_t>("pg_ver", bl, formatter.get());
} else if (iter->first == RGW_ATTR_SOURCE_ZONE) {
handled = decode_dump<uint32_t>("source_zone", bl, formatter.get());
+ } else if (iter->first == RGW_ATTR_RESTORE_EXPIRY_DATE) {
+ handled = decode_dump<utime_t>("restore_expiry_date", bl, formatter.get());
+ } else if (iter->first == RGW_ATTR_RESTORE_TIME) {
+ handled = decode_dump<utime_t>("restore_time", bl, formatter.get());
}
if (!handled)
@@ -9845,7 +10091,18 @@ next:
if (ret < 0) {
return -ret;
}
- bucket_sync_status(driver, bucket->get_info(), source_zone, opt_source_bucket, std::cout);
+
+ auto bucket_info = bucket->get_info();
+ bucket_sync_status_info bucket_sync_info(bucket_info);
+
+ ret = bucket_sync_status(driver, bucket_info, source_zone,
+ opt_source_bucket, bucket_sync_info);
+
+ if (ret == 0) {
+ bucket_sync_info.print(std::cout, format_arg_passed, formatter.get());
+ } else {
+ cerr << "failed to get bucket sync status. see logs for more info" << std::endl;
+ }
}
if (opt_cmd == OPT::BUCKET_SYNC_MARKERS) {
@@ -10335,7 +10592,8 @@ next:
if (!rgw::sal::User::empty(user)) {
pipe->params.user = user->get_id();
- } else if (pipe->params.mode == rgw_sync_pipe_params::MODE_USER) {
+ } else if (pipe->params.mode == rgw_sync_pipe_params::MODE_USER &&
+ pipe->params.user.empty()) {
cerr << "ERROR: missing --uid for --mode=user" << std::endl;
return EINVAL;
}
diff --git a/src/rgw/rgw_sync_checkpoint.cc b/src/rgw/radosgw-admin/sync_checkpoint.cc
index 1172e79a48f..0303ed6c747 100644
--- a/src/rgw/rgw_sync_checkpoint.cc
+++ b/src/rgw/radosgw-admin/sync_checkpoint.cc
@@ -5,6 +5,7 @@
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
+ * Copyright (C) 2024 IBM
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -13,9 +14,12 @@
*
*/
+#include "radosgw-admin/sync_checkpoint.h"
+
#include <fmt/format.h>
+
#include "common/errno.h"
-#include "rgw_sync_checkpoint.h"
+
#include "rgw_sal_rados.h"
#include "rgw_bucket_sync.h"
#include "rgw_data_sync.h"
diff --git a/src/rgw/rgw_sync_checkpoint.h b/src/rgw/radosgw-admin/sync_checkpoint.h
index 28df68d8860..28df68d8860 100644
--- a/src/rgw/rgw_sync_checkpoint.h
+++ b/src/rgw/radosgw-admin/sync_checkpoint.h
diff --git a/src/rgw/rgw_amqp.cc b/src/rgw/rgw_amqp.cc
index 7504d47c6c9..5bc5d173c73 100644
--- a/src/rgw/rgw_amqp.cc
+++ b/src/rgw/rgw_amqp.cc
@@ -650,6 +650,9 @@ private:
// (4) TODO reconnect on connection errors
// (5) TODO cleanup timedout callbacks
void run() noexcept {
+ // give the runner thread a name for easier debugging
+ ceph_pthread_setname("amqp_manager");
+
amqp_frame_t frame;
while (!stopped) {
@@ -838,12 +841,6 @@ public:
// This is to prevent rehashing so that iterators are not invalidated
// when a new connection is added.
connections.max_load_factor(10.0);
- // give the runner thread a name for easier debugging
- const char* thread_name = "amqp_manager";
- if (const auto rc = ceph_pthread_setname(runner.native_handle(), thread_name); rc != 0) {
- ldout(cct, 1) << "ERROR: failed to set amqp manager thread name to: " << thread_name
- << ". error: " << rc << dendl;
- }
}
// non copyable
diff --git a/src/rgw/rgw_asio_frontend.cc b/src/rgw/rgw_asio_frontend.cc
index 30e1e77fd15..ebe42d96de9 100644
--- a/src/rgw/rgw_asio_frontend.cc
+++ b/src/rgw/rgw_asio_frontend.cc
@@ -1194,8 +1194,11 @@ void AsioFrontend::pause()
l.signal.emit(boost::asio::cancellation_type::terminal);
}
- // close all connections so outstanding requests fail quickly
- connections.close(ec);
+ const bool graceful_stop{ g_ceph_context->_conf->rgw_graceful_stop };
+ if (!graceful_stop) {
+ // close all connections so outstanding requests fail quickly
+ connections.close(ec);
+ }
// pause and wait until outstanding requests complete
pause_mutex.lock(ec);
diff --git a/src/rgw/rgw_auth.cc b/src/rgw/rgw_auth.cc
index ac1ed8b75d6..a0b494eb9c5 100644
--- a/src/rgw/rgw_auth.cc
+++ b/src/rgw/rgw_auth.cc
@@ -188,7 +188,8 @@ int load_account_and_policies(const DoutPrefixProvider* dpp,
static auto transform_old_authinfo(const RGWUserInfo& user,
std::optional<RGWAccountInfo> account,
- std::vector<IAM::Policy> policies)
+ std::vector<IAM::Policy> policies,
+ sal::Driver* driver)
-> std::unique_ptr<rgw::auth::Identity>
{
/* This class is not intended for public use. Should be removed altogether
@@ -198,6 +199,7 @@ static auto transform_old_authinfo(const RGWUserInfo& user,
/* For this particular case it's OK to use rgw_user structure to convey
* the identity info as this was the policy for doing that before the
* new auth. */
+ sal::Driver* driver;
const rgw_user id;
const std::string display_name;
const std::string path;
@@ -208,8 +210,10 @@ static auto transform_old_authinfo(const RGWUserInfo& user,
public:
DummyIdentityApplier(const RGWUserInfo& user,
std::optional<RGWAccountInfo> account,
- std::vector<IAM::Policy> policies)
- : id(user.user_id),
+ std::vector<IAM::Policy> policies,
+ sal::Driver* driver)
+ : driver(driver),
+ id(user.user_id),
display_name(user.display_name),
path(user.path),
is_admin(user.admin),
@@ -294,9 +298,9 @@ static auto transform_old_authinfo(const RGWUserInfo& user,
<< ", is_admin=" << is_admin << ")";
}
- void load_acct_info(const DoutPrefixProvider* dpp,
- RGWUserInfo& user_info) const override {
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override {
// noop, this user info was passed in on construction
+ return driver->get_user(id);
}
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const {
@@ -307,13 +311,14 @@ static auto transform_old_authinfo(const RGWUserInfo& user,
};
return std::make_unique<DummyIdentityApplier>(
- user, std::move(account), std::move(policies));
+ user, std::move(account), std::move(policies), driver);
}
auto transform_old_authinfo(const DoutPrefixProvider* dpp,
optional_yield y,
sal::Driver* driver,
- sal::User* user)
+ sal::User* user,
+ std::vector<IAM::Policy>* policies_)
-> tl::expected<std::unique_ptr<Identity>, int>
{
const RGWUserInfo& info = user->get_info();
@@ -328,7 +333,10 @@ auto transform_old_authinfo(const DoutPrefixProvider* dpp,
return tl::unexpected(r);
}
- return transform_old_authinfo(info, std::move(account), std::move(policies));
+ if (policies_) { // return policies to caller if requested
+ *policies_ = policies;
+ }
+ return transform_old_authinfo(info, std::move(account), std::move(policies), driver);
}
} /* namespace auth */
@@ -377,7 +385,7 @@ strategy_handle_rejected(rgw::auth::Engine::result_t&& engine_result,
case Control::FALLBACK:
/* Don't try next. */
- return std::make_pair(false, std::move(engine_result));
+ return std::make_pair(false, std::move(strategy_result));
default:
/* Huh, memory corruption? */
@@ -523,7 +531,7 @@ rgw::auth::Strategy::apply(const DoutPrefixProvider *dpp, const rgw::auth::Strat
/* Account used by a given RGWOp is decoupled from identity employed
* in the authorization phase (RGWOp::verify_permissions). */
- applier->load_acct_info(dpp, s->user->get_info());
+ s->user = applier->load_acct_info(dpp);
s->perm_mask = applier->get_perm_mask();
/* This is the single place where we pass req_state as a pointer
@@ -631,36 +639,36 @@ void rgw::auth::WebIdentityApplier::create_account(const DoutPrefixProvider* dpp
user_info = user->get_info();
}
-void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const {
+auto rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> {
rgw_user federated_user;
federated_user.id = this->sub;
federated_user.tenant = role_tenant;
federated_user.ns = "oidc";
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(federated_user);
if (account) {
// we don't need shadow users for account roles because bucket ownership,
// quota, and stats are tracked by the account instead of the user
- user_info.user_id = std::move(federated_user);
+ RGWUserInfo& user_info = user->get_info();
user_info.display_name = user_name;
user_info.type = TYPE_WEB;
- return;
+ // the user_info.user_id is initialized by driver->get_user(...)
+ return user;
}
- std::unique_ptr<rgw::sal::User> user = driver->get_user(federated_user);
-
//Check in oidc namespace
if (user->load_user(dpp, null_yield) >= 0) {
/* Succeeded. */
- user_info = user->get_info();
- return;
+ // the user_info in user is initialized by user->load_user(...)
+ return user;
}
user->clear_ns();
//Check for old users which wouldn't have been created in oidc namespace
if (user->load_user(dpp, null_yield) >= 0) {
/* Succeeded. */
- user_info = user->get_info();
- return;
+ // the user_info in user is initialized by user->load_user(...)
+ return user;
}
//Check if user_id.buckets already exists, may have been from the time, when shadow users didnt exist
@@ -671,7 +679,7 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp
last_synced, last_updated);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: reading stats for the user returned error " << ret << dendl;
- return;
+ return user;
}
if (ret == -ENOENT) { /* in case of ENOENT, which means user doesnt have buckets */
//In this case user will be created in oidc namespace
@@ -684,7 +692,8 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp
}
ldpp_dout(dpp, 0) << "NOTICE: couldn't map oidc federated user " << federated_user << dendl;
- create_account(dpp, federated_user, this->user_name, user_info);
+ create_account(dpp, federated_user, this->user_name, user->get_info());
+ return user;
}
void rgw::auth::WebIdentityApplier::modify_request_state(const DoutPrefixProvider *dpp, req_state* s) const
@@ -936,7 +945,7 @@ void rgw::auth::RemoteApplier::write_ops_log_entry(rgw_log_entry& entry) const
}
/* TODO(rzarzynski): we need to handle display_name changes. */
-void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const /* out */
+auto rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> /* out */
{
/* It's supposed that RGWRemoteAuthApplier tries to load account info
* that belongs to the authenticated identity. Another policy may be
@@ -975,9 +984,9 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW
(void) load_account_and_policies(dpp, null_yield, driver, user->get_info(),
user->get_attrs(), account, policies);
- user_info = std::move(user->get_info());
owner_acct_user = std::move(tenanted_uid);
- return;
+ // the user_info in user is initialized by user->load_user(...)
+ return user;
}
}
@@ -990,15 +999,16 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW
(void) load_account_and_policies(dpp, null_yield, driver, user->get_info(),
user->get_attrs(), account, policies);
- user_info = std::move(user->get_info());
owner_acct_user = acct_user;
- return;
+ // the user_info in user is initialized by user->load_user(...)
+ return user;
}
ldpp_dout(dpp, 0) << "NOTICE: couldn't map swift user " << acct_user << dendl;
- create_account(dpp, acct_user, implicit_tenant, user_info);
+ create_account(dpp, acct_user, implicit_tenant, user->get_info());
/* Succeeded if we are here (create_account() hasn't throwed). */
+ return user;
}
void rgw::auth::RemoteApplier::modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const
@@ -1098,11 +1108,11 @@ uint32_t rgw::auth::LocalApplier::get_perm_mask(const std::string& subuser_name,
}
}
-void rgw::auth::LocalApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const /* out */
+auto rgw::auth::LocalApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> /* out */
{
/* Load the account that belongs to the authenticated identity. An extra call
* to RADOS may be safely skipped in this case. */
- user_info = this->user_info;
+ return std::unique_ptr<rgw::sal::User>(user.release());
}
void rgw::auth::LocalApplier::modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const
@@ -1121,6 +1131,22 @@ void rgw::auth::LocalApplier::write_ops_log_entry(rgw_log_entry& entry) const
}
}
+rgw::auth::LocalApplier::LocalApplier(CephContext* const cct,
+ std::unique_ptr<rgw::sal::User> user,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies,
+ std::string subuser,
+ const std::optional<uint32_t>& perm_mask,
+ const std::string access_key_id)
+ : user_info(user->get_info()),
+ user(std::move(user)),
+ account(std::move(account)),
+ policies(std::move(policies)),
+ subuser(std::move(subuser)),
+ perm_mask(perm_mask.value_or(RGW_PERM_INVALID)),
+ access_key_id(access_key_id) {
+}
+
ACLOwner rgw::auth::RoleApplier::get_aclowner() const
{
ACLOwner owner;
@@ -1183,10 +1209,11 @@ bool rgw::auth::RoleApplier::is_identity(const Principal& p) const {
return false;
}
-void rgw::auth::RoleApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const /* out */
+auto rgw::auth::RoleApplier::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> /* out */
{
/* Load the user id */
- user_info.user_id = this->token_attrs.user_id;
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(this->token_attrs.user_id);
+ return user;
}
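
Under the reworked contract, load_acct_info() hands back an owned sal::User instead of filling a caller-provided RGWUserInfo, and Strategy::apply() (see above) assigns it straight to s->user. A minimal sketch of an applier written against that contract (MyApplier, driver and id are placeholders):

  // sketch only: an applier returns the user it authenticated
  auto MyApplier::load_acct_info(const DoutPrefixProvider* dpp) const
      -> std::unique_ptr<rgw::sal::User> {
    return driver->get_user(id); // the caller stores this on the request state
  }
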
void rgw::auth::RoleApplier::write_ops_log_entry(rgw_log_entry& entry) const
@@ -1267,9 +1294,10 @@ rgw::auth::AnonymousEngine::authenticate(const DoutPrefixProvider* dpp, const re
} else {
RGWUserInfo user_info;
rgw_get_anon_user(user_info);
-
+ std::unique_ptr<rgw::sal::User> user = s->user->clone();
+ user->get_info() = user_info;
auto apl = \
- apl_factory->create_apl_local(cct, s, user_info, std::nullopt, {},
+ apl_factory->create_apl_local(cct, s, std::move(user), std::nullopt, {},
rgw::auth::LocalApplier::NO_SUBUSER,
std::nullopt, rgw::auth::LocalApplier::NO_ACCESS_KEY);
return result_t::grant(std::move(apl));
diff --git a/src/rgw/rgw_auth.h b/src/rgw/rgw_auth.h
index f3edbbab845..22b0816bac9 100644
--- a/src/rgw/rgw_auth.h
+++ b/src/rgw/rgw_auth.h
@@ -105,7 +105,8 @@ inline std::ostream& operator<<(std::ostream& out,
auto transform_old_authinfo(const DoutPrefixProvider* dpp,
optional_yield y,
sal::Driver* driver,
- sal::User* user)
+ sal::User* user,
+ std::vector<IAM::Policy>* policies_ = nullptr)
-> tl::expected<std::unique_ptr<Identity>, int>;
// Load the user account and all user/group policies. May throw
@@ -139,7 +140,7 @@ public:
*
* XXX: be aware that the "account" term refers to rgw_user. The naming
* is legacy. */
- virtual void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const = 0; /* out */
+ virtual auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> = 0; /* out */
/* Apply any changes to request state. This method will be most useful for
* TempURL of Swift API. */
@@ -484,7 +485,7 @@ public:
bool is_identity(const Principal& p) const override;
- void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override;
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override;
uint32_t get_identity_type() const override {
return TYPE_WEB;
@@ -656,7 +657,7 @@ public:
uint32_t get_perm_mask() const override { return info.perm_mask; }
void to_str(std::ostream& out) const override;
- void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override;
void write_ops_log_entry(rgw_log_entry& entry) const override;
uint32_t get_identity_type() const override { return info.acct_type; }
@@ -683,7 +684,7 @@ public:
/* rgw::auth::LocalApplier targets those auth engines that base on the data
- * enclosed in the RGWUserInfo control structure. As a side effect of doing
+ * enclosed in the rgw::sal::User's RGWUserInfo control structure. As a side effect of doing
* the authentication process, they must have it loaded. Leveraging this is
* a way to avoid unnecessary calls to underlying RADOS store. */
class LocalApplier : public IdentityApplier {
@@ -691,6 +692,7 @@ class LocalApplier : public IdentityApplier {
protected:
const RGWUserInfo user_info;
+ mutable std::unique_ptr<rgw::sal::User> user;
const std::optional<RGWAccountInfo> account;
const std::vector<IAM::Policy> policies;
const std::string subuser;
@@ -705,19 +707,12 @@ public:
static const std::string NO_ACCESS_KEY;
LocalApplier(CephContext* const cct,
- const RGWUserInfo& user_info,
+ std::unique_ptr<rgw::sal::User> user,
std::optional<RGWAccountInfo> account,
std::vector<IAM::Policy> policies,
std::string subuser,
const std::optional<uint32_t>& perm_mask,
- const std::string access_key_id)
- : user_info(user_info),
- account(std::move(account)),
- policies(std::move(policies)),
- subuser(std::move(subuser)),
- perm_mask(perm_mask.value_or(RGW_PERM_INVALID)),
- access_key_id(access_key_id) {
- }
+ const std::string access_key_id);
ACLOwner get_aclowner() const override;
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override;
@@ -732,7 +727,7 @@ public:
}
}
void to_str(std::ostream& out) const override;
- void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override;
uint32_t get_identity_type() const override { return user_info.type; }
std::string get_acct_name() const override { return {}; }
@@ -750,7 +745,7 @@ public:
virtual ~Factory() {}
virtual aplptr_t create_apl_local(CephContext* cct,
const req_state* s,
- const RGWUserInfo& user_info,
+ std::unique_ptr<rgw::sal::User> user,
std::optional<RGWAccountInfo> account,
std::vector<IAM::Policy> policies,
const std::string& subuser,
@@ -779,15 +774,20 @@ public:
std::vector<std::pair<std::string, std::string>> principal_tags;
};
protected:
+ CephContext* const cct;
+ rgw::sal::Driver* driver;
Role role;
TokenAttrs token_attrs;
public:
RoleApplier(CephContext* const cct,
+ rgw::sal::Driver* driver,
const Role& role,
const TokenAttrs& token_attrs)
- : role(role),
+ : cct(cct),
+ driver(driver),
+ role(role),
token_attrs(token_attrs) {}
ACLOwner get_aclowner() const override;
@@ -803,7 +803,7 @@ public:
return RGW_PERM_NONE;
}
void to_str(std::ostream& out) const override;
- void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */
uint32_t get_identity_type() const override { return TYPE_ROLE; }
std::string get_acct_name() const override { return {}; }
std::string get_subuser() const override { return {}; }
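For appliers outside this file, the new pure-virtual reduces to returning the loaded SAL user. A hypothetical implementer sketch (MyApplier, my_user_id and the stored driver pointer are assumptions, not taken from this patch):

  auto MyApplier::load_acct_info(const DoutPrefixProvider* dpp) const
    -> std::unique_ptr<rgw::sal::User>
  {
    std::unique_ptr<rgw::sal::User> user = driver->get_user(my_user_id);
    if (user->load_user(dpp, null_yield) < 0) {
      // either tolerate a missing user record, as LocalApplier-style appliers do,
      // or throw an error code to fail authentication
    }
    return user;
  }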
diff --git a/src/rgw/rgw_auth_filters.h b/src/rgw/rgw_auth_filters.h
index a93641e8b8e..7d264197c52 100644
--- a/src/rgw/rgw_auth_filters.h
+++ b/src/rgw/rgw_auth_filters.h
@@ -117,8 +117,8 @@ public:
return get_decoratee().get_account();
}
- void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override { /* out */
- return get_decoratee().load_acct_info(dpp, user_info);
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override { /* out */
+ return get_decoratee().load_acct_info(dpp);
}
void modify_request_state(const DoutPrefixProvider* dpp, req_state * s) const override { /* in/out */
@@ -152,7 +152,7 @@ public:
}
void to_str(std::ostream& out) const override;
- void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */
};
/* static declaration: UNKNOWN_ACCT will be an empty rgw_user that is a result
@@ -169,23 +169,25 @@ void ThirdPartyAccountApplier<T>::to_str(std::ostream& out) const
}
template <typename T>
-void ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const
+auto ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User>
{
+ std::unique_ptr<rgw::sal::User> luser;
if (UNKNOWN_ACCT == acct_user_override) {
/* There is no override specified by the upper layer. This means that we'll
* load the account owned by the authenticated identity (aka auth_user). */
- DecoratedApplier<T>::load_acct_info(dpp, user_info);
+ luser = DecoratedApplier<T>::load_acct_info(dpp);
} else if (DecoratedApplier<T>::is_owner_of(acct_user_override)) {
/* The override has been specified but the account belongs to the authenticated
* identity. We may safely forward the call to a next stage. */
- DecoratedApplier<T>::load_acct_info(dpp, user_info);
+ luser = DecoratedApplier<T>::load_acct_info(dpp);
} else if (this->is_anonymous()) {
/* If the user was authed by the anonymous engine then scope the ANON user
* to the correct tenant */
+ luser = driver->get_user(rgw_user(RGW_USER_ANON_ID));
if (acct_user_override.tenant.empty())
- user_info.user_id = rgw_user(acct_user_override.id, RGW_USER_ANON_ID);
+ luser->get_info().user_id = rgw_user(acct_user_override.id, RGW_USER_ANON_ID);
else
- user_info.user_id = rgw_user(acct_user_override.tenant, RGW_USER_ANON_ID);
+ luser->get_info().user_id = rgw_user(acct_user_override.tenant, RGW_USER_ANON_ID);
} else {
/* Compatibility mechanism for multi-tenancy. For more details refer to
* load_acct_info method of rgw::auth::RemoteApplier. */
@@ -196,9 +198,10 @@ void ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp,
user = driver->get_user(tenanted_uid);
if (user->load_user(dpp, null_yield) >= 0) {
- user_info = user->get_info();
+ // the user_info in luser is initialized by user->load_user(...)
+ luser = user->clone();
/* Succeeded. */
- return;
+ return luser;
}
}
@@ -213,8 +216,10 @@ void ThirdPartyAccountApplier<T>::load_acct_info(const DoutPrefixProvider* dpp,
throw ret;
}
}
- user_info = user->get_info();
+ // the user_info in luser is initialized by user->load_user(...)
+ luser = user->clone();
}
+ return luser;
}
template <typename T> static inline
@@ -248,7 +253,7 @@ public:
}
void to_str(std::ostream& out) const override;
- void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
+ auto load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User> override; /* out */
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override; /* in/out */
ACLOwner get_aclowner() const override {
@@ -271,10 +276,10 @@ void SysReqApplier<T>::to_str(std::ostream& out) const
}
template <typename T>
-void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const
+auto SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp) const -> std::unique_ptr<rgw::sal::User>
{
- DecoratedApplier<T>::load_acct_info(dpp, user_info);
- is_system = user_info.system;
+ std::unique_ptr<rgw::sal::User> user = DecoratedApplier<T>::load_acct_info(dpp);
+ is_system = user->get_info().system;
if (is_system) {
//ldpp_dout(dpp, 20) << "system request" << dendl;
@@ -285,7 +290,7 @@ void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo
effective_owner->id = parse_owner(str);
if (const auto* uid = std::get_if<rgw_user>(&effective_owner->id); uid) {
- std::unique_ptr<rgw::sal::User> user = driver->get_user(*uid);
+ user = driver->get_user(*uid);
if (user->load_user(dpp, null_yield) < 0) {
//ldpp_dout(dpp, 0) << "User lookup failed!" << dendl;
throw -EACCES;
@@ -294,14 +299,14 @@ void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo
}
}
}
+ return user;
}
template <typename T>
void SysReqApplier<T>::modify_request_state(const DoutPrefixProvider* dpp, req_state* const s) const
{
if (boost::logic::indeterminate(is_system)) {
- RGWUserInfo unused_info;
- load_acct_info(dpp, unused_info);
+ std::unique_ptr<rgw::sal::User> unused_user{ load_acct_info(dpp) };
}
if (is_system) {
diff --git a/src/rgw/rgw_auth_s3.h b/src/rgw/rgw_auth_s3.h
index 2f7fd2d7598..5815a520e02 100644
--- a/src/rgw/rgw_auth_s3.h
+++ b/src/rgw/rgw_auth_s3.h
@@ -55,14 +55,14 @@ class STSAuthStrategy : public rgw::auth::Strategy,
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
- const RGWUserInfo& user_info,
+ std::unique_ptr<rgw::sal::User> user,
std::optional<RGWAccountInfo> account,
std::vector<IAM::Policy> policies,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- LocalApplier(cct, user_info, std::move(account), std::move(policies),
+ LocalApplier(cct, std::move(user), std::move(account), std::move(policies),
subuser, perm_mask, access_key_id));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
@@ -72,7 +72,7 @@ class STSAuthStrategy : public rgw::auth::Strategy,
RoleApplier::Role role,
RoleApplier::TokenAttrs token_attrs) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- rgw::auth::RoleApplier(cct, std::move(role), std::move(token_attrs)));
+ rgw::auth::RoleApplier(cct, driver, std::move(role), std::move(token_attrs)));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
@@ -176,14 +176,14 @@ class AWSAuthStrategy : public rgw::auth::Strategy,
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
- const RGWUserInfo& user_info,
+ std::unique_ptr<rgw::sal::User> user,
std::optional<RGWAccountInfo> account,
std::vector<IAM::Policy> policies,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- LocalApplier(cct, user_info, std::move(account), std::move(policies),
+ LocalApplier(cct, std::move(user), std::move(account), std::move(policies),
subuser, perm_mask, access_key_id));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
diff --git a/src/rgw/rgw_bucket_layout.cc b/src/rgw/rgw_bucket_layout.cc
index f8c485d89c3..1f8db396a0d 100644
--- a/src/rgw/rgw_bucket_layout.cc
+++ b/src/rgw/rgw_bucket_layout.cc
@@ -376,9 +376,9 @@ void encode_json_impl(const char *name, const BucketLayout& l, ceph::Formatter *
for (const auto& log : l.logs) {
encode_json("log", log, f);
}
+ f->close_section(); // logs[]
utime_t jt(l.judge_reshard_lock_time);
encode_json("judge_reshard_lock_time", jt, f);
- f->close_section(); // logs[]
f->close_section();
}
void decode_json_obj(BucketLayout& l, JSONObj *obj)
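The reordering above matters to consumers of the JSON output: judge_reshard_lock_time is now emitted as a sibling of the logs array rather than ending up nested inside it. Illustrative shape after the fix (keys abbreviated, values are placeholders):

  {
    ...,
    "logs": [ ... ],
    "judge_reshard_lock_time": "<timestamp>",
    ...
  }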
diff --git a/src/rgw/rgw_bucket_logging.cc b/src/rgw/rgw_bucket_logging.cc
new file mode 100644
index 00000000000..dd407f26e8c
--- /dev/null
+++ b/src/rgw/rgw_bucket_logging.cc
@@ -0,0 +1,799 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+#include <time.h>
+#include <random>
+#include "common/ceph_time.h"
+#include "rgw_bucket_logging.h"
+#include "rgw_xml.h"
+#include "rgw_sal.h"
+#include "rgw_op.h"
+
+#define dout_subsys ceph_subsys_rgw
+
+namespace rgw::bucketlogging {
+
+bool configuration::decode_xml(XMLObj* obj) {
+ const auto throw_if_missing = true;
+ enabled = false;
+ XMLObjIter iter = obj->find("LoggingEnabled");
+ XMLObj* const o = iter.get_next();
+ if (o) {
+ enabled = true;
+ RGWXMLDecoder::decode_xml("TargetBucket", target_bucket, o, throw_if_missing);
+ RGWXMLDecoder::decode_xml("TargetPrefix", target_prefix, o);
+ // TODO: decode grant
+ RGWXMLDecoder::decode_xml("ObjectRollTime", obj_roll_time, default_obj_roll_time, o);
+ std::string default_type{"Standard"};
+ std::string type;
+ RGWXMLDecoder::decode_xml("LoggingType", type, default_type, o);
+ if (type == "Standard") {
+ logging_type = LoggingType::Standard;
+ } else if (type == "Journal") {
+ logging_type = LoggingType::Journal;
+ if (iter = o->find("Filter"); XMLObj* const filter_o = iter.get_next()) {
+ RGWXMLDecoder::decode_xml("S3Key", key_filter, filter_o);
+ }
+ } else {
+ // we don't allow for type "Any" in the configuration
+ throw RGWXMLDecoder::err("invalid bucket logging record type: '" + type + "'");
+ }
+ RGWXMLDecoder::decode_xml("RecordsBatchSize", records_batch_size, o);
+ if (iter = o->find("TargetObjectKeyFormat"); XMLObj* const oo = iter.get_next()) {
+ if (iter = oo->find("PartitionedPrefix"); XMLObj* const ooo = iter.get_next()) {
+ obj_key_format = KeyFormat::Partitioned;
+ default_type = "DeliveryTime";
+ RGWXMLDecoder::decode_xml("PartitionDateSource", type, default_type, ooo);
+ if (type == "DeliveryTime") {
+ date_source = PartitionDateSource::DeliveryTime;
+ } else if (type == "EventTime") {
+ date_source = PartitionDateSource::EventTime;
+ } else {
+ throw RGWXMLDecoder::err("invalid bucket logging partition date source: '" + type + "'");
+ }
+ } else if (iter = oo->find("SimplePrefix"); iter.get_next()) {
+ obj_key_format = KeyFormat::Simple;
+ } else {
+ throw RGWXMLDecoder::err("TargetObjectKeyFormat must contain a format tag");
+ }
+ }
+ }
+
+ return true;
+}
+
+void configuration::dump_xml(Formatter *f) const {
+ if (!enabled) {
+ return;
+ }
+ f->open_object_section("LoggingEnabled");
+ ::encode_xml("TargetBucket", target_bucket, f);
+ ::encode_xml("TargetPrefix", target_prefix, f);
+ ::encode_xml("ObjectRollTime", obj_roll_time, f);
+ switch (logging_type) {
+ case LoggingType::Standard:
+ ::encode_xml("LoggingType", "Standard", f);
+ break;
+ case LoggingType::Journal:
+ ::encode_xml("LoggingType", "Journal", f);
+ if (key_filter.has_content()) {
+ f->open_object_section("Filter");
+ ::encode_xml("S3Key", key_filter, f);
+ f->close_section(); // Filter
+ }
+ break;
+ case LoggingType::Any:
+ ::encode_xml("LoggingType", "", f);
+ break;
+ }
+ ::encode_xml("RecordsBatchSize", records_batch_size, f);
+ f->open_object_section("TargetObjectKeyFormat");
+ switch (obj_key_format) {
+ case KeyFormat::Partitioned:
+ f->open_object_section("PartitionedPrefix");
+ switch (date_source) {
+ case PartitionDateSource::DeliveryTime:
+ ::encode_xml("PartitionDateSource", "DeliveryTime", f);
+ break;
+ case PartitionDateSource::EventTime:
+ ::encode_xml("PartitionDateSource", "EventTime", f);
+ break;
+ }
+ f->close_section(); // PartitionedPrefix
+ break;
+ case KeyFormat::Simple:
+ f->open_object_section("SimplePrefix"); // empty section
+ f->close_section();
+ break;
+ }
+ f->close_section(); // TargetObjectKeyFormat
+ f->close_section(); // LoggingEnabled
+}
+
+void configuration::dump(Formatter *f) const {
+ Formatter::ObjectSection s(*f, "bucketLoggingStatus");
+ if (!enabled) {
+ return;
+ }
+ {
+ Formatter::ObjectSection s(*f, "loggingEnabled");
+ encode_json("targetBucket", target_bucket, f);
+ encode_json("targetPrefix", target_prefix, f);
+ encode_json("objectRollTime", obj_roll_time, f);
+ switch (logging_type) {
+ case LoggingType::Standard:
+ encode_json("loggingType", "Standard", f);
+ break;
+ case LoggingType::Journal:
+ encode_json("loggingType", "Journal", f);
+ if (key_filter.has_content()) {
+ Formatter::ObjectSection s(*f, "Filter");
+ encode_json("S3Key", key_filter, f);
+ }
+ break;
+ case LoggingType::Any:
+ encode_json("loggingType", "", f);
+ break;
+ }
+ encode_json("recordsBatchSize", records_batch_size, f);
+ {
+ Formatter::ObjectSection s(*f, "targetObjectKeyFormat");
+ switch (obj_key_format) {
+ case KeyFormat::Partitioned:
+ {
+ Formatter::ObjectSection s(*f, "partitionedPrefix");
+ switch (date_source) {
+ case PartitionDateSource::DeliveryTime:
+ encode_json("partitionDateSource", "DeliveryTime", f);
+ break;
+ case PartitionDateSource::EventTime:
+ encode_json("partitionDateSource", "EventTime", f);
+ break;
+ }
+ }
+ break;
+ case KeyFormat::Simple:
+ {
+ Formatter::ObjectSection s(*f, "simplePrefix");
+ }
+ break;
+ }
+ }
+ }
+}
+
+std::string configuration::to_json_str() const {
+ JSONFormatter f;
+ dump(&f);
+ std::stringstream ss;
+ f.flush(ss);
+ return ss.str();
+}
+
+template<size_t N>
+std::string unique_string() {
+ static const std::string possible_characters{"0123456789ABCDEFGHIJKLMNOPQRSTUVWXY"};
+ static const auto max_possible_value = possible_characters.length() - 1;
+ std::random_device rd;
+ std::mt19937 engine(rd());
+ std::uniform_int_distribution<> dist(0, max_possible_value);
+ std::string str(N, '\0');
+ std::generate_n(str.begin(), N, [&](){return possible_characters[dist(engine)];});
+ return str;
+}
+
+constexpr size_t UniqueStringLength = 16;
+
+ceph::coarse_real_time time_from_name(const std::string& obj_name, const DoutPrefixProvider *dpp) {
+ static const auto time_format_length = std::string{"YYYY-MM-DD-hh-mm-ss"}.length();
+ const auto obj_name_length = obj_name.length();
+ ceph::coarse_real_time extracted_time;
+ if (obj_name_length < time_format_length + UniqueStringLength + 1) {
+ ldpp_dout(dpp, 1) << "ERROR: logging object name too short: " << obj_name << dendl;
+ return extracted_time;
+ }
+ const auto time_start_pos = obj_name_length - (time_format_length + UniqueStringLength + 1);
+ // note: +1 is for the dash between the timestamp and the unique string
+ std::string time_str = obj_name.substr(time_start_pos, time_format_length);
+
+ std::tm t = {};
+ if (const auto ret = strptime(time_str.c_str(), "%Y-%m-%d-%H-%M-%S", &t); ret == nullptr || *ret != '\0') {
+ ldpp_dout(dpp, 1) << "ERROR: invalid time format: '" << time_str << "' in logging object name: " << obj_name << dendl;
+ return extracted_time;
+ }
+ extracted_time = ceph::coarse_real_time::clock::from_time_t(mktime(&t));
+ ldpp_dout(dpp, 20) << "INFO: time '" << extracted_time << "' extracted from logging object name: " << obj_name << dendl;
+ return extracted_time;
+}
+
+std::string full_bucket_name(const std::unique_ptr<rgw::sal::Bucket>& bucket) {
+ if (bucket->get_tenant().empty()) {
+ return bucket->get_name();
+ }
+ return fmt::format("{}:{}", bucket->get_tenant(), bucket->get_name());
+}
+
+int new_logging_object(const configuration& conf,
+ const std::unique_ptr<rgw::sal::Bucket>& bucket,
+ std::string& obj_name,
+ const DoutPrefixProvider *dpp,
+ optional_yield y,
+ bool init_obj,
+ RGWObjVersionTracker* objv_tracker) {
+ const auto tt = ceph::coarse_real_time::clock::to_time_t(ceph::coarse_real_time::clock::now());
+ std::tm t{};
+ localtime_r(&tt, &t);
+
+ const auto unique = unique_string<UniqueStringLength>();
+ const auto old_name = obj_name;
+
+ switch (conf.obj_key_format) {
+ case KeyFormat::Simple:
+ obj_name = fmt::format("{}{:%Y-%m-%d-%H-%M-%S}-{}",
+ conf.target_prefix,
+ t,
+ unique);
+ break;
+ case KeyFormat::Partitioned:
+ {
+ // TODO: use date_source
+ const auto source_region = ""; // TODO
+ obj_name = fmt::format("{}{}/{}/{}/{:%Y/%m/%d}/{:%Y-%m-%d-%H-%M-%S}-{}",
+ conf.target_prefix,
+ to_string(bucket->get_owner()),
+ source_region,
+ full_bucket_name(bucket),
+ t,
+ t,
+ unique);
+ }
+ break;
+ }
+ int ret = bucket->set_logging_object_name(obj_name, conf.target_prefix, y, dpp, init_obj, objv_tracker);
+ if (ret == -EEXIST || ret == -ECANCELED) {
+ if (ret = bucket->get_logging_object_name(obj_name, conf.target_prefix, y, dpp, nullptr); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get name of logging object of bucket '" <<
+ conf.target_bucket << "' and prefix '" << conf.target_prefix << "', ret = " << ret << dendl;
+ return ret;
+ }
+ ldpp_dout(dpp, 20) << "INFO: name already set. got name of logging object '" << obj_name << "' of bucket '" <<
+ conf.target_bucket << "' and prefix '" << conf.target_prefix << "'" << dendl;
+ return -ECANCELED;
+ } else if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to write name of logging object '" << obj_name << "' of bucket '" <<
+ conf.target_bucket << "'. ret = " << ret << dendl;
+ return ret;
+ }
+ ldpp_dout(dpp, 20) << "INFO: wrote name of logging object '" << obj_name << "' of bucket '" <<
+ conf.target_bucket << "'" << dendl;
+ return 0;
+}
+
+int commit_logging_object(const configuration& conf,
+ const DoutPrefixProvider *dpp,
+ rgw::sal::Driver* driver,
+ const std::string& tenant_name,
+ optional_yield y) {
+ std::string target_bucket_name;
+ std::string target_tenant_name;
+ auto ret = rgw_parse_url_bucket(conf.target_bucket, tenant_name, target_tenant_name, target_bucket_name);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to parse target bucket '" << conf.target_bucket << "' when commiting logging object, ret = "
+ << ret << dendl;
+ return ret;
+ }
+ const rgw_bucket target_bucket_id(target_tenant_name, target_bucket_name);
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ ret = driver->load_bucket(dpp, target_bucket_id,
+ &target_bucket, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get target logging bucket '" << target_bucket_id << "' when commiting logging object, ret = "
+ << ret << dendl;
+ return ret;
+ }
+ return commit_logging_object(conf, target_bucket, dpp, y);
+}
+
+int commit_logging_object(const configuration& conf,
+ const std::unique_ptr<rgw::sal::Bucket>& target_bucket,
+ const DoutPrefixProvider *dpp,
+ optional_yield y) {
+ std::string obj_name;
+ if (const auto ret = target_bucket->get_logging_object_name(obj_name, conf.target_prefix, y, dpp, nullptr); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get name of logging object of bucket '" <<
+ target_bucket->get_info().bucket << "'. ret = " << ret << dendl;
+ return ret;
+ }
+ return target_bucket->commit_logging_object(obj_name, y, dpp);
+}
+
+int rollover_logging_object(const configuration& conf,
+ const std::unique_ptr<rgw::sal::Bucket>& bucket,
+ std::string& obj_name,
+ const DoutPrefixProvider *dpp,
+ optional_yield y,
+ bool must_commit,
+ RGWObjVersionTracker* objv_tracker) {
+ std::string target_bucket_name;
+ std::string target_tenant_name;
+ std::ignore = rgw_parse_url_bucket(conf.target_bucket, bucket->get_tenant(), target_tenant_name, target_bucket_name);
+ if (target_bucket_name != bucket->get_name() || target_tenant_name != bucket->get_tenant()) {
+ ldpp_dout(dpp, 1) << "ERROR: bucket name mismatch. conf= '" << conf.target_bucket <<
+ "', bucket= '" << bucket->get_info().bucket << "'" << dendl;
+ return -EINVAL;
+ }
+ const auto old_obj = obj_name;
+ const auto ret = new_logging_object(conf, bucket, obj_name, dpp, y, false, objv_tracker);
+ if (ret == -ECANCELED) {
+ ldpp_dout(dpp, 20) << "INFO: rollover already performed for '" << old_obj << "' to bucket '" <<
+ conf.target_bucket << "'. ret = " << ret << dendl;
+ return 0;
+ } else if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to rollover logging object '" << old_obj << "' to bucket '" <<
+ conf.target_bucket << "'. ret = " << ret << dendl;
+ return ret;
+ }
+ if (const auto ret = bucket->commit_logging_object(old_obj, y, dpp); ret < 0) {
+ if (must_commit) {
+ return ret;
+ }
+ ldpp_dout(dpp, 5) << "WARNING: failed to commit logging object '" << old_obj << "' to bucket '" <<
+ conf.target_bucket << "'. ret = " << ret << dendl;
+ // we still want to write the new records to the new object even if commit failed
+ // will try to commit again next time
+ }
+ return 0;
+}
+
+#define dash_if_empty(S) (S).empty() ? "-" : S
+#define dash_if_empty_or_null(P, S) (((P) == nullptr) || (S).empty()) ? "-" : S
+#define dash_if_zero(I) (I) == 0 ? "-" : std::to_string(I)
+#define dash_if_zero_or_null(P, I) (((P) == nullptr) || ((I) == 0)) ? "-" : std::to_string(I)
+
+/* S3 bucket standard log record
+ * based on: https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html
+ - bucket owner
+ - bucket name
+ - The time at which the request was received, in UTC. The format is as follows: [%d/%b/%Y:%H:%M:%S %z]
+ - The apparent IP address of the requester
+ - The canonical user ID of the requester, or a - for unauthenticated requests
+ - Request ID
+ - REST.HTTP_method.resource_type or S3.action.resource_type for Lifecycle and logging
+ - The key (object name) part of the request (source key in case of copy)
+ - The Request-URI part of the HTTP request message
+ - The numeric HTTP status code of the response
+ - The S3 Error code, or - if no error occurred
+ - The number of response bytes sent, excluding HTTP protocol overhead, or - if zero
+ - Object Size
+ - Total time: milliseconds, including network transmission time; from first byte received to last byte transmitted
+ - Turn around time: milliseconds, excluding network transmission time; from last byte received to first byte transmitted
+ - The value of the HTTP Referer header, if present, or - if not
+ - User Agent
+ - The version ID in the request, or - if the operation doesn't take a versionId parameter
+ - Host ID: x-amz-id-2
+ - SigV2 or SigV4, that was used to authenticate the request or a - for unauthenticated requests
+ - SSL cipher that was negotiated for an HTTPS request or a - for HTTP
+ - The type of request authentication used: AuthHeader, QueryString or a - for unauthenticated requests
+ - Host Header: The RGW endpoint fqdn
+ - TLS version negotiated by the client: TLSv1.1, TLSv1.2, TLSv1.3, or - if TLS wasn't used
+ - ARN of the access point of the request. If the access point ARN is malformed or not used, the field will contain a -
+ - A string that indicates whether the request required an ACL for authorization. If an ACL was required, the string is Yes. If no ACL was required, the string is -
+
+S3 bucket short (ceph) log record
+ - bucket owner
+ - bucket name
+ - The time at which the request was received, in UTC. The format is as follows: [%d/%b/%Y:%H:%M:%S %z]
+ - REST.HTTP_method.resource_type or S3.action.resource_type for Lifecycle and logging
+ - The key (object name) part of the request (source key in case of copy)
+ - Object version in case of versioned bucket
+ - Object Size
+ - eTag
+};*/
+
+int log_record(rgw::sal::Driver* driver,
+ const sal::Object* obj,
+ const req_state* s,
+ const std::string& op_name,
+ const std::string& etag,
+ size_t size,
+ const configuration& conf,
+ const DoutPrefixProvider *dpp,
+ optional_yield y,
+ bool async_completion,
+ bool log_source_bucket) {
+ if (!s->bucket) {
+ ldpp_dout(dpp, 1) << "ERROR: only bucket operations are logged" << dendl;
+ return -EINVAL;
+ }
+ std::string target_bucket_name;
+ std::string target_tenant_name;
+ auto ret = rgw_parse_url_bucket(conf.target_bucket, s->bucket_tenant, target_tenant_name, target_bucket_name);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to parse target bucket '" << conf.target_bucket << "', ret = " << ret << dendl;
+ return ret;
+ }
+ const rgw_bucket target_bucket_id(target_tenant_name, target_bucket_name);
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ ret = driver->load_bucket(dpp, target_bucket_id,
+ &target_bucket, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get target logging bucket '" << target_bucket_id << "'. ret = " << ret << dendl;
+ return ret;
+ }
+ std::string obj_name;
+ RGWObjVersionTracker objv_tracker;
+ ret = target_bucket->get_logging_object_name(obj_name, conf.target_prefix, y, dpp, &objv_tracker);
+ if (ret == 0) {
+ const auto time_to_commit = time_from_name(obj_name, dpp) + std::chrono::seconds(conf.obj_roll_time);
+ if (ceph::coarse_real_time::clock::now() > time_to_commit) {
+ ldpp_dout(dpp, 20) << "INFO: logging object '" << obj_name << "' exceeded its time, will be committed to bucket '" <<
+ conf.target_bucket << "'" << dendl;
+ if (ret = rollover_logging_object(conf, target_bucket, obj_name, dpp, y, false, &objv_tracker); ret < 0) {
+ return ret;
+ }
+ } else {
+ ldpp_dout(dpp, 20) << "INFO: record will be written to current logging object '" << obj_name << "'. will be comitted at: " << time_to_commit << dendl;
+ }
+ } else if (ret == -ENOENT) {
+ // try to create the temporary log object for the first time
+ ret = new_logging_object(conf, target_bucket, obj_name, dpp, y, true, nullptr);
+ if (ret == 0) {
+ ldpp_dout(dpp, 20) << "INFO: first time logging for bucket '" << conf.target_bucket << "' and prefix '" <<
+ conf.target_prefix << "'" << dendl;
+ } else if (ret == -ECANCELED) {
+ ldpp_dout(dpp, 20) << "INFO: logging object '" << obj_name << "' already exists for bucket '" << conf.target_bucket << "' and prefix" <<
+ conf.target_prefix << "'" << dendl;
+ } else {
+ ldpp_dout(dpp, 1) << "ERROR: failed to create logging object of bucket '" <<
+ conf.target_bucket << "' and prefix '" << conf.target_prefix << "' for the first time. ret = " << ret << dendl;
+ return ret;
+ }
+ } else {
+ ldpp_dout(dpp, 1) << "ERROR: failed to get name of logging object of bucket '" <<
+ conf.target_bucket << "'. ret = " << ret << dendl;
+ return ret;
+ }
+
+ std::string record;
+ const auto tt = ceph::coarse_real_time::clock::to_time_t(s->time);
+ std::tm t{};
+ localtime_r(&tt, &t);
+ auto user_or_account = s->account_name;
+ if (user_or_account.empty()) {
+ s->user->get_id().to_str(user_or_account);
+ }
+ auto fqdn = s->info.host;
+ if (!s->info.domain.empty() && !fqdn.empty()) {
+ fqdn.append(".").append(s->info.domain);
+ }
+
+ std::string bucket_owner;
+ std::string bucket_name;
+ if (log_source_bucket) {
+ if (!s->src_object || !s->src_object->get_bucket()) {
+ ldpp_dout(dpp, 1) << "ERROR: source object or bucket is missing when logging source bucket" << dendl;
+ return -EINVAL;
+ }
+ bucket_owner = to_string(s->src_object->get_bucket()->get_owner());
+ bucket_name = s->src_bucket_name;
+ } else {
+ bucket_owner = to_string(s->bucket->get_owner());
+ bucket_name = full_bucket_name(s->bucket);
+ }
+
+ switch (conf.logging_type) {
+ case LoggingType::Standard:
+ record = fmt::format("{} {} [{:%d/%b/%Y:%H:%M:%S %z}] {} {} {} {} {} \"{} {}{}{} HTTP/1.1\" {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}",
+ dash_if_empty(bucket_owner),
+ dash_if_empty(bucket_name),
+ t,
+ "-", // no requester IP
+ dash_if_empty(user_or_account),
+ dash_if_empty(s->req_id),
+ op_name,
+ dash_if_empty_or_null(obj, obj->get_name()),
+ s->info.method,
+ s->info.request_uri,
+ s->info.request_params.empty() ? "" : "?",
+ s->info.request_params,
+ dash_if_zero(s->err.http_ret),
+ dash_if_empty(s->err.err_code),
+ dash_if_zero(s->content_length),
+ dash_if_zero(size),
+ "-", // no total time when logging record
+ std::chrono::duration_cast<std::chrono::milliseconds>(s->time_elapsed()),
+ "-", // TODO: referer
+ "-", // TODO: user agent
+ dash_if_empty_or_null(obj, obj->get_instance()),
+ s->info.x_meta_map.contains("x-amz-id-2") ? s->info.x_meta_map.at("x-amz-id-2") : "-",
+ "-", // TODO: Signature Version (SigV2 or SigV4)
+ "-", // TODO: SSL cipher. e.g. "ECDHE-RSA-AES128-GCM-SHA256"
+ "-", // TODO: Auth type. e.g. "AuthHeader"
+ dash_if_empty(fqdn),
+ "-", // TODO: TLS version. e.g. "TLSv1.2" or "TLSv1.3"
+ "-", // no access point ARN
+ (s->has_acl_header) ? "Yes" : "-");
+ break;
+ case LoggingType::Journal:
+ record = fmt::format("{} {} [{:%d/%b/%Y:%H:%M:%S %z}] {} {} {} {} {}",
+ dash_if_empty(to_string(s->bucket->get_owner())),
+ dash_if_empty(full_bucket_name(s->bucket)),
+ t,
+ op_name,
+ dash_if_empty_or_null(obj, obj->get_name()),
+ dash_if_zero(size),
+ dash_if_empty_or_null(obj, obj->get_instance()),
+ dash_if_empty(etag));
+ break;
+ case LoggingType::Any:
+ ldpp_dout(dpp, 1) << "ERROR: failed to format record when writing to logging object '" <<
+ obj_name << "' due to unsupported logging type" << dendl;
+ return -EINVAL;
+ }
+
+ if (ret = target_bucket->write_logging_object(obj_name,
+ record,
+ y,
+ dpp,
+ async_completion); ret < 0 && ret != -EFBIG) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to write record to logging object '" <<
+ obj_name << "'. ret = " << ret << dendl;
+ return ret;
+ }
+ if (ret == -EFBIG) {
+ ldpp_dout(dpp, 20) << "WARNING: logging object '" << obj_name << "' is full, will be committed to bucket '" <<
+ conf.target_bucket << "'" << dendl;
+ if (ret = rollover_logging_object(conf, target_bucket, obj_name, dpp, y, true, nullptr); ret < 0 ) {
+ return ret;
+ }
+ if (ret = target_bucket->write_logging_object(obj_name,
+ record,
+ y,
+ dpp,
+ async_completion); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to write record to logging object '" <<
+ obj_name << "'. ret = " << ret << dendl;
+ return ret;
+ }
+ }
+
+ ldpp_dout(dpp, 20) << "INFO: wrote logging record: '" << record
+ << "' to '" << obj_name << "'" << dendl;
+ return 0;
+}
+
+std::string object_name_oid(const rgw::sal::Bucket* bucket, const std::string& prefix) {
+ // TODO: do I need the bucket marker in the name?
+ return fmt::format("logging.{}.bucket.{}/{}", bucket->get_tenant(), bucket->get_bucket_id(), prefix);
+}
+
+int log_record(rgw::sal::Driver* driver,
+ LoggingType type,
+ const sal::Object* obj,
+ const req_state* s,
+ const std::string& op_name,
+ const std::string& etag,
+ size_t size,
+ const DoutPrefixProvider *dpp,
+ optional_yield y,
+ bool async_completion,
+ bool log_source_bucket) {
+ if (!s->bucket) {
+ // logging only bucket operations
+ return 0;
+ }
+ // check if bucket logging is needed
+ const auto& bucket_attrs = s->bucket->get_attrs();
+ auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING);
+ if (iter == bucket_attrs.end()) {
+ return 0;
+ }
+ configuration configuration;
+ try {
+ configuration.enabled = true;
+ auto bl_iter = iter->second.cbegin();
+ decode(configuration, bl_iter);
+ if (type != LoggingType::Any && configuration.logging_type != type) {
+ return 0;
+ }
+ if (configuration.key_filter.has_content()) {
+ if (!match(configuration.key_filter, obj->get_name())) {
+ return 0;
+ }
+ }
+ ldpp_dout(dpp, 20) << "INFO: found matching logging configuration of bucket '" << s->bucket->get_info().bucket <<
+ "' configuration: " << configuration.to_json_str() << dendl;
+ if (auto ret = log_record(driver, obj, s, op_name, etag, size, configuration, dpp, y, async_completion, log_source_bucket); ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to perform logging for bucket '" << s->bucket->get_info().bucket <<
+ "'. ret=" << ret << dendl;
+ return ret;
+ }
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "'. error: " << err.what() << dendl;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int get_bucket_id(const std::string& bucket_name, const std::string& tenant_name, rgw_bucket& bucket_id) {
+ std::string parsed_bucket_name;
+ std::string parsed_tenant_name;
+ if (const auto ret = rgw_parse_url_bucket(bucket_name, tenant_name, parsed_tenant_name, parsed_bucket_name); ret < 0) {
+ return ret;
+ }
+ bucket_id = rgw_bucket{parsed_tenant_name, parsed_bucket_name};
+ return 0;
+}
+
+int update_bucket_logging_sources(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, const rgw_bucket& target_bucket_id, const rgw_bucket& src_bucket_id, bool add, optional_yield y) {
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ const auto ret = driver->load_bucket(dpp, target_bucket_id, &target_bucket, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to get target bucket '" << target_bucket_id << "', ret = " << ret << dendl;
+ return ret;
+ }
+ return update_bucket_logging_sources(dpp, target_bucket, src_bucket_id, add, y);
+}
+
+int update_bucket_logging_sources(const DoutPrefixProvider* dpp, std::unique_ptr<rgw::sal::Bucket>& bucket, const rgw_bucket& src_bucket_id, bool add, optional_yield y) {
+ return retry_raced_bucket_write(dpp, bucket.get(), [dpp, &bucket, &src_bucket_id, add, y] {
+ auto& attrs = bucket->get_attrs();
+ auto iter = attrs.find(RGW_ATTR_BUCKET_LOGGING_SOURCES);
+ if (iter == attrs.end()) {
+ if (!add) {
+ ldpp_dout(dpp, 20) << "INFO: no logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES
+ << "' for bucket '" << bucket->get_info().bucket << "', nothing to remove" << dendl;
+ return 0;
+ }
+ source_buckets sources{src_bucket_id};
+ bufferlist bl;
+ ceph::encode(sources, bl);
+ attrs.insert(std::make_pair(RGW_ATTR_BUCKET_LOGGING_SOURCES, std::move(bl)));
+ return bucket->merge_and_store_attrs(dpp, attrs, y);
+ }
+ try {
+ source_buckets sources;
+ ceph::decode(sources, iter->second);
+ if ((add && sources.insert(src_bucket_id).second) ||
+ (!add && sources.erase(src_bucket_id) > 0)) {
+ bufferlist bl;
+ ceph::encode(sources, bl);
+ iter->second = std::move(bl);
+ return bucket->merge_and_store_attrs(dpp, attrs, y);
+ }
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to decode logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES
+ << "' for bucket '" << bucket->get_info().bucket << "', error: " << err.what() << dendl;
+ }
+ ldpp_dout(dpp, 20) << "INFO: logging source '" << src_bucket_id << "' already " <<
+ (add ? "added to" : "removed from") << " bucket '" << bucket->get_info().bucket << "'" << dendl;
+ return 0;
+ }, y);
+}
+
+
+int bucket_deletion_cleanup(const DoutPrefixProvider* dpp,
+ sal::Driver* driver,
+ sal::Bucket* bucket,
+ optional_yield y) {
+ // if the bucket is used as a log bucket, we should delete all pending log objects
+ // and also delete the object holding the pending object name
+ auto& attrs = bucket->get_attrs();
+ if (const auto iter = attrs.find(RGW_ATTR_BUCKET_LOGGING_SOURCES); iter != attrs.end()) {
+ try {
+ source_buckets sources;
+ ceph::decode(sources, iter->second);
+ for (const auto& source : sources) {
+ std::unique_ptr<rgw::sal::Bucket> src_bucket;
+ if (const auto ret = driver->load_bucket(dpp, source, &src_bucket, y); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to get logging source bucket '" << source << "' for log bucket '" <<
+ bucket->get_info().bucket << "', ret = " << ret << dendl;
+ continue;
+ }
+ auto& src_attrs = src_bucket->get_attrs();
+ if (const auto iter = src_attrs.find(RGW_ATTR_BUCKET_LOGGING); iter != src_attrs.end()) {
+ configuration conf;
+ try {
+ auto bl_iter = iter->second.cbegin();
+ decode(conf, bl_iter);
+ std::string obj_name;
+ RGWObjVersionTracker objv;
+ if (const auto ret = bucket->get_logging_object_name(obj_name, conf.target_prefix, y, dpp, &objv); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to get logging object name for log bucket '" << bucket->get_info().bucket <<
+ "', ret = " << ret << dendl;
+ continue;
+ }
+ if (const auto ret = bucket->remove_logging_object(obj_name, y, dpp); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to delete pending logging object '" << obj_name << "' for log bucket '" <<
+ bucket->get_info().bucket << "', ret = " << ret << dendl;
+ continue;
+ }
+ ldpp_dout(dpp, 20) << "INFO: successfully deleted pending logging object '" << obj_name << "' from deleted log bucket '" <<
+ bucket->get_info().bucket << "'" << dendl;
+ if (const auto ret = bucket->remove_logging_object_name(conf.target_prefix, y, dpp, &objv); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to delete object holding bucket logging object name for log bucket '" <<
+ bucket->get_info().bucket << "', ret = " << ret << dendl;
+ continue;
+ }
+ ldpp_dout(dpp, 20) << "INFO: successfully deleted object holding bucket logging object name from deleted log bucket '" <<
+ bucket->get_info().bucket << "'" << dendl;
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "' of bucket '" << src_bucket->get_info().bucket << "', error: " << err.what() << dendl;
+ }
+ }
+ }
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to decode logging sources attribute '" << RGW_ATTR_BUCKET_LOGGING_SOURCES
+ << "' for bucket '" << bucket->get_info().bucket << "', error: " << err.what() << dendl;
+ return -EIO;
+ }
+ }
+
+ return source_bucket_cleanup(dpp, driver, bucket, false, y);
+}
+
+int source_bucket_cleanup(const DoutPrefixProvider* dpp,
+ sal::Driver* driver,
+ sal::Bucket* bucket,
+ bool remove_attr,
+ optional_yield y) {
+ std::optional<configuration> conf;
+ const auto& info = bucket->get_info();
+ if (const auto ret = retry_raced_bucket_write(dpp, bucket, [dpp, bucket, &conf, &info, remove_attr, y] {
+ auto& attrs = bucket->get_attrs();
+ if (auto iter = attrs.find(RGW_ATTR_BUCKET_LOGGING); iter != attrs.end()) {
+ try {
+ auto bl_iter = iter->second.cbegin();
+ configuration tmp_conf;
+ tmp_conf.enabled = true;
+ decode(tmp_conf, bl_iter);
+ conf = std::move(tmp_conf);
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to decode existing logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "' of bucket '" << info.bucket << "', error: " << err.what() << dendl;
+ return -EIO;
+ }
+ if (remove_attr) {
+ attrs.erase(iter);
+ return bucket->merge_and_store_attrs(dpp, attrs, y);
+ }
+ }
+ // nothing to remove or no need to remove
+ return 0;
+ }, y); ret < 0) {
+ if (remove_attr) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove logging attribute '" << RGW_ATTR_BUCKET_LOGGING << "' from bucket '" <<
+ info.bucket << "', ret = " << ret << dendl;
+ }
+ return ret;
+ }
+ if (!conf) {
+ // no logging attribute found
+ return 0;
+ }
+ if (const auto ret = commit_logging_object(*conf, dpp, driver, info.bucket.tenant, y); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: could not commit pending logging object of bucket '" <<
+ info.bucket << "', ret = " << ret << dendl;
+ } else {
+ ldpp_dout(dpp, 20) << "INFO: successfully committed pending logging object of bucket '" << info.bucket << "'" << dendl;
+ }
+ rgw_bucket target_bucket_id;
+ rgw_bucket src_bucket_id{info.bucket.tenant, info.bucket.name};
+ if (const auto ret = get_bucket_id(conf->target_bucket, info.bucket.tenant, target_bucket_id); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to parse target bucket '" << conf->target_bucket << "', ret = " << ret << dendl;
+ return 0;
+ }
+ if (const auto ret = update_bucket_logging_sources(dpp, driver, target_bucket_id, src_bucket_id, false, y); ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: could not update bucket logging source '" <<
+ info.bucket << "', ret = " << ret << dendl;
+ return 0;
+ }
+ ldpp_dout(dpp, 20) << "INFO: successfully updated bucket logging source '" <<
+ info.bucket << "'"<< dendl;
+ return 0;
+}
+
+} // namespace rgw::bucketlogging
+
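A hedged sketch of how an operation might call the attribute-driven log_record() overload defined above; driver, dpp, y, etag and obj_size are assumed to be available at the call site, and "REST.PUT.OBJECT" is a placeholder op name:

  const int ret = rgw::bucketlogging::log_record(driver,
      rgw::bucketlogging::LoggingType::Journal,   // journal mode logs only the mutating ops
      s->object.get(), s, "REST.PUT.OBJECT", etag, obj_size,
      dpp, y,
      true,    // async_completion
      false);  // log_source_bucket
  if (ret < 0) {
    // the logging failure is returned to the caller, which may ignore or propagate it
  }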
diff --git a/src/rgw/rgw_bucket_logging.h b/src/rgw/rgw_bucket_logging.h
new file mode 100644
index 00000000000..cbdb8b55f88
--- /dev/null
+++ b/src/rgw/rgw_bucket_logging.h
@@ -0,0 +1,250 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+#pragma once
+
+#include <string>
+#include <cstdint>
+#include "rgw_sal_fwd.h"
+#include "include/buffer.h"
+#include "include/encoding.h"
+#include "common/async/yield_context.h"
+#include "rgw_s3_filter.h"
+
+class XMLObj;
+namespace ceph { class Formatter; }
+class DoutPrefixProvider;
+struct req_state;
+struct RGWObjVersionTracker;
+class RGWOp;
+
+namespace rgw::bucketlogging {
+/* S3 bucket logging configuration
+ * based on: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html
+ * with ceph extensions
+<BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <LoggingEnabled>
+ <TargetBucket>string</TargetBucket>
+ <TargetGrants>
+ <Grant>
+ <Grantee>
+ <DisplayName>string</DisplayName>
+ <EmailAddress>string</EmailAddress>
+ <ID>string</ID>
+ <xsi:type>string</xsi:type>
+ <URI>string</URI>
+ </Grantee>
+ <Permission>string</Permission>
+ </Grant>
+ </TargetGrants>
+ <TargetObjectKeyFormat>
+ <PartitionedPrefix>
+ <PartitionDateSource>DeliveryTime|EventTime</PartitionDateSource>
+ </PartitionedPrefix>
+ <SimplePrefix>
+ </SimplePrefix>
+ </TargetObjectKeyFormat>
+ <TargetPrefix>string</TargetPrefix>
+ <LoggingType>Standard|Journal</LoggingType> <!-- Ceph extension -->
+ <ObjectRollTime>integer</ObjectRollTime> <!-- Ceph extension -->
+ <RecordsBatchSize>integer</RecordsBatchSize> <!-- Ceph extension -->
+ <Filter>
+ <S3Key>
+ <FilterRule>
+ <Name>suffix/prefix/regex</Name>
+ <Value></Value>
+ </FilterRule>
+ </S3Key>
+ </Filter>
+ </LoggingEnabled>
+</BucketLoggingStatus>
+*/
+
+enum class KeyFormat {Partitioned, Simple};
+enum class LoggingType {Standard, Journal, Any};
+enum class PartitionDateSource {DeliveryTime, EventTime};
+
+struct configuration {
+ bool operator==(const configuration& rhs) const {
+ return enabled == rhs.enabled &&
+ target_bucket == rhs.target_bucket &&
+ obj_key_format == rhs.obj_key_format &&
+ target_prefix == rhs.target_prefix &&
+ obj_roll_time == rhs.obj_roll_time &&
+ logging_type == rhs.logging_type &&
+ records_batch_size == rhs.records_batch_size &&
+ date_source == rhs.date_source &&
+ key_filter == rhs.key_filter;
+ }
+ uint32_t default_obj_roll_time = 300;
+ bool enabled = false;
+ std::string target_bucket;
+ KeyFormat obj_key_format = KeyFormat::Simple;
+ // target object key formats:
+ // Partitioned: [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+ // Simple: [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+ std::string target_prefix; // a prefix for all log object keys.
+ // useful when multiple buckets log to the same target
+ // or when the target bucket is used for things other than logs
+ uint32_t obj_roll_time; // time in seconds after which the pending log object is committed to the target bucket and a new one is started
+ LoggingType logging_type = LoggingType::Standard;
+ // in case of "Standard: logging type, all bucket operations are logged
+ // in case of "Journal" logging type only the following operations are logged: PUT, COPY, MULTI/DELETE, Complete MPU
+ uint32_t records_batch_size = 0; // how many records to batch in memory before writing to the object
+ // if set to zero, records are written synchronously to the object.
+ // if obj_roll_time is reached, the batch of records will be written to the object
+ // regardless of the number of records
+ PartitionDateSource date_source = PartitionDateSource::DeliveryTime;
+ // EventTime: use only year, month, and day. The hour, minutes and seconds are set to 00 in the key
+ // DeliveryTime: the time the log object was created
+ rgw_s3_key_filter key_filter;
+ bool decode_xml(XMLObj *obj);
+ void dump_xml(Formatter *f) const;
+ void dump(Formatter *f) const; // json
+ std::string to_json_str() const;
+
+ void encode(ceph::bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(target_bucket, bl);
+ encode(static_cast<int>(obj_key_format), bl);
+ encode(target_prefix, bl);
+ encode(obj_roll_time, bl);
+ encode(static_cast<int>(logging_type), bl);
+ encode(records_batch_size, bl);
+ encode(static_cast<int>(date_source), bl);
+ if (logging_type == LoggingType::Journal) {
+ encode(key_filter, bl);
+ }
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(target_bucket, bl);
+ int type;
+ decode(type, bl);
+ obj_key_format = static_cast<KeyFormat>(type);
+ decode(target_prefix, bl);
+ decode(obj_roll_time, bl);
+ decode(type, bl);
+ logging_type = static_cast<LoggingType>(type);
+ decode(records_batch_size, bl);
+ decode(type, bl);
+ date_source = static_cast<PartitionDateSource>(type);
+ if (logging_type == LoggingType::Journal) {
+ decode(key_filter, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(configuration)
+
+using source_buckets = std::set<rgw_bucket>;
+
+constexpr unsigned MAX_BUCKET_LOGGING_BUFFER = 1000;
+
+using bucket_logging_records = std::array<std::string, MAX_BUCKET_LOGGING_BUFFER>;
+
+template <typename Records>
+inline std::string to_string(const Records& records) {
+ std::string str_records;
+ for (const auto& record : records) {
+ str_records.append(to_string(record)).append("\n");
+ }
+ return str_records;
+}
+
+// log a bucket logging record according to the configuration
+int log_record(rgw::sal::Driver* driver,
+ const sal::Object* obj,
+ const req_state* s,
+ const std::string& op_name,
+ const std::string& etag,
+ size_t size,
+ const configuration& conf,
+ const DoutPrefixProvider *dpp,
+ optional_yield y,
+ bool async_completion,
+ bool log_source_bucket);
+
+// commit the pending log object to the log bucket
+// and create a new pending log object
+// if "must_commit" is "false", the function returns success even if the pending log object was not committed
+int rollover_logging_object(const configuration& conf,
+ const std::unique_ptr<rgw::sal::Bucket>& bucket,
+ std::string& obj_name,
+ const DoutPrefixProvider *dpp,
+ optional_yield y,
+ bool must_commit,
+ RGWObjVersionTracker* objv_tracker);
+
+// commit the pending log object to the log bucket
+// use this for cleanup, when a new pending object is not needed
+// and the target bucket is known
+int commit_logging_object(const configuration& conf,
+ const std::unique_ptr<rgw::sal::Bucket>& target_bucket,
+ const DoutPrefixProvider *dpp,
+ optional_yield y);
+
+// commit the pending log object to the log bucket
+// use this for cleanup, when a new pending object is not needed
+// and the target bucket should be loaded based on the configuration
+int commit_logging_object(const configuration& conf,
+ const DoutPrefixProvider *dpp,
+ rgw::sal::Driver* driver,
+ const std::string& tenant_name,
+ optional_yield y);
+
+// return the oid of the object holding the name of the temporary logging object
+// bucket - log bucket
+// prefix - logging prefix from configuration. should be used when multiple buckets log into the same log bucket
+std::string object_name_oid(const rgw::sal::Bucket* bucket, const std::string& prefix);
+
+// log a bucket logging record according to type
+// configuration is fetched from bucket attributes
+// if no configuration exists, or if the type does not match, the function returns zero (success)
+int log_record(rgw::sal::Driver* driver,
+ LoggingType type,
+ const sal::Object* obj,
+ const req_state* s,
+ const std::string& op_name,
+ const std::string& etag,
+ size_t size,
+ const DoutPrefixProvider *dpp,
+ optional_yield y,
+ bool async_completion,
+ bool log_source_bucket);
+
+// return (by ref) an rgw_bucket object with the bucket name and tenant name
+// fails if the bucket name is not in the format: [tenant name:]<bucket name>
+int get_bucket_id(const std::string& bucket_name, const std::string& tenant_name, rgw_bucket& bucket_id);
+
+// update (add or remove) a source bucket from the list of source buckets in the target bucket
+// use this function when the target bucket is already loaded
+int update_bucket_logging_sources(const DoutPrefixProvider* dpp, std::unique_ptr<rgw::sal::Bucket>& bucket,
+ const rgw_bucket& src_bucket, bool add, optional_yield y);
+
+// update (add or remove) a source bucket from the list of source buckets in the target bucket
+// use this function when the target bucket is not known and needs to be loaded
+int update_bucket_logging_sources(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, const rgw_bucket& target_bucket_id,
+ const rgw_bucket& src_bucket_id, bool add, optional_yield y);
+
+// when the source bucket is deleted, all pending log objects should be committed to the log bucket
+// when the target bucket is deleted, all pending log objects should be deleted, as well as the object holding the pending log object name
+int bucket_deletion_cleanup(const DoutPrefixProvider* dpp,
+ sal::Driver* driver,
+ sal::Bucket* bucket,
+ optional_yield y);
+
+// if the bucket has a bucket logging configuration associated with it then:
+// if "remove_attr" is true, the bucket logging configuration should be removed from the bucket
+// in addition:
+// any pending log objects should be committed to the log bucket
+// and the log bucket should be updated to remove the bucket as a source
+int source_bucket_cleanup(const DoutPrefixProvider* dpp,
+ sal::Driver* driver,
+ sal::Bucket* bucket,
+ bool remove_attr,
+ optional_yield y);
+} // namespace rgw::bucketlogging
+
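A minimal encode/decode round-trip sketch for the configuration struct above (placeholder values); note that `enabled` is not part of the encoding, which is why callers such as log_record() set it explicitly before decoding:

  rgw::bucketlogging::configuration conf;
  conf.enabled = true;
  conf.target_bucket = "log-bucket";
  conf.target_prefix = "logs/";
  conf.obj_roll_time = 300;      // seconds; the member has no default initializer, so set it explicitly
  conf.logging_type = rgw::bucketlogging::LoggingType::Journal;

  ceph::bufferlist bl;
  conf.encode(bl);               // key_filter is encoded only in Journal mode

  rgw::bucketlogging::configuration out;
  out.enabled = true;            // not encoded; must be set by the caller
  auto it = bl.cbegin();
  out.decode(it);
  // out == conf should now hold, since all encoded fields match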
diff --git a/src/rgw/rgw_cksum_pipe.cc b/src/rgw/rgw_cksum_pipe.cc
index e06957e2715..0bec8d341af 100644
--- a/src/rgw/rgw_cksum_pipe.cc
+++ b/src/rgw/rgw_cksum_pipe.cc
@@ -18,6 +18,7 @@
#include <string>
#include <fmt/format.h>
#include <boost/algorithm/string.hpp>
+#include "rgw_cksum.h"
#include "rgw_common.h"
#include "common/dout.h"
#include "rgw_client_io.h"
@@ -34,7 +35,8 @@ namespace rgw::putobj {
{}
std::unique_ptr<RGWPutObj_Cksum> RGWPutObj_Cksum::Factory(
- rgw::sal::DataProcessor* next, const RGWEnv& env)
+ rgw::sal::DataProcessor* next, const RGWEnv& env,
+ rgw::cksum::Type override_type)
{
/* look for matching headers */
auto algo_header = cksum_algorithm_hdr(env);
@@ -49,6 +51,13 @@ namespace rgw::putobj {
throw rgw::io::Exception(EINVAL, std::system_category());
}
/* no checksum header */
+ if (override_type != rgw::cksum::Type::none) {
+ /* XXXX safe? do we need to fixup env as well? */
+ auto algo_header = cksum_algorithm_hdr(override_type);
+ return
+ std::make_unique<RGWPutObj_Cksum>(
+ next, override_type, std::move(algo_header));
+ }
return std::unique_ptr<RGWPutObj_Cksum>();
}
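A hedged usage sketch of the extended Factory(): passing a non-none override type forces a checksum filter even when the client sent no checksum headers, while rgw::cksum::Type::none preserves the previous behavior (the filter-chain variables here are assumptions):

  // Factory() may throw rgw::io::Exception(EINVAL, ...) on a malformed client-supplied header
  std::unique_ptr<rgw::putobj::RGWPutObj_Cksum> cksum_filter =
    rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env, rgw::cksum::Type::crc32c);
  if (cksum_filter) {
    filter = cksum_filter.get();   // splice the checksum pipe into the put-object filter chain
  }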
diff --git a/src/rgw/rgw_cksum_pipe.h b/src/rgw/rgw_cksum_pipe.h
index fddcd283c84..c459d156335 100644
--- a/src/rgw/rgw_cksum_pipe.h
+++ b/src/rgw/rgw_cksum_pipe.h
@@ -20,6 +20,7 @@
#include <tuple>
#include <cstring>
#include <boost/algorithm/string/case_conv.hpp>
+#include "rgw_cksum.h"
#include "rgw_cksum_digest.h"
#include "rgw_common.h"
#include "rgw_putobj.h"
@@ -29,6 +30,38 @@ namespace rgw::putobj {
namespace cksum = rgw::cksum;
using cksum_hdr_t = std::pair<const char*, const char*>;
+ static inline const cksum_hdr_t cksum_algorithm_hdr(rgw::cksum::Type t) {
+ static constexpr std::string_view hdr =
+ "HTTP_X_AMZ_SDK_CHECKSUM_ALGORITHM";
+ using rgw::cksum::Type;
+ switch (t) {
+ case Type::sha256:
+ return cksum_hdr_t(hdr.data(), "SHA256");
+ break;
+ case Type::crc32:
+ return cksum_hdr_t(hdr.data(), "CRC32");
+ break;
+ case Type::crc32c:
+ return cksum_hdr_t(hdr.data(), "CRC32C");
+ break;
+ case Type::xxh3:
+ return cksum_hdr_t(hdr.data(), "XX3");
+ break;
+ case Type::sha1:
+ return cksum_hdr_t(hdr.data(), "SHA1");
+ break;
+ case Type::sha512:
+ return cksum_hdr_t(hdr.data(), "SHA512");
+ break;
+ case Type::blake3:
+ return cksum_hdr_t(hdr.data(), "BLAKE3");
+ break;
+ default:
+ break;
+ };
+ return cksum_hdr_t(nullptr, nullptr);
+ }
+
static inline const cksum_hdr_t cksum_algorithm_hdr(const RGWEnv& env) {
/* If the individual checksum value you provide through
x-amz-checksum-algorithm doesn't match the checksum algorithm
@@ -102,7 +135,8 @@ namespace rgw::putobj {
using VerifyResult = std::tuple<bool, const cksum::Cksum&>;
static std::unique_ptr<RGWPutObj_Cksum> Factory(
- rgw::sal::DataProcessor* next, const RGWEnv&);
+ rgw::sal::DataProcessor* next, const RGWEnv&,
+ rgw::cksum::Type override_type);
RGWPutObj_Cksum(rgw::sal::DataProcessor* next, rgw::cksum::Type _type,
cksum_hdr_t&& _hdr);
diff --git a/src/rgw/rgw_common.cc b/src/rgw/rgw_common.cc
index 68fb9a29278..6610538542c 100644
--- a/src/rgw/rgw_common.cc
+++ b/src/rgw/rgw_common.cc
@@ -63,6 +63,7 @@ rgw_http_errors rgw_http_s3_errors({
{ ERR_INVALID_DIGEST, {400, "InvalidDigest" }},
{ ERR_BAD_DIGEST, {400, "BadDigest" }},
{ ERR_INVALID_LOCATION_CONSTRAINT, {400, "InvalidLocationConstraint" }},
+ { ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION, {400, "IllegalLocationConstraintException" }},
{ ERR_ZONEGROUP_DEFAULT_PLACEMENT_MISCONFIGURATION, {400, "ZonegroupDefaultPlacementMisconfiguration" }},
{ ERR_INVALID_BUCKET_NAME, {400, "InvalidBucketName" }},
{ ERR_INVALID_OBJECT_NAME, {400, "InvalidObjectName" }},
@@ -2994,7 +2995,9 @@ void RGWAccessKey::decode_json(JSONObj *obj) {
subuser = user.substr(pos + 1);
}
}
- JSONDecoder::decode_json("active", active, obj);
+ if (bool tmp = false; JSONDecoder::decode_json("active", tmp, obj)) {
+ active = tmp; // update only if "active" is present
+ }
JSONDecoder::decode_json("create_date", create_date, obj);
}
@@ -3204,3 +3207,14 @@ void RGWObjVersionTracker::generate_new_write_ver(CephContext *cct)
append_rand_alpha(cct, write_version.tag, write_version.tag, TAG_LEN);
}
+boost::optional<rgw::IAM::Policy>
+get_iam_policy_from_attr(CephContext* cct,
+ const std::map<std::string, bufferlist>& attrs,
+ const std::string& tenant)
+{
+ if (auto i = attrs.find(RGW_ATTR_IAM_POLICY); i != attrs.end()) {
+ return Policy(cct, &tenant, i->second.to_str(), false);
+ } else {
+ return boost::none;
+ }
+}
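A short usage sketch of the newly exported helper (declared in rgw_common.h below); s->cct, s->bucket_attrs and s->bucket_tenant are the usual request-state fields:

  boost::optional<rgw::IAM::Policy> policy =
    get_iam_policy_from_attr(s->cct, s->bucket_attrs, s->bucket_tenant);
  if (policy) {
    // a bucket policy attribute was present and parsed; evaluate it against the request
  }
  // note: the rgw::IAM::Policy constructor may throw on malformed policy JSON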
diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h
index a8f6a1107a9..99f7db4f569 100644
--- a/src/rgw/rgw_common.h
+++ b/src/rgw/rgw_common.h
@@ -107,6 +107,8 @@ using ceph::crypto::MD5;
#define RGW_ATTR_SLO_UINDICATOR RGW_ATTR_META_PREFIX "static-large-object"
#define RGW_ATTR_X_ROBOTS_TAG RGW_ATTR_PREFIX "x-robots-tag"
#define RGW_ATTR_STORAGE_CLASS RGW_ATTR_PREFIX "storage_class"
+#define RGW_ATTR_BUCKET_LOGGING RGW_ATTR_PREFIX "logging"
+#define RGW_ATTR_BUCKET_LOGGING_SOURCES RGW_ATTR_PREFIX "logging-sources"
/* S3 Object Lock*/
#define RGW_ATTR_OBJECT_LOCK RGW_ATTR_PREFIX "object-lock"
@@ -336,6 +338,7 @@ inline constexpr const char* RGW_REST_STS_XMLNS =
#define ERR_PRESIGNED_URL_EXPIRED 2223
#define ERR_PRESIGNED_URL_DISABLED 2224
#define ERR_AUTHORIZATION 2225 // SNS 403 AuthorizationError
+#define ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION 2226
#define ERR_BUSY_RESHARDING 2300 // also in cls_rgw_types.h, don't change!
#define ERR_NO_SUCH_ENTITY 2301
@@ -1746,24 +1749,22 @@ rgw::IAM::Effect evaluate_iam_policies(
bool verify_user_permission(const DoutPrefixProvider* dpp,
req_state * const s,
- const RGWAccessControlPolicy& user_acl,
- const std::vector<rgw::IAM::Policy>& user_policies,
- const std::vector<rgw::IAM::Policy>& session_policies,
- const rgw::ARN& res,
- const uint64_t op,
- bool mandatory_policy=true);
-bool verify_user_permission_no_policy(const DoutPrefixProvider* dpp,
- req_state * const s,
- const RGWAccessControlPolicy& user_acl,
- const int perm);
-bool verify_user_permission(const DoutPrefixProvider* dpp,
- req_state * const s,
const rgw::ARN& res,
const uint64_t op,
bool mandatory_policy=true);
bool verify_user_permission_no_policy(const DoutPrefixProvider* dpp,
req_state * const s,
int perm);
+bool verify_bucket_permission(const DoutPrefixProvider* dpp,
+ struct perm_state_base * const s,
+ const rgw::ARN& arn,
+ bool account_root,
+ const RGWAccessControlPolicy& user_acl,
+ const RGWAccessControlPolicy& bucket_acl,
+ const boost::optional<rgw::IAM::Policy>& bucket_policy,
+ const std::vector<rgw::IAM::Policy>& identity_policies,
+ const std::vector<rgw::IAM::Policy>& session_policies,
+ const uint64_t op);
bool verify_bucket_permission(
const DoutPrefixProvider* dpp,
req_state * const s,
@@ -2011,3 +2012,8 @@ struct AioCompletionDeleter {
void operator()(librados::AioCompletion* c) { c->release(); }
};
using aio_completion_ptr = std::unique_ptr<librados::AioCompletion, AioCompletionDeleter>;
+
+extern boost::optional<rgw::IAM::Policy>
+get_iam_policy_from_attr(CephContext* cct,
+ const std::map<std::string, bufferlist>& attrs,
+ const std::string& tenant);
diff --git a/src/rgw/rgw_file_int.h b/src/rgw/rgw_file_int.h
index 0a1db645207..84eff1e252e 100644
--- a/src/rgw/rgw_file_int.h
+++ b/src/rgw/rgw_file_int.h
@@ -2298,6 +2298,8 @@ public:
std::string uri;
std::map<std::string, buffer::list> attrs;
RGWLibFS::BucketStats& bs;
+ real_time ctime;
+ bool name_matched = false;
RGWStatBucketRequest(CephContext* _cct, std::unique_ptr<rgw::sal::User> _user,
const std::string& _path,
@@ -2312,9 +2314,7 @@ public:
return (iter != attrs.end()) ? &(iter->second) : nullptr;
}
- real_time get_ctime() const {
- return bucket->get_creation_time();
- }
+ real_time get_ctime() { return ctime; }
bool only_bucket() override { return false; }
@@ -2342,22 +2342,26 @@ public:
return 0;
}
- virtual int get_params() {
- return 0;
+ int get_params(optional_yield) override { return 0; }
+
+ void complete() override {
+ // get_state() will no longer be there after execute_req()
+ // so save what we need from get_state()->bucket
+ ctime = get_state()->bucket->get_creation_time();
+ name_matched = get_state()->bucket->get_name().length() > 0;
+
+ RGWOp::complete();
}
void send_response() override {
- bucket->get_creation_time() = get_state()->bucket->get_info().creation_time;
bs.size = stats.size;
bs.size_rounded = stats.size_rounded;
- bs.creation_time = bucket->get_creation_time();
+ bs.creation_time = get_state()->bucket->get_info().creation_time;
bs.num_entries = stats.num_objects;
std::swap(attrs, get_state()->bucket_attrs);
}
- bool matched() {
- return (bucket->get_name().length() > 0);
- }
+ bool matched() { return name_matched; }
}; /* RGWStatBucketRequest */
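
The RGWStatBucketRequest rework above caches the creation time and a name-matched flag inside complete(), because get_state() is gone once execute_req() returns. A generic sketch of that save-before-teardown pattern; the classes are simplified stand-ins, not the librgw types.

  #include <chrono>
  #include <iostream>
  #include <memory>
  #include <string>

  using real_time = std::chrono::system_clock::time_point;

  struct BucketState {            // stand-in for get_state()->bucket
    std::string name;
    real_time creation_time;
  };

  class StatBucketRequest {
    std::unique_ptr<BucketState> state;   // torn down when the request finishes
    real_time ctime{};                    // cached copies survive the request
    bool name_matched = false;
  public:
    explicit StatBucketRequest(std::unique_ptr<BucketState> s) : state(std::move(s)) {}

    void complete() {
      // state will no longer be there after execution, so save what we need now
      ctime = state->creation_time;
      name_matched = !state->name.empty();
      state.reset();
    }

    real_time get_ctime() const { return ctime; }
    bool matched() const { return name_matched; }
  };

  int main() {
    StatBucketRequest req(std::make_unique<BucketState>(
        BucketState{"mybucket", std::chrono::system_clock::now()}));
    req.complete();
    std::cout << std::boolalpha << req.matched() << '\n';  // true
  }
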
diff --git a/src/rgw/rgw_iam_policy.cc b/src/rgw/rgw_iam_policy.cc
index ce76ed4c3c3..ef6761d4222 100644
--- a/src/rgw/rgw_iam_policy.cc
+++ b/src/rgw/rgw_iam_policy.cc
@@ -94,6 +94,8 @@ static const actpair actpairs[] =
{ "s3:GetPublicAccessBlock", s3GetPublicAccessBlock },
{ "s3:GetObjectAcl", s3GetObjectAcl },
{ "s3:GetObject", s3GetObject },
+ { "s3:GetObjectAttributes", s3GetObjectAttributes },
+ { "s3:GetObjectVersionAttributes", s3GetObjectVersionAttributes },
{ "s3:GetObjectTorrent", s3GetObjectTorrent },
{ "s3:GetObjectVersionAcl", s3GetObjectVersionAcl },
{ "s3:GetObjectVersion", s3GetObjectVersion },
@@ -113,6 +115,7 @@ static const actpair actpairs[] =
{ "s3:PutBucketCORS", s3PutBucketCORS },
{ "s3:PutBucketEncryption", s3PutBucketEncryption },
{ "s3:PutBucketLogging", s3PutBucketLogging },
+ { "s3:PostBucketLogging", s3PostBucketLogging },
{ "s3:PutBucketNotification", s3PutBucketNotification },
{ "s3:PutBucketOwnershipControls", s3PutBucketOwnershipControls },
{ "s3:PutBucketPolicy", s3PutBucketPolicy },
@@ -1334,6 +1337,7 @@ const char* action_bit_string(uint64_t action) {
case s3ListBucketVersions:
return "s3:ListBucketVersions";
+
case s3ListAllMyBuckets:
return "s3:ListAllMyBuckets";
@@ -1406,6 +1410,9 @@ const char* action_bit_string(uint64_t action) {
case s3PutBucketLogging:
return "s3:PutBucketLogging";
+ case s3PostBucketLogging:
+ return "s3:PostBucketLogging";
+
case s3GetBucketTagging:
return "s3:GetBucketTagging";
@@ -1475,6 +1482,12 @@ const char* action_bit_string(uint64_t action) {
case s3BypassGovernanceRetention:
return "s3:BypassGovernanceRetention";
+ case s3GetObjectAttributes:
+ return "s3:GetObjectAttributes";
+
+ case s3GetObjectVersionAttributes:
+ return "s3:GetObjectVersionAttributes";
+
case s3DescribeJob:
return "s3:DescribeJob";
diff --git a/src/rgw/rgw_iam_policy.h b/src/rgw/rgw_iam_policy.h
index 1494cbf0b81..dd323ee4b9c 100644
--- a/src/rgw/rgw_iam_policy.h
+++ b/src/rgw/rgw_iam_policy.h
@@ -81,6 +81,7 @@ enum {
s3PutBucketNotification,
s3GetBucketLogging,
s3PutBucketLogging,
+ s3PostBucketLogging,
s3GetBucketTagging,
s3PutBucketTagging,
s3GetBucketWebsite,
@@ -114,6 +115,8 @@ enum {
s3GetBucketEncryption,
s3PutBucketEncryption,
s3DescribeJob,
+ s3GetObjectAttributes,
+ s3GetObjectVersionAttributes,
s3All,
s3objectlambdaGetObject,
@@ -246,6 +249,8 @@ inline int op_to_perm(std::uint64_t op) {
case s3GetObjectVersionTagging:
case s3GetObjectRetention:
case s3GetObjectLegalHold:
+ case s3GetObjectAttributes:
+ case s3GetObjectVersionAttributes:
case s3ListAllMyBuckets:
case s3ListBucket:
case s3ListBucketMultipartUploads:
@@ -298,6 +303,7 @@ inline int op_to_perm(std::uint64_t op) {
case s3PutBucketCORS:
case s3PutBucketEncryption:
case s3PutBucketLogging:
+ case s3PostBucketLogging:
case s3PutBucketNotification:
case s3PutBucketPolicy:
case s3PutBucketRequestPayment:
diff --git a/src/rgw/rgw_kafka.cc b/src/rgw/rgw_kafka.cc
index c0ec3dc2c55..b38b1a78ec4 100644
--- a/src/rgw/rgw_kafka.cc
+++ b/src/rgw/rgw_kafka.cc
@@ -13,6 +13,7 @@
#include <thread>
#include <atomic>
#include <mutex>
+#include <boost/algorithm/string.hpp>
#include <boost/functional/hash.hpp>
#include <boost/lockfree/queue.hpp>
#include "common/dout.h"
@@ -503,6 +504,7 @@ private:
}
void run() noexcept {
+ ceph_pthread_setname("kafka_manager");
while (!stopped) {
// publish all messages in the queue
@@ -575,12 +577,6 @@ public:
// This is to prevent rehashing so that iterators are not invalidated
// when a new connection is added.
connections.max_load_factor(10.0);
- // give the runner thread a name for easier debugging
- const char* thread_name = "kafka_manager";
- if (const auto rc = ceph_pthread_setname(runner.native_handle(), thread_name); rc != 0) {
- ldout(cct, 1) << "ERROR: failed to set kafka manager thread name to: " << thread_name
- << ". error: " << rc << dendl;
- }
}
// non copyable
@@ -600,7 +596,8 @@ public:
boost::optional<const std::string&> ca_location,
boost::optional<const std::string&> mechanism,
boost::optional<const std::string&> topic_user_name,
- boost::optional<const std::string&> topic_password) {
+ boost::optional<const std::string&> topic_password,
+ boost::optional<const std::string&> brokers) {
if (stopped) {
ldout(cct, 1) << "Kafka connect: manager is stopped" << dendl;
return false;
@@ -608,8 +605,8 @@ public:
std::string user;
std::string password;
- std::string broker;
- if (!parse_url_authority(url, broker, user, password)) {
+ std::string broker_list;
+ if (!parse_url_authority(url, broker_list, user, password)) {
// TODO: increment counter
ldout(cct, 1) << "Kafka connect: URL parsing failed" << dendl;
return false;
@@ -637,7 +634,13 @@ public:
ldout(cct, 1) << "Kafka connect: user/password are only allowed over secure connection" << dendl;
return false;
}
- connection_id_t tmp_id(broker, user, password, ca_location, mechanism,
+
+ if (brokers.has_value()) {
+ broker_list.append(",");
+ broker_list.append(brokers.get());
+ }
+
+ connection_id_t tmp_id(broker_list, user, password, ca_location, mechanism,
use_ssl);
std::lock_guard lock(connections_lock);
const auto it = connections.find(tmp_id);
@@ -657,7 +660,7 @@ public:
return false;
}
- auto conn = std::make_unique<connection_t>(cct, broker, use_ssl, verify_ssl, ca_location, user, password, mechanism);
+ auto conn = std::make_unique<connection_t>(cct, broker_list, use_ssl, verify_ssl, ca_location, user, password, mechanism);
if (!new_producer(conn.get())) {
ldout(cct, 10) << "Kafka connect: producer creation failed in new connection" << dendl;
return false;
@@ -775,11 +778,12 @@ bool connect(connection_id_t& conn_id,
boost::optional<const std::string&> ca_location,
boost::optional<const std::string&> mechanism,
boost::optional<const std::string&> user_name,
- boost::optional<const std::string&> password) {
+ boost::optional<const std::string&> password,
+ boost::optional<const std::string&> brokers) {
std::shared_lock lock(s_manager_mutex);
if (!s_manager) return false;
return s_manager->connect(conn_id, url, use_ssl, verify_ssl, ca_location,
- mechanism, user_name, password);
+ mechanism, user_name, password, brokers);
}
int publish(const connection_id_t& conn_id,
diff --git a/src/rgw/rgw_kafka.h b/src/rgw/rgw_kafka.h
index b7aa0d15759..858b185219f 100644
--- a/src/rgw/rgw_kafka.h
+++ b/src/rgw/rgw_kafka.h
@@ -48,7 +48,8 @@ bool connect(connection_id_t& conn_id,
boost::optional<const std::string&> ca_location,
boost::optional<const std::string&> mechanism,
boost::optional<const std::string&> user_name,
- boost::optional<const std::string&> password);
+ boost::optional<const std::string&> password,
+ boost::optional<const std::string&> brokers);
// publish a message over a connection that was already created
int publish(const connection_id_t& conn_id,
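
The new optional `brokers` parameter lets a topic carry extra bootstrap brokers that get appended, comma separated, to the broker parsed out of the endpoint URL. A hedged sketch of just that string handling; parse_url_authority, the SSL checks and the connection map are omitted, and the function name is illustrative.

  #include <iostream>
  #include <optional>
  #include <string>

  // Build a comma-separated broker list from the URL-derived broker plus an
  // optional user-supplied list of additional brokers.
  std::string make_broker_list(std::string url_broker,
                               const std::optional<std::string>& extra_brokers) {
    std::string broker_list = std::move(url_broker);
    if (extra_brokers && !extra_brokers->empty()) {
      broker_list.append(",");
      broker_list.append(*extra_brokers);
    }
    return broker_list;
  }

  int main() {
    std::cout << make_broker_list("kafka1:9092", std::nullopt) << '\n';
    // kafka1:9092
    std::cout << make_broker_list("kafka1:9092",
                                  std::string("kafka2:9092,kafka3:9092")) << '\n';
    // kafka1:9092,kafka2:9092,kafka3:9092
  }
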
diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc
index a7f2ceabad3..c9fb4765d59 100644
--- a/src/rgw/rgw_lc.cc
+++ b/src/rgw/rgw_lc.cc
@@ -1183,7 +1183,7 @@ public:
<< " " << oc.wq->thr_name() << dendl;
} else {
/* ! o.is_delete_marker() */
- r = remove_expired_obj(oc.dpp, oc, !oc.bucket->versioned(),
+ r = remove_expired_obj(oc.dpp, oc, !oc.bucket->versioning_enabled(),
{rgw::notify::ObjectExpirationCurrent,
rgw::notify::LifecycleExpirationDelete});
if (r < 0) {
diff --git a/src/rgw/rgw_lua_background.cc b/src/rgw/rgw_lua_background.cc
index ef97a5d6f65..c5b815f93f5 100644
--- a/src/rgw/rgw_lua_background.cc
+++ b/src/rgw/rgw_lua_background.cc
@@ -83,11 +83,6 @@ void Background::start() {
}
started = true;
runner = std::thread(&Background::run, this);
- const char* thread_name = "lua_background";
- if (const auto rc = ceph_pthread_setname(runner.native_handle(), thread_name); rc != 0) {
- ldout(cct, 1) << "ERROR: failed to set lua background thread name to: " << thread_name
- << ". error: " << rc << dendl;
- }
}
void Background::pause() {
@@ -127,6 +122,7 @@ const BackgroundMapValue& Background::get_table_value(const std::string& key) co
//(2) Executes the script
//(3) Sleep (configurable)
void Background::run() {
+ ceph_pthread_setname("lua_background");
const DoutPrefixProvider* const dpp = &dp;
lua_state_guard lguard(cct->_conf->rgw_lua_max_memory_per_state, dpp);
auto L = lguard.get();
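
This hunk, like the kafka manager and ratelimit changes elsewhere in the diff, names the thread from inside its own body instead of calling ceph_pthread_setname on the runner's native handle at spawn time. On Linux the same effect can be sketched with pthread_setname_np on the current thread; this illustrates the pattern, it is not the Ceph wrapper itself.

  #include <pthread.h>
  #include <chrono>
  #include <thread>

  void run() {
    // Name the current thread from inside its body; on Linux the name is
    // limited to 15 characters plus the terminating NUL.
    pthread_setname_np(pthread_self(), "lua_background");
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    // ... main loop would go here ...
  }

  int main() {
    std::thread runner(run);   // no naming needed at the spawn site anymore
    runner.join();
  }
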
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 67829e6320a..7b0ca3134a3 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -25,8 +25,10 @@
#include "common/ceph_json.h"
#include "common/static_ptr.h"
#include "common/perf_counters_key.h"
+#include "rgw_cksum.h"
#include "rgw_cksum_digest.h"
#include "rgw_common.h"
+#include "common/split.h"
#include "rgw_tracer.h"
#include "rgw_rados.h"
@@ -64,6 +66,7 @@
#include "rgw_lua.h"
#include "rgw_iam_managed_policy.h"
#include "rgw_bucket_sync.h"
+#include "rgw_bucket_logging.h"
#include "services/svc_zone.h"
#include "services/svc_quota.h"
@@ -148,7 +151,7 @@ int rgw_forward_request_to_master(const DoutPrefixProvider* dpp,
// use the master zone's endpoints
auto conn = RGWRESTConn{dpp->get_cct(), z->second.id, z->second.endpoints,
- creds, zg->second.id, zg->second.api_name};
+ creds, site.get_zonegroup().id, zg->second.api_name};
bufferlist outdata;
constexpr size_t max_response_size = 128 * 1024; // we expect a very small response
int ret = conn.forward(dpp, effective_owner, req, nullptr,
@@ -330,19 +333,6 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp,
return ret;
}
-
-static boost::optional<Policy>
-get_iam_policy_from_attr(CephContext* cct,
- const map<string, bufferlist>& attrs,
- const string& tenant)
-{
- if (auto i = attrs.find(RGW_ATTR_IAM_POLICY); i != attrs.end()) {
- return Policy(cct, &tenant, i->second.to_str(), false);
- } else {
- return none;
- }
-}
-
static boost::optional<PublicAccessBlockConfiguration>
get_public_access_conf_from_attr(const map<string, bufferlist>& attrs)
{
@@ -757,7 +747,7 @@ static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s, r
return 0;
}
-static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s) {
+int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s) {
return rgw_iam_add_buckettags(dpp, s, s->bucket.get());
}
@@ -830,7 +820,7 @@ static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvide
return make_tuple(has_existing_obj_tag, has_resource_tag);
}
-static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp, req_state* s, bool check_obj_exist_tag=true) {
+std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp, req_state* s, bool check_obj_exist_tag) {
return rgw_check_policy_condition(dpp, s->iam_policy, s->iam_identity_policies, s->session_policies, check_obj_exist_tag);
}
@@ -943,6 +933,17 @@ void handle_replication_status_header(
/*
* GET on CloudTiered objects either it will synced to other zones.
* In all other cases, it will try to fetch the object from remote cloud endpoint.
+ *
+ * @return:
+ * Note - the return status may differ depending on whether this is a RESTORE
+ * op or a READTHROUGH/GET op.
+ * For example, ERR_INVALID_OBJECT_STATE is sent for non cloud-transitioned
+ * objects in case of a restore op, and ERR_REQUEST_TIMEOUT is applicable only
+ * for read-through, etc.
+ * `<0` : failed to process; s->err.message & op_ret set accordingly
+ * `0` : restore request initiated
+ * `1` : restore is already in progress
+ * `2` : already restored
*/
int handle_cloudtier_obj(req_state* s, const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
rgw::sal::Attrs& attrs, bool sync_cloudtiered, std::optional<uint64_t> days,
@@ -1051,12 +1052,17 @@ int handle_cloudtier_obj(req_state* s, const DoutPrefixProvider *dpp, rgw::sal::
s->err.message = "restore is still in progress";
}
return op_ret;
- } else if ((!restore_op) && (restore_status == rgw::sal::RGWRestoreStatus::RestoreAlreadyInProgress)) {
- op_ret = -ERR_REQUEST_TIMEOUT;
- ldpp_dout(dpp, 5) << "restore is still in progress, please check restore status and retry" << dendl;
- s->err.message = "restore is still in progress";
- } else { // CloudRestored..return success
- return 0;
+ } else if (restore_status == rgw::sal::RGWRestoreStatus::RestoreAlreadyInProgress) {
+ if (!restore_op) {
+ op_ret = -ERR_REQUEST_TIMEOUT;
+ ldpp_dout(dpp, 5) << "restore is still in progress, please check restore status and retry" << dendl;
+ s->err.message = "restore is still in progress";
+ return op_ret;
+ } else {
+ return 1; // for restore-op, corresponds to RESTORE_ALREADY_IN_PROGRESS
+ }
+ } else {
+ return 2; // corresponds to CLOUD_RESTORED
}
} catch (const buffer::end_of_buffer&) {
//empty manifest; it's not cloud-tiered
@@ -1333,9 +1339,9 @@ void RGWDeleteBucketTags::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
- rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_TAGS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ op_ret = s->bucket->put_info(this, false, real_time(), y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
<< s->bucket->get_name()
@@ -2338,6 +2344,7 @@ void RGWGetObj::execute(optional_yield y)
rgw::op_counters::inc(counters, l_rgw_op_get_obj, 1);
std::unique_ptr<rgw::sal::Object::ReadOp> read_op(s->object->get_read_op());
+ std::string etag;
op_ret = get_params(y);
if (op_ret < 0)
@@ -3117,17 +3124,19 @@ static int load_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y,
void RGWStatBucket::execute(optional_yield y)
{
- if (!s->bucket_exists) {
- op_ret = -ERR_NO_SUCH_BUCKET;
+ op_ret = get_params(y);
+ if (op_ret < 0) {
return;
}
- op_ret = driver->load_bucket(this, s->bucket->get_key(), &bucket, y);
- if (op_ret) {
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
return;
}
- op_ret = load_bucket_stats(this, y, *s->bucket, stats);
+ if (report_stats) {
+ op_ret = load_bucket_stats(this, y, *s->bucket, stats);
+ }
}
int RGWListBucket::verify_permission(optional_yield y)
@@ -3220,19 +3229,6 @@ void RGWListBucket::execute(optional_yield y)
rgw::op_counters::tinc(counters, l_rgw_op_list_obj_lat, s->time_elapsed());
}
-int RGWGetBucketLogging::verify_permission(optional_yield y)
-{
- auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
- if (has_s3_resource_tag)
- rgw_iam_add_buckettags(this, s);
-
- if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketLogging)) {
- return -EACCES;
- }
-
- return 0;
-}
-
int RGWGetBucketLocation::verify_permission(optional_yield y)
{
auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
@@ -3564,54 +3560,62 @@ void RGWCreateBucket::execute(optional_yield y)
const rgw::SiteConfig& site = *s->penv.site;
const std::optional<RGWPeriod>& period = site.get_period();
const RGWZoneGroup& my_zonegroup = site.get_zonegroup();
-
- if (s->system_request) {
- // allow system requests to override the target zonegroup. for forwarded
- // requests, we'll create the bucket for the originating zonegroup
- createparams.zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
- }
-
+ const std::string rgwx_zonegroup = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
const RGWZoneGroup* bucket_zonegroup = &my_zonegroup;
- if (createparams.zonegroup_id.empty()) {
- // default to the local zonegroup
- createparams.zonegroup_id = my_zonegroup.id;
- } else if (period) {
- auto z = period->period_map.zonegroups.find(createparams.zonegroup_id);
- if (z == period->period_map.zonegroups.end()) {
- ldpp_dout(this, 0) << "could not find zonegroup "
- << createparams.zonegroup_id << " in current period" << dendl;
- op_ret = -ENOENT;
- return;
- }
- bucket_zonegroup = &z->second;
- } else if (createparams.zonegroup_id != my_zonegroup.id) {
- ldpp_dout(this, 0) << "zonegroup does not match current zonegroup "
- << createparams.zonegroup_id << dendl;
- op_ret = -ENOENT;
- return;
- }
- // validate the LocationConstraint
+ // Validate LocationConstraint if it's provided and enforcement is strict
if (!location_constraint.empty() && !relaxed_region_enforcement) {
- // on the master zonegroup, allow any valid api_name. otherwise it has to
- // match the bucket's zonegroup
- if (period && my_zonegroup.is_master) {
- if (!period->period_map.zonegroups_by_api.count(location_constraint)) {
+ if (period) {
+ auto location_iter = period->period_map.zonegroups_by_api.find(location_constraint);
+ if (location_iter == period->period_map.zonegroups_by_api.end()) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint
<< ") can't be found." << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
- s->err.message = "The specified location-constraint is not valid";
+ s->err.message = fmt::format("The {} location constraint is not valid.",
+ location_constraint);
return;
}
- } else if (bucket_zonegroup->api_name != location_constraint) {
+ bucket_zonegroup = &location_iter->second;
+ } else if (location_constraint != my_zonegroup.api_name) { // if we don't have a period, we can only use the current zonegroup - so check if the location matches by api name here
ldpp_dout(this, 0) << "location constraint (" << location_constraint
- << ") doesn't match zonegroup (" << bucket_zonegroup->api_name
- << ')' << dendl;
- op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
- s->err.message = "The specified location-constraint is not valid";
+ << ") doesn't match zonegroup (" << my_zonegroup.api_name << ")" << dendl;
+ op_ret = -ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION;
+ s->err.message = fmt::format("The {} location constraint is incompatible "
+ "for the region specific endpoint this request was sent to.",
+ location_constraint);
return;
}
}
+ // If it's a system request, use the provided zonegroup if available
+ else if (s->system_request && !rgwx_zonegroup.empty()) {
+ if (period) {
+ auto zonegroup_iter = period->period_map.zonegroups.find(rgwx_zonegroup);
+ if (zonegroup_iter == period->period_map.zonegroups.end()) {
+ ldpp_dout(this, 0) << "could not find zonegroup " << rgwx_zonegroup
+ << " in current period" << dendl;
+ op_ret = -ENOENT;
+ return;
+ }
+ bucket_zonegroup = &zonegroup_iter->second;
+ }
+ }
+
+ const bool enforce_location_match =
+ !period || // No period: no multisite, so no need to enforce location match.
+ !s->system_request || // All user requests are enforced to match zonegroup's location.
+    !my_zonegroup.is_master; // But if it's a system request (forwarded), only allow remote creation on the master zonegroup.
+ if (enforce_location_match && !my_zonegroup.equals(bucket_zonegroup->get_id())) {
+ ldpp_dout(this, 0) << "location constraint (" << bucket_zonegroup->api_name
+ << ") doesn't match zonegroup (" << my_zonegroup.api_name << ")" << dendl;
+ op_ret = -ERR_ILLEGAL_LOCATION_CONSTRAINT_EXCEPTION;
+ s->err.message = fmt::format("The {} location constraint is incompatible "
+ "for the region specific endpoint this request was sent to.",
+ bucket_zonegroup->api_name);
+ return;
+ }
+
+ // Set the final zonegroup ID
+ createparams.zonegroup_id = bucket_zonegroup->id;
// select and validate the placement target
op_ret = select_bucket_placement(this, *bucket_zonegroup, s->user->get_info(),
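
The rewritten CreateBucket flow reduces the zonegroup check to one predicate: enforce the location match unless the request is a forwarded system request arriving at the master zonegroup of a period-backed (multisite) deployment. A truth-table sketch of that predicate, with plain bools standing in for the period and zonegroup objects.

  #include <iostream>

  // Mirrors the enforce_location_match expression: enforcement is skipped only
  // when all three conditions hold: we have a period (multisite), the request
  // was forwarded by another zone (system request), and this is the master
  // zonegroup.
  bool enforce_location_match(bool has_period, bool system_request, bool is_master) {
    return !has_period || !system_request || !is_master;
  }

  int main() {
    for (int p = 0; p <= 1; ++p)
      for (int s = 0; s <= 1; ++s)
        for (int m = 0; m <= 1; ++m)
          std::cout << "period=" << p << " system=" << s << " master=" << m
                    << " -> enforce=" << enforce_location_match(p, s, m) << '\n';
  }
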
@@ -3620,7 +3624,7 @@ void RGWCreateBucket::execute(optional_yield y)
return;
}
- if (bucket_zonegroup == &my_zonegroup) {
+ if (my_zonegroup.equals(bucket_zonegroup->get_id())) {
// look up the zone placement pool
createparams.zone_placement = rgw::find_zone_placement(
this, site.get_zone_params(), createparams.placement_rule);
@@ -3709,7 +3713,6 @@ void RGWCreateBucket::execute(optional_yield y)
if (!driver->is_meta_master()) {
// apply bucket creation on the master zone first
- bufferlist in_data;
JSONParser jp;
op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, &jp, s->info, y);
@@ -3786,7 +3789,10 @@ void RGWCreateBucket::execute(optional_yield y)
s->bucket->get_info().has_website = !s->bucket->get_info().website_conf.is_empty();
/* This will also set the quota on the bucket. */
- op_ret = s->bucket->merge_and_store_attrs(this, createparams.attrs, y);
+ s->bucket->set_attrs(std::move(createparams.attrs));
+ constexpr bool exclusive = false; // overwrite
+ constexpr ceph::real_time no_set_mtime{};
+ op_ret = s->bucket->put_info(this, exclusive, no_set_mtime, y);
} while (op_ret == -ECANCELED && tries++ < 20);
/* Restore the proper return code. */
@@ -4337,6 +4343,9 @@ void RGWPutObj::execute(optional_yield y)
}
return;
}
+
+ multipart_cksum_type = upload->cksum_type;
+
/* upload will go out of scope, so copy the dest placement for later use */
s->dest_placement = *pdest_placement;
pdest_placement = &s->dest_placement;
@@ -4467,11 +4476,12 @@ void RGWPutObj::execute(optional_yield y)
/* optional streaming checksum */
try {
cksum_filter =
- rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env);
+ rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env, multipart_cksum_type);
} catch (const rgw::io::Exception& e) {
op_ret = -e.code().value();
return;
}
+
if (cksum_filter) {
filter = &*cksum_filter;
}
@@ -4618,10 +4628,12 @@ void RGWPutObj::execute(optional_yield y)
if (cksum_filter) {
const auto& hdr = cksum_filter->header();
+ auto expected_ck = cksum_filter->expected(*s->info.env);
auto cksum_verify =
cksum_filter->verify(*s->info.env); // valid or no supplied cksum
cksum = get<1>(cksum_verify);
- if (std::get<0>(cksum_verify)) {
+ if ((!expected_ck) ||
+ std::get<0>(cksum_verify)) {
buffer::list cksum_bl;
ldpp_dout_fmt(this, 16,
@@ -4629,14 +4641,13 @@ void RGWPutObj::execute(optional_yield y)
"\n\tcomputed={} == \n\texpected={}",
hdr.second,
cksum->to_armor(),
- cksum_filter->expected(*s->info.env));
+ (!!expected_ck) ? expected_ck : "(checksum unavailable)");
cksum->encode(cksum_bl);
emplace_attr(RGW_ATTR_CKSUM, std::move(cksum_bl));
} else {
/* content checksum mismatch */
auto computed_ck = cksum->to_armor();
- auto expected_ck = cksum_filter->expected(*s->info.env);
ldpp_dout_fmt(this, 4,
"{} content checksum mismatch"
@@ -4677,6 +4688,13 @@ void RGWPutObj::execute(optional_yield y)
obj_retention->encode(obj_retention_bl);
emplace_attr(RGW_ATTR_OBJECT_RETENTION, std::move(obj_retention_bl));
}
+
+ if (!multipart) {
+ op_ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), etag, s->object->get_size(), this, y, false, false);
+ if (op_ret < 0) {
+ return;
+ }
+ }
// don't track the individual parts of multipart uploads. they replicate in
// full after CompleteMultipart
@@ -4832,7 +4850,8 @@ void RGWPostObj::execute(optional_yield y)
/* optional streaming checksum */
try {
cksum_filter =
- rgw::putobj::RGWPutObj_Cksum::Factory(filter, *s->info.env);
+ rgw::putobj::RGWPutObj_Cksum::Factory(
+ filter, *s->info.env, rgw::cksum::Type::none /* no override */);
} catch (const rgw::io::Exception& e) {
op_ret = -e.code().value();
return;
@@ -5180,7 +5199,10 @@ void RGWPutMetadataBucket::execute(optional_yield y)
/* Setting attributes also stores the provided bucket info. Due
* to this fact, the new quota settings can be serialized with
* the same call. */
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ s->bucket->set_attrs(attrs);
+ constexpr bool exclusive = false; // overwrite
+ constexpr ceph::real_time no_set_mtime{};
+ op_ret = s->bucket->put_info(this, exclusive, no_set_mtime, s->yield);
return op_ret;
}, y);
}
@@ -5282,33 +5304,14 @@ void RGWRestoreObj::execute(optional_yield y)
int op_ret = s->object->get_obj_attrs(y, this);
if (op_ret < 0) {
ldpp_dout(this, 1) << "failed to fetch get_obj_attrs op ret = " << op_ret << dendl;
+ restore_ret = op_ret;
return;
}
- rgw::sal::Attrs attrs = s->object->get_attrs();
- auto attr_iter = attrs.find(RGW_ATTR_MANIFEST);
- if (attr_iter != attrs.end()) {
- RGWObjManifest m;
- decode(m, attr_iter->second);
- RGWObjTier tier_config;
- m.get_tier_config(&tier_config);
- if (m.get_tier_type() == "cloud-s3") {
- ldpp_dout(this, 20) << "execute: expiry days" << expiry_days <<dendl;
- op_ret = handle_cloudtier_obj(s, this, driver, attrs, false, expiry_days, true, y);
- if (op_ret < 0) {
- ldpp_dout(this, 4) << "Cannot get cloud tiered object: " << *s->object
- <<". Failing with " << op_ret << dendl;
- if (op_ret == -ERR_INVALID_OBJECT_STATE) {
- s->err.message = "This object was transitioned to cloud-s3";
- }
- }
- } else {
- ldpp_dout(this, 20) << "not cloud tier object erroring" << dendl;
- op_ret = -ERR_INVALID_OBJECT_STATE;
- }
- } else {
- ldpp_dout(this, 20) << " manifest not found" << dendl;
- }
- ldpp_dout(this, 20) << "completed restore" << dendl;
+  rgw::sal::Attrs attrs = s->object->get_attrs();
+ op_ret = handle_cloudtier_obj(s, this, driver, attrs, false, expiry_days, true, y);
+ restore_ret = op_ret;
+ ldpp_dout(this, 20) << "Restore completed of object: " << *s->object << "with op ret: " << restore_ret <<dendl;
return;
}
@@ -5539,6 +5542,13 @@ void RGWDeleteObj::execute(optional_yield y)
}
}
+ if (op_ret == 0) {
+ if (auto ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), etag, obj_size, this, y, false, false); ret < 0) {
+ // don't reply with an error in case of failed delete logging
+ ldpp_dout(this, 5) << "WARNING: DELETE operation ignores bucket logging failure: " << ret << dendl;
+ }
+ }
+
if (op_ret == -ECANCELED) {
op_ret = 0;
}
@@ -5883,6 +5893,12 @@ void RGWCopyObj::execute(optional_yield y)
return;
}
+ etag = s->src_object->get_attrs()[RGW_ATTR_ETAG].to_str();
+ op_ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), etag, obj_size, this, y, false, false);
+ if (op_ret < 0) {
+ return;
+ }
+
op_ret = s->src_object->copy_object(s->owner,
s->user->get_id(),
&s->info,
@@ -5911,12 +5927,17 @@ void RGWCopyObj::execute(optional_yield y)
this,
s->yield);
+ int ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Standard, s->src_object.get(), s, "REST.COPY.OBJECT_GET", etag, obj_size, this, y, true, true);
+ if (ret < 0) {
+ ldpp_dout(this, 5) << "WARNING: COPY operation ignores bucket logging failure of the GET part: " << ret << dendl;
+ }
+
if (op_ret < 0) {
return;
}
// send request to notification manager
- int ret = res->publish_commit(this, obj_size, mtime, etag, s->object->get_instance());
+ ret = res->publish_commit(this, obj_size, mtime, etag, s->object->get_instance());
if (ret < 0) {
ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
// too late to rollback operation, hence op_ret is not set here
@@ -5969,8 +5990,6 @@ void RGWGetACLs::execute(optional_yield y)
acls = ss.str();
}
-
-
int RGWPutACLs::verify_permission(optional_yield y)
{
bool perm;
@@ -5992,6 +6011,74 @@ int RGWPutACLs::verify_permission(optional_yield y)
return 0;
}
+uint16_t RGWGetObjAttrs::recognize_attrs(const std::string& hdr, uint16_t deflt)
+{
+ auto attrs{deflt};
+ auto sa = ceph::split(hdr, ",");
+ for (auto& k : sa) {
+ if (boost::iequals(k, "etag")) {
+ attrs |= as_flag(ReqAttributes::Etag);
+ }
+ if (boost::iequals(k, "checksum")) {
+ attrs |= as_flag(ReqAttributes::Checksum);
+ }
+ if (boost::iequals(k, "objectparts")) {
+ attrs |= as_flag(ReqAttributes::ObjectParts);
+ }
+ if (boost::iequals(k, "objectsize")) {
+ attrs |= as_flag(ReqAttributes::ObjectSize);
+ }
+ if (boost::iequals(k, "storageclass")) {
+ attrs |= as_flag(ReqAttributes::StorageClass);
+ }
+ }
+ return attrs;
+} /* RGWGetObjAttrs::recognize_attrs */
+
+int RGWGetObjAttrs::verify_permission(optional_yield y)
+{
+ bool perm = false;
+ auto [has_s3_existing_tag, has_s3_resource_tag] =
+ rgw_check_policy_condition(this, s);
+
+ if (! rgw::sal::Object::empty(s->object.get())) {
+
+ auto iam_action1 = s->object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion;
+
+ auto iam_action2 = s->object->get_instance().empty() ?
+ rgw::IAM::s3GetObjectAttributes :
+ rgw::IAM::s3GetObjectVersionAttributes;
+
+ if (has_s3_existing_tag || has_s3_resource_tag) {
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
+ }
+
+    /* XXXX the following conjunction should be &&, but iam_action2 is currently not
+     * hooked up and always fails (though it should succeed if the requestor has READ
+     * access to the object) */
+ perm = (verify_object_permission(this, s, iam_action1) || /* && */
+ verify_object_permission(this, s, iam_action2));
+ }
+
+ if (! perm) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+void RGWGetObjAttrs::pre_exec()
+{
+ rgw_bucket_object_pre_exec(s);
+}
+
+void RGWGetObjAttrs::execute(optional_yield y)
+{
+ RGWGetObj::execute(y);
+} /* RGWGetObjAttrs::execute */
+
int RGWGetLC::verify_permission(optional_yield y)
{
auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
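
RGWGetObjAttrs::recognize_attrs above folds the comma-separated x-amz-object-attributes header into a bitmask. A self-contained approximation using only the standard library; ceph::split and boost::iequals are replaced by a toy splitter and lower-casing, and the flag values are illustrative rather than the RGW ones.

  #include <algorithm>
  #include <cctype>
  #include <cstdint>
  #include <iostream>
  #include <sstream>
  #include <string>

  enum : uint16_t {       // illustrative flag values, not the RGW ones
    AttrEtag         = 1 << 0,
    AttrChecksum     = 1 << 1,
    AttrObjectParts  = 1 << 2,
    AttrObjectSize   = 1 << 3,
    AttrStorageClass = 1 << 4,
  };

  static std::string lower(std::string s) {
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    return s;
  }

  uint16_t recognize_attrs(const std::string& hdr) {
    uint16_t attrs = 0;
    std::istringstream in(hdr);
    std::string tok;
    while (std::getline(in, tok, ',')) {   // whitespace trimming omitted here
      const std::string k = lower(tok);
      if (k == "etag")         attrs |= AttrEtag;
      if (k == "checksum")     attrs |= AttrChecksum;
      if (k == "objectparts")  attrs |= AttrObjectParts;
      if (k == "objectsize")   attrs |= AttrObjectSize;
      if (k == "storageclass") attrs |= AttrStorageClass;
    }
    return attrs;
  }

  int main() {
    std::cout << recognize_attrs("ETag,Checksum,ObjectSize") << '\n';  // 1|2|8 = 11
  }
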
@@ -6373,9 +6460,9 @@ void RGWDeleteCORS::execute(optional_yield y)
return op_ret;
}
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_CORS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
if (op_ret < 0) {
ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket->get_name()
<< " returned err=" << op_ret << dendl;
@@ -6659,6 +6746,14 @@ try_sum_part_cksums(const DoutPrefixProvider *dpp,
++parts_ix;
auto& part_cksum = part.second->get_cksum();
+ if (! part_cksum) {
+ ldpp_dout_fmt(dpp, 0,
+ "ERROR: multipart part checksum not present (ix=={})",
+ parts_ix);
+ op_ret = -ERR_INVALID_REQUEST;
+ return op_ret;
+ }
+
ldpp_dout_fmt(dpp, 16,
"INFO: {} iterate part: {} {} {}",
__func__, parts_ix, part_cksum->type_string(),
@@ -6811,6 +6906,8 @@ void RGWCompleteMultipart::execute(optional_yield y)
if (upload->cksum_type != rgw::cksum::Type::none) {
op_ret = try_sum_part_cksums(this, s->cct, upload.get(), parts, cksum, y);
if (op_ret < 0) {
+ ldpp_dout(this, 16) << "ERROR: try_sum_part_cksums failed, obj="
+ << meta_obj << " ret=" << op_ret << dendl;
return;
}
}
@@ -6835,13 +6932,23 @@ void RGWCompleteMultipart::execute(optional_yield y)
rgw::putobj::find_hdr_cksum(*(s->info.env));
ldpp_dout_fmt(this, 10,
- "INFO: client supplied checksum {}: {}",
+ "INFO: client supplied checksum {}: {} ",
hdr_cksum.header_name(), supplied_cksum);
if (! (supplied_cksum.empty()) &&
(supplied_cksum != armored_cksum)) {
- op_ret = -ERR_INVALID_REQUEST;
- return;
+ /* some minio SDK clients assert a checksum that is cryptographically
+ * valid but omits the part count */
+ auto parts_suffix = fmt::format("-{}", parts->parts.size());
+ auto suffix_len = armored_cksum->size() - parts_suffix.size();
+ if (armored_cksum->compare(0, suffix_len, supplied_cksum) != 0) {
+ ldpp_dout_fmt(this, 4,
+ "{} content checksum mismatch"
+ "\n\tcalculated={} != \n\texpected={}",
+ hdr_cksum.header_name(), armored_cksum, supplied_cksum);
+ op_ret = -ERR_INVALID_REQUEST;
+ return;
+ }
}
buffer::list cksum_bl;
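
The mismatch branch above tolerates SDKs that send the whole-object checksum without the "-<parts>" suffix by comparing the supplied value against the armored checksum with the suffix stripped. A sketch of that comparison; the armored string below is a placeholder, not a real digest, and an underflow guard is added for the standalone case.

  #include <iostream>
  #include <string>

  // Accept the client-supplied checksum if it matches the computed armored
  // checksum either exactly or with the "-<parts>" suffix omitted.
  bool checksum_matches(const std::string& armored, const std::string& supplied,
                        size_t num_parts) {
    if (supplied.empty() || supplied == armored) {
      return true;
    }
    const std::string parts_suffix = "-" + std::to_string(num_parts);
    if (armored.size() < parts_suffix.size()) {
      return false;
    }
    const size_t suffix_len = armored.size() - parts_suffix.size();
    return armored.compare(0, suffix_len, supplied) == 0;
  }

  int main() {
    const std::string armored = "mvhWGisu0rEJot110Lrw6Q==-3";  // placeholder value
    std::cout << checksum_matches(armored, "mvhWGisu0rEJot110Lrw6Q==", 3) << '\n'; // 1
    std::cout << checksum_matches(armored, "completely-different", 3) << '\n';     // 0
  }
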
@@ -6864,7 +6971,13 @@ void RGWCompleteMultipart::execute(optional_yield y)
RGWObjVersionTracker& objv_tracker = meta_obj->get_version_tracker();
using prefix_map_t = rgw::sal::MultipartUpload::prefix_map_t;
- prefix_map_t processed_prefixes;
+ prefix_map_t processed_prefixes;
+
+  // etag and size are not known before completion
+ op_ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Journal, s->object.get(), s, canonical_name(), "", 0, this, y, false, false);
+ if (op_ret < 0) {
+ return;
+ }
op_ret =
upload->complete(this, y, s->cct, parts->parts, remove_objs, accounted_size,
@@ -7011,17 +7124,30 @@ void RGWAbortMultipart::execute(optional_yield y)
return;
upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id);
+ meta_obj = upload->get_meta_obj();
+ meta_obj->set_in_extra_data(true);
+ meta_obj->get_obj_attrs(s->yield, this);
+
jspan_context trace_ctx(false, false);
if (tracing::rgw::tracer.is_enabled()) {
// read meta object attributes for trace info
- meta_obj = upload->get_meta_obj();
- meta_obj->set_in_extra_data(true);
- meta_obj->get_obj_attrs(s->yield, this);
extract_span_context(meta_obj->get_attrs(), trace_ctx);
}
multipart_trace = tracing::rgw::tracer.add_span(name(), trace_ctx);
+ int max_lock_secs_mp =
+ s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
+ utime_t dur(max_lock_secs_mp, 0);
+ auto serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart");
+ op_ret = serializer->try_lock(this, dur, y);
+ if (op_ret < 0) {
+ if (op_ret == -ENOENT) {
+ op_ret = -ERR_NO_SUCH_UPLOAD;
+ }
+ return;
+ }
op_ret = upload->abort(this, s->cct, y);
+ serializer->unlock();
}
int RGWListMultipart::verify_permission(optional_yield y)
@@ -7280,6 +7406,12 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_
if (op_ret == -ENOENT) {
op_ret = 0;
}
+
+ if (auto ret = rgw::bucketlogging::log_record(driver, rgw::bucketlogging::LoggingType::Any, obj.get(), s, canonical_name(), etag, obj_size, this, y, true, false); ret < 0) {
+ // don't reply with an error in case of failed delete logging
+ ldpp_dout(this, 5) << "WARNING: multi DELETE operation ignores bucket logging failure: " << ret << dendl;
+ }
+
if (op_ret == 0) {
// send request to notification manager
int ret = res->publish_commit(dpp, obj_size, ceph::real_clock::now(), etag, version_id);
@@ -7319,6 +7451,12 @@ void RGWDeleteMultiObj::execute(optional_yield y)
return;
}
+ if (multi_delete->objects.empty()) {
+ s->err.message = "Missing required element Object";
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+
constexpr int DEFAULT_MAX_NUM = 1000;
int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
if (max_num < 0) {
@@ -8474,9 +8612,9 @@ void RGWDeleteBucketPolicy::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_IAM_POLICY);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
return op_ret;
}, y);
}
@@ -8994,9 +9132,9 @@ void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_PUBLIC_ACCESS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
return op_ret;
}, y);
}
@@ -9105,10 +9243,10 @@ void RGWDeleteBucketEncryption::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
- rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_POLICY);
attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_KEY_ID);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ op_ret = s->bucket->put_info(this, false, real_time(), y);
return op_ret;
}, y);
}
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index df05500a437..dcf64c31572 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -12,6 +12,7 @@
#pragma once
+#include <cstdint>
#include <limits.h>
#include <array>
@@ -83,6 +84,10 @@ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp,
RGWAccessControlPolicy& policy,
optional_yield y);
+std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp, req_state* s, bool check_obj_exist_tag=true);
+
+int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s);
+
class RGWHandler {
protected:
rgw::sal::Driver* driver{nullptr};
@@ -296,6 +301,7 @@ public:
}
virtual const char* name() const = 0;
virtual RGWOpType get_type() { return RGW_OP_UNKNOWN; }
+ virtual std::string canonical_name() const { return fmt::format("REST.{}.{}", s->info.method, name()); }
virtual uint32_t op_mask() { return 0; }
@@ -974,18 +980,6 @@ public:
virtual bool need_container_stats() { return false; }
};
-class RGWGetBucketLogging : public RGWOp {
-public:
- RGWGetBucketLogging() {}
- int verify_permission(optional_yield y) override;
- void execute(optional_yield) override { }
-
- void send_response() override = 0;
- const char* name() const override { return "get_bucket_logging"; }
- RGWOpType get_type() override { return RGW_OP_GET_BUCKET_LOGGING; }
- uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
-};
-
class RGWGetBucketLocation : public RGWOp {
public:
RGWGetBucketLocation() {}
@@ -1094,14 +1088,15 @@ public:
class RGWStatBucket : public RGWOp {
protected:
- std::unique_ptr<rgw::sal::Bucket> bucket;
RGWStorageStats stats;
+ bool report_stats{true};
public:
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
+ virtual int get_params(optional_yield y) = 0;
void send_response() override = 0;
const char* name() const override { return "stat_bucket"; }
RGWOpType get_type() override { return RGW_OP_STAT_BUCKET; }
@@ -1117,6 +1112,7 @@ class RGWCreateBucket : public RGWOp {
bool relaxed_region_enforcement = false;
RGWCORSConfiguration cors_config;
std::set<std::string> rmattr_names;
+ bufferlist in_data;
virtual bool need_metadata_upload() const { return false; }
@@ -1243,6 +1239,7 @@ protected:
std::string multipart_upload_id;
std::string multipart_part_str;
int multipart_part_num = 0;
+ rgw::cksum::Type multipart_cksum_type{rgw::cksum::Type::none};
jspan_ptr multipart_trace;
boost::optional<ceph::real_time> delete_at;
@@ -1464,6 +1461,7 @@ public:
class RGWRestoreObj : public RGWOp {
protected:
std::optional<uint64_t> expiry_days;
+ int restore_ret;
public:
RGWRestoreObj() {}
@@ -1649,6 +1647,50 @@ public:
uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; }
};
+class RGWGetObjAttrs : public RGWGetObj {
+protected:
+ std::string version_id;
+ std::string expected_bucket_owner;
+ std::optional<int> marker;
+ std::optional<int> max_parts;
+ uint16_t requested_attributes{0};
+#if 0
+ /* used to decrypt attributes for objects stored with SSE-C */
+ x-amz-server-side-encryption-customer-algorithm
+ x-amz-server-side-encryption-customer-key
+ x-amz-server-side-encryption-customer-key-MD5
+#endif
+public:
+
+ enum class ReqAttributes : uint16_t {
+ None = 0,
+ Etag,
+ Checksum,
+ ObjectParts,
+ StorageClass,
+ ObjectSize
+ };
+
+ static uint16_t as_flag(ReqAttributes attr) {
+ return 1 << (uint16_t(attr) ? uint16_t(attr) - 1 : 0);
+ }
+
+ static uint16_t recognize_attrs(const std::string& hdr, uint16_t deflt = 0);
+
+ RGWGetObjAttrs() : RGWGetObj()
+ {
+    RGWGetObj::get_data = false; // GetObjectAttributes returns metadata only, no object data is sent
+ }
+
+ int verify_permission(optional_yield y) override;
+ void pre_exec() override;
+ void execute(optional_yield y) override;
+ void send_response() override = 0;
+ const char* name() const override { return "get_obj_attrs"; }
+ RGWOpType get_type() override { return RGW_OP_GET_OBJ_ATTRS; }
+ uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
+}; /* RGWGetObjAttrs */
+
class RGWGetLC : public RGWOp {
protected:
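
The new canonical_name() virtual introduced in this header, and overridden per resource in rgw_rest.h later in this patch, builds the S3 server-access-log operation string "REST.<METHOD>.<RESOURCE>" from the HTTP method and a per-op label. A compact sketch of the pattern; the op classes are stand-ins, and fmt is assumed available as it is in the RGW tree.

  #include <iostream>
  #include <string>
  #include <fmt/format.h>   // RGW already depends on fmt

  struct Op {
    std::string method;                         // e.g. "PUT", "DELETE"
    virtual ~Op() = default;
    virtual std::string name() const { return "unknown"; }
    // Default: derive the log name from the HTTP method and the op name.
    virtual std::string canonical_name() const {
      return fmt::format("REST.{}.{}", method, name());
    }
  };

  struct PutObjOp : Op {
    std::string name() const override { return "put_obj"; }
    // Ops that map to a well-known S3 resource override with a fixed label.
    std::string canonical_name() const override {
      return fmt::format("REST.{}.OBJECT", method);
    }
  };

  int main() {
    PutObjOp op;
    op.method = "PUT";
    std::cout << op.canonical_name() << '\n';   // REST.PUT.OBJECT
  }
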
diff --git a/src/rgw/rgw_op_type.h b/src/rgw/rgw_op_type.h
index f0c3b072e47..2c8225d289e 100644
--- a/src/rgw/rgw_op_type.h
+++ b/src/rgw/rgw_op_type.h
@@ -30,6 +30,7 @@ enum RGWOpType {
RGW_OP_COPY_OBJ,
RGW_OP_GET_ACLS,
RGW_OP_PUT_ACLS,
+ RGW_OP_GET_OBJ_ATTRS,
RGW_OP_GET_CORS,
RGW_OP_PUT_CORS,
RGW_OP_DELETE_CORS,
@@ -116,6 +117,8 @@ enum RGWOpType {
RGW_OP_ATTACH_GROUP_POLICY,
RGW_OP_DETACH_GROUP_POLICY,
RGW_OP_LIST_ATTACHED_GROUP_POLICIES,
+ RGW_OP_PUT_BUCKET_LOGGING,
+ RGW_OP_POST_BUCKET_LOGGING,
/* rgw specific */
RGW_OP_ADMIN_SET_METADATA,
RGW_OP_GET_OBJ_LAYOUT,
diff --git a/src/rgw/rgw_process.cc b/src/rgw/rgw_process.cc
index 8be7be79069..9ad599b3252 100644
--- a/src/rgw/rgw_process.cc
+++ b/src/rgw/rgw_process.cc
@@ -21,6 +21,7 @@
#include "rgw_lua_request.h"
#include "rgw_tracer.h"
#include "rgw_ratelimit.h"
+#include "rgw_bucket_logging.h"
#include "services/svc_zone_utils.h"
@@ -444,6 +445,20 @@ done:
rgw_log_op(rest, s, op, penv.olog);
}
+ if (op) {
+ std::ignore = rgw::bucketlogging::log_record(driver,
+ rgw::bucketlogging::LoggingType::Standard,
+ s->object.get(),
+ s,
+ op->canonical_name(),
+ "",
+ (s->src_object ? s->src_object->get_size() : (s->object ? s->object->get_size() : 0)),
+ op,
+ yield,
+ true,
+ false);
+ }
+
if (http_ret != nullptr) {
*http_ret = s->err.http_ret;
}
diff --git a/src/rgw/rgw_pubsub.cc b/src/rgw/rgw_pubsub.cc
index cb68d72d7da..87a46bd61a6 100644
--- a/src/rgw/rgw_pubsub.cc
+++ b/src/rgw/rgw_pubsub.cc
@@ -62,214 +62,6 @@ void set_event_id(std::string& id, const std::string& hash, const utime_t& ts) {
}
}
-void rgw_s3_key_filter::dump(Formatter *f) const {
- if (!has_content()) {
- return;
- }
- f->open_array_section("FilterRules");
- if (!prefix_rule.empty()) {
- f->open_object_section("");
- ::encode_json("Name", "prefix", f);
- ::encode_json("Value", prefix_rule, f);
- f->close_section();
- }
- if (!suffix_rule.empty()) {
- f->open_object_section("");
- ::encode_json("Name", "suffix", f);
- ::encode_json("Value", suffix_rule, f);
- f->close_section();
- }
- if (!regex_rule.empty()) {
- f->open_object_section("");
- ::encode_json("Name", "regex", f);
- ::encode_json("Value", regex_rule, f);
- f->close_section();
- }
- f->close_section();
-}
-
-bool rgw_s3_key_filter::decode_xml(XMLObj* obj) {
- XMLObjIter iter = obj->find("FilterRule");
- XMLObj *o;
-
- const auto throw_if_missing = true;
- auto prefix_not_set = true;
- auto suffix_not_set = true;
- auto regex_not_set = true;
- std::string name;
-
- while ((o = iter.get_next())) {
- RGWXMLDecoder::decode_xml("Name", name, o, throw_if_missing);
- if (name == "prefix" && prefix_not_set) {
- prefix_not_set = false;
- RGWXMLDecoder::decode_xml("Value", prefix_rule, o, throw_if_missing);
- } else if (name == "suffix" && suffix_not_set) {
- suffix_not_set = false;
- RGWXMLDecoder::decode_xml("Value", suffix_rule, o, throw_if_missing);
- } else if (name == "regex" && regex_not_set) {
- regex_not_set = false;
- RGWXMLDecoder::decode_xml("Value", regex_rule, o, throw_if_missing);
- } else {
- throw RGWXMLDecoder::err("invalid/duplicate S3Key filter rule name: '" + name + "'");
- }
- }
- return true;
-}
-
-void rgw_s3_key_filter::dump_xml(Formatter *f) const {
- if (!prefix_rule.empty()) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", "prefix", f);
- ::encode_xml("Value", prefix_rule, f);
- f->close_section();
- }
- if (!suffix_rule.empty()) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", "suffix", f);
- ::encode_xml("Value", suffix_rule, f);
- f->close_section();
- }
- if (!regex_rule.empty()) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", "regex", f);
- ::encode_xml("Value", regex_rule, f);
- f->close_section();
- }
-}
-
-bool rgw_s3_key_filter::has_content() const {
- return !(prefix_rule.empty() && suffix_rule.empty() && regex_rule.empty());
-}
-
-void rgw_s3_key_value_filter::dump(Formatter *f) const {
- if (!has_content()) {
- return;
- }
- f->open_array_section("FilterRules");
- for (const auto& key_value : kv) {
- f->open_object_section("");
- ::encode_json("Name", key_value.first, f);
- ::encode_json("Value", key_value.second, f);
- f->close_section();
- }
- f->close_section();
-}
-
-bool rgw_s3_key_value_filter::decode_xml(XMLObj* obj) {
- kv.clear();
- XMLObjIter iter = obj->find("FilterRule");
- XMLObj *o;
-
- const auto throw_if_missing = true;
-
- std::string key;
- std::string value;
-
- while ((o = iter.get_next())) {
- RGWXMLDecoder::decode_xml("Name", key, o, throw_if_missing);
- RGWXMLDecoder::decode_xml("Value", value, o, throw_if_missing);
- kv.emplace(key, value);
- }
- return true;
-}
-
-void rgw_s3_key_value_filter::dump_xml(Formatter *f) const {
- for (const auto& key_value : kv) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", key_value.first, f);
- ::encode_xml("Value", key_value.second, f);
- f->close_section();
- }
-}
-
-bool rgw_s3_key_value_filter::has_content() const {
- return !kv.empty();
-}
-
-void rgw_s3_filter::dump(Formatter *f) const {
- encode_json("S3Key", key_filter, f);
- encode_json("S3Metadata", metadata_filter, f);
- encode_json("S3Tags", tag_filter, f);
-}
-
-bool rgw_s3_filter::decode_xml(XMLObj* obj) {
- RGWXMLDecoder::decode_xml("S3Key", key_filter, obj);
- RGWXMLDecoder::decode_xml("S3Metadata", metadata_filter, obj);
- RGWXMLDecoder::decode_xml("S3Tags", tag_filter, obj);
- return true;
-}
-
-void rgw_s3_filter::dump_xml(Formatter *f) const {
- if (key_filter.has_content()) {
- ::encode_xml("S3Key", key_filter, f);
- }
- if (metadata_filter.has_content()) {
- ::encode_xml("S3Metadata", metadata_filter, f);
- }
- if (tag_filter.has_content()) {
- ::encode_xml("S3Tags", tag_filter, f);
- }
-}
-
-bool rgw_s3_filter::has_content() const {
- return key_filter.has_content() ||
- metadata_filter.has_content() ||
- tag_filter.has_content();
-}
-
-bool match(const rgw_s3_key_filter& filter, const std::string& key) {
- const auto key_size = key.size();
- const auto prefix_size = filter.prefix_rule.size();
- if (prefix_size != 0) {
- // prefix rule exists
- if (prefix_size > key_size) {
- // if prefix is longer than key, we fail
- return false;
- }
- if (!std::equal(filter.prefix_rule.begin(), filter.prefix_rule.end(), key.begin())) {
- return false;
- }
- }
- const auto suffix_size = filter.suffix_rule.size();
- if (suffix_size != 0) {
- // suffix rule exists
- if (suffix_size > key_size) {
- // if suffix is longer than key, we fail
- return false;
- }
- if (!std::equal(filter.suffix_rule.begin(), filter.suffix_rule.end(), (key.end() - suffix_size))) {
- return false;
- }
- }
- if (!filter.regex_rule.empty()) {
- // TODO add regex chaching in the filter
- const std::regex base_regex(filter.regex_rule);
- if (!std::regex_match(key, base_regex)) {
- return false;
- }
- }
- return true;
-}
-
-bool match(const rgw_s3_key_value_filter& filter, const KeyValueMap& kv) {
- // all filter pairs must exist with the same value in the object's metadata/tags
- // object metadata/tags may include items not in the filter
- return std::includes(kv.begin(), kv.end(), filter.kv.begin(), filter.kv.end());
-}
-
-bool match(const rgw_s3_key_value_filter& filter, const KeyMultiValueMap& kv) {
- // all filter pairs must exist with the same value in the object's metadata/tags
- // object metadata/tags may include items not in the filter
- for (auto& filter : filter.kv) {
- auto result = kv.equal_range(filter.first);
- if (std::any_of(result.first, result.second, [&filter](const std::pair<std::string, std::string>& p) { return p.second == filter.second;}))
- continue;
- else
- return false;
- }
- return true;
-}
-
bool match(const rgw::notify::EventTypeList& events, rgw::notify::EventType event) {
// if event list exists, and none of the events in the list matches the event type, filter the message
if (!events.empty() && std::find(events.begin(), events.end(), event) == events.end()) {
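
The matching helpers removed above move to rgw_s3_filter.cc, but the semantics are worth restating: every filter pair must appear in the object's metadata or tags with the same value, and extra object entries are ignored. A standalone sketch of the multimap case, with std::map and std::multimap standing in for the RGW typedefs.

  #include <algorithm>
  #include <iostream>
  #include <map>
  #include <string>

  using KeyValueMap = std::map<std::string, std::string>;            // filter
  using KeyMultiValueMap = std::multimap<std::string, std::string>;  // object tags

  // Every filter pair must be present in the object's tags with the same value;
  // the object may carry additional tags that the filter does not mention.
  bool match(const KeyValueMap& filter, const KeyMultiValueMap& tags) {
    for (const auto& [key, value] : filter) {
      auto range = tags.equal_range(key);
      const bool found = std::any_of(range.first, range.second,
          [&value](const auto& p) { return p.second == value; });
      if (!found) return false;
    }
    return true;
  }

  int main() {
    KeyMultiValueMap tags{{"color", "red"}, {"color", "blue"}, {"team", "rgw"}};
    std::cout << match({{"color", "blue"}}, tags) << '\n';                   // 1
    std::cout << match({{"color", "green"}, {"team", "rgw"}}, tags) << '\n'; // 0
  }
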
diff --git a/src/rgw/rgw_pubsub.h b/src/rgw/rgw_pubsub.h
index 8a6b290cb85..176ada95204 100644
--- a/src/rgw/rgw_pubsub.h
+++ b/src/rgw/rgw_pubsub.h
@@ -9,94 +9,10 @@
#include "rgw_zone.h"
#include "rgw_notify_event_type.h"
#include <boost/container/flat_map.hpp>
+#include "rgw_s3_filter.h"
class XMLObj;
-struct rgw_s3_key_filter {
- std::string prefix_rule;
- std::string suffix_rule;
- std::string regex_rule;
-
- bool has_content() const;
-
- void dump(Formatter *f) const;
- bool decode_xml(XMLObj *obj);
- void dump_xml(Formatter *f) const;
-
- void encode(bufferlist& bl) const {
- ENCODE_START(1, 1, bl);
- encode(prefix_rule, bl);
- encode(suffix_rule, bl);
- encode(regex_rule, bl);
- ENCODE_FINISH(bl);
- }
-
- void decode(bufferlist::const_iterator& bl) {
- DECODE_START(1, bl);
- decode(prefix_rule, bl);
- decode(suffix_rule, bl);
- decode(regex_rule, bl);
- DECODE_FINISH(bl);
- }
-};
-WRITE_CLASS_ENCODER(rgw_s3_key_filter)
-
-using KeyValueMap = boost::container::flat_map<std::string, std::string>;
-using KeyMultiValueMap = std::multimap<std::string, std::string>;
-
-struct rgw_s3_key_value_filter {
- KeyValueMap kv;
-
- bool has_content() const;
-
- void dump(Formatter *f) const;
- bool decode_xml(XMLObj *obj);
- void dump_xml(Formatter *f) const;
-
- void encode(bufferlist& bl) const {
- ENCODE_START(1, 1, bl);
- encode(kv, bl);
- ENCODE_FINISH(bl);
- }
- void decode(bufferlist::const_iterator& bl) {
- DECODE_START(1, bl);
- decode(kv, bl);
- DECODE_FINISH(bl);
- }
-};
-WRITE_CLASS_ENCODER(rgw_s3_key_value_filter)
-
-struct rgw_s3_filter {
- rgw_s3_key_filter key_filter;
- rgw_s3_key_value_filter metadata_filter;
- rgw_s3_key_value_filter tag_filter;
-
- bool has_content() const;
-
- void dump(Formatter *f) const;
- bool decode_xml(XMLObj *obj);
- void dump_xml(Formatter *f) const;
-
- void encode(bufferlist& bl) const {
- ENCODE_START(2, 1, bl);
- encode(key_filter, bl);
- encode(metadata_filter, bl);
- encode(tag_filter, bl);
- ENCODE_FINISH(bl);
- }
-
- void decode(bufferlist::const_iterator& bl) {
- DECODE_START(2, bl);
- decode(key_filter, bl);
- decode(metadata_filter, bl);
- if (struct_v >= 2) {
- decode(tag_filter, bl);
- }
- DECODE_FINISH(bl);
- }
-};
-WRITE_CLASS_ENCODER(rgw_s3_filter)
-
using OptionalFilter = std::optional<rgw_s3_filter>;
struct rgw_pubsub_topic_filter;
diff --git a/src/rgw/rgw_ratelimit.h b/src/rgw/rgw_ratelimit.h
index 0db1813f050..beb0eb3b1d2 100644
--- a/src/rgw/rgw_ratelimit.h
+++ b/src/rgw/rgw_ratelimit.h
@@ -239,6 +239,7 @@ class ActiveRateLimiter : public DoutPrefix {
std::atomic_uint8_t current_active = 0;
std::shared_ptr<RateLimiter> ratelimit[2];
void replace_active() {
+ ceph_pthread_setname("ratelimit_gc");
using namespace std::chrono_literals;
std::unique_lock<std::mutex> lk(cv_m);
while (!stopped) {
@@ -286,8 +287,5 @@ class ActiveRateLimiter : public DoutPrefix {
void start() {
ldpp_dout(this, 20) << "starting ratelimit_gc thread" << dendl;
runner = std::thread(&ActiveRateLimiter::replace_active, this);
- if (const auto rc = ceph_pthread_setname(runner.native_handle(), "ratelimit_gc"); rc != 0) {
- ldpp_dout(this, 1) << "ERROR: failed to set ratelimit_gc thread name. error: " << rc << dendl;
- }
}
};
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index a202d5acf4e..ac5e65c0dd6 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -666,8 +666,10 @@ static void build_redirect_url(req_state *s, const string& redirect_base, string
dest_uri = dest_uri.substr(0, dest_uri.size() - 1);
}
dest_uri += s->info.request_uri;
- dest_uri += "?";
- dest_uri += s->info.request_params;
+ if (!s->info.request_params.empty()) {
+ dest_uri += "?";
+ dest_uri += s->info.request_params;
+ }
}
void abort_early(req_state *s, RGWOp* op, int err_no,
@@ -1467,7 +1469,7 @@ int RGWPutACLs_ObjStore::get_params(optional_yield y)
{
const auto max_size = s->cct->_conf->rgw_max_put_param_size;
std::tie(op_ret, data) = read_all_input(s, max_size, false);
- ldpp_dout(s, 0) << "RGWPutACLs_ObjStore::get_params read data is: " << data.c_str() << dendl;
+ ldpp_dout(s, 20) << "RGWPutACLs_ObjStore::get_params read data is: " << data.c_str() << dendl;
return op_ret;
}
@@ -1668,7 +1670,6 @@ int RGWDeleteMultiObj_ObjStore::get_params(optional_yield y)
return op_ret;
}
-
void RGWRESTOp::send_response()
{
if (!flusher.did_start()) {
diff --git a/src/rgw/rgw_rest.h b/src/rgw/rgw_rest.h
index 3abba0124a6..9111696453e 100644
--- a/src/rgw/rgw_rest.h
+++ b/src/rgw/rgw_rest.h
@@ -129,30 +129,39 @@ public:
}
int get_params(optional_yield y) override;
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT", s->info.method); }
};
class RGWGetObjTags_ObjStore : public RGWGetObjTags {
public:
RGWGetObjTags_ObjStore() {};
~RGWGetObjTags_ObjStore() {};
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT_TAGGING", s->info.method); }
};
class RGWPutObjTags_ObjStore: public RGWPutObjTags {
public:
RGWPutObjTags_ObjStore() {};
~RGWPutObjTags_ObjStore() {};
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT_TAGGING", s->info.method); }
};
class RGWGetBucketTags_ObjStore : public RGWGetBucketTags {
public:
RGWGetBucketTags_ObjStore() = default;
virtual ~RGWGetBucketTags_ObjStore() = default;
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET_TAGGING", s->info.method); }
};
class RGWPutBucketTags_ObjStore: public RGWPutBucketTags {
public:
RGWPutBucketTags_ObjStore() = default;
virtual ~RGWPutBucketTags_ObjStore() = default;
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET_TAGGING", s->info.method); }
};
class RGWGetBucketReplication_ObjStore : public RGWGetBucketReplication {
@@ -177,42 +186,56 @@ class RGWListBuckets_ObjStore : public RGWListBuckets {
public:
RGWListBuckets_ObjStore() {}
~RGWListBuckets_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKETS", s->info.method); }
};
class RGWGetUsage_ObjStore : public RGWGetUsage {
public:
RGWGetUsage_ObjStore() {}
~RGWGetUsage_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.USER_USAGE", s->info.method); }
};
class RGWListBucket_ObjStore : public RGWListBucket {
public:
RGWListBucket_ObjStore() {}
~RGWListBucket_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET", s->info.method); }
};
class RGWStatAccount_ObjStore : public RGWStatAccount {
public:
RGWStatAccount_ObjStore() {}
~RGWStatAccount_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.ACCOUNT_STATUS", s->info.method); }
};
class RGWStatBucket_ObjStore : public RGWStatBucket {
public:
RGWStatBucket_ObjStore() {}
~RGWStatBucket_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET_STATUS", s->info.method); }
};
class RGWCreateBucket_ObjStore : public RGWCreateBucket {
public:
RGWCreateBucket_ObjStore() {}
~RGWCreateBucket_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET", s->info.method); }
};
class RGWDeleteBucket_ObjStore : public RGWDeleteBucket {
public:
RGWDeleteBucket_ObjStore() {}
~RGWDeleteBucket_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET", s->info.method); }
};
class RGWPutObj_ObjStore : public RGWPutObj
@@ -224,6 +247,8 @@ public:
int verify_params() override;
int get_params(optional_yield y) override;
int get_data(bufferlist& bl) override;
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT", s->info.method); }
};
class RGWPostObj_ObjStore : public RGWPostObj
@@ -294,6 +319,7 @@ public:
~RGWPostObj_ObjStore() override {}
int verify_params() override;
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT", s->info.method); }
};
@@ -302,6 +328,8 @@ class RGWPutMetadataAccount_ObjStore : public RGWPutMetadataAccount
public:
RGWPutMetadataAccount_ObjStore() {}
~RGWPutMetadataAccount_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.ACCOUNT_METADATA", s->info.method); }
};
class RGWPutMetadataBucket_ObjStore : public RGWPutMetadataBucket
@@ -309,6 +337,8 @@ class RGWPutMetadataBucket_ObjStore : public RGWPutMetadataBucket
public:
RGWPutMetadataBucket_ObjStore() {}
~RGWPutMetadataBucket_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET_METADATA", s->info.method); }
};
class RGWPutMetadataObject_ObjStore : public RGWPutMetadataObject
@@ -316,18 +346,24 @@ class RGWPutMetadataObject_ObjStore : public RGWPutMetadataObject
public:
RGWPutMetadataObject_ObjStore() {}
~RGWPutMetadataObject_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT_METADATA", s->info.method); }
};
class RGWRestoreObj_ObjStore : public RGWRestoreObj {
public:
RGWRestoreObj_ObjStore() {}
~RGWRestoreObj_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT", s->info.method); }
};
class RGWDeleteObj_ObjStore : public RGWDeleteObj {
public:
RGWDeleteObj_ObjStore() {}
~RGWDeleteObj_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT", s->info.method); }
};
class RGWGetCrossDomainPolicy_ObjStore : public RGWGetCrossDomainPolicy {
@@ -346,12 +382,16 @@ class RGWCopyObj_ObjStore : public RGWCopyObj {
public:
RGWCopyObj_ObjStore() {}
~RGWCopyObj_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.OBJECT", s->info.method); }
};
class RGWGetACLs_ObjStore : public RGWGetACLs {
public:
RGWGetACLs_ObjStore() {}
~RGWGetACLs_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.ACL", s->info.method); }
};
class RGWPutACLs_ObjStore : public RGWPutACLs {
@@ -360,12 +400,26 @@ public:
~RGWPutACLs_ObjStore() override {}
int get_params(optional_yield y) override;
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.ACL", s->info.method); }
+};
+
+class RGWGetObjAttrs_ObjStore : public RGWGetObjAttrs {
+public:
+ RGWGetObjAttrs_ObjStore() {}
+ ~RGWGetObjAttrs_ObjStore() override {}
+
+ int get_params(optional_yield y) = 0;
+ /* not actually used */
+ int send_response_data_error(optional_yield y) override { return 0; };
+ int send_response_data(bufferlist& bl, off_t ofs, off_t len) override { return 0; };
};
class RGWGetLC_ObjStore : public RGWGetLC {
public:
RGWGetLC_ObjStore() {}
~RGWGetLC_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.LIFECYCLE", s->info.method); }
};
class RGWPutLC_ObjStore : public RGWPutLC {
@@ -374,6 +428,7 @@ public:
~RGWPutLC_ObjStore() override {}
int get_params(optional_yield y) override;
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.LIFECYCLE", s->info.method); }
};
class RGWDeleteLC_ObjStore : public RGWDeleteLC {
@@ -381,30 +436,39 @@ public:
RGWDeleteLC_ObjStore() {}
~RGWDeleteLC_ObjStore() override {}
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.LIFECYCLE", s->info.method); }
};
class RGWGetCORS_ObjStore : public RGWGetCORS {
public:
RGWGetCORS_ObjStore() {}
~RGWGetCORS_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.CORS", s->info.method); }
};
class RGWPutCORS_ObjStore : public RGWPutCORS {
public:
RGWPutCORS_ObjStore() {}
~RGWPutCORS_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.CORS", s->info.method); }
};
class RGWDeleteCORS_ObjStore : public RGWDeleteCORS {
public:
RGWDeleteCORS_ObjStore() {}
~RGWDeleteCORS_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.CORS", s->info.method); }
};
class RGWOptionsCORS_ObjStore : public RGWOptionsCORS {
public:
RGWOptionsCORS_ObjStore() {}
~RGWOptionsCORS_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.CORS", s->info.method); }
};
class RGWGetBucketEncryption_ObjStore : public RGWGetBucketEncryption {
@@ -429,6 +493,8 @@ class RGWInitMultipart_ObjStore : public RGWInitMultipart {
public:
RGWInitMultipart_ObjStore() {}
~RGWInitMultipart_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.UPLOADS", s->info.method); }
};
class RGWCompleteMultipart_ObjStore : public RGWCompleteMultipart {
@@ -436,6 +502,7 @@ public:
RGWCompleteMultipart_ObjStore() {}
~RGWCompleteMultipart_ObjStore() override {}
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.UPLOAD", s->info.method); }
int get_params(optional_yield y) override;
};
@@ -443,6 +510,8 @@ class RGWAbortMultipart_ObjStore : public RGWAbortMultipart {
public:
RGWAbortMultipart_ObjStore() {}
~RGWAbortMultipart_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.UPLOAD", s->info.method); }
};
class RGWListMultipart_ObjStore : public RGWListMultipart {
@@ -450,6 +519,7 @@ public:
RGWListMultipart_ObjStore() {}
~RGWListMultipart_ObjStore() override {}
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.UPLOAD", s->info.method); }
int get_params(optional_yield y) override;
};
@@ -458,6 +528,7 @@ public:
RGWListBucketMultiparts_ObjStore() {}
~RGWListBucketMultiparts_ObjStore() override {}
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.UPLOADS", s->info.method); }
int get_params(optional_yield y) override;
};
@@ -465,12 +536,16 @@ class RGWBulkDelete_ObjStore : public RGWBulkDelete {
public:
RGWBulkDelete_ObjStore() {}
~RGWBulkDelete_ObjStore() override {}
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BULK_DELETE", s->info.method); }
};
class RGWBulkUploadOp_ObjStore : public RGWBulkUploadOp {
public:
RGWBulkUploadOp_ObjStore() = default;
~RGWBulkUploadOp_ObjStore() = default;
+
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BULK_UPLOAD", s->info.method); }
};
class RGWDeleteMultiObj_ObjStore : public RGWDeleteMultiObj {
@@ -479,6 +554,7 @@ public:
~RGWDeleteMultiObj_ObjStore() override {}
int get_params(optional_yield y) override;
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.DELETE_MULTI_OBJECT", s->info.method); }
};
class RGWInfo_ObjStore : public RGWInfo {
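Reviewer note (illustration only, not part of the patch): the canonical_name() overrides added above all follow the S3 server-access-log style operation string, "REST.<HTTP method>.<resource>", presumably so the new bucket-logging code can record the operation per request. A minimal sketch of the naming scheme, with a hypothetical helper standing in for the per-op overrides:

// Illustration only; canonical_name_for() is an assumed stand-in for the
// per-op overrides above, which return strings such as "REST.PUT.OBJECT"
// or "REST.GET.BUCKET_TAGGING".
#include <fmt/format.h>
#include <iostream>
#include <string>

static std::string canonical_name_for(const std::string& http_method,
                                      const std::string& resource) {
  return fmt::format("REST.{}.{}", http_method, resource);
}

int main() {
  std::cout << canonical_name_for("PUT", "OBJECT_TAGGING") << "\n"; // REST.PUT.OBJECT_TAGGING
  std::cout << canonical_name_for("DELETE", "BUCKET") << "\n";      // REST.DELETE.BUCKET
  return 0;
}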
diff --git a/src/rgw/rgw_rest_bucket_logging.cc b/src/rgw/rgw_rest_bucket_logging.cc
new file mode 100644
index 00000000000..afd79b0a548
--- /dev/null
+++ b/src/rgw/rgw_rest_bucket_logging.cc
@@ -0,0 +1,369 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "common/dout.h"
+#include "rgw_op.h"
+#include "rgw_rest.h"
+#include "rgw_rest_s3.h"
+#include "rgw_arn.h"
+#include "rgw_auth_s3.h"
+#include "rgw_url.h"
+#include "rgw_bucket_logging.h"
+#include "rgw_rest_bucket_logging.h"
+
+#define dout_context g_ceph_context
+#define dout_subsys ceph_subsys_rgw
+
+namespace {
+ int verify_bucket_logging_params(const DoutPrefixProvider* dpp, const req_state* s) {
+ bool exists;
+ const auto no_value = s->info.args.get("logging", &exists);
+ if (!exists) {
+ ldpp_dout(dpp, 1) << "ERROR: missing required param 'logging'" << dendl;
+ return -EINVAL;
+ }
+ if (no_value.length() > 0) {
+ ldpp_dout(dpp, 1) << "ERROR: param 'logging' should not have any value" << dendl;
+ return -EINVAL;
+ }
+ if (s->bucket_name.empty()) {
+ ldpp_dout(dpp, 1) << "ERROR: logging request must be on a bucket" << dendl;
+ return -EINVAL;
+ }
+ return 0;
+ }
+}
+
+// GET /<bucket name>/?logging
+// reply is XML encoded
+class RGWGetBucketLoggingOp : public RGWOp {
+ rgw::bucketlogging::configuration configuration;
+
+public:
+ int verify_permission(optional_yield y) override {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketLogging)) {
+ return -EACCES;
+ }
+
+ return 0;
+ }
+
+ void execute(optional_yield y) override {
+ op_ret = verify_bucket_logging_params(this, s);
+ if (op_ret < 0) {
+ return;
+ }
+
+ const rgw_bucket src_bucket_id(s->bucket_tenant, s->bucket_name);
+ std::unique_ptr<rgw::sal::Bucket> src_bucket;
+ op_ret = driver->load_bucket(this, src_bucket_id,
+ &src_bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << src_bucket_id << "', ret = " << op_ret << dendl;
+ return;
+ }
+ if (auto iter = src_bucket->get_attrs().find(RGW_ATTR_BUCKET_LOGGING); iter != src_bucket->get_attrs().end()) {
+ try {
+ configuration.enabled = true;
+ decode(configuration, iter->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 1) << "WARNING: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "' for bucket '" << src_bucket_id << "', error: " << err.what() << dendl;
+ op_ret = -EIO;
+ return;
+ }
+ } else {
+ ldpp_dout(this, 5) << "WARNING: no logging configuration on bucket '" << src_bucket_id << "'" << dendl;
+ return;
+ }
+ ldpp_dout(this, 20) << "INFO: found logging configuration on bucket '" << src_bucket_id << "'"
+ << "'. configuration: " << configuration.to_json_str() << dendl;
+ }
+
+ void send_response() override {
+ dump_errno(s);
+ end_header(s, this, to_mime_type(s->format));
+ dump_start(s);
+
+ s->formatter->open_object_section_in_ns("BucketLoggingStatus", XMLNS_AWS_S3);
+ configuration.dump_xml(s->formatter);
+ s->formatter->close_section();
+ rgw_flush_formatter_and_reset(s, s->formatter);
+ }
+ const char* name() const override { return "get_bucket_logging"; }
+ RGWOpType get_type() override { return RGW_OP_GET_BUCKET_LOGGING; }
+ uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
+};
+
+// PUT /<bucket name>/?logging
+// actual configuration is XML encoded in the body of the message
+class RGWPutBucketLoggingOp : public RGWDefaultResponseOp {
+ int verify_permission(optional_yield y) override {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketLogging)) {
+ return -EACCES;
+ }
+
+ return 0;
+ }
+
+ const char* name() const override { return "put_bucket_logging"; }
+ RGWOpType get_type() override { return RGW_OP_PUT_BUCKET_LOGGING; }
+ uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; }
+
+ void execute(optional_yield y) override {
+ op_ret = verify_bucket_logging_params(this, s);
+ if (op_ret < 0) {
+ return;
+ }
+
+ const auto max_size = s->cct->_conf->rgw_max_put_param_size;
+ bufferlist data;
+ std::tie(op_ret, data) = read_all_input(s, max_size, false);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to read XML logging payload, ret = " << op_ret << dendl;
+ return;
+ }
+ if (data.length() == 0) {
+ ldpp_dout(this, 1) << "ERROR: XML logging payload missing" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()){
+ ldpp_dout(this, 1) << "ERROR: failed to initialize XML parser" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ if (!parser.parse(data.c_str(), data.length(), 1)) {
+ ldpp_dout(this, 1) << "ERROR: failed to parse XML logging payload" << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+ rgw::bucketlogging::configuration configuration;
+ configuration.default_obj_roll_time = get_cct()->_conf->rgw_bucket_logging_obj_roll_time;
+ try {
+ RGWXMLDecoder::decode_xml("BucketLoggingStatus", configuration, &parser, true);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 1) << "ERROR: failed to parse XML logging payload. error: " << err << dendl;
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+
+ const rgw_bucket src_bucket_id(s->bucket_tenant, s->bucket_name);
+ std::unique_ptr<rgw::sal::Bucket> src_bucket;
+ op_ret = driver->load_bucket(this, src_bucket_id,
+ &src_bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << src_bucket_id << "', ret = " << op_ret << dendl;
+ return;
+ }
+
+ if (!configuration.enabled) {
+ op_ret = rgw::bucketlogging::source_bucket_cleanup(this, driver, src_bucket.get(), true, y);
+ return;
+ }
+
+ // set logging configuration
+ rgw_bucket target_bucket_id;
+ if (op_ret = rgw::bucketlogging::get_bucket_id(configuration.target_bucket, s->bucket_tenant, target_bucket_id); op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to parse target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl;
+ return;
+ }
+
+ if (target_bucket_id == src_bucket_id) {
+ ldpp_dout(this, 1) << "ERROR: target bucket '" << target_bucket_id << "' must be different from source bucket" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ op_ret = driver->load_bucket(this, target_bucket_id,
+ &target_bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << target_bucket_id << "', ret = " << op_ret << dendl;
+ return;
+ }
+ auto& target_attrs = target_bucket->get_attrs();
+ if (target_attrs.find(RGW_ATTR_BUCKET_LOGGING) != target_attrs.end()) {
+ // target bucket must not have logging set on it
+ ldpp_dout(this, 1) << "ERROR: logging target bucket '" << target_bucket_id << "', is configured with bucket logging" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ // verify target bucket does not have encryption
+ if (target_attrs.find(RGW_ATTR_BUCKET_ENCRYPTION_POLICY) != target_attrs.end()) {
+ ldpp_dout(this, 1) << "ERROR: logging target bucket '" << target_bucket_id << "', is configured with encryption" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ std::optional<rgw::bucketlogging::configuration> old_conf;
+ bufferlist conf_bl;
+ encode(configuration, conf_bl);
+ op_ret = retry_raced_bucket_write(this, src_bucket.get(), [this, &conf_bl, &src_bucket, &old_conf, &configuration, y] {
+ auto& attrs = src_bucket->get_attrs();
+ auto it = attrs.find(RGW_ATTR_BUCKET_LOGGING);
+ if (it != attrs.end()) {
+ try {
+ rgw::bucketlogging::configuration tmp_conf;
+ tmp_conf.enabled = true;
+ decode(tmp_conf, it->second);
+ old_conf = std::move(tmp_conf);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 1) << "WARNING: failed to decode existing logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "' for bucket '" << src_bucket->get_info().bucket << "', error: " << err.what() << dendl;
+ }
+ if (!old_conf || (old_conf && *old_conf != configuration)) {
+ // conf changed (or was unknown) - update
+ it->second = conf_bl;
+ return src_bucket->merge_and_store_attrs(this, attrs, y);
+ }
+ // nothing to update
+ return 0;
+ }
+ // conf was added
+ attrs.insert(std::make_pair(RGW_ATTR_BUCKET_LOGGING, conf_bl));
+ return src_bucket->merge_and_store_attrs(this, attrs, y);
+ }, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to set logging attribute '" << RGW_ATTR_BUCKET_LOGGING << "' to bucket '" <<
+ src_bucket_id << "', ret = " << op_ret << dendl;
+ return;
+ }
+ if (!old_conf) {
+ ldpp_dout(this, 20) << "INFO: new logging configuration added to bucket '" << src_bucket_id << "'. configuration: " <<
+ configuration.to_json_str() << dendl;
+ if (const auto ret = rgw::bucketlogging::update_bucket_logging_sources(this, target_bucket, src_bucket_id, true, y); ret < 0) {
+ ldpp_dout(this, 1) << "WARNING: failed to add source bucket '" << src_bucket_id << "' to logging sources of target bucket '" <<
+ target_bucket_id << "', ret = " << ret << dendl;
+ }
+ } else if (*old_conf != configuration) {
+ // conf changed - do cleanup
+ if (const auto ret = commit_logging_object(*old_conf, target_bucket, this, y); ret < 0) {
+ ldpp_dout(this, 1) << "WARNING: could not commit pending logging object when updating logging configuration of bucket '" <<
+ src_bucket->get_info().bucket << "', ret = " << ret << dendl;
+ } else {
+ ldpp_dout(this, 20) << "INFO: committed pending logging object when updating logging configuration of bucket '" <<
+ src_bucket->get_info().bucket << "'" << dendl;
+ }
+ if (old_conf->target_bucket != configuration.target_bucket) {
+ rgw_bucket old_target_bucket_id;
+ if (const auto ret = rgw::bucketlogging::get_bucket_id(old_conf->target_bucket, s->bucket_tenant, old_target_bucket_id); ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to parse target bucket '" << old_conf->target_bucket << "', ret = " << ret << dendl;
+ return;
+ }
+ if (const auto ret = rgw::bucketlogging::update_bucket_logging_sources(this, driver, old_target_bucket_id, src_bucket_id, false, y); ret < 0) {
+ ldpp_dout(this, 1) << "WARNING: failed to remove source bucket '" << src_bucket_id << "' from logging sources of original target bucket '" <<
+ old_target_bucket_id << "', ret = " << ret << dendl;
+ }
+ if (const auto ret = rgw::bucketlogging::update_bucket_logging_sources(this, target_bucket, src_bucket_id, true, y); ret < 0) {
+ ldpp_dout(this, 1) << "WARNING: failed to add source bucket '" << src_bucket_id << "' to logging sources of target bucket '" <<
+ target_bucket_id << "', ret = " << ret << dendl;
+ }
+ }
+ ldpp_dout(this, 20) << "INFO: wrote logging configuration to bucket '" << src_bucket_id << "'. configuration: " <<
+ configuration.to_json_str() << dendl;
+ } else {
+ ldpp_dout(this, 20) << "INFO: logging configuration of bucket '" << src_bucket_id << "' did not change" << dendl;
+ }
+ }
+};
+
+// POST /<bucket name>/?logging
+class RGWPostBucketLoggingOp : public RGWDefaultResponseOp {
+ int verify_permission(optional_yield y) override {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PostBucketLogging)) {
+ return -EACCES;
+ }
+
+ return 0;
+ }
+
+ const char* name() const override { return "post_bucket_logging"; }
+ RGWOpType get_type() override { return RGW_OP_POST_BUCKET_LOGGING; }
+ uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; }
+
+ void execute(optional_yield y) override {
+ op_ret = verify_bucket_logging_params(this, s);
+ if (op_ret < 0) {
+ return;
+ }
+
+ const rgw_bucket src_bucket_id(s->bucket_tenant, s->bucket_name);
+ std::unique_ptr<rgw::sal::Bucket> src_bucket;
+ op_ret = driver->load_bucket(this, src_bucket_id,
+ &src_bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << src_bucket_id << "', ret = " << op_ret << dendl;
+ return;
+ }
+ const auto& bucket_attrs = src_bucket->get_attrs();
+ auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING);
+ if (iter == bucket_attrs.end()) {
+ ldpp_dout(this, 1) << "WARNING: no logging configured on bucket '" << src_bucket_id << "'" << dendl;
+ return;
+ }
+ rgw::bucketlogging::configuration configuration;
+ try {
+ configuration.enabled = true;
+ decode(configuration, iter->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 1) << "WARNING: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "' for bucket '" << src_bucket_id << "', error: " << err.what() << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ rgw_bucket target_bucket_id;
+ if (op_ret = rgw::bucketlogging::get_bucket_id(configuration.target_bucket, s->bucket_tenant, target_bucket_id); op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to parse target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl;
+ return;
+ }
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ op_ret = driver->load_bucket(this, target_bucket_id,
+ &target_bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << target_bucket_id << "', ret = " << op_ret << dendl;
+ return;
+ }
+ std::string obj_name;
+ RGWObjVersionTracker objv_tracker;
+ op_ret = target_bucket->get_logging_object_name(obj_name, configuration.target_prefix, null_yield, this, &objv_tracker);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get pending logging object name from target bucket '" << target_bucket_id << "'" << dendl;
+ return;
+ }
+ op_ret = rgw::bucketlogging::rollover_logging_object(configuration, target_bucket, obj_name, this, null_yield, true, &objv_tracker);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to flush pending logging object '" << obj_name
+ << "' to target bucket '" << target_bucket_id << "'" << dendl;
+ return;
+ }
+ ldpp_dout(this, 20) << "INFO: flushed pending logging object '" << obj_name
+ << "' to target bucket '" << configuration.target_bucket << "'" << dendl;
+ }
+};
+
+RGWOp* RGWHandler_REST_BucketLogging_S3::create_post_op() {
+ return new RGWPostBucketLoggingOp();
+}
+
+RGWOp* RGWHandler_REST_BucketLogging_S3::create_put_op() {
+ return new RGWPutBucketLoggingOp();
+}
+
+RGWOp* RGWHandler_REST_BucketLogging_S3::create_get_op() {
+ return new RGWGetBucketLoggingOp();
+}
+
diff --git a/src/rgw/rgw_rest_bucket_logging.h b/src/rgw/rgw_rest_bucket_logging.h
new file mode 100644
index 00000000000..0b31d88dad8
--- /dev/null
+++ b/src/rgw/rgw_rest_bucket_logging.h
@@ -0,0 +1,19 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+#pragma once
+
+#include "rgw_rest_s3.h"
+
+// s3 compliant bucket logging handler factory
+class RGWHandler_REST_BucketLogging_S3 : public RGWHandler_REST_S3 {
+protected:
+ int init_permissions(RGWOp* op, optional_yield y) override {return 0;}
+ int read_permissions(RGWOp* op, optional_yield y) override {return 0;}
+ bool supports_quota() override {return false;}
+public:
+ virtual ~RGWHandler_REST_BucketLogging_S3() = default;
+ static RGWOp* create_get_op();
+ static RGWOp* create_put_op();
+ static RGWOp* create_post_op();
+};
+
diff --git a/src/rgw/rgw_rest_pubsub.cc b/src/rgw/rgw_rest_pubsub.cc
index c0345a4f88a..f1ffe09cf25 100644
--- a/src/rgw/rgw_rest_pubsub.cc
+++ b/src/rgw/rgw_rest_pubsub.cc
@@ -234,7 +234,13 @@ bool verify_topic_permission(const DoutPrefixProvider* dpp, req_state* s,
return verify_topic_permission(dpp, s, topic.owner, arn, policy, op);
}
-// command (AWS compliant):
+bool should_forward_request_to_master(req_state* s, rgw::sal::Driver* driver) {
+ return (!driver->is_meta_master() &&
+ rgw::all_zonegroups_support(*s->penv.site,
+ rgw::zone_features::notification_v2));
+}
+
+// command (AWS compliant):
// POST
// Action=CreateTopic&Name=<topic-name>[&OpaqueData=data][&push-endpoint=<endpoint>[&persistent][&<arg1>=<value1>]]
class RGWPSCreateTopicOp : public RGWOp {
@@ -273,7 +279,7 @@ class RGWPSCreateTopicOp : public RGWOp {
// Remove the args that are parsed, so the push_endpoint_args only contains
// necessary one's which is parsed after this if. but only if master zone,
// else we do not remove as request is forwarded to master.
- if (driver->is_meta_master()) {
+ if (!should_forward_request_to_master(s, driver)) {
s->info.args.remove("OpaqueData");
s->info.args.remove("push-endpoint");
s->info.args.remove("persistent");
@@ -396,7 +402,7 @@ class RGWPSCreateTopicOp : public RGWOp {
void RGWPSCreateTopicOp::execute(optional_yield y) {
// master request will replicate the topic creation.
- if (!driver->is_meta_master()) {
+ if (should_forward_request_to_master(s, driver)) {
op_ret = rgw_forward_request_to_master(
this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y);
if (op_ret < 0) {
@@ -494,11 +500,11 @@ void RGWPSListTopicsOp::execute(optional_yield y) {
const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
if (rgw::all_zonegroups_support(*s->penv.site, rgw::zone_features::notification_v2) &&
- driver->stat_topics_v1(s->bucket->get_tenant(), null_yield, this) == -ENOENT) {
- op_ret = ps.get_topics_v1(this, result, y);
- } else {
+ driver->stat_topics_v1(get_account_or_tenant(s->owner.id), null_yield, this) == -ENOENT) {
constexpr int max_items = 100;
op_ret = ps.get_topics_v2(this, start_token, max_items, result, next_token, y);
+ } else {
+ op_ret = ps.get_topics_v1(this, result, y);
}
// if there are no topics it is not considered an error
op_ret = op_ret == -ENOENT ? 0 : op_ret;
@@ -863,7 +869,7 @@ class RGWPSSetTopicAttributesOp : public RGWOp {
};
void RGWPSSetTopicAttributesOp::execute(optional_yield y) {
- if (!driver->is_meta_master()) {
+ if (should_forward_request_to_master(s, driver)) {
op_ret = rgw_forward_request_to_master(
this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y);
if (op_ret < 0) {
@@ -1008,9 +1014,10 @@ class RGWPSDeleteTopicOp : public RGWOp {
};
void RGWPSDeleteTopicOp::execute(optional_yield y) {
- if (!driver->is_meta_master()) {
+ if (should_forward_request_to_master(s, driver)) {
op_ret = rgw_forward_request_to_master(
this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y);
+
if (op_ret < 0) {
ldpp_dout(this, 1)
<< "DeleteTopic forward_request_to_master returned ret = " << op_ret
@@ -1260,7 +1267,7 @@ int RGWPSCreateNotifOp::verify_permission(optional_yield y) {
}
void RGWPSCreateNotifOp::execute(optional_yield y) {
- if (!driver->is_meta_master()) {
+ if (should_forward_request_to_master(s, driver)) {
op_ret = rgw_forward_request_to_master(
this, *s->penv.site, s->owner.id, &data, nullptr, s->info, y);
if (op_ret < 0) {
@@ -1462,7 +1469,7 @@ int RGWPSDeleteNotifOp::verify_permission(optional_yield y) {
}
void RGWPSDeleteNotifOp::execute(optional_yield y) {
- if (!driver->is_meta_master()) {
+ if (should_forward_request_to_master(s, driver)) {
bufferlist indata;
op_ret = rgw_forward_request_to_master(
this, *s->penv.site, s->owner.id, &indata, nullptr, s->info, y);
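Reviewer note (illustration only, not part of the patch): several pubsub ops above switch from the bare `!driver->is_meta_master()` check to the new should_forward_request_to_master() helper. The effective decision, restated as a small sketch (the rationale for the notification_v1 case is not spelled out in this patch):

// Sketch of the forwarding predicate added in rgw_rest_pubsub.cc above:
// forward to the metadata master only when this zone is not the master AND
// every zonegroup supports the notification_v2 feature.
#include <iostream>

static bool should_forward(bool is_meta_master, bool all_zonegroups_support_v2) {
  return !is_meta_master && all_zonegroups_support_v2;
}

int main() {
  std::cout << should_forward(true,  true)  << "\n"; // 0: this zone is the master, handle locally
  std::cout << should_forward(false, true)  << "\n"; // 1: v2 everywhere, forward to the master
  std::cout << should_forward(false, false) << "\n"; // 0: v1 still in play, handle locally
  return 0;
}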
diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc
index a245fca9945..885991244a6 100644
--- a/src/rgw/rgw_rest_s3.cc
+++ b/src/rgw/rgw_rest_s3.cc
@@ -9,6 +9,7 @@
#include <string_view>
#include "common/ceph_crypto.h"
+#include "common/dout.h"
#include "common/split.h"
#include "common/Formatter.h"
#include "common/utf8.h"
@@ -69,6 +70,7 @@
#include "rgw_role.h"
#include "rgw_rest_sts.h"
#include "rgw_rest_iam.h"
+#include "rgw_rest_bucket_logging.h"
#include "rgw_sts.h"
#include "rgw_sal_rados.h"
#include "rgw_cksum_pipe.h"
@@ -449,8 +451,7 @@ int RGWGetObj_ObjStore_S3::send_response_data(bufferlist& bl, off_t bl_ofs,
dump_content_length(s, total_len);
dump_last_modified(s, lastmod);
dump_header_if_nonempty(s, "x-amz-version-id", version_id);
- dump_header_if_nonempty(s, "x-amz-expiration", expires);
-
+ dump_header_if_nonempty(s, "x-amz-expiration", expires);
if (attrs.find(RGW_ATTR_APPEND_PART_NUM) != attrs.end()) {
dump_header(s, "x-rgw-object-type", "Appendable");
dump_header(s, "x-rgw-next-append-position", s->obj_size);
@@ -526,7 +527,29 @@ int RGWGetObj_ObjStore_S3::send_response_data(bufferlist& bl, off_t bl_ofs,
auto iter = bl.cbegin();
decode(rt, iter);
+ rgw::sal::RGWRestoreStatus restore_status;
+ attr_iter = attrs.find(RGW_ATTR_RESTORE_STATUS);
+ if (attr_iter != attrs.end()) {
+ bufferlist bl = attr_iter->second;
+ auto iter = bl.cbegin();
+ decode(restore_status, iter);
+ }
+
+ //restore status
+ if (restore_status == rgw::sal::RGWRestoreStatus::RestoreAlreadyInProgress) {
+ dump_header(s, "x-amz-restore", "ongoing-request=\"true\"");
+ }
if (rt == rgw::sal::RGWRestoreType::Temporary) {
+ auto expire_iter = attrs.find(RGW_ATTR_RESTORE_EXPIRY_DATE);
+ ceph::real_time expiration_date;
+
+ if (expire_iter != attrs.end()) {
+ bufferlist bl = expire_iter->second;
+ auto iter = bl.cbegin();
+ decode(expiration_date, iter);
+ }
+ //restore status
+ dump_header_if_nonempty(s, "x-amz-restore", "ongoing-request=\"false\", expiry-date=\""+ dump_time_to_str(expiration_date) +"\"");
// temporary restore; set storage-class to cloudtier storage class
auto c_iter = attrs.find(RGW_ATTR_CLOUDTIER_STORAGE_CLASS);
@@ -785,7 +808,6 @@ void RGWGetObjTags_ObjStore_S3::send_response_data(bufferlist& bl)
}
}
-
int RGWPutObjTags_ObjStore_S3::get_params(optional_yield y)
{
RGWXMLParser parser;
@@ -2128,16 +2150,6 @@ void RGWListBucket_ObjStore_S3v2::send_response()
rgw_flush_formatter_and_reset(s, s->formatter);
}
-void RGWGetBucketLogging_ObjStore_S3::send_response()
-{
- dump_errno(s);
- end_header(s, this, to_mime_type(s->format));
- dump_start(s);
-
- s->formatter->open_object_section_in_ns("BucketLoggingStatus", XMLNS_AWS_S3);
- s->formatter->close_section();
- rgw_flush_formatter_and_reset(s, s->formatter);
-}
void RGWGetBucketLocation_ObjStore_S3::send_response()
{
@@ -2389,28 +2401,41 @@ void RGWGetBucketWebsite_ObjStore_S3::send_response()
rgw_flush_formatter_and_reset(s, s->formatter);
}
-static void dump_bucket_metadata(req_state *s, rgw::sal::Bucket* bucket,
+static void dump_bucket_metadata(req_state *s,
RGWStorageStats& stats)
{
dump_header(s, "X-RGW-Object-Count", static_cast<long long>(stats.num_objects));
dump_header(s, "X-RGW-Bytes-Used", static_cast<long long>(stats.size));
+}
- // only bucket's owner is allowed to get the quota settings of the account
- if (s->auth.identity->is_owner_of(bucket->get_owner())) {
- const auto& user_info = s->user->get_info();
- const auto& bucket_quota = s->bucket->get_info().quota; // bucket quota
- dump_header(s, "X-RGW-Quota-User-Size", static_cast<long long>(user_info.quota.user_quota.max_size));
- dump_header(s, "X-RGW-Quota-User-Objects", static_cast<long long>(user_info.quota.user_quota.max_objects));
- dump_header(s, "X-RGW-Quota-Max-Buckets", static_cast<long long>(user_info.max_buckets));
- dump_header(s, "X-RGW-Quota-Bucket-Size", static_cast<long long>(bucket_quota.max_size));
- dump_header(s, "X-RGW-Quota-Bucket-Objects", static_cast<long long>(bucket_quota.max_objects));
- }
+int RGWStatBucket_ObjStore_S3::get_params(optional_yield y)
+{
+ report_stats = s->info.args.exists("read-stats");
+
+ return 0;
}
void RGWStatBucket_ObjStore_S3::send_response()
{
if (op_ret >= 0) {
- dump_bucket_metadata(s, bucket.get(), stats);
+ if (report_stats) {
+ dump_bucket_metadata(s, stats);
+ }
+ // only bucket's owner is allowed to get the quota settings of the account
+ if (s->auth.identity->is_owner_of(s->bucket->get_owner())) {
+ const auto& user_info = s->user->get_info();
+ const auto& bucket_quota = s->bucket->get_info().quota; // bucket quota
+
+ dump_header(s, "X-RGW-Quota-Max-Buckets", static_cast<long long>(user_info.max_buckets));
+ if (user_info.quota.user_quota.enabled) {
+ dump_header(s, "X-RGW-Quota-User-Size", static_cast<long long>(user_info.quota.user_quota.max_size));
+ dump_header(s, "X-RGW-Quota-User-Objects", static_cast<long long>(user_info.quota.user_quota.max_objects));
+ }
+ if (bucket_quota.enabled) {
+ dump_header(s, "X-RGW-Quota-Bucket-Size", static_cast<long long>(bucket_quota.max_size));
+ dump_header(s, "X-RGW-Quota-Bucket-Objects", static_cast<long long>(bucket_quota.max_objects));
+ }
+ }
}
set_req_state_err(s, op_ret);
@@ -2508,6 +2533,10 @@ int RGWCreateBucket_ObjStore_S3::get_params(optional_yield y)
if ((op_ret < 0) && (op_ret != -ERR_LENGTH_REQUIRED))
return op_ret;
+ if (!driver->is_meta_master()) {
+ in_data.append(data);
+ }
+
if (data.length()) {
RGWCreateBucketParser parser;
@@ -3513,38 +3542,46 @@ int RGWRestoreObj_ObjStore_S3::get_params(optional_yield y)
void RGWRestoreObj_ObjStore_S3::send_response()
{
- if (op_ret < 0)
- {
- set_req_state_err(s, op_ret);
+ if (restore_ret < 0) {
+ set_req_state_err(s, restore_ret);
dump_errno(s);
end_header(s, this);
dump_start(s);
return;
}
- rgw::sal::Attrs attrs = s->object->get_attrs();
- auto attr_iter = attrs.find(RGW_ATTR_RESTORE_STATUS);
- rgw::sal::RGWRestoreStatus restore_status;
- if (attr_iter != attrs.end()) {
- bufferlist bl = attr_iter->second;
- auto iter = bl.cbegin();
- decode(restore_status, iter);
- }
- ldpp_dout(this, 10) << "restore_status=" << restore_status << dendl;
-
- if (attr_iter == attrs.end() || restore_status != rgw::sal::RGWRestoreStatus::None) {
- s->err.http_ret = 202; //Accepted
- dump_header(s, "x-amz-restore", rgw_bl_str(restore_status));
- } else if (restore_status != rgw::sal::RGWRestoreStatus::RestoreAlreadyInProgress) {
+ if (restore_ret == 0) {
+ s->err.http_ret = 202; // Accepted
+ } else if (restore_ret == 1) {
s->err.http_ret = 409; // Conflict
- dump_header_if_nonempty(s, "x-amz-restore", rgw_bl_str(restore_status));
- } else if (restore_status != rgw::sal::RGWRestoreStatus::CloudRestored) {
- s->err.http_ret = 200; // OK
- dump_header_if_nonempty(s, "x-amz-restore", rgw_bl_str(restore_status));
- } else {
- s->err.http_ret = 202; // Accepted
- dump_header_if_nonempty(s, "x-amz-restore", rgw_bl_str(restore_status));
- }
+ dump_header(s, "x-amz-restore", "on-going-request=\"true\"");
+ } else if (restore_ret == 2) {
+ rgw::sal::Attrs attrs;
+ ceph::real_time expiration_date;
+ rgw::sal::RGWRestoreType rt;
+ attrs = s->object->get_attrs();
+ auto expire_iter = attrs.find(RGW_ATTR_RESTORE_EXPIRY_DATE);
+ auto type_iter = attrs.find(RGW_ATTR_RESTORE_TYPE);
+
+ if (expire_iter != attrs.end()) {
+ bufferlist bl = expire_iter->second;
+ auto iter = bl.cbegin();
+ decode(expiration_date, iter);
+ }
+
+ if (type_iter != attrs.end()) {
+ bufferlist bl = type_iter->second;
+ auto iter = bl.cbegin();
+ decode(rt, iter);
+ }
+ if (rt == rgw::sal::RGWRestoreType::Temporary) {
+ s->err.http_ret = 200; // OK
+ dump_header(s, "x-amz-restore", "ongoing-request=\"false\", expiry-date=\""+ dump_time_to_str(expiration_date) +"\"");
+ } else {
+ s->err.http_ret = 200;
+ dump_header(s, "x-amz-restore", "ongoing-request=\"false\"");
+ }
+ }
dump_errno(s);
end_header(s, this);
@@ -3782,6 +3819,196 @@ void RGWPutACLs_ObjStore_S3::send_response()
dump_start(s);
}
+int RGWGetObjAttrs_ObjStore_S3::get_params(optional_yield y)
+{
+ string err;
+ auto& env = s->info.env;
+ version_id = s->info.args.get("versionId");
+
+ auto hdr = env->get_optional("HTTP_X_AMZ_EXPECTED_BUCKET_OWNER");
+ if (hdr) {
+ expected_bucket_owner = *hdr;
+ }
+
+ hdr = env->get_optional("HTTP_X_AMZ_MAX_PARTS");
+ if (hdr) {
+ max_parts = strict_strtol(hdr->c_str(), 10, &err);
+ if (!err.empty()) {
+ s->err.message = "Invalid value for MaxParts: " + err;
+ ldpp_dout(s, 10) << "Invalid value for MaxParts " << *hdr << ": "
+ << err << dendl;
+ return -ERR_INVALID_PART;
+ }
+ max_parts = std::min(*max_parts, 1000);
+ }
+
+ hdr = env->get_optional("HTTP_X_AMZ_PART_NUMBER_MARKER");
+ if (hdr) {
+ marker = strict_strtol(hdr->c_str(), 10, &err);
+ if (!err.empty()) {
+ s->err.message = "Invalid value for PartNumberMarker: " + err;
+ ldpp_dout(s, 10) << "Invalid value for PartNumberMarker " << *hdr << ": "
+ << err << dendl;
+ return -ERR_INVALID_PART;
+ }
+ }
+
+ hdr = env->get_optional("HTTP_X_AMZ_OBJECT_ATTRIBUTES");
+ if (hdr) {
+ requested_attributes = recognize_attrs(*hdr);
+ }
+
+ /* XXX skipping SSE-C params for now */
+
+ return 0;
+} /* RGWGetObjAttrs_ObjStore_S3::get_params(...) */
+
+int RGWGetObjAttrs_ObjStore_S3::get_decrypt_filter(
+ std::unique_ptr<RGWGetObj_Filter> *filter,
+ RGWGetObj_Filter* cb, bufferlist* manifest_bl)
+{
+ // we aren't actually decrypting the data, but for objects encrypted with
+ // SSE-C we do need to verify that required headers are present and valid
+ //
+ // in the SSE-KMS and SSE-S3 cases, this unfortunately causes us to fetch
+ // decryption keys which we don't need :(
+ std::unique_ptr<BlockCrypt> block_crypt; // ignored
+ std::map<std::string, std::string> crypt_http_responses; // ignored
+ return rgw_s3_prepare_decrypt(s, s->yield, attrs, &block_crypt,
+ crypt_http_responses);
+}
+
+void RGWGetObjAttrs_ObjStore_S3::send_response()
+{
+ if (op_ret)
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+
+ if (op_ret == 0) {
+ version_id = s->object->get_instance();
+
+ // x-amz-delete-marker: DeleteMarker // not sure we can plausibly do this?
+ dump_last_modified(s, lastmod);
+ dump_header_if_nonempty(s, "x-amz-version-id", version_id);
+ // x-amz-request-charged: RequestCharged
+ }
+
+ end_header(s, this, to_mime_type(s->format));
+ dump_start(s);
+
+ if (op_ret == 0) {
+ s->formatter->open_object_section("GetObjectAttributes");
+ if (requested_attributes & as_flag(ReqAttributes::Etag)) {
+ if (lo_etag.empty()) {
+ auto iter = attrs.find(RGW_ATTR_ETAG);
+ if (iter != attrs.end()) {
+ lo_etag = iter->second.to_str();
+ }
+ }
+ s->formatter->dump_string("ETag", lo_etag);
+ }
+
+ if (requested_attributes & as_flag(ReqAttributes::Checksum)) {
+ s->formatter->open_object_section("Checksum");
+ auto iter = attrs.find(RGW_ATTR_CKSUM);
+ if (iter != attrs.end()) {
+ try {
+ rgw::cksum::Cksum cksum;
+ auto bliter = iter->second.cbegin();
+ cksum.decode(bliter);
+ if (multipart_parts_count && multipart_parts_count > 0) {
+ s->formatter->dump_string(cksum.element_name(),
+ fmt::format("{}-{}", cksum.to_armor(), *multipart_parts_count));
+ } else {
+ s->formatter->dump_string(cksum.element_name(), cksum.to_armor());
+ }
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0)
+ << "ERROR: could not decode stored cksum, caught buffer::error" << dendl;
+ }
+ }
+ s->formatter->close_section(); /* Checksum */
+ } /* Checksum */
+
+ if (requested_attributes & as_flag(ReqAttributes::ObjectParts)) {
+ if (multipart_parts_count && multipart_parts_count > 0) {
+
+ /* XXX the following was needed to see a manifest at list_parts()! */
+ op_ret = s->object->load_obj_state(s, s->yield);
+ if (op_ret < 0) {
+ ldpp_dout_fmt(this, 0,
+ "ERROR: {} load_obj_state() failed ret={}", __func__,
+ op_ret);
+ }
+
+ ldpp_dout_fmt(this, 16,
+ "{} attr flags={} parts_count={}",
+ __func__, requested_attributes, *multipart_parts_count);
+
+ s->formatter->open_object_section("ObjectParts");
+
+ bool truncated = false;
+ int next_marker;
+
+ using namespace rgw::sal;
+
+ int ret =
+ s->object->list_parts(
+ this, s->cct,
+ max_parts ? *max_parts : 1000,
+ marker ? *marker : 0,
+ &next_marker, &truncated,
+ [&](const Object::Part& part) -> int {
+ s->formatter->open_object_section("Part");
+ s->formatter->dump_int("PartNumber", part.part_number);
+ s->formatter->dump_unsigned("Size", part.part_size);
+ if (part.cksum.type != rgw::cksum::Type::none) {
+ s->formatter->dump_string(part.cksum.element_name(), part.cksum.to_armor());
+ }
+ s->formatter->close_section(); /* Part */
+ return 0;
+ }, s->yield);
+
+ if (ret < 0) {
+ ldpp_dout_fmt(this, 0,
+ "ERROR: {} list-parts failed for {}",
+ __func__, s->object->get_name());
+ }
+ /* AWS docs disagree on the name of this element */
+ s->formatter->dump_int("PartsCount", *multipart_parts_count);
+ s->formatter->dump_int("TotalPartsCount", *multipart_parts_count);
+ s->formatter->dump_bool("IsTruncated", truncated);
+ if (max_parts) {
+ s->formatter->dump_int("MaxParts", *max_parts);
+ }
+ if (truncated) {
+ s->formatter->dump_int("NextPartNumberMarker", next_marker);
+ }
+ if (marker) {
+ s->formatter->dump_int("PartNumberMarker", *marker);
+ }
+ s->formatter->close_section();
+ } /* multipart_parts_count positive */
+ } /* ObjectParts */
+
+ if (requested_attributes & as_flag(ReqAttributes::ObjectSize)) {
+ s->formatter->dump_int("ObjectSize", s->obj_size);
+ }
+
+ if (requested_attributes & as_flag(ReqAttributes::StorageClass)) {
+ auto iter = attrs.find(RGW_ATTR_STORAGE_CLASS);
+ if (iter != attrs.end()) {
+ s->formatter->dump_string("StorageClass", iter->second.to_str());
+ } else {
+ s->formatter->dump_string("StorageClass", "STANDARD");
+ }
+ }
+ s->formatter->close_section();
+ } /* op_ret == 0 */
+
+ rgw_flush_formatter_and_reset(s, s->formatter);
+} /* RGWGetObjAttrs_ObjStore_S3::send_response */
+
void RGWGetLC_ObjStore_S3::execute(optional_yield y)
{
config.set_ctx(s->cct);
@@ -4761,11 +4988,12 @@ RGWOp *RGWHandler_REST_Bucket_S3::get_obj_op(bool get_data) const
RGWOp *RGWHandler_REST_Bucket_S3::op_get()
{
+ /* XXX maybe we could replace this with an indexing operation */
if (s->info.args.sub_resource_exists("encryption"))
return nullptr;
if (s->info.args.sub_resource_exists("logging"))
- return new RGWGetBucketLogging_ObjStore_S3;
+ return RGWHandler_REST_BucketLogging_S3::create_get_op();
if (s->info.args.sub_resource_exists("location"))
return new RGWGetBucketLocation_ObjStore_S3;
@@ -4829,9 +5057,10 @@ RGWOp *RGWHandler_REST_Bucket_S3::op_head()
RGWOp *RGWHandler_REST_Bucket_S3::op_put()
{
- if (s->info.args.sub_resource_exists("logging") ||
- s->info.args.sub_resource_exists("encryption"))
+ if (s->info.args.sub_resource_exists("encryption"))
return nullptr;
+ if (s->info.args.sub_resource_exists("logging"))
+ return RGWHandler_REST_BucketLogging_S3::create_put_op();
if (s->info.args.sub_resource_exists("versioning"))
return new RGWSetBucketVersioning_ObjStore_S3;
if (s->info.args.sub_resource_exists("website")) {
@@ -4876,8 +5105,7 @@ RGWOp *RGWHandler_REST_Bucket_S3::op_put()
RGWOp *RGWHandler_REST_Bucket_S3::op_delete()
{
- if (s->info.args.sub_resource_exists("logging") ||
- s->info.args.sub_resource_exists("encryption"))
+ if (s->info.args.sub_resource_exists("encryption"))
return nullptr;
if (is_tagging_op()) {
@@ -4921,6 +5149,10 @@ RGWOp *RGWHandler_REST_Bucket_S3::op_post()
return new RGWDeleteMultiObj_ObjStore_S3;
}
+ if (s->info.args.exists("logging")) {
+ return RGWHandler_REST_BucketLogging_S3::create_post_op();
+ }
+
if (s->info.args.exists("mdsearch")) {
if (!s->cct->_conf->rgw_enable_mdsearch) {
return NULL;
@@ -4953,6 +5185,8 @@ RGWOp *RGWHandler_REST_Obj_S3::op_get()
return new RGWGetObjLayout_ObjStore_S3;
} else if (is_tagging_op()) {
return new RGWGetObjTags_ObjStore_S3;
+ } else if (is_attributes_op()) {
+ return new RGWGetObjAttrs_ObjStore_S3;
} else if (is_obj_retention_op()) {
return new RGWGetObjRetention_ObjStore_S3;
} else if (is_obj_legal_hold_op()) {
@@ -6078,6 +6312,9 @@ AWSGeneralAbstractor::get_auth_data_v4(const req_state* const s,
case RGW_OP_GET_BUCKET_PUBLIC_ACCESS_BLOCK:
case RGW_OP_DELETE_BUCKET_PUBLIC_ACCESS_BLOCK:
case RGW_OP_GET_OBJ://s3select its post-method(payload contain the query) , the request is get-object
+ case RGW_OP_PUT_BUCKET_LOGGING:
+ case RGW_OP_POST_BUCKET_LOGGING:
+ case RGW_OP_GET_BUCKET_LOGGING:
break;
default:
ldpp_dout(s, 10) << "ERROR: AWS4 completion for operation: " << s->op_type << ", NOT IMPLEMENTED" << dendl;
@@ -6466,7 +6703,7 @@ rgw::auth::s3::LocalEngine::authenticate(
if (driver->get_user_by_access_key(dpp, access_key_id, y, &user) < 0) {
ldpp_dout(dpp, 5) << "error reading user info, uid=" << access_key_id
<< " can't authenticate" << dendl;
- return result_t::reject(-ERR_INVALID_ACCESS_KEY);
+ return result_t::deny(-ERR_INVALID_ACCESS_KEY);
}
//TODO: Uncomment, when we have a migration plan in place.
/*else {
@@ -6488,14 +6725,14 @@ rgw::auth::s3::LocalEngine::authenticate(
const auto iter = user->get_info().access_keys.find(access_key_id);
if (iter == std::end(user->get_info().access_keys)) {
ldpp_dout(dpp, 0) << "ERROR: access key not encoded in user info" << dendl;
- return result_t::reject(-EPERM);
+ return result_t::deny(-EPERM);
}
const RGWAccessKey& k = iter->second;
/* Ignore signature for HTTP OPTIONS */
if (s->op_type == RGW_OP_OPTIONS_CORS) {
auto apl = apl_factory->create_apl_local(
- cct, s, user->get_info(), std::move(account), std::move(policies),
+ cct, s, std::move(user), std::move(account), std::move(policies),
k.subuser, std::nullopt, access_key_id);
return result_t::grant(std::move(apl), completer_factory(k.key));
}
@@ -6512,11 +6749,11 @@ rgw::auth::s3::LocalEngine::authenticate(
ldpp_dout(dpp, 15) << "compare=" << compare << dendl;
if (compare != 0) {
- return result_t::reject(-ERR_SIGNATURE_NO_MATCH);
+ return result_t::deny(-ERR_SIGNATURE_NO_MATCH);
}
auto apl = apl_factory->create_apl_local(
- cct, s, user->get_info(), std::move(account), std::move(policies),
+ cct, s, std::move(user), std::move(account), std::move(policies),
k.subuser, std::nullopt, access_key_id);
return result_t::grant(std::move(apl), completer_factory(k.key));
}
@@ -6725,7 +6962,7 @@ rgw::auth::s3::STSEngine::authenticate(
string subuser;
auto apl = local_apl_factory->create_apl_local(
- cct, s, user->get_info(), std::move(account), std::move(policies),
+ cct, s, std::move(user), std::move(account), std::move(policies),
subuser, token.perm_mask, std::string(_access_key_id));
return result_t::grant(std::move(apl), completer_factory(token.secret_access_key));
}
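Reviewer note (illustration only, not part of the patch): RGWGetObjAttrs_ObjStore_S3::get_params() above turns the x-amz-object-attributes header into a bitmask that send_response() later tests with as_flag(). The real ReqAttributes enum and recognize_attrs() live in rgw_op.h/rgw_op.cc (not shown in this diff); the following is an assumed, self-contained re-creation of that parsing step:

// Assumed re-creation of the attribute-flag parsing; names mirror the patch
// but are defined here only for illustration. Whitespace trimming is omitted.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

enum class ReqAttributes : uint32_t {
  Etag = 1 << 0, Checksum = 1 << 1, ObjectParts = 1 << 2,
  StorageClass = 1 << 3, ObjectSize = 1 << 4,
};

static uint32_t as_flag(ReqAttributes a) { return static_cast<uint32_t>(a); }

static uint32_t recognize_attrs(const std::string& header) {
  uint32_t flags = 0;
  std::stringstream ss(header);
  std::string tok;
  while (std::getline(ss, tok, ',')) {   // header is a comma-separated list
    if (tok == "ETag") flags |= as_flag(ReqAttributes::Etag);
    else if (tok == "Checksum") flags |= as_flag(ReqAttributes::Checksum);
    else if (tok == "ObjectParts") flags |= as_flag(ReqAttributes::ObjectParts);
    else if (tok == "StorageClass") flags |= as_flag(ReqAttributes::StorageClass);
    else if (tok == "ObjectSize") flags |= as_flag(ReqAttributes::ObjectSize);
  }
  return flags;
}

int main() {
  const auto flags = recognize_attrs("ETag,ObjectParts");
  std::cout << ((flags & as_flag(ReqAttributes::Etag)) != 0) << "\n";     // 1
  std::cout << ((flags & as_flag(ReqAttributes::Checksum)) != 0) << "\n"; // 0
  return 0;
}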
diff --git a/src/rgw/rgw_rest_s3.h b/src/rgw/rgw_rest_s3.h
index 63909f57036..e8fdc69751c 100644
--- a/src/rgw/rgw_rest_s3.h
+++ b/src/rgw/rgw_rest_s3.h
@@ -183,14 +183,6 @@ public:
void send_versioned_response();
};
-class RGWGetBucketLogging_ObjStore_S3 : public RGWGetBucketLogging {
-public:
- RGWGetBucketLogging_ObjStore_S3() {}
- ~RGWGetBucketLogging_ObjStore_S3() override {}
-
- void send_response() override;
-};
-
class RGWGetBucketLocation_ObjStore_S3 : public RGWGetBucketLocation {
public:
RGWGetBucketLocation_ObjStore_S3() {}
@@ -222,6 +214,7 @@ public:
~RGWGetBucketWebsite_ObjStore_S3() override {}
void send_response() override;
+ virtual std::string canonical_name() const override { return fmt::format("WEBSITE.{}.BUCKET_WEBSITE", s->info.method); }
};
class RGWSetBucketWebsite_ObjStore_S3 : public RGWSetBucketWebsite {
@@ -231,6 +224,7 @@ public:
int get_params(optional_yield y) override;
void send_response() override;
+ virtual std::string canonical_name() const override { return fmt::format("WEBSITE.{}.BUCKET_WEBSITE", s->info.method); }
};
class RGWDeleteBucketWebsite_ObjStore_S3 : public RGWDeleteBucketWebsite {
@@ -239,6 +233,7 @@ public:
~RGWDeleteBucketWebsite_ObjStore_S3() override {}
void send_response() override;
+ virtual std::string canonical_name() const override { return fmt::format("WEBSITE.{}.BUCKET_WEBSITE", s->info.method); }
};
class RGWStatBucket_ObjStore_S3 : public RGWStatBucket_ObjStore {
@@ -247,6 +242,7 @@ public:
~RGWStatBucket_ObjStore_S3() override {}
void send_response() override;
+ int get_params(optional_yield y) override;
};
class RGWCreateBucket_ObjStore_S3 : public RGWCreateBucket_ObjStore {
@@ -378,6 +374,18 @@ public:
int get_params(optional_yield y) override;
};
+class RGWGetObjAttrs_ObjStore_S3 : public RGWGetObjAttrs_ObjStore {
+public:
+ RGWGetObjAttrs_ObjStore_S3() {}
+ ~RGWGetObjAttrs_ObjStore_S3() override {}
+
+ int get_params(optional_yield y) override;
+ int get_decrypt_filter(std::unique_ptr<RGWGetObj_Filter>* filter,
+ RGWGetObj_Filter* cb,
+ bufferlist* manifest_bl) override;
+ void send_response() override;
+};
+
class RGWGetLC_ObjStore_S3 : public RGWGetLC_ObjStore {
protected:
RGWLifecycleConfiguration_S3 config;
@@ -595,6 +603,7 @@ class RGWConfigBucketMetaSearch_ObjStore_S3 : public RGWConfigBucketMetaSearch {
public:
RGWConfigBucketMetaSearch_ObjStore_S3() {}
~RGWConfigBucketMetaSearch_ObjStore_S3() {}
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET_MDSEARCH", s->info.method); }
int get_params(optional_yield y) override;
void send_response() override;
@@ -612,6 +621,7 @@ class RGWDelBucketMetaSearch_ObjStore_S3 : public RGWDelBucketMetaSearch {
public:
RGWDelBucketMetaSearch_ObjStore_S3() {}
~RGWDelBucketMetaSearch_ObjStore_S3() {}
+ virtual std::string canonical_name() const override { return fmt::format("REST.{}.BUCKET_MDSEARCH", s->info.method); }
void send_response() override;
};
@@ -703,6 +713,9 @@ protected:
bool is_acl_op() const {
return s->info.args.exists("acl");
}
+ bool is_attributes_op() const {
+ return s->info.args.exists("attributes");
+ }
bool is_cors_op() const {
return s->info.args.exists("cors");
}
@@ -761,6 +774,9 @@ protected:
bool is_acl_op() const {
return s->info.args.exists("acl");
}
+ bool is_attributes_op() const {
+ return s->info.args.exists("attributes");
+ }
bool is_tagging_op() const {
return s->info.args.exists("tagging");
}
diff --git a/src/rgw/rgw_rest_sts.cc b/src/rgw/rgw_rest_sts.cc
index f2bd9429a55..1101da0af3c 100644
--- a/src/rgw/rgw_rest_sts.cc
+++ b/src/rgw/rgw_rest_sts.cc
@@ -436,6 +436,9 @@ WebTokenEngine::validate_signature(const DoutPrefixProvider* dpp, const jwt::dec
.allow_algorithm(jwt::algorithm::ps512{cert});
verifier.verify(decoded);
+ } else {
+ ldpp_dout(dpp, 0) << "Unsupported algorithm: " << algorithm << dendl;
+ throw -EINVAL;
}
} catch (std::runtime_error& e) {
ldpp_dout(dpp, 0) << "Signature validation failed: " << e.what() << dendl;
diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc
index 35c36d1ae1a..b8ff3ca2fe8 100644
--- a/src/rgw/rgw_rest_swift.cc
+++ b/src/rgw/rgw_rest_swift.cc
@@ -447,7 +447,6 @@ int RGWListBucket_ObjStore_SWIFT::get_params(optional_yield y)
}
static void dump_container_metadata(req_state *,
- const rgw::sal::Bucket*,
const std::optional<RGWStorageStats>& stats,
const RGWQuotaInfo&,
const RGWBucketWebsiteConf&);
@@ -458,7 +457,7 @@ void RGWListBucket_ObjStore_SWIFT::send_response()
map<string, bool>::iterator pref_iter = common_prefixes.begin();
dump_start(s);
- dump_container_metadata(s, s->bucket.get(), stats, quota.bucket_quota,
+ dump_container_metadata(s, stats, quota.bucket_quota,
s->bucket->get_info().website_conf);
s->formatter->open_array_section_with_attrs("container",
@@ -558,7 +557,6 @@ next:
} // RGWListBucket_ObjStore_SWIFT::send_response
static void dump_container_metadata(req_state *s,
- const rgw::sal::Bucket* bucket,
const std::optional<RGWStorageStats>& stats,
const RGWQuotaInfo& quota,
const RGWBucketWebsiteConf& ws_conf)
@@ -683,7 +681,7 @@ void RGWStatBucket_ObjStore_SWIFT::send_response()
{
if (op_ret >= 0) {
op_ret = STATUS_NO_CONTENT;
- dump_container_metadata(s, bucket.get(), stats, quota.bucket_quota,
+ dump_container_metadata(s, stats, quota.bucket_quota,
s->bucket->get_info().website_conf);
}
@@ -2640,7 +2638,7 @@ RGWOp* RGWSwiftWebsiteHandler::get_ws_listing_op()
/* Generate the header now. */
set_req_state_err(s, op_ret);
dump_errno(s);
- dump_container_metadata(s, s->bucket.get(), stats, quota.bucket_quota,
+ dump_container_metadata(s, stats, quota.bucket_quota,
s->bucket->get_info().website_conf);
end_header(s, this, "text/html");
if (op_ret < 0) {
diff --git a/src/rgw/rgw_rest_swift.h b/src/rgw/rgw_rest_swift.h
index eb1c4422e34..ec206a5160f 100644
--- a/src/rgw/rgw_rest_swift.h
+++ b/src/rgw/rgw_rest_swift.h
@@ -86,6 +86,7 @@ public:
RGWStatBucket_ObjStore_SWIFT() {}
~RGWStatBucket_ObjStore_SWIFT() override {}
+ int get_params(optional_yield y) override { return 0; }
void send_response() override;
};
diff --git a/src/rgw/rgw_s3_filter.cc b/src/rgw/rgw_s3_filter.cc
new file mode 100644
index 00000000000..05a7c4a7293
--- /dev/null
+++ b/src/rgw/rgw_s3_filter.cc
@@ -0,0 +1,269 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+#include "rgw_pubsub.h"
+#include "rgw_tools.h"
+#include "rgw_xml.h"
+#include "rgw_s3_filter.h"
+#include "common/errno.h"
+#include "rgw_sal.h"
+#include <regex>
+#include <algorithm>
+
+void rgw_s3_key_filter::dump(Formatter *f) const {
+ if (!has_content()) {
+ return;
+ }
+ f->open_array_section("FilterRules");
+ if (!prefix_rule.empty()) {
+ f->open_object_section("");
+ ::encode_json("Name", "prefix", f);
+ ::encode_json("Value", prefix_rule, f);
+ f->close_section();
+ }
+ if (!suffix_rule.empty()) {
+ f->open_object_section("");
+ ::encode_json("Name", "suffix", f);
+ ::encode_json("Value", suffix_rule, f);
+ f->close_section();
+ }
+ if (!regex_rule.empty()) {
+ f->open_object_section("");
+ ::encode_json("Name", "regex", f);
+ ::encode_json("Value", regex_rule, f);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+bool rgw_s3_key_filter::decode_xml(XMLObj* obj) {
+ XMLObjIter iter = obj->find("FilterRule");
+ XMLObj *o;
+
+ const auto throw_if_missing = true;
+ auto prefix_not_set = true;
+ auto suffix_not_set = true;
+ auto regex_not_set = true;
+ std::string name;
+
+ while ((o = iter.get_next())) {
+ RGWXMLDecoder::decode_xml("Name", name, o, throw_if_missing);
+ if (name == "prefix" && prefix_not_set) {
+ prefix_not_set = false;
+ RGWXMLDecoder::decode_xml("Value", prefix_rule, o, throw_if_missing);
+ } else if (name == "suffix" && suffix_not_set) {
+ suffix_not_set = false;
+ RGWXMLDecoder::decode_xml("Value", suffix_rule, o, throw_if_missing);
+ } else if (name == "regex" && regex_not_set) {
+ regex_not_set = false;
+ RGWXMLDecoder::decode_xml("Value", regex_rule, o, throw_if_missing);
+ } else {
+ throw RGWXMLDecoder::err("invalid/duplicate S3Key filter rule name: '" + name + "'");
+ }
+ }
+ return true;
+}
+
+void rgw_s3_key_filter::dump_xml(Formatter *f) const {
+ if (!prefix_rule.empty()) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", "prefix", f);
+ ::encode_xml("Value", prefix_rule, f);
+ f->close_section();
+ }
+ if (!suffix_rule.empty()) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", "suffix", f);
+ ::encode_xml("Value", suffix_rule, f);
+ f->close_section();
+ }
+ if (!regex_rule.empty()) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", "regex", f);
+ ::encode_xml("Value", regex_rule, f);
+ f->close_section();
+ }
+}
+
+bool rgw_s3_key_filter::has_content() const {
+ return !(prefix_rule.empty() && suffix_rule.empty() && regex_rule.empty());
+}
+
+void rgw_s3_key_value_filter::dump(Formatter *f) const {
+ if (!has_content()) {
+ return;
+ }
+ f->open_array_section("FilterRules");
+ for (const auto& key_value : kv) {
+ f->open_object_section("");
+ ::encode_json("Name", key_value.first, f);
+ ::encode_json("Value", key_value.second, f);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+bool rgw_s3_key_value_filter::decode_xml(XMLObj* obj) {
+ kv.clear();
+ XMLObjIter iter = obj->find("FilterRule");
+ XMLObj *o;
+
+ const auto throw_if_missing = true;
+
+ std::string key;
+ std::string value;
+
+ while ((o = iter.get_next())) {
+ RGWXMLDecoder::decode_xml("Name", key, o, throw_if_missing);
+ RGWXMLDecoder::decode_xml("Value", value, o, throw_if_missing);
+ kv.emplace(key, value);
+ }
+ return true;
+}
+
+void rgw_s3_key_value_filter::dump_xml(Formatter *f) const {
+ for (const auto& key_value : kv) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", key_value.first, f);
+ ::encode_xml("Value", key_value.second, f);
+ f->close_section();
+ }
+}
+
+bool rgw_s3_key_value_filter::has_content() const {
+ return !kv.empty();
+}
+
+void rgw_s3_filter::dump(Formatter *f) const {
+ encode_json("S3Key", key_filter, f);
+ encode_json("S3Metadata", metadata_filter, f);
+ encode_json("S3Tags", tag_filter, f);
+}
+
+bool rgw_s3_filter::decode_xml(XMLObj* obj) {
+ RGWXMLDecoder::decode_xml("S3Key", key_filter, obj);
+ RGWXMLDecoder::decode_xml("S3Metadata", metadata_filter, obj);
+ RGWXMLDecoder::decode_xml("S3Tags", tag_filter, obj);
+ return true;
+}
+
+void rgw_s3_filter::dump_xml(Formatter *f) const {
+ if (key_filter.has_content()) {
+ ::encode_xml("S3Key", key_filter, f);
+ }
+ if (metadata_filter.has_content()) {
+ ::encode_xml("S3Metadata", metadata_filter, f);
+ }
+ if (tag_filter.has_content()) {
+ ::encode_xml("S3Tags", tag_filter, f);
+ }
+}
+
+bool rgw_s3_filter::has_content() const {
+ return key_filter.has_content() ||
+ metadata_filter.has_content() ||
+ tag_filter.has_content();
+}
+
+bool match(const rgw_s3_key_filter& filter, const std::string& key) {
+ const auto key_size = key.size();
+ const auto prefix_size = filter.prefix_rule.size();
+ if (prefix_size != 0) {
+ // prefix rule exists
+ if (prefix_size > key_size) {
+ // if prefix is longer than key, we fail
+ return false;
+ }
+ if (!std::equal(filter.prefix_rule.begin(), filter.prefix_rule.end(), key.begin())) {
+ return false;
+ }
+ }
+ const auto suffix_size = filter.suffix_rule.size();
+ if (suffix_size != 0) {
+ // suffix rule exists
+ if (suffix_size > key_size) {
+ // if suffix is longer than key, we fail
+ return false;
+ }
+ if (!std::equal(filter.suffix_rule.begin(), filter.suffix_rule.end(), (key.end() - suffix_size))) {
+ return false;
+ }
+ }
+ if (!filter.regex_rule.empty()) {
+ // TODO add regex caching in the filter
+ const std::regex base_regex(filter.regex_rule);
+ if (!std::regex_match(key, base_regex)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyValueMap& kv) {
+ // all filter pairs must exist with the same value in the object's metadata/tags
+ // object metadata/tags may include items not in the filter
+ return std::includes(kv.begin(), kv.end(), filter.kv.begin(), filter.kv.end());
+}
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyMultiValueMap& kv) {
+ // all filter pairs must exist with the same value in the object's metadata/tags
+ // object metadata/tags may include items not in the filter
+  for (const auto& rule : filter.kv) {
+    // at least one of the object's values for this key must equal the rule's value
+    const auto range = kv.equal_range(rule.first);
+    if (std::none_of(range.first, range.second,
+                     [&rule](const auto& p) { return p.second == rule.second; })) {
+      return false;
+    }
+  }
+ return true;
+}
+
+bool match(const rgw_s3_filter& s3_filter, const rgw::sal::Object* obj) {
+ if (obj == nullptr) {
+ return false;
+ }
+
+ if (match(s3_filter.key_filter, obj->get_name())) {
+ return true;
+ }
+
+ const auto &attrs = obj->get_attrs();
+ if (!s3_filter.metadata_filter.kv.empty()) {
+ KeyValueMap attrs_map;
+ for (auto& attr : attrs) {
+ if (boost::algorithm::starts_with(attr.first, RGW_ATTR_META_PREFIX)) {
+ std::string_view key(attr.first);
+ key.remove_prefix(sizeof(RGW_ATTR_PREFIX)-1);
+ // we want to pass a null terminated version
+ // of the bufferlist, hence "to_str().c_str()"
+ attrs_map.emplace(key, attr.second.to_str().c_str());
+ }
+ }
+ if (match(s3_filter.metadata_filter, attrs_map)) {
+ return true;
+ }
+ }
+
+ if (!s3_filter.tag_filter.kv.empty()) {
+ // tag filter exists
+ // try to fetch tags from the attributes
+ KeyMultiValueMap tags;
+ const auto attr_iter = attrs.find(RGW_ATTR_TAGS);
+ if (attr_iter != attrs.end()) {
+ auto bliter = attr_iter->second.cbegin();
+ RGWObjTags obj_tags;
+ try {
+ ::decode(obj_tags, bliter);
+ } catch (buffer::error &) {
+ // not able to decode tags
+ return false;
+ }
+ tags = std::move(obj_tags.get_tags());
+ }
+ if (match(s3_filter.tag_filter, tags)) {
+ return true;
+ }
+ }
+
+ return false;
+}
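
For orientation, a minimal usage sketch of the key-filter matching implemented above. The test function and sample key names are hypothetical; only rgw_s3_key_filter and the match() overload come from this patch.

#include "rgw_s3_filter.h"
#include <cassert>

// Hypothetical scaffolding; only rgw_s3_key_filter and match() are from this patch.
static void key_filter_examples() {
  rgw_s3_key_filter f;
  f.prefix_rule = "photos/";
  f.suffix_rule = ".jpg";
  assert(match(f, "photos/cat.jpg"));   // both rules satisfied
  assert(!match(f, "photos/cat.png"));  // suffix mismatch
  assert(!match(f, "docs/cat.jpg"));    // prefix mismatch

  rgw_s3_key_filter r;
  r.regex_rule = "photos/[0-9]+\\.jpg"; // recompiled on every call, see the TODO above
  assert(match(r, "photos/42.jpg"));
}
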
diff --git a/src/rgw/rgw_s3_filter.h b/src/rgw/rgw_s3_filter.h
new file mode 100644
index 00000000000..0273da9a364
--- /dev/null
+++ b/src/rgw/rgw_s3_filter.h
@@ -0,0 +1,103 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+#pragma once
+
+#include "rgw_tools.h"
+#include <boost/container/flat_map.hpp>
+
+class XMLObj;
+
+struct rgw_s3_key_filter {
+ bool operator==(const rgw_s3_key_filter& rhs) const = default;
+ std::string prefix_rule;
+ std::string suffix_rule;
+ std::string regex_rule;
+
+ bool has_content() const;
+
+ void dump(Formatter *f) const;
+ bool decode_xml(XMLObj *obj);
+ void dump_xml(Formatter *f) const;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(prefix_rule, bl);
+ encode(suffix_rule, bl);
+ encode(regex_rule, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(prefix_rule, bl);
+ decode(suffix_rule, bl);
+ decode(regex_rule, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_s3_key_filter)
+
+using KeyValueMap = boost::container::flat_map<std::string, std::string>;
+using KeyMultiValueMap = std::multimap<std::string, std::string>;
+
+struct rgw_s3_key_value_filter {
+ KeyValueMap kv;
+
+ bool has_content() const;
+
+ void dump(Formatter *f) const;
+ bool decode_xml(XMLObj *obj);
+ void dump_xml(Formatter *f) const;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(kv, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(kv, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_s3_key_value_filter)
+
+struct rgw_s3_filter {
+ rgw_s3_key_filter key_filter;
+ rgw_s3_key_value_filter metadata_filter;
+ rgw_s3_key_value_filter tag_filter;
+
+ bool has_content() const;
+
+ void dump(Formatter *f) const;
+ bool decode_xml(XMLObj *obj);
+ void dump_xml(Formatter *f) const;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(key_filter, bl);
+ encode(metadata_filter, bl);
+ encode(tag_filter, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(key_filter, bl);
+ decode(metadata_filter, bl);
+ if (struct_v >= 2) {
+ decode(tag_filter, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_s3_filter)
+
+bool match(const rgw_s3_key_filter& filter, const std::string& key);
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyValueMap& kv);
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyMultiValueMap& kv);
+
+bool match(const rgw_s3_filter& filter, const rgw::sal::Object* obj);
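
A brief round-trip sketch of the versioned encoding declared above. The helper function is hypothetical; encode()/decode() and the struct_v handling are from this header.

// Hypothetical helper; in.encode()/out.decode() are the members declared above.
static void rgw_s3_filter_roundtrip(const rgw_s3_filter& in, rgw_s3_filter& out) {
  bufferlist bl;
  in.encode(bl);           // ENCODE_START(2, 1, ...): version 2, compat 1
  auto it = bl.cbegin();
  out.decode(it);          // a v1 payload simply leaves tag_filter empty (struct_v check)
}
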
diff --git a/src/rgw/rgw_s3select.cc b/src/rgw/rgw_s3select.cc
index f0b26824ca6..d8be76a6b1c 100644
--- a/src/rgw/rgw_s3select.cc
+++ b/src/rgw/rgw_s3select.cc
@@ -762,7 +762,9 @@ void RGWSelectObj_ObjStore_S3::execute(optional_yield y)
op_ret = -ERR_INVALID_REQUEST;
} else {
//status per amount of processed data
+#ifdef _ARROW_EXIST
m_aws_response_handler.update_total_bytes_returned(m_s3_parquet_object.get_return_result_size());
+#endif
m_aws_response_handler.init_stats_response();
m_aws_response_handler.send_stats_response();
m_aws_response_handler.init_end_response();
diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h
index 769d7435442..97e25179fc9 100644
--- a/src/rgw/rgw_sal.h
+++ b/src/rgw/rgw_sal.h
@@ -15,6 +15,7 @@
#pragma once
+#include <cstdint>
#include <optional>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
@@ -26,6 +27,7 @@
#include "rgw_notify_event_type.h"
#include "rgw_req_context.h"
#include "include/random.h"
+#include "include/function2.hpp"
// FIXME: following subclass dependencies
#include "driver/rados/rgw_user.h"
@@ -874,7 +876,7 @@ class Bucket {
std::string zonegroup_id;
rgw_placement_rule placement_rule;
// zone placement is optional on buckets created for another zonegroup
- const RGWZonePlacementInfo* zone_placement;
+ const RGWZonePlacementInfo* zone_placement = nullptr;
RGWAccessControlPolicy policy;
Attrs attrs;
bool obj_lock_enabled = false;
@@ -1003,6 +1005,31 @@ class Bucket {
virtual int remove_topics(RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) = 0;
+  /** Read the name of the pending bucket logging object */
+ virtual int get_logging_object_name(std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) = 0;
+  /** Update the name of the pending bucket logging object */
+ virtual int set_logging_object_name(const std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ bool new_obj,
+ RGWObjVersionTracker* objv_tracker) = 0;
+ /** Remove the object holding the name of the pending bucket logging object */
+ virtual int remove_logging_object_name(const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) = 0;
+ /** Move the pending bucket logging object into the bucket */
+ virtual int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) = 0;
+  /** Remove the pending bucket logging object */
+ virtual int remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) = 0;
+ /** Write a record to the pending bucket logging object */
+ virtual int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) = 0;
+
/* dang - This is temporary, until the API is completed */
virtual rgw_bucket& get_key() = 0;
virtual RGWBucketInfo& get_info() = 0;
@@ -1151,6 +1178,9 @@ class Object {
std::string* version_id, std::string* tag, std::string* etag,
void (*progress_cb)(off_t, void *), void* progress_data,
const DoutPrefixProvider* dpp, optional_yield y) = 0;
+
+  /** Return the logging subsystem for this object */
+  virtual unsigned get_subsys() { return ceph_subsys_rgw; }
/** Get the ACL for this object */
virtual RGWAccessControlPolicy& get_acl(void) = 0;
/** Set the ACL for this object */
@@ -1231,6 +1261,28 @@ class Object {
/** Dump driver-specific object layout info in JSON */
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) = 0;
+ /* A transfer data type describing metadata specific to one part of a
+ * completed multipart upload object, following the GetObjectAttributes
+ * response syntax for Object::Parts here:
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html */
+ class Part
+ {
+ public:
+ int part_number;
+ uint32_t part_size;
+ rgw::cksum::Cksum cksum;
+ }; /* Part */
+
+ /* callback function/object used by list_parts */
+ using list_parts_each_t =
+ const fu2::unique_function<int(const Part&) const>;
+
+  /** If multipart, enumerate up to max_parts parts of the object, starting at marker */
+ virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y) = 0;
+
/** Get the cached attributes for this object */
virtual Attrs& get_attrs(void) = 0;
/** Get the (const) cached attributes for this object */
@@ -1429,7 +1481,7 @@ public:
virtual int init(const DoutPrefixProvider* dpp, optional_yield y, ACLOwner& owner, rgw_placement_rule& dest_placement, rgw::sal::Attrs& attrs) = 0;
/** List all the parts of this upload, filling the parts cache */
virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
- int num_parts, int marker,
+ int max_parts, int marker,
int* next_marker, bool* truncated, optional_yield y,
bool assume_unsorted = false) = 0;
/** Abort this upload */
@@ -1733,8 +1785,6 @@ class Zone {
virtual bool is_writeable() = 0;
/** Get the URL for the endpoint for redirecting to this zone */
virtual bool get_redirect_endpoint(std::string* endpoint) = 0;
- /** Check to see if the given API is supported in this zone */
- virtual bool has_zonegroup_api(const std::string& api) const = 0;
/** Get the current period ID for this zone */
virtual const std::string& get_current_period_id() = 0;
 /** Get the system access key for this zone */
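
A minimal caller sketch for the new Object::list_parts() API added above; obj, dpp, cct and y are assumed to be valid, and the byte-counting callback is illustrative only.

// Hypothetical caller; only Object::list_parts(), Part and list_parts_each_t are from this patch.
uint64_t total_bytes = 0;
int next_marker = 0;
bool truncated = false;
int r = obj->list_parts(dpp, cct, 100 /* max_parts */, 0 /* marker */,
                        &next_marker, &truncated,
                        [&total_bytes](const rgw::sal::Object::Part& part) {
                          total_bytes += part.part_size;
                          return 0; // a non-zero return is assumed to abort the listing
                        },
                        y);
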
diff --git a/src/rgw/rgw_sal_dbstore.cc b/src/rgw/rgw_sal_dbstore.cc
index d3af42cf2ec..02fd7a49cda 100644
--- a/src/rgw/rgw_sal_dbstore.cc
+++ b/src/rgw/rgw_sal_dbstore.cc
@@ -271,7 +271,7 @@ namespace rgw::sal {
/* XXX: handle has_instance_obj like in set_bucket_instance_attrs() */
- ret = store->getDB()->update_bucket(dpp, "attrs", info, false, nullptr, &new_attrs, nullptr, &get_info().objv_tracker);
+ ret = store->getDB()->update_bucket(dpp, "attrs", info, false, nullptr, &attrs, nullptr, &get_info().objv_tracker);
return ret;
}
@@ -458,14 +458,6 @@ namespace rgw::sal {
return false;
}
- bool DBZone::has_zonegroup_api(const std::string& api) const
- {
- if (api == "default")
- return true;
-
- return false;
- }
-
const std::string& DBZone::get_current_period_id()
{
return current_period->get_id();
@@ -496,6 +488,14 @@ namespace rgw::sal {
return std::make_unique<DBLuaManager>(this);
}
+ int DBObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y)
+ {
+ return -EOPNOTSUPP;
+ }
+
int DBObject::load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh)
{
RGWObjState* astate;
diff --git a/src/rgw/rgw_sal_dbstore.h b/src/rgw/rgw_sal_dbstore.h
index 107ba735a63..4df10d1dce1 100644
--- a/src/rgw/rgw_sal_dbstore.h
+++ b/src/rgw/rgw_sal_dbstore.h
@@ -268,22 +268,22 @@ protected:
class DBZone : public StoreZone {
protected:
DBStore* store;
- RGWRealm *realm{nullptr};
- DBZoneGroup *zonegroup{nullptr};
- RGWZone *zone_public_config{nullptr}; /* external zone params, e.g., entrypoints, log flags, etc. */
- RGWZoneParams *zone_params{nullptr}; /* internal zone params, e.g., rados pools */
- RGWPeriod *current_period{nullptr};
+ std::unique_ptr<RGWRealm> realm;
+ std::unique_ptr<DBZoneGroup> zonegroup;
+ std::unique_ptr<RGWZone> zone_public_config; /* external zone params, e.g., entrypoints, log flags, etc. */
+ std::unique_ptr<RGWZoneParams> zone_params; /* internal zone params, e.g., rados pools */
+ std::unique_ptr<RGWPeriod> current_period;
public:
DBZone(DBStore* _store) : store(_store) {
- realm = new RGWRealm();
+ realm = std::make_unique<RGWRealm>();
std::unique_ptr<RGWZoneGroup> rzg = std::make_unique<RGWZoneGroup>("default", "default");
rzg->api_name = "default";
rzg->is_master = true;
- zonegroup = new DBZoneGroup(store, std::move(rzg));
- zone_public_config = new RGWZone();
- zone_params = new RGWZoneParams();
- current_period = new RGWPeriod();
+ zonegroup = std::make_unique<DBZoneGroup>(store, std::move(rzg));
+ zone_public_config = std::make_unique<RGWZone>();
+ zone_params = std::make_unique<RGWZoneParams>();
+ current_period = std::make_unique<RGWPeriod>();
// XXX: only default and STANDARD supported for now
RGWZonePlacementInfo info;
@@ -292,13 +292,7 @@ protected:
info.storage_classes = sc;
zone_params->placement_pools["default"] = info;
}
- ~DBZone() {
- delete realm;
- delete zonegroup;
- delete zone_public_config;
- delete zone_params;
- delete current_period;
- }
+ ~DBZone() = default;
virtual std::unique_ptr<Zone> clone() override {
return std::make_unique<DBZone>(store);
@@ -309,7 +303,6 @@ protected:
virtual const std::string& get_name() const override;
virtual bool is_writeable() override;
virtual bool get_redirect_endpoint(std::string* endpoint) override;
- virtual bool has_zonegroup_api(const std::string& api) const override;
virtual const std::string& get_current_period_id() override;
virtual const RGWAccessKey& get_system_key() override;
virtual const std::string& get_realm_name() override;
@@ -535,6 +528,7 @@ protected:
DBObject(DBObject& _o) = default;
+    virtual unsigned get_subsys() override { return ceph_subsys_rgw_dbstore; }
virtual int delete_object(const DoutPrefixProvider* dpp,
optional_yield y,
uint32_t flags,
@@ -560,6 +554,13 @@ protected:
virtual int set_acl(const RGWAccessControlPolicy& acl) override { acls = acl; return 0; }
virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y, uint32_t flags) override;
+
+  /** If multipart, enumerate up to max_parts parts of the object, starting at marker */
+ virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y) override;
+
virtual int load_obj_state(const DoutPrefixProvider* dpp, optional_yield y, bool follow_olh = true) override;
virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj = NULL) override;
virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp) override;
diff --git a/src/rgw/rgw_sal_filter.cc b/src/rgw/rgw_sal_filter.cc
index 733bfa39ee2..15da580988e 100644
--- a/src/rgw/rgw_sal_filter.cc
+++ b/src/rgw/rgw_sal_filter.cc
@@ -1046,6 +1046,17 @@ RGWAccessControlPolicy& FilterObject::get_acl()
return next->get_acl();
}
+int FilterObject::list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y)
+{
+ return next->list_parts(dpp, cct, max_parts, marker, next_marker,
+ truncated,
+ sal::Object::list_parts_each_t(each_func),
+ y);
+}
+
int FilterObject::load_obj_state(const DoutPrefixProvider *dpp,
optional_yield y, bool follow_olh) {
return next->load_obj_state(dpp, y, follow_olh);
diff --git a/src/rgw/rgw_sal_filter.h b/src/rgw/rgw_sal_filter.h
index 17b102f7619..b6b6ed42b8f 100644
--- a/src/rgw/rgw_sal_filter.h
+++ b/src/rgw/rgw_sal_filter.h
@@ -108,9 +108,6 @@ public:
virtual bool get_redirect_endpoint(std::string* endpoint) override {
return next->get_redirect_endpoint(endpoint);
}
- virtual bool has_zonegroup_api(const std::string& api) const override {
- return next->has_zonegroup_api(api);
- }
virtual const std::string& get_current_period_id() override {
return next->get_current_period_id();
}
@@ -669,6 +666,36 @@ public:
optional_yield y, const DoutPrefixProvider *dpp) override {
return next->remove_topics(objv_tracker, y, dpp);
}
+ int get_logging_object_name(std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) override {
+ return next->get_logging_object_name(obj_name, prefix, y, dpp, objv_tracker);
+ }
+ int set_logging_object_name(const std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ bool new_obj,
+ RGWObjVersionTracker* objv_track) override {
+ return next->set_logging_object_name(obj_name, prefix, y, dpp, new_obj, objv_track);
+ }
+ int remove_logging_object_name(const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) override {
+ return next->remove_logging_object_name(prefix, y, dpp, objv_tracker);
+ }
+  int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override {
+ return next->commit_logging_object(obj_name, y, dpp);
+ }
+ int remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override {
+ return next->remove_logging_object(obj_name, y, dpp);
+ }
+ int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) override {
+ return next->write_logging_object(obj_name, record, y, dpp, async_completion);
+ }
virtual rgw_bucket& get_key() override { return next->get_key(); }
virtual RGWBucketInfo& get_info() override { return next->get_info(); }
@@ -760,6 +787,12 @@ public:
virtual bool empty() const override { return next->empty(); }
virtual const std::string &get_name() const override { return next->get_name(); }
+  /** If multipart, enumerate up to max_parts parts of the object, starting at marker */
+ virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
+ int max_parts, int marker, int* next_marker,
+ bool* truncated, list_parts_each_t each_func,
+ optional_yield y) override;
+
virtual int load_obj_state(const DoutPrefixProvider *dpp, optional_yield y,
bool follow_olh = true) override;
virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs,
diff --git a/src/rgw/rgw_sal_store.h b/src/rgw/rgw_sal_store.h
index 47d031fbfc6..99b90564997 100644
--- a/src/rgw/rgw_sal_store.h
+++ b/src/rgw/rgw_sal_store.h
@@ -253,6 +253,26 @@ class StoreBucket : public Bucket {
optional_yield y, const DoutPrefixProvider *dpp) override {return 0;}
int remove_topics(RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override {return 0;}
+ int get_logging_object_name(std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) override { return 0; }
+ int set_logging_object_name(const std::string& obj_name,
+ const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ bool new_obj,
+ RGWObjVersionTracker* objv_tracker) override { return 0; }
+ int remove_logging_object_name(const std::string& prefix,
+ optional_yield y,
+ const DoutPrefixProvider *dpp,
+ RGWObjVersionTracker* objv_tracker) override { return 0; }
+ int commit_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override { return 0; }
+ int remove_logging_object(const std::string& obj_name, optional_yield y, const DoutPrefixProvider *dpp) override { return 0; }
+ int write_logging_object(const std::string& obj_name, const std::string& record, optional_yield y, const DoutPrefixProvider *dpp, bool async_completion) override {
+ return 0;
+ }
friend class BucketList;
};
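
To make the intended lifecycle of the new logging virtuals concrete, a hedged sketch follows; the helper, its error handling and the immediate commit are illustrative, and only the get/write/commit calls come from the API added in this patch.

// Hypothetical flow; real callers would commit on size/time rollover, not after every record.
int append_and_commit(rgw::sal::Bucket* log_bucket, const std::string& prefix,
                      const std::string& record, optional_yield y,
                      const DoutPrefixProvider* dpp) {
  std::string obj_name;
  RGWObjVersionTracker objv;
  int r = log_bucket->get_logging_object_name(obj_name, prefix, y, dpp, &objv);
  if (r < 0) {
    return r;
  }
  r = log_bucket->write_logging_object(obj_name, record, y, dpp, false /* async_completion */);
  if (r < 0) {
    return r;
  }
  // turn the pending object into a regular object in the log bucket
  return log_bucket->commit_logging_object(obj_name, y, dpp);
}
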
diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc
index 032b3734bf9..937f74601b3 100644
--- a/src/rgw/rgw_swift_auth.cc
+++ b/src/rgw/rgw_swift_auth.cc
@@ -522,7 +522,7 @@ ExternalTokenEngine::authenticate(const DoutPrefixProvider* dpp,
}
auto apl = apl_factory->create_apl_local(
- cct, s, user->get_info(), std::move(account),
+ cct, s, std::move(user), std::move(account),
std::move(policies), extract_swift_subuser(swift_user),
std::nullopt, LocalApplier::NO_ACCESS_KEY);
return result_t::grant(std::move(apl));
@@ -685,7 +685,7 @@ SignedTokenEngine::authenticate(const DoutPrefixProvider* dpp,
}
auto apl = apl_factory->create_apl_local(
- cct, s, user->get_info(), std::move(account),
+ cct, s, std::move(user), std::move(account),
std::move(policies), extract_swift_subuser(swift_user),
std::nullopt, LocalApplier::NO_ACCESS_KEY);
return result_t::grant(std::move(apl));
diff --git a/src/rgw/rgw_swift_auth.h b/src/rgw/rgw_swift_auth.h
index 9049c54f5ca..c27a24a2619 100644
--- a/src/rgw/rgw_swift_auth.h
+++ b/src/rgw/rgw_swift_auth.h
@@ -23,8 +23,8 @@ namespace swift {
class TempURLApplier : public rgw::auth::LocalApplier {
public:
TempURLApplier(CephContext* const cct,
- const RGWUserInfo& user_info)
- : LocalApplier(cct, user_info, std::nullopt, {}, LocalApplier::NO_SUBUSER,
+ std::unique_ptr<rgw::sal::User> user)
+ : LocalApplier(cct, std::move(user), std::nullopt, {}, LocalApplier::NO_SUBUSER,
std::nullopt, LocalApplier::NO_ACCESS_KEY)
{}
@@ -155,8 +155,8 @@ public:
class SwiftAnonymousApplier : public rgw::auth::LocalApplier {
public:
SwiftAnonymousApplier(CephContext* const cct,
- const RGWUserInfo& user_info)
- : LocalApplier(cct, user_info, std::nullopt, {}, LocalApplier::NO_SUBUSER,
+ std::unique_ptr<rgw::sal::User> user)
+ : LocalApplier(cct, std::move(user), std::nullopt, {}, LocalApplier::NO_SUBUSER,
std::nullopt, LocalApplier::NO_ACCESS_KEY) {
}
bool is_admin_of(const rgw_owner& o) const {return false;}
@@ -238,7 +238,7 @@ class DefaultStrategy : public rgw::auth::Strategy,
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
- const RGWUserInfo& user_info,
+ std::unique_ptr<rgw::sal::User> user,
std::optional<RGWAccountInfo> account,
std::vector<IAM::Policy> policies,
const std::string& subuser,
@@ -247,7 +247,7 @@ class DefaultStrategy : public rgw::auth::Strategy,
auto apl = \
rgw::auth::add_3rdparty(driver, rgw_user(s->account_name),
rgw::auth::add_sysreq(cct, driver, s,
- LocalApplier(cct, user_info, std::move(account), std::move(policies),
+ LocalApplier(cct, std::move(user), std::move(account), std::move(policies),
subuser, perm_mask, access_key_id)));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
@@ -259,7 +259,9 @@ class DefaultStrategy : public rgw::auth::Strategy,
/* TempURL doesn't need any user account override. It's a Swift-specific
* mechanism that requires account name internally, so there is no
* business with delegating the responsibility outside. */
- return aplptr_t(new rgw::auth::swift::TempURLApplier(cct, user_info));
+ std::unique_ptr<rgw::sal::User> user = s->user->clone();
+ user->get_info() = user_info;
+ return aplptr_t(new rgw::auth::swift::TempURLApplier(cct, std::move(user)));
}
public:
diff --git a/src/rgw/services/svc_zone.cc b/src/rgw/services/svc_zone.cc
index 70cf40eb6cb..97d81550058 100644
--- a/src/rgw/services/svc_zone.cc
+++ b/src/rgw/services/svc_zone.cc
@@ -657,18 +657,6 @@ const string& RGWSI_Zone::get_current_period_id() const
return current_period->get_id();
}
-bool RGWSI_Zone::has_zonegroup_api(const std::string& api) const
-{
- if (!current_period->get_id().empty()) {
- const auto& zonegroups_by_api = current_period->get_map().zonegroups_by_api;
- if (zonegroups_by_api.find(api) != zonegroups_by_api.end())
- return true;
- } else if (zonegroup->api_name == api) {
- return true;
- }
- return false;
-}
-
bool RGWSI_Zone::zone_is_writeable()
{
return writeable_zone && !get_zone().is_read_only();
@@ -743,8 +731,7 @@ bool RGWSI_Zone::is_meta_master() const
bool RGWSI_Zone::need_to_log_metadata() const
{
- return is_meta_master() &&
- (zonegroup->zones.size() > 1 || current_period->is_multi_zonegroups_with_zones());
+ return is_meta_master() && is_syncing_bucket_meta();
}
bool RGWSI_Zone::can_reshard() const
@@ -761,33 +748,16 @@ bool RGWSI_Zone::can_reshard() const
/**
* Check to see if the bucket metadata could be synced
- * bucket: the bucket to check
 * Returns false if the bucket metadata is not synced
*/
-bool RGWSI_Zone::is_syncing_bucket_meta(const rgw_bucket& bucket)
+bool RGWSI_Zone::is_syncing_bucket_meta() const
{
-
/* no current period */
if (current_period->get_id().empty()) {
return false;
}
- /* zonegroup is not master zonegroup */
- if (!zonegroup->is_master_zonegroup()) {
- return false;
- }
-
- /* single zonegroup and a single zone */
- if (current_period->is_single_zonegroup() && zonegroup->zones.size() == 1) {
- return false;
- }
-
- /* zone is not master */
- if (zonegroup->master_zone != zone_public_config->id) {
- return false;
- }
-
- return true;
+ return zonegroup->zones.size() > 1 || current_period->is_multi_zonegroups_with_zones();
}
diff --git a/src/rgw/services/svc_zone.h b/src/rgw/services/svc_zone.h
index c4a3a28f0d7..719546eb8db 100644
--- a/src/rgw/services/svc_zone.h
+++ b/src/rgw/services/svc_zone.h
@@ -96,7 +96,6 @@ public:
uint32_t get_zone_short_id() const;
const std::string& get_current_period_id() const;
- bool has_zonegroup_api(const std::string& api) const;
bool zone_is_writeable();
bool zone_syncs_from(const RGWZone& target_zone, const RGWZone& source_zone) const;
@@ -146,7 +145,7 @@ public:
bool need_to_log_data() const;
bool need_to_log_metadata() const;
bool can_reshard() const;
- bool is_syncing_bucket_meta(const rgw_bucket& bucket);
+ bool is_syncing_bucket_meta() const;
int list_zonegroups(const DoutPrefixProvider *dpp, std::list<std::string>& zonegroups);
int list_regions(const DoutPrefixProvider *dpp, std::list<std::string>& regions);