-rw-r--r--  PendingReleaseNotes | 7
-rw-r--r--  ceph.spec.in | 1
-rw-r--r--  debian/libcephfs-dev.install | 1
-rw-r--r--  doc/dev/crimson/pipeline.rst | 124
-rw-r--r--  doc/radosgw/bucket_logging.rst | 6
-rw-r--r--  doc/radosgw/s3/bucketops.rst | 40
-rw-r--r--  doc/radosgw/uadk-accel.rst | 50
-rw-r--r--  examples/rgw/boto3/bucket_logging.py | 11
-rw-r--r--  examples/rgw/boto3/post_bucket_logging.py | 23
-rw-r--r--  examples/rgw/boto3/service-2.sdk-extras.json | 39
-rw-r--r--  monitoring/ceph-mixin/prometheus_alerts.libsonnet | 10
-rw-r--r--  monitoring/ceph-mixin/prometheus_alerts.yml | 9
-rw-r--r--  monitoring/ceph-mixin/tests_alerts/test_alerts.yml | 48
-rwxr-xr-x  qa/standalone/scrub/osd-scrub-repair.sh | 2
-rw-r--r--  qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml | 3
-rw-r--r--  qa/suites/nvmeof/basic/workloads/nvmeof_namespaces.yaml | 2
-rw-r--r--  qa/suites/nvmeof/basic/workloads/nvmeof_scalability.yaml | 5
-rw-r--r--  qa/suites/nvmeof/thrash/gateway-initiator-setup/10-subsys-90-namespace-no_huge_pages.yaml | 37
-rw-r--r--  qa/suites/nvmeof/thrash/gateway-initiator-setup/120-subsys-8-namespace.yaml (renamed from qa/suites/nvmeof/thrash/gateway-initiator-setup/3-subsys-60-namespace.yaml) | 4
-rw-r--r--  qa/suites/nvmeof/thrash/thrashers/nvmeof_mon_thrash.yaml | 1
-rw-r--r--  qa/suites/nvmeof/thrash/thrashers/nvmeof_thrash.yaml | 2
-rw-r--r--  qa/suites/nvmeof/thrash/workloads/fio.yaml | 6
-rw-r--r--  qa/suites/rgw/notifications/tasks/kafka/test_kafka.yaml | 2
-rw-r--r--  qa/tasks/kafka.py | 11
-rw-r--r--  qa/tasks/mgr/dashboard/test_rbd.py | 12
-rw-r--r--  qa/tasks/nvmeof.py | 36
-rw-r--r--  qa/tasks/s3a_hadoop.py | 16
-rwxr-xr-x  qa/workunits/nvmeof/basic_tests.sh | 10
-rwxr-xr-x  qa/workunits/nvmeof/fio_test.sh | 7
-rwxr-xr-x  qa/workunits/nvmeof/scalability_test.sh | 51
-rwxr-xr-x  qa/workunits/nvmeof/setup_subsystem.sh | 17
-rwxr-xr-x  qa/workunits/rbd/cli_generic.sh | 5
-rw-r--r--  src/CMakeLists.txt | 16
-rw-r--r--  src/cephfs.pc.in | 10
-rw-r--r--  src/client/Client.cc | 44
-rw-r--r--  src/common/Formatter.cc | 61
-rw-r--r--  src/common/HTMLFormatter.cc | 20
-rw-r--r--  src/common/intrusive_lru.h | 65
-rw-r--r--  src/crimson/osd/object_context.h | 57
-rw-r--r--  src/crimson/osd/object_context_loader.cc | 43
-rw-r--r--  src/crimson/osd/object_context_loader.h | 44
-rw-r--r--  src/crimson/osd/ops_executer.cc | 263
-rw-r--r--  src/crimson/osd/ops_executer.h | 125
-rw-r--r--  src/crimson/osd/osd_operation.h | 34
-rw-r--r--  src/crimson/osd/osd_operation_external_tracking.h | 178
-rw-r--r--  src/crimson/osd/osd_operations/client_request.cc | 78
-rw-r--r--  src/crimson/osd/osd_operations/client_request.h | 33
-rw-r--r--  src/crimson/osd/osd_operations/internal_client_request.cc | 75
-rw-r--r--  src/crimson/osd/osd_operations/internal_client_request.h | 12
-rw-r--r--  src/crimson/osd/osd_operations/logmissing_request.cc | 6
-rw-r--r--  src/crimson/osd/osd_operations/logmissing_request.h | 4
-rw-r--r--  src/crimson/osd/osd_operations/logmissing_request_reply.cc | 5
-rw-r--r--  src/crimson/osd/osd_operations/logmissing_request_reply.h | 2
-rw-r--r--  src/crimson/osd/osd_operations/replicated_request.cc | 6
-rw-r--r--  src/crimson/osd/osd_operations/replicated_request.h | 4
-rw-r--r--  src/crimson/osd/osd_operations/snaptrim_event.cc | 70
-rw-r--r--  src/crimson/osd/osd_operations/snaptrim_event.h | 11
-rw-r--r--  src/crimson/osd/pg.cc | 185
-rw-r--r--  src/crimson/osd/pg.h | 12
-rw-r--r--  src/crimson/osd/pg_backend.cc | 45
-rw-r--r--  src/crimson/osd/pg_backend.h | 19
-rw-r--r--  src/include/cephfs/ceph_ll_client.h | 3
-rw-r--r--  src/include/cephfs/libcephfs.h | 4
-rw-r--r--  src/libcephfs.cc | 4
-rw-r--r--  src/librbd/ObjectMap.cc | 26
-rw-r--r--  src/librbd/ObjectMap.h | 1
-rw-r--r--  src/librbd/operation/FlattenRequest.cc | 9
-rw-r--r--  src/mon/NVMeofGwMon.cc | 18
-rw-r--r--  src/mon/NVMeofGwMon.h | 3
-rw-r--r--  src/msg/async/Event.cc | 2
-rw-r--r--  src/nvmeof/NVMeofGwMonitorClient.cc | 6
-rw-r--r--  src/nvmeof/NVMeofGwMonitorClient.h | 2
-rw-r--r--  src/os/bluestore/bluestore_tool.cc | 2
-rw-r--r--  src/osd/PG.cc | 1
-rw-r--r--  src/osd/scrubber/pg_scrubber.cc | 20
-rw-r--r--  src/osd/scrubber_common.h | 6
-rw-r--r--  src/osdc/CMakeLists.txt | 5
-rw-r--r--  src/pybind/mgr/cephadm/services/nvmeof.py | 12
-rw-r--r--  src/pybind/mgr/dashboard/controllers/cluster_configuration.py | 56
-rw-r--r--  src/pybind/mgr/dashboard/controllers/rgw_iam.py | 52
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.e2e-spec.ts | 3
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.po.ts | 12
-rw-r--r--  src/pybind/mgr/dashboard/frontend/package-lock.json | 8
-rw-r--r--  src/pybind/mgr/dashboard/frontend/package.json | 2
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form-create-request.model.ts | 1
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.html | 3
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.ts | 29
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.ts | 42
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts | 11
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.html | 1
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/core/auth/auth.module.ts | 47
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form-role.model.ts | 9
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.html | 432
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.ts | 20
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/api/prometheus.service.ts | 8
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.model.ts | 1
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.html | 2
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.html | 43
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.scss | 3
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.ts | 16
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.html | 3
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/enum/dashboard-promqls.enum.ts | 4
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/styles/themes/_content.scss | 1
-rw-r--r--  src/pybind/mgr/dashboard/openapi.yaml | 290
-rw-r--r--  src/pybind/mgr/dashboard/services/rgw_iam.py | 81
-rw-r--r--  src/pybind/mgr/dashboard/tests/test_rgw_iam.py | 292
-rw-r--r--  src/python-common/ceph/deployment/service_spec.py | 8
-rw-r--r--  src/rgw/CMakeLists.txt | 1
-rw-r--r--  src/rgw/driver/posix/notify.h | 2
-rw-r--r--  src/rgw/driver/rados/rgw_sal_rados.cc | 2
-rw-r--r--  src/rgw/rgw_admin.cc | 232
-rw-r--r--  src/rgw/rgw_asio_frontend.cc | 7
-rw-r--r--  src/rgw/rgw_bucket_layout.cc | 2
-rw-r--r--  src/rgw/rgw_bucket_logging.cc | 17
-rw-r--r--  src/rgw/rgw_bucket_logging.h | 16
-rw-r--r--  src/rgw/rgw_file_int.h | 24
-rw-r--r--  src/rgw/rgw_iam_policy.cc | 4
-rw-r--r--  src/rgw/rgw_iam_policy.h | 2
-rw-r--r--  src/rgw/rgw_op.cc | 57
-rw-r--r--  src/rgw/rgw_op.h | 3
-rw-r--r--  src/rgw/rgw_op_type.h | 1
-rw-r--r--  src/rgw/rgw_pubsub.cc | 208
-rw-r--r--  src/rgw/rgw_pubsub.h | 86
-rw-r--r--  src/rgw/rgw_rest.cc | 7
-rw-r--r--  src/rgw/rgw_rest_bucket_logging.cc | 78
-rw-r--r--  src/rgw/rgw_rest_bucket_logging.h | 1
-rw-r--r--  src/rgw/rgw_rest_s3.cc | 48
-rw-r--r--  src/rgw/rgw_rest_s3.h | 1
-rw-r--r--  src/rgw/rgw_rest_sts.cc | 3
-rw-r--r--  src/rgw/rgw_rest_swift.cc | 8
-rw-r--r--  src/rgw/rgw_rest_swift.h | 1
-rw-r--r--  src/rgw/rgw_s3_filter.cc | 269
-rw-r--r--  src/rgw/rgw_s3_filter.h | 102
-rw-r--r--  src/rgw/rgw_sal_dbstore.cc | 2
-rw-r--r--  src/rgw/rgw_sal_dbstore.h | 28
-rwxr-xr-x  src/script/ceph-backport.sh | 2
-rw-r--r--  src/test/cli/rbd/help.t | 7
-rw-r--r--  src/test/cls_log/test_cls_log.cc | 1
-rw-r--r--  src/test/common/CMakeLists.txt | 6
-rw-r--r--  src/test/common/test_htmlformatter.cc | 26
-rw-r--r--  src/test/common/test_intrusive_lru.cc | 22
-rw-r--r--  src/test/common/test_json_formatter.cc | 24
-rw-r--r--  src/test/common/test_tableformatter.cc | 17
-rw-r--r--  src/test/common/test_xmlformatter.cc | 22
-rw-r--r--  src/test/fio/fio_librgw.cc | 6
-rw-r--r--  src/test/libcephfs/test.cc | 28
-rw-r--r--  src/test/librbd/test_internal.cc | 77
-rw-r--r--  src/test/rgw/bucket_notification/api.py | 4
-rw-r--r--  src/test/rgw/bucket_notification/test_bn.py | 236
-rw-r--r--  src/tools/monmaptool.cc | 8
-rw-r--r--  src/tools/rbd/Utils.cc | 10
-rw-r--r--  src/tools/rbd/Utils.h | 9
-rw-r--r--  src/tools/rbd/action/Group.cc | 85
-rw-r--r--  src/tools/rbd/action/MirrorPool.cc | 17
154 files changed, 3749 insertions, 1799 deletions
diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index 146cab64d6f..873ac6d21dd 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -48,6 +48,10 @@
CephFS does not support disk space reservation. The only flags supported are
`FALLOC_FL_KEEP_SIZE` and `FALLOC_FL_PUNCH_HOLE`.
+* The HeadBucket API now reports the `X-RGW-Bytes-Used` and `X-RGW-Object-Count`
+ headers only when the `read-stats` querystring is explicitly included in the
+ API request.
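+  Since standard S3 clients do not add this querystring on their own, a
+  minimal sketch of an explicitly signed HEAD request (untested; the endpoint,
+  credentials, and region below are placeholders) looks like:
+
+    import requests
+    from botocore.auth import SigV4Auth
+    from botocore.awsrequest import AWSRequest
+    from botocore.credentials import Credentials
+
+    creds = Credentials('ACCESS_KEY', 'SECRET_KEY')
+    req = AWSRequest(method='HEAD',
+                     url='http://127.0.0.1:8000/mybucket?read-stats')
+    SigV4Auth(creds, 's3', 'us-east-1').add_auth(req)  # sign the raw request
+    resp = requests.request(req.method, req.url, headers=dict(req.headers))
+    print(resp.headers.get('X-RGW-Bytes-Used'),
+          resp.headers.get('X-RGW-Object-Count'))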
+
>=19.2.1
* CephFS: Command `fs subvolume create` now allows tagging subvolumes through option
@@ -57,6 +61,9 @@
`ceph fs subvolume earmark set`, `ceph fs subvolume earmark get` and
`ceph fs subvolume earmark rm` have been added to set, get and remove earmark from a given subvolume.
+* RADOS: A performance bottleneck in the balancer mgr module has been fixed.
+ Related Tracker: https://tracker.ceph.com/issues/68657
+
>=19.0.0
* cephx: key rotation is now possible using `ceph auth rotate`. Previously,
diff --git a/ceph.spec.in b/ceph.spec.in
index aedb6ae38b4..1e4c1f0850c 100644
--- a/ceph.spec.in
+++ b/ceph.spec.in
@@ -2566,6 +2566,7 @@ fi
%{_includedir}/cephfs/metrics/Types.h
%{_libdir}/libcephfs.so
%{_libdir}/libcephfs_proxy.so
+%{_libdir}/pkgconfig/cephfs.pc
%files -n python%{python3_pkgversion}-cephfs
%{python3_sitearch}/cephfs.cpython*.so
diff --git a/debian/libcephfs-dev.install b/debian/libcephfs-dev.install
index 40e37414051..47431dd5526 100644
--- a/debian/libcephfs-dev.install
+++ b/debian/libcephfs-dev.install
@@ -4,3 +4,4 @@ usr/include/cephfs/types.h
usr/include/cephfs/metrics/Types.h
usr/lib/libcephfs.so
usr/lib/libcephfs_proxy.so
+usr/lib/pkgconfig/cephfs.pc
diff --git a/doc/dev/crimson/pipeline.rst b/doc/dev/crimson/pipeline.rst
index e9115c6d7c3..6e47b79d80b 100644
--- a/doc/dev/crimson/pipeline.rst
+++ b/doc/dev/crimson/pipeline.rst
@@ -2,96 +2,34 @@
The ``ClientRequest`` pipeline
==============================
-In crimson, exactly like in the classical OSD, a client request has data and
-ordering dependencies which must be satisfied before processing (actually
-a particular phase of) can begin. As one of the goals behind crimson is to
-preserve the compatibility with the existing OSD incarnation, the same semantic
-must be assured. An obvious example of such data dependency is the fact that
-an OSD needs to have a version of OSDMap that matches the one used by the client
-(``Message::get_min_epoch()``).
-
-If a dependency is not satisfied, the processing stops. It is crucial to note
-the same must happen to all other requests that are sequenced-after (due to
-their ordering requirements).
-
-There are a few cases when the blocking of a client request can happen.
-
-
- ``ClientRequest::ConnectionPipeline::await_map``
- wait for particular OSDMap version is available at the OSD level
- ``ClientRequest::ConnectionPipeline::get_pg``
- wait a particular PG becomes available on OSD
- ``ClientRequest::PGPipeline::await_map``
- wait on a PG being advanced to particular epoch
- ``ClientRequest::PGPipeline::wait_for_active``
- wait for a PG to become *active* (i.e. have ``is_active()`` asserted)
- ``ClientRequest::PGPipeline::recover_missing``
- wait on an object to be recovered (i.e. leaving the ``missing`` set)
- ``ClientRequest::PGPipeline::get_obc``
- wait on an object to be available for locking. The ``obc`` will be locked
- before this operation is allowed to continue
- ``ClientRequest::PGPipeline::process``
- wait if any other ``MOSDOp`` message is handled against this PG
-
-At any moment, a ``ClientRequest`` being served should be in one and only one
-of the phases described above. Similarly, an object denoting particular phase
-can host not more than a single ``ClientRequest`` the same time. At low-level
-this is achieved with a combination of a barrier and an exclusive lock.
-They implement the semantic of a semaphore with a single slot for these exclusive
-phases.
-
-As the execution advances, request enters next phase and leaves the current one
-freeing it for another ``ClientRequest`` instance. All these phases form a pipeline
-which assures the order is preserved.
-
-These pipeline phases are divided into two ordering domains: ``ConnectionPipeline``
-and ``PGPipeline``. The former ensures order across a client connection while
-the latter does that across a PG. That is, requests originating from the same
-connection are executed in the same order as they were sent by the client.
-The same applies to the PG domain: when requests from multiple connections reach
-a PG, they are executed in the same order as they entered a first blocking phase
-of the ``PGPipeline``.
-
-Comparison with the classical OSD
-----------------------------------
-As the audience of this document are Ceph Developers, it seems reasonable to
-match the phases of crimson's ``ClientRequest`` pipeline with the blocking
-stages in the classical OSD. The names in the right column are names of
-containers (lists and maps) used to implement these stages. They are also
-already documented in the ``PG.h`` header.
-
-+----------------------------------------+--------------------------------------+
-| crimson | ceph-osd waiting list |
-+========================================+======================================+
-|``ConnectionPipeline::await_map`` | ``OSDShardPGSlot::waiting`` and |
-|``ConnectionPipeline::get_pg`` | ``OSDShardPGSlot::waiting_peering`` |
-+----------------------------------------+--------------------------------------+
-|``PGPipeline::await_map`` | ``PG::waiting_for_map`` |
-+----------------------------------------+--------------------------------------+
-|``PGPipeline::wait_for_active`` | ``PG::waiting_for_peered`` |
-| +--------------------------------------+
-| | ``PG::waiting_for_flush`` |
-| +--------------------------------------+
-| | ``PG::waiting_for_active`` |
-+----------------------------------------+--------------------------------------+
-|To be done (``PG_STATE_LAGGY``) | ``PG::waiting_for_readable`` |
-+----------------------------------------+--------------------------------------+
-|To be done | ``PG::waiting_for_scrub`` |
-+----------------------------------------+--------------------------------------+
-|``PGPipeline::recover_missing`` | ``PG::waiting_for_unreadable_object``|
-| +--------------------------------------+
-| | ``PG::waiting_for_degraded_object`` |
-+----------------------------------------+--------------------------------------+
-|To be done (proxying) | ``PG::waiting_for_blocked_object`` |
-+----------------------------------------+--------------------------------------+
-|``PGPipeline::get_obc`` | *obc rwlocks* |
-+----------------------------------------+--------------------------------------+
-|``PGPipeline::process`` | ``PG::lock`` (roughly) |
-+----------------------------------------+--------------------------------------+
-
-
-As the last word it might be worth to emphasize that the ordering implementations
-in both classical OSD and in crimson are stricter than a theoretical minimum one
-required by the RADOS protocol. For instance, we could parallelize read operations
-targeting the same object at the price of extra complexity but we don't -- the
-simplicity has won.
+RADOS requires writes on each object to be ordered. If a client
+submits a sequence of concurrent writes (that is, it does not wait for
+the prior write to complete before submitting the next), that client
+may rely on the writes being completed in the order in which they are
+submitted.
+
+As a result, the client->osd communication and queueing mechanisms on
+both sides must take care to ensure that writes on a (connection,
+object) pair remain ordered for the entire process.
+
+crimson-osd enforces this ordering via Pipelines and Stages
+(crimson/osd/osd_operation.h). Upon arrival at the OSD, messages
+enter the ConnectionPipeline::AwaitActive stage and proceed
+through a sequence of pipeline stages:
+
+* ConnectionPipeline: per-connection stages representing the message handling
+  path prior to the request being handed off to the target PG.
+* PerShardPipeline: intermediate pipeline representing the handoff from the
+  receiving shard to the shard with the target PG.
+* CommonPGPipeline: represents processing on the target PG prior to obtaining
+  the ObjectContext for the target of the operation.
+* CommonOBCPipeline: represents the actual processing of the IO on the target
+  object.
+
+Because CommonOBCPipeline is per-object rather than per-connection or
+per-PG, multiple requests on different objects may be in the same
+CommonOBCPipeline stage concurrently. This allows us to serve
+multiple reads in the same PG concurrently. We can also process
+writes on multiple objects concurrently up to the point at which the
+write is actually submitted.
+
+See crimson/osd/osd_operations/client_request.(h|cc) for details.
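+
+For illustration only, here is a minimal Python model (not crimson code) of
+the hand-over-hand stage ordering described above: a request acquires the next
+stage before releasing the current one, so requests in the same chain of
+stages cannot overtake each other, while requests in different chains (e.g.
+on different objects) proceed concurrently::
+
+    import asyncio
+
+    class Stage:
+        """An exclusive pipeline stage: admits one request at a time."""
+        def __init__(self, name):
+            self.name = name
+            self.lock = asyncio.Lock()
+
+    async def run_through(stages, label):
+        prev = None
+        for stage in stages:
+            await stage.lock.acquire()   # enter the next stage...
+            if prev:
+                prev.lock.release()      # ...only then leave the current one
+            prev = stage
+        print("completed", label)
+        prev.lock.release()
+
+    async def main():
+        # one chain per object: same-object writes stay ordered, while
+        # writes to different objects can proceed concurrently
+        chains = {o: [Stage("process"), Stage("submit")] for o in ("a", "b")}
+        await asyncio.gather(
+            run_through(chains["a"], "a#1"),
+            run_through(chains["a"], "a#2"),
+            run_through(chains["b"], "b#1"),
+        )
+
+    asyncio.run(main())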
diff --git a/doc/radosgw/bucket_logging.rst b/doc/radosgw/bucket_logging.rst
index d96ffbe2758..cb9f8465d20 100644
--- a/doc/radosgw/bucket_logging.rst
+++ b/doc/radosgw/bucket_logging.rst
@@ -27,6 +27,8 @@ This time (in seconds) could be set per source bucket via a Ceph extension to th
or globally via the `rgw_bucket_logging_obj_roll_time` configuration option. If not set, the default time is 5 minutes.
Adding a log object to the log bucket is done "lazily", meaning, that if no more records are written to the object, it may
remain outside of the log bucket even after the configured time has passed.
+To counter that, you can explicitly flush all the logging objects of a given
+source bucket, committing them to the log bucket regardless of whether enough
+time has passed or whether any more records are written.
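+For example, with the boto3 API extensions from ``examples/rgw/boto3`` loaded,
+flushing the logging objects of a bucket is a single call (a sketch; assumes
+an existing boto3 ``client`` and bucket)::
+
+  # POST /{bucket}?logging - flush pending log records to the log bucket
+  client.post_bucket_logging(Bucket='mybucket')
+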
Standard
````````
@@ -38,8 +40,8 @@ Journal
If logging type is set to "Journal", the records are written to the log bucket before the bucket operation is completed.
This means that if the logging action fails, the operation will not be executed, and an error will be returned to the client.
An exception to the above are "multi/delete" log records: if writing these log records fail, the operation continues and may still be successful.
-Note that it may happen that the log records were successfully written, but the bucket operation failed, since the logs are written
-before such a failure, there will be no indication for that in the log records.
+Journal mode supports filtering out records based on matches of the prefixes and suffixes of the logged object keys. Regular-expression matching can also be used on these to create filters.
+Note that it may happen that the log records were successfully written but the
+bucket operation failed; since the logs are written before such a failure,
+there will be no indication of it in the log records.
Bucket Logging REST API
diff --git a/doc/radosgw/s3/bucketops.rst b/doc/radosgw/s3/bucketops.rst
index 2e5cc48646d..c33a8c0f410 100644
--- a/doc/radosgw/s3/bucketops.rst
+++ b/doc/radosgw/s3/bucketops.rst
@@ -751,6 +751,14 @@ Parameters are XML encoded in the body of the request, in the following format:
<TargetPrefix>string</TargetPrefix>
<LoggingType>Standard|Journal</LoggingType>
<ObjectRollTime>integer</ObjectRollTime>
+ <Filter>
+ <S3Key>
+ <FilterRule>
+ <Name>suffix/prefix/regex</Name>
+ <Value></Value>
+ </FilterRule>
+ </S3Key>
+ </Filter>
</LoggingEnabled>
</BucketLoggingStatus>
@@ -881,6 +889,14 @@ Response is XML encoded in the body of the request, in the following format:
<TargetPrefix>string</TargetPrefix>
<LoggingType>Standard|Journal</LoggingType>
<ObjectRollTime>integer</ObjectRollTime>
+ <Filter>
+ <S3Key>
+ <FilterRule>
+ <Name>suffix/prefix/regex</Name>
+ <Value></Value>
+ </FilterRule>
+ </S3Key>
+ </Filter>
</LoggingEnabled>
</BucketLoggingStatus>
@@ -894,3 +910,27 @@ HTTP Response
| ``404`` | NoSuchBucket | The bucket does not exist |
+---------------+-----------------------+----------------------------------------------------------+
+Flush Bucket Logging
+--------------------
+
+Flushes all pending logging objects for a given source bucket (logging objects are written lazily).
+
+Syntax
+~~~~~~
+
+::
+
+ POST /{bucket}?logging HTTP/1.1
+
+
+HTTP Response
+~~~~~~~~~~~~~
+
++---------------+-----------------------+----------------------------------------------------------+
+| HTTP Status | Status Code | Description |
++===============+=======================+==========================================================+
+| ``201`` | Created | Flushed all logging objects successfully |
++---------------+-----------------------+----------------------------------------------------------+
+| ``404`` | NoSuchBucket | The bucket does not exist |
++---------------+-----------------------+----------------------------------------------------------+
+
diff --git a/doc/radosgw/uadk-accel.rst b/doc/radosgw/uadk-accel.rst
index fdf99f891f0..0ed25148d73 100644
--- a/doc/radosgw/uadk-accel.rst
+++ b/doc/radosgw/uadk-accel.rst
@@ -12,9 +12,9 @@ See `Compressor UADK Support`_.
UADK in the Software Stack
==========================
-UADK is a general-purpose user space accelerator framework that uses shared
-virtual addressing (SVA) to provide a unified programming interface for hardware
-acceleration of cryptographic and compression algorithms.
+UADK is a general-purpose user space accelerator framework that uses Shared
+Virtual Addressing (SVA) to provide a unified programming interface for
+hardware acceleration of cryptographic and compression algorithms.
UADK includes Unified/User-space-access-intended Accelerator Framework (UACCE),
which enables hardware accelerators that support SVA to adapt to UADK.
@@ -33,10 +33,10 @@ See `OpenSSL UADK Engine`_.
UADK Environment Setup
======================
-UADK consists of UACCE, vendors’ drivers, and an algorithm layer. UADK requires the
-hardware accelerator to support SVA, and the operating system to support IOMMU and
-SVA. Hardware accelerators from different vendors are registered as different character
-devices with UACCE by using kernel-mode drivers of the vendors.
+UADK consists of UACCE, vendor drivers, and an algorithm layer. UADK requires
+the hardware accelerator to support SVA, and the operating system to support
+IOMMU and SVA. Hardware accelerators are registered as different character
+devices with UACCE by kernel-mode drivers.
::
@@ -77,11 +77,12 @@ Configuration
#. Kernel Requirement
-User needs to make sure that UACCE is already supported in Linux kernel. The kernel version
-should be at least v5.9 with SVA (Shared Virtual Addressing) enabled.
+Users must ensure that UACCE is supported by the Linux kernel release in use,
+which should be 5.9 or later with SVA (Shared Virtual Addressing) enabled.
-UACCE may be built as a module or built into the kernel. Here's an example to build UACCE
-with hardware accelerators for the HiSilicon Kunpeng platform.
+UACCE may be built as a loadable module or built into the kernel. Here's an
+example to build UACCE with hardware accelerators for the HiSilicon Kunpeng
+platform.
.. prompt:: bash $
@@ -97,13 +98,17 @@ with hardware accelerators for the HiSilicon Kunpeng platform.
Make sure all these above kernel configurations are selected.
#. UADK enablement
-If the architecture is aarch64, it will automatically download the UADK source code to build
-the static library. If it runs on other architecture, user can enable it with build parameters
-`-DWITH_UADK=true`
-
-#. Manual Build UADK
-As the above paragraph shows, the UADK is enabled automatically, no need to build manually.
-For developer who is interested in UADK, you can refer to the below steps for building.
+If the architecture is ``aarch64``, the build will automatically download the
+UADK source code and build the static library. When building on other CPU
+architectures, the user may enable UADK by adding ``-DWITH_UADK=true`` to the
+compilation command line options. Note that UADK may not be compatible with all
+architectures.
+
+#. Manually Building UADK
+As implied in the above paragraph, if the architecture is ``aarch64``, the UADK
+is enabled automatically and there is no need to build it manually. However,
+below we provide the procedure for manually building UADK so that developers
+can study how it is built.
.. prompt:: bash $
@@ -115,9 +120,9 @@ For developer who is interested in UADK, you can refer to the below steps for bu
make
make install
- .. note:: Without –prefix, UADK will be installed to /usr/local/lib by
- default. If get error:"cannot find -lnuma", please install
- the `libnuma-dev`.
+ .. note:: Without ``--prefix``, UADK will be installed under
+ ``/usr/local/lib`` by default. If you get the error:
+ ``cannot find -lnuma``, install the ``libnuma-dev`` package.
#. Configure
@@ -126,7 +131,8 @@ For developer who is interested in UADK, you can refer to the below steps for bu
uadk_compressor_enabled=true
- The default value in `global.yaml.in` for `uadk_compressor_enabled` is false.
+ The default value in `global.yaml.in` for `uadk_compressor_enabled` is
+ ``false``.
.. _Compressor UADK Support: https://github.com/ceph/ceph/pull/58336
.. _OpenSSL UADK Engine: https://github.com/Linaro/uadk_engine
diff --git a/examples/rgw/boto3/bucket_logging.py b/examples/rgw/boto3/bucket_logging.py
index fdc219c5765..7a972dac8bc 100644
--- a/examples/rgw/boto3/bucket_logging.py
+++ b/examples/rgw/boto3/bucket_logging.py
@@ -39,6 +39,17 @@ bucket_logging_conf = {'LoggingEnabled': {
},
'ObjectRollTime': 60,
'LoggingType': 'Journal',
+ "Filter": {
+ "Key": {
+ "FilterRules":
+ [
+ {
+ "Name": "prefix",
+ "Value": "myfile"
+ }
+ ]
+ }
+ }
}
}
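A hedged companion sketch for the example above, assuming the same `client`
and `bucket_name` from the script, and that the SDK extras (below) are loaded
so the extended `Filter` field round-trips:

    # apply the journal-mode key filter, then read it back to verify
    client.put_bucket_logging(Bucket=bucket_name,
                              BucketLoggingStatus=bucket_logging_conf)
    print(client.get_bucket_logging(Bucket=bucket_name))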
diff --git a/examples/rgw/boto3/post_bucket_logging.py b/examples/rgw/boto3/post_bucket_logging.py
new file mode 100644
index 00000000000..130fc53b50a
--- /dev/null
+++ b/examples/rgw/boto3/post_bucket_logging.py
@@ -0,0 +1,23 @@
+import boto3
+import sys
+
+
+if len(sys.argv) == 2:
+ # bucket name as first argument
+ bucketname = sys.argv[1]
+else:
+ print('Usage: ' + sys.argv[0] + ' <bucket>')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000/'+bucketname
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# flushing the logs for bucket logging
+print(client.post_bucket_logging(Bucket=bucketname))
diff --git a/examples/rgw/boto3/service-2.sdk-extras.json b/examples/rgw/boto3/service-2.sdk-extras.json
index 5c22ee9f248..b81667ecd09 100644
--- a/examples/rgw/boto3/service-2.sdk-extras.json
+++ b/examples/rgw/boto3/service-2.sdk-extras.json
@@ -13,6 +13,17 @@
"documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/bucketops/#delete-notification",
"documentation":"<p>Deletes the notification configuration from the bucket.</p>"
},
+ "PostBucketLogging":{
+ "name":"PostBucketLogging",
+ "http":{
+ "method":"POST",
+ "requestUri":"/{Bucket}?logging",
+ "responseCode":201
+ },
+ "input":{"shape":"PostBucketLoggingRequest"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/bucketops/#post-bucket-logging",
+ "documentation":"<p>Flushes the logging objects of the buckets.</p>"
+ },
"GetUsageStats":{
"name":"GetUsageStats",
"http":{
@@ -146,6 +157,18 @@
}
}
},
+ "PostBucketLoggingRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+ "documentation":"<p>Name of the bucket to flush its logging objects.</p>",
+ "location":"uri",
+ "locationName":"Bucket"
+ }
+ }
+ },
"FilterRule":{
"type":"structure",
"members":{
@@ -287,6 +310,10 @@
"RecordsBatchSize":{
"shape":"RecordsBatchSize",
"documentation":"indicates how many records to batch in memory before writing to the object. if set to zero, records are written syncronously to the object. if <code>ObjectRollTime</code>e is reached, the batch of records will be written to the object regardless of the number of records. </p>"
+ },
+ "Filter":{
+ "shape":"LoggingConfigurationFilter",
+ "documentation":"<p>A filter for all log object. Filter for the object by its key (prefix, suffix and regex).</p>"
}
},
"documentation":"<p>Describes where logs are stored the prefix assigned to all log object keys for a bucket, and their format. also, the level the delivery guarantee of the records.</p>"
@@ -340,6 +367,18 @@
"Standard",
"Journal"
]
+ },
+ "LoggingConfigurationFilter":{
+ "type":"structure",
+ "members":{
+ "Key":{
+ "shape":"S3KeyFilter",
+ "documentation":"<p/>",
+ "locationName":"S3Key"
+ }
+ },
+ "documentation":"<p>A filter for all log object. Filter for the object by its key (prefix, suffix and regex).</p>",
+ "locationName":"Filter"
}
},
"documentation":"<p/>"
diff --git a/monitoring/ceph-mixin/prometheus_alerts.libsonnet b/monitoring/ceph-mixin/prometheus_alerts.libsonnet
index 143e65f20e7..5d1ab49b533 100644
--- a/monitoring/ceph-mixin/prometheus_alerts.libsonnet
+++ b/monitoring/ceph-mixin/prometheus_alerts.libsonnet
@@ -856,6 +856,16 @@
},
},
{
+ alert: 'NVMeoFMultipleNamespacesOfRBDImage',
+ 'for': '1m',
+ expr: 'count by(pool_name, rbd_name) (count by(bdev_name, pool_name, rbd_name) (ceph_nvmeof_bdev_metadata and on (bdev_name) ceph_nvmeof_subsystem_namespace_metadata)) > 1',
+ labels: { severity: 'warning', type: 'ceph_default' },
+ annotations: {
+        summary: 'RBD image {{ $labels.pool_name }}/{{ $labels.rbd_name }} cannot be reused for multiple NVMeoF namespaces',
+ description: 'Each NVMeoF namespace must have a unique RBD pool and image, across all different gateway groups.',
+ },
+ },
+ {
alert: 'NVMeoFTooManyGateways',
'for': '1m',
expr: 'count(ceph_nvmeof_gateway_info) by (cluster) > %.2f' % [$._config.NVMeoFMaxGatewaysPerCluster],
diff --git a/monitoring/ceph-mixin/prometheus_alerts.yml b/monitoring/ceph-mixin/prometheus_alerts.yml
index 3eb8a8db4fa..3440d761351 100644
--- a/monitoring/ceph-mixin/prometheus_alerts.yml
+++ b/monitoring/ceph-mixin/prometheus_alerts.yml
@@ -765,6 +765,15 @@ groups:
labels:
severity: "warning"
type: "ceph_default"
+ - alert: "NVMeoFMultipleNamespacesOfRBDImage"
+ annotations:
+ description: "Each NVMeoF namespace must have a unique RBD pool and image, across all different gateway groups."
+ summary: "RBD image {{ $labels.pool_name }}/{{ $labels.rbd_name }} cannot be reused for multiple NVMeoF namespace "
+ expr: "count by(pool_name, rbd_name) (count by(bdev_name, pool_name, rbd_name) (ceph_nvmeof_bdev_metadata and on (bdev_name) ceph_nvmeof_subsystem_namespace_metadata)) > 1"
+ for: "1m"
+ labels:
+ severity: "warning"
+ type: "ceph_default"
- alert: "NVMeoFTooManyGateways"
annotations:
description: "You may create many gateways, but 4 is the tested limit"
diff --git a/monitoring/ceph-mixin/tests_alerts/test_alerts.yml b/monitoring/ceph-mixin/tests_alerts/test_alerts.yml
index a4e63bbcf73..b3b29308d08 100644
--- a/monitoring/ceph-mixin/tests_alerts/test_alerts.yml
+++ b/monitoring/ceph-mixin/tests_alerts/test_alerts.yml
@@ -2270,6 +2270,54 @@ tests:
summary: "wah subsystem has reached its maximum number of namespaces on cluster mycluster"
description: "Subsystems have a max namespace limit defined at creation time. This alert means that no more namespaces can be added to wah"
+# NVMeoFMultipleNamespacesOfRBDImage
+ - interval: 1m
+ input_series:
+ - series: 'ceph_nvmeof_bdev_metadata{bdev_name="bdev1", instance="ceph-nvme-vm1", pool_name="mypool", rbd_name="myimage1"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_bdev_metadata{bdev_name="bdev1", instance="ceph-nvme-vm2", pool_name="mypool", rbd_name="myimage1"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_bdev_metadata{bdev_name="bdev2", instance="ceph-nvme-vm1", pool_name="mypool", rbd_name="myimage2"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_bdev_metadata{bdev_name="bdev2", instance="ceph-nvme-vm2", pool_name="mypool", rbd_name="myimage2"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_bdev_metadata{bdev_name="bdev3", instance="ceph-nvme-vm1", pool_name="mypool", rbd_name="myimage1"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_bdev_metadata{bdev_name="bdev3", instance="ceph-nvme-vm2", pool_name="mypool", rbd_name="myimage1"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_bdev_metadata{bdev_name="bdev4", instance="ceph-nvme-vm1", pool_name="mypool", rbd_name="myimage1"}' # bdev with no ns
+ values: '1x10'
+ - series: 'ceph_nvmeof_subsystem_namespace_metadata{nqn="nqn1", nsid="1", bdev_name="bdev1", instance="ceph-nvme-vm1", cluster="mycluster"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_subsystem_namespace_metadata{nqn="nqn1", nsid="1", bdev_name="bdev1", instance="ceph-nvme-vm2", cluster="mycluster"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_subsystem_namespace_metadata{nqn="nqn1", nsid="2", bdev_name="bdev2", instance="ceph-nvme-vm1", cluster="mycluster"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_subsystem_namespace_metadata{nqn="nqn1", nsid="2", bdev_name="bdev2", instance="ceph-nvme-vm2", cluster="mycluster"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_subsystem_namespace_metadata{nqn="nqn2", nsid="1", bdev_name="bdev3", instance="ceph-nvme-vm1", cluster="mycluster"}'
+ values: '1x10'
+ - series: 'ceph_nvmeof_subsystem_namespace_metadata{nqn="nqn2", nsid="1", bdev_name="bdev3", instance="ceph-nvme-vm2", cluster="mycluster"}'
+ values: '1x10'
+ promql_expr_test:
+ - expr: count by(pool_name, rbd_name) (count by(bdev_name, pool_name, rbd_name) (ceph_nvmeof_bdev_metadata and on (bdev_name) ceph_nvmeof_subsystem_namespace_metadata)) > 1
+ eval_time: 1m
+ exp_samples:
+ - labels: '{pool_name="mypool", rbd_name="myimage1"}'
+ value: 2
+ alert_rule_test:
+ - eval_time: 5m
+ alertname: NVMeoFMultipleNamespacesOfRBDImage
+ exp_alerts:
+ - exp_labels:
+ pool_name: mypool
+ rbd_name: myimage1
+ severity: warning
+ type: ceph_default
+ exp_annotations:
+ summary: "RBD image mypool/myimage1 cannot be reused for multiple NVMeoF namespace "
+ description: "Each NVMeoF namespace must have a unique RBD pool and image, across all different gateway groups."
+
# NVMeoFTooManyGateways
- interval: 1m
input_series:
diff --git a/qa/standalone/scrub/osd-scrub-repair.sh b/qa/standalone/scrub/osd-scrub-repair.sh
index 491e46603f7..6dd5b10ae8f 100755
--- a/qa/standalone/scrub/osd-scrub-repair.sh
+++ b/qa/standalone/scrub/osd-scrub-repair.sh
@@ -5833,7 +5833,7 @@ function TEST_periodic_scrub_replicated() {
flush_pg_stats
# Request a regular scrub and it will be done
- pg_schedule_scrub $pg
+ pg_scrub $pg
grep -q "Regular scrub request, deep-scrub details will be lost" $dir/osd.${primary}.log || return 1
# deep-scrub error is no longer present
diff --git a/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml b/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml
index 7c97edae552..0416ae2ea4e 100644
--- a/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml
+++ b/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml
@@ -1,7 +1,8 @@
+# runs on default nvmeof image (i.e. DEFAULT_NVMEOF_IMAGE)
tasks:
- nvmeof:
installer: host.a
- gw_image: quay.io/ceph/nvmeof:latest # "default" is the image cephadm defaults to; change to test specific nvmeof images, example "latest"
+ gw_image: default # "default" is the image cephadm defaults to; change to test specific nvmeof images, example "latest"
rbd:
pool_name: mypool
image_name_prefix: myimage
diff --git a/qa/suites/nvmeof/basic/workloads/nvmeof_namespaces.yaml b/qa/suites/nvmeof/basic/workloads/nvmeof_namespaces.yaml
index 9ef37004427..dfe31380bb6 100644
--- a/qa/suites/nvmeof/basic/workloads/nvmeof_namespaces.yaml
+++ b/qa/suites/nvmeof/basic/workloads/nvmeof_namespaces.yaml
@@ -18,6 +18,7 @@ tasks:
clients:
client.0:
- nvmeof/setup_subsystem.sh
+ - nvmeof/basic_tests.sh
env:
RBD_POOL: mypool
RBD_IMAGE_PREFIX: myimage
@@ -27,7 +28,6 @@ tasks:
timeout: 30m
clients:
client.0:
- - nvmeof/basic_tests.sh
- nvmeof/fio_test.sh --rbd_iostat
client.1:
- nvmeof/basic_tests.sh
diff --git a/qa/suites/nvmeof/basic/workloads/nvmeof_scalability.yaml b/qa/suites/nvmeof/basic/workloads/nvmeof_scalability.yaml
index 12cb50b408d..d66b6fc8093 100644
--- a/qa/suites/nvmeof/basic/workloads/nvmeof_scalability.yaml
+++ b/qa/suites/nvmeof/basic/workloads/nvmeof_scalability.yaml
@@ -31,8 +31,11 @@ tasks:
no_coverage_and_limits: true
timeout: 30m
clients:
- client.0:
+ client.3:
- nvmeof/scalability_test.sh nvmeof.a,nvmeof.b
- nvmeof/scalability_test.sh nvmeof.b,nvmeof.c,nvmeof.d
+ - nvmeof/scalability_test.sh nvmeof.b,nvmeof.c
env:
SCALING_DELAYS: '50'
+ RBD_POOL: mypool
+ NVMEOF_GROUP: mygroup0
diff --git a/qa/suites/nvmeof/thrash/gateway-initiator-setup/10-subsys-90-namespace-no_huge_pages.yaml b/qa/suites/nvmeof/thrash/gateway-initiator-setup/10-subsys-90-namespace-no_huge_pages.yaml
new file mode 100644
index 00000000000..83d54cdf5c3
--- /dev/null
+++ b/qa/suites/nvmeof/thrash/gateway-initiator-setup/10-subsys-90-namespace-no_huge_pages.yaml
@@ -0,0 +1,37 @@
+tasks:
+- nvmeof:
+ installer: host.a
+ gw_image: quay.io/ceph/nvmeof:latest # "default" is the image cephadm defaults to; change to test specific nvmeof images, example "latest"
+ rbd:
+ pool_name: mypool
+ image_name_prefix: myimage
+ gateway_config:
+ subsystems_count: 10
+ namespaces_count: 90 # each subsystem
+ cli_image: quay.io/ceph/nvmeof-cli:latest
+
+- cephadm.wait_for_service:
+ service: nvmeof.mypool.mygroup0
+
+- cephadm.exec:
+ host.a:
+ - ceph orch ls nvmeof --export > /tmp/nvmeof-orig.yaml
+ - cp /tmp/nvmeof-orig.yaml /tmp/nvmeof-no-huge-page.yaml
+ - "sed -i '/ pool: mypool/a\\ spdk_mem_size: 4096' /tmp/nvmeof-no-huge-page.yaml"
+ - cat /tmp/nvmeof-no-huge-page.yaml
+ - ceph orch ls --refresh
+ - ceph orch apply -i /tmp/nvmeof-no-huge-page.yaml
+ - ceph orch redeploy nvmeof.mypool.mygroup0
+
+- cephadm.wait_for_service:
+ service: nvmeof.mypool.mygroup0
+
+- workunit:
+ no_coverage_and_limits: true
+ clients:
+ client.0:
+ - nvmeof/setup_subsystem.sh
+ - nvmeof/basic_tests.sh
+ env:
+ RBD_POOL: mypool
+ RBD_IMAGE_PREFIX: myimage
diff --git a/qa/suites/nvmeof/thrash/gateway-initiator-setup/3-subsys-60-namespace.yaml b/qa/suites/nvmeof/thrash/gateway-initiator-setup/120-subsys-8-namespace.yaml
index b4755a6433b..0f7ac011a60 100644
--- a/qa/suites/nvmeof/thrash/gateway-initiator-setup/3-subsys-60-namespace.yaml
+++ b/qa/suites/nvmeof/thrash/gateway-initiator-setup/120-subsys-8-namespace.yaml
@@ -6,8 +6,8 @@ tasks:
pool_name: mypool
image_name_prefix: myimage
gateway_config:
- subsystems_count: 3
- namespaces_count: 20 # each subsystem
+ subsystems_count: 120
+ namespaces_count: 8 # each subsystem
cli_image: quay.io/ceph/nvmeof-cli:latest
- cephadm.wait_for_service:
diff --git a/qa/suites/nvmeof/thrash/thrashers/nvmeof_mon_thrash.yaml b/qa/suites/nvmeof/thrash/thrashers/nvmeof_mon_thrash.yaml
index 19fa2ec605d..46037784d31 100644
--- a/qa/suites/nvmeof/thrash/thrashers/nvmeof_mon_thrash.yaml
+++ b/qa/suites/nvmeof/thrash/thrashers/nvmeof_mon_thrash.yaml
@@ -11,6 +11,7 @@ overrides:
- NVMEOF_SINGLE_GATEWAY
- NVMEOF_GATEWAY_DOWN
- are in unavailable state
+ - is unavailable
- is in error state
- failed cephadm daemon
diff --git a/qa/suites/nvmeof/thrash/thrashers/nvmeof_thrash.yaml b/qa/suites/nvmeof/thrash/thrashers/nvmeof_thrash.yaml
index 80bf0527715..b58dc14d87b 100644
--- a/qa/suites/nvmeof/thrash/thrashers/nvmeof_thrash.yaml
+++ b/qa/suites/nvmeof/thrash/thrashers/nvmeof_thrash.yaml
@@ -6,9 +6,11 @@ overrides:
- NVMEOF_SINGLE_GATEWAY
- NVMEOF_GATEWAY_DOWN
- are in unavailable state
+ - is unavailable
- is in error state
- failed cephadm daemon
tasks:
- nvmeof.thrash:
checker_host: 'client.0'
+ randomize: False
diff --git a/qa/suites/nvmeof/thrash/workloads/fio.yaml b/qa/suites/nvmeof/thrash/workloads/fio.yaml
index b042b92d6ae..f9a0d0ebde5 100644
--- a/qa/suites/nvmeof/thrash/workloads/fio.yaml
+++ b/qa/suites/nvmeof/thrash/workloads/fio.yaml
@@ -1,11 +1,11 @@
tasks:
- workunit:
no_coverage_and_limits: true
- timeout: 30m
+ timeout: 60m
clients:
client.0:
- - nvmeof/fio_test.sh --rbd_iostat
+ - nvmeof/fio_test.sh --random_devices 200
env:
RBD_POOL: mypool
IOSTAT_INTERVAL: '10'
- RUNTIME: '600'
+ RUNTIME: '1800'
diff --git a/qa/suites/rgw/notifications/tasks/kafka/test_kafka.yaml b/qa/suites/rgw/notifications/tasks/kafka/test_kafka.yaml
index 462570e7727..303f98d540e 100644
--- a/qa/suites/rgw/notifications/tasks/kafka/test_kafka.yaml
+++ b/qa/suites/rgw/notifications/tasks/kafka/test_kafka.yaml
@@ -1,7 +1,7 @@
tasks:
- kafka:
client.0:
- kafka_version: 2.6.0
+ kafka_version: 3.8.1
- notification-tests:
client.0:
extra_attr: ["kafka_test", "data_path_v2_kafka_test"]
diff --git a/qa/tasks/kafka.py b/qa/tasks/kafka.py
index 5e6c208ca30..833f03babf6 100644
--- a/qa/tasks/kafka.py
+++ b/qa/tasks/kafka.py
@@ -4,6 +4,7 @@ Deploy and configure Kafka for Teuthology
import contextlib
import logging
import time
+import os
from teuthology import misc as teuthology
from teuthology import contextutil
@@ -33,6 +34,13 @@ def install_kafka(ctx, config):
assert isinstance(config, dict)
log.info('Installing Kafka...')
+ # programmatically find a nearby mirror so as not to hammer archive.apache.org
+ apache_mirror_cmd="curl 'https://www.apache.org/dyn/closer.cgi' 2>/dev/null | " \
+ "grep -o '<strong>[^<]*</strong>' | sed 's/<[^>]*>//g' | head -n 1"
+ log.info("determining apache mirror by running: " + apache_mirror_cmd)
+ apache_mirror_url_front = os.popen(apache_mirror_cmd).read().rstrip() # note: includes trailing slash (/)
+ log.info("chosen apache mirror is " + apache_mirror_url_front)
+
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
test_dir=teuthology.get_testdir(ctx)
@@ -40,7 +48,8 @@ def install_kafka(ctx, config):
kafka_file = kafka_prefix + current_version + '.tgz'
- link1 = 'https://archive.apache.org/dist/kafka/' + current_version + '/' + kafka_file
+ link1 = '{apache_mirror_url_front}/kafka/'.format(apache_mirror_url_front=apache_mirror_url_front) + \
+ current_version + '/' + kafka_file
ctx.cluster.only(client).run(
args=['cd', '{tdir}'.format(tdir=test_dir), run.Raw('&&'), 'wget', link1],
)
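The same mirror lookup can be done without shelling out to curl; a hedged
alternative sketch, assuming closer.cgi honors its as_json=1 parameter:

    # fetch the preferred mirror as JSON instead of scraping the HTML page
    import json, urllib.request

    def preferred_apache_mirror():
        url = 'https://www.apache.org/dyn/closer.cgi?as_json=1'
        with urllib.request.urlopen(url) as r:
            return json.load(r)['preferred']  # includes a trailing slash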
diff --git a/qa/tasks/mgr/dashboard/test_rbd.py b/qa/tasks/mgr/dashboard/test_rbd.py
index a872645e33e..83b3bf520c2 100644
--- a/qa/tasks/mgr/dashboard/test_rbd.py
+++ b/qa/tasks/mgr/dashboard/test_rbd.py
@@ -869,7 +869,19 @@ class RbdTest(DashboardTestCase):
self.assertEqual(clone_format_version, 2)
self.assertStatus(200)
+ # if empty list is sent, then the config will remain as it is
value = []
+ res = [{'section': "global", 'value': "2"}]
+ self._post('/api/cluster_conf', {
+ 'name': config_name,
+ 'value': value
+ })
+ self.wait_until_equal(
+ lambda: _get_config_by_name(config_name),
+ res,
+ timeout=60)
+
+ value = [{'section': "global", 'value': ""}]
self._post('/api/cluster_conf', {
'name': config_name,
'value': value
diff --git a/qa/tasks/nvmeof.py b/qa/tasks/nvmeof.py
index 42e357294d9..c58a7267b4e 100644
--- a/qa/tasks/nvmeof.py
+++ b/qa/tasks/nvmeof.py
@@ -128,12 +128,11 @@ class Nvmeof(Task):
total_images = int(self.namespaces_count) * int(self.subsystems_count)
log.info(f'[nvmeof]: creating {total_images} images')
+ rbd_create_cmd = []
for i in range(1, total_images + 1):
imagename = self.image_name_prefix + str(i)
- log.info(f'[nvmeof]: rbd create {poolname}/{imagename} --size {self.rbd_size}')
- _shell(self.ctx, self.cluster_name, self.remote, [
- 'rbd', 'create', f'{poolname}/{imagename}', '--size', f'{self.rbd_size}'
- ])
+ rbd_create_cmd += ['rbd', 'create', f'{poolname}/{imagename}', '--size', f'{self.rbd_size}', run.Raw(';')]
+ _shell(self.ctx, self.cluster_name, self.remote, rbd_create_cmd)
for role, i in daemons.items():
remote, id_ = i
@@ -251,9 +250,9 @@ class NvmeofThrasher(Thrasher, Greenlet):
daemon_max_thrash_times:
For now, NVMeoF daemons have limitation that each daemon can
- be thrashed only 3 times in span of 30 mins. This option
+ be thrashed only 5 times in span of 30 mins. This option
allows to set the amount of times it could be thrashed in a period
- of time. (default: 3)
+ of time. (default: 5)
daemon_max_thrash_period:
This option goes with the above option. It sets the period of time
over which each daemons can be thrashed for daemon_max_thrash_times
@@ -306,12 +305,12 @@ class NvmeofThrasher(Thrasher, Greenlet):
self.max_thrash_daemons = int(self.config.get('max_thrash', len(self.daemons) - 1))
# Limits on thrashing each daemon
- self.daemon_max_thrash_times = int(self.config.get('daemon_max_thrash_times', 3))
+ self.daemon_max_thrash_times = int(self.config.get('daemon_max_thrash_times', 5))
self.daemon_max_thrash_period = int(self.config.get('daemon_max_thrash_period', 30 * 60)) # seconds
self.min_thrash_delay = int(self.config.get('min_thrash_delay', 60))
self.max_thrash_delay = int(self.config.get('max_thrash_delay', self.min_thrash_delay + 30))
- self.min_revive_delay = int(self.config.get('min_revive_delay', 100))
+ self.min_revive_delay = int(self.config.get('min_revive_delay', 60))
self.max_revive_delay = int(self.config.get('max_revive_delay', self.min_revive_delay + 30))
def _get_devices(self, remote):
@@ -347,6 +346,7 @@ class NvmeofThrasher(Thrasher, Greenlet):
run.Raw('&&'), 'ceph', 'orch', 'ps', '--daemon-type', 'nvmeof',
run.Raw('&&'), 'ceph', 'health', 'detail',
run.Raw('&&'), 'ceph', '-s',
+ run.Raw('&&'), 'sudo', 'nvme', 'list',
]
for dev in self.devices:
check_cmd += [
@@ -421,13 +421,11 @@ class NvmeofThrasher(Thrasher, Greenlet):
while not self.stopping.is_set():
killed_daemons = defaultdict(list)
- weight = 1.0 / len(self.daemons)
- count = 0
+ thrash_daemon_num = self.rng.randint(1, self.max_thrash_daemons)
+ selected_daemons = self.rng.sample(self.daemons, thrash_daemon_num)
for daemon in self.daemons:
- skip = self.rng.uniform(0.0, 1.0)
- if weight <= skip:
- self.log('skipping daemon {label} with skip ({skip}) > weight ({weight})'.format(
- label=daemon.id_, skip=skip, weight=weight))
+ if daemon not in selected_daemons:
+ self.log(f'skipping daemon {daemon.id_} ...')
continue
            # For now, nvmeof daemons can only be thrashed 5 times in the last 30 mins.
@@ -445,17 +443,11 @@ class NvmeofThrasher(Thrasher, Greenlet):
continue
self.log('kill {label}'.format(label=daemon.id_))
- # daemon.stop()
kill_method = self.kill_daemon(daemon)
killed_daemons[kill_method].append(daemon)
daemons_thrash_history[daemon.id_] += [datetime.now()]
- # only thrash max_thrash_daemons amount of daemons
- count += 1
- if count >= self.max_thrash_daemons:
- break
-
if killed_daemons:
iteration_summary = "thrashed- "
for kill_method in killed_daemons:
@@ -468,7 +460,7 @@ class NvmeofThrasher(Thrasher, Greenlet):
self.log(f'waiting for {revive_delay} secs before reviving')
time.sleep(revive_delay) # blocking wait
- self.log('done waiting before reviving')
+ self.log(f'done waiting before reviving - iteration #{len(summary)}: {iteration_summary}')
self.do_checks()
self.switch_task()
@@ -487,7 +479,7 @@ class NvmeofThrasher(Thrasher, Greenlet):
if thrash_delay > 0.0:
self.log(f'waiting for {thrash_delay} secs before thrashing')
time.sleep(thrash_delay) # blocking
- self.log('done waiting before thrashing')
+ self.log('done waiting before thrashing - everything should be up now')
self.do_checks()
self.switch_task()
diff --git a/qa/tasks/s3a_hadoop.py b/qa/tasks/s3a_hadoop.py
index 7b77359fcf2..4518a6f397c 100644
--- a/qa/tasks/s3a_hadoop.py
+++ b/qa/tasks/s3a_hadoop.py
@@ -1,5 +1,6 @@
import contextlib
import logging
+import os
from teuthology import misc
from teuthology.orchestra import run
@@ -40,7 +41,7 @@ def task(ctx, config):
# get versions
maven_major = config.get('maven-major', 'maven-3')
- maven_version = config.get('maven-version', '3.6.3')
+ maven_version = config.get('maven-version', '3.9.9')
hadoop_ver = config.get('hadoop-version', '2.9.2')
bucket_name = config.get('bucket-name', 's3atest')
access_key = config.get('access-key', 'EGAQRD2ULOIFKFSKCT4F')
@@ -48,11 +49,19 @@ def task(ctx, config):
'secret-key',
'zi816w1vZKfaSM85Cl0BxXTwSLyN7zB4RbTswrGb')
+ # programmatically find a nearby mirror so as not to hammer archive.apache.org
+ apache_mirror_cmd="curl 'https://www.apache.org/dyn/closer.cgi' 2>/dev/null | " \
+ "grep -o '<strong>[^<]*</strong>' | sed 's/<[^>]*>//g' | head -n 1"
+ log.info("determining apache mirror by running: " + apache_mirror_cmd)
+ apache_mirror_url_front = os.popen(apache_mirror_cmd).read().rstrip() # note: includes trailing slash (/)
+ log.info("chosen apache mirror is " + apache_mirror_url_front)
+
# set versions for cloning the repo
apache_maven = 'apache-maven-{maven_version}-bin.tar.gz'.format(
maven_version=maven_version)
- maven_link = 'http://archive.apache.org/dist/maven/' + \
- '{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + apache_maven
+ maven_link = '{apache_mirror_url_front}/maven/'.format(apache_mirror_url_front=apache_mirror_url_front) + \
+ '{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + \
+ apache_maven
hadoop_git = 'https://github.com/apache/hadoop'
hadoop_rel = 'hadoop-{ver} rel/release-{ver}'.format(ver=hadoop_ver)
if hadoop_ver == 'trunk':
@@ -204,6 +213,7 @@ def run_s3atest(client, maven_version, testdir, test_options):
run.Raw('&&'),
run.Raw(rm_test),
run.Raw('&&'),
+ run.Raw('JAVA_HOME=$(alternatives --list | grep jre_1.8.0 | head -n 1 | awk \'{print $3}\')'),
run.Raw(run_test),
run.Raw(test_options)
]
diff --git a/qa/workunits/nvmeof/basic_tests.sh b/qa/workunits/nvmeof/basic_tests.sh
index dc6fd1669da..794353348b4 100755
--- a/qa/workunits/nvmeof/basic_tests.sh
+++ b/qa/workunits/nvmeof/basic_tests.sh
@@ -38,8 +38,10 @@ disconnect_all() {
connect_all() {
sudo nvme connect-all --traddr=$NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --transport=tcp -l 3600
sleep 5
- output=$(sudo nvme list --output-format=json)
- if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
+ expected_devices_count=$1
+ actual_devices=$(sudo nvme list --output-format=json | grep -o "$SPDK_CONTROLLER" | wc -l)
+ if [ "$actual_devices" -ne "$expected_devices_count" ]; then
+ sudo nvme list --output-format=json
return 1
fi
}
@@ -72,11 +74,13 @@ test_run connect
test_run list_subsys 1
test_run disconnect_all
test_run list_subsys 0
-test_run connect_all
+devices_count=$(( $NVMEOF_NAMESPACES_COUNT * $NVMEOF_SUBSYSTEMS_COUNT))
+test_run connect_all $devices_count
gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
multipath_count=$(( $gateways_count * $NVMEOF_SUBSYSTEMS_COUNT))
test_run list_subsys $multipath_count
+
echo "-------------Test Summary-------------"
echo "[nvmeof] All nvmeof basic tests passed!"
diff --git a/qa/workunits/nvmeof/fio_test.sh b/qa/workunits/nvmeof/fio_test.sh
index 57d355a6318..03fb58693bd 100755
--- a/qa/workunits/nvmeof/fio_test.sh
+++ b/qa/workunits/nvmeof/fio_test.sh
@@ -5,6 +5,7 @@ sudo yum -y install sysstat
namespace_range_start=
namespace_range_end=
+random_devices_count=
rbd_iostat=false
while [[ $# -gt 0 ]]; do
@@ -17,6 +18,10 @@ while [[ $# -gt 0 ]]; do
namespace_range_end=$2
shift 2
;;
+ --random_devices)
+ random_devices_count=$2
+ shift 2
+ ;;
--rbd_iostat)
rbd_iostat=true
shift
@@ -37,6 +42,8 @@ all_drives_list=$(sudo nvme list --output-format=json |
# run on first 3 namespaces here.
if [ "$namespace_range_start" ] || [ "$namespace_range_end" ]; then
selected_drives=$(echo "${all_drives_list[@]}" | sed -n "${namespace_range_start},${namespace_range_end}p")
+elif [ "$random_devices_count" ]; then
+ selected_drives=$(echo "${all_drives_list[@]}" | shuf -n $random_devices_count)
else
selected_drives="${all_drives_list[@]}"
fi
diff --git a/qa/workunits/nvmeof/scalability_test.sh b/qa/workunits/nvmeof/scalability_test.sh
index 5a26b6284f7..8ede4b7eda2 100755
--- a/qa/workunits/nvmeof/scalability_test.sh
+++ b/qa/workunits/nvmeof/scalability_test.sh
@@ -3,37 +3,64 @@
GATEWAYS=$1 # example "nvmeof.a,nvmeof.b"
DELAY="${SCALING_DELAYS:-50}"
+POOL="${RBD_POOL:-mypool}"
+GROUP="${NVMEOF_GROUP:-mygroup0}"
+source /etc/ceph/nvmeof.env
if [ -z "$GATEWAYS" ]; then
echo "At least one gateway needs to be defined for scalability test"
exit 1
fi
-pip3 install yq
-
status_checks() {
- ceph nvme-gw show mypool ''
- ceph orch ls
- ceph orch ps
- ceph -s
+ expected_count=$1
+
+ output=$(ceph nvme-gw show $POOL $GROUP)
+ nvme_show=$(echo $output | grep -o '"AVAILABLE"' | wc -l)
+ if [ "$nvme_show" -ne "$expected_count" ]; then
+ return 1
+ fi
+
+ orch_ls=$(ceph orch ls)
+ if ! echo "$orch_ls" | grep -q "$expected_count/$expected_count"; then
+ return 1
+ fi
+
+ output=$(ceph orch ps --service-name nvmeof.$POOL.$GROUP)
+ orch_ps=$(echo $output | grep -o 'running' | wc -l)
+ if [ "$orch_ps" -ne "$expected_count" ]; then
+ return 1
+ fi
+
+ ceph_status=$(ceph -s)
+ if ! echo "$ceph_status" | grep -q "HEALTH_OK"; then
+ return 1
+ fi
}
+total_gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
+scaled_down_gateways_count=$(( total_gateways_count - $(echo "$GATEWAYS" | tr -cd ',' | wc -c) - 1 ))
+
echo "[nvmeof.scale] Setting up config to remove gateways ${GATEWAYS}"
+ceph orch ls --service-name nvmeof.$POOL.$GROUP --export > /tmp/nvmeof-gw.yaml
ceph orch ls nvmeof --export > /tmp/nvmeof-gw.yaml
cat /tmp/nvmeof-gw.yaml
-yq "del(.placement.hosts[] | select(. | test(\".*($(echo $GATEWAYS | sed 's/,/|/g'))\")))" /tmp/nvmeof-gw.yaml > /tmp/nvmeof-gw-new.yaml
+
+pattern=$(echo $GATEWAYS | sed 's/,/\\|/g')
+sed "/$pattern/d" /tmp/nvmeof-gw.yaml > /tmp/nvmeof-gw-new.yaml
cat /tmp/nvmeof-gw-new.yaml
echo "[nvmeof.scale] Starting scale testing by removing ${GATEWAYS}"
-status_checks
-ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
+status_checks $total_gateways_count
ceph orch apply -i /tmp/nvmeof-gw-new.yaml # downscale
+ceph orch redeploy nvmeof.$POOL.$GROUP
sleep $DELAY
-status_checks
-ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
+status_checks $scaled_down_gateways_count
+echo "[nvmeof.scale] Downscale complete - removed gateways (${GATEWAYS}); now scaling back up"
ceph orch apply -i /tmp/nvmeof-gw.yaml #upscale
+ceph orch redeploy nvmeof.$POOL.$GROUP
sleep $DELAY
-status_checks
+status_checks $total_gateways_count
echo "[nvmeof.scale] Scale testing passed for ${GATEWAYS}"
diff --git a/qa/workunits/nvmeof/setup_subsystem.sh b/qa/workunits/nvmeof/setup_subsystem.sh
index cc4024323eb..b573647b1e3 100755
--- a/qa/workunits/nvmeof/setup_subsystem.sh
+++ b/qa/workunits/nvmeof/setup_subsystem.sh
@@ -26,14 +26,21 @@ list_subsystems () {
done
}
+list_namespaces () {
+ for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn
+ done
+}
+
+echo "[nvmeof] Starting subsystem setup..."
+
# add all subsystems
for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT subsystem add --subsystem $subsystem_nqn --no-group-append
done
-list_subsystems
-
# add all gateway listeners
for i in "${!gateway_ips[@]}"
do
@@ -65,11 +72,5 @@ done
list_subsystems
-# list namespaces
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn
-done
-
echo "[nvmeof] Subsystem setup done"
diff --git a/qa/workunits/rbd/cli_generic.sh b/qa/workunits/rbd/cli_generic.sh
index 2aa27d3d655..0ceb9ff54cf 100755
--- a/qa/workunits/rbd/cli_generic.sh
+++ b/qa/workunits/rbd/cli_generic.sh
@@ -914,6 +914,11 @@ test_namespace() {
rbd group create rbd/test1/group1
rbd group image add rbd/test1/group1 rbd/test1/image1
+ rbd group image add --group-pool rbd --group-namespace test1 --group group1 \
+ --image-pool rbd --image-namespace test1 --image image2
+ rbd group image rm --group-pool rbd --group-namespace test1 --group group1 \
+ --image-pool rbd --image-namespace test1 --image image1
+ rbd group image rm rbd/test1/group1 rbd/test1/image2
rbd group rm rbd/test1/group1
rbd trash move rbd/test1/image1
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4a1c5768aa7..9cbe350b388 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -16,6 +16,7 @@ endif()
set(bindir ${CMAKE_INSTALL_FULL_BINDIR})
set(sbindir ${CMAKE_INSTALL_FULL_SBINDIR})
set(libdir ${CMAKE_INSTALL_FULL_LIBDIR})
+set(includedir ${CMAKE_INSTALL_FULL_INCLUDEDIR})
set(sysconfdir ${CMAKE_INSTALL_FULL_SYSCONFDIR})
set(libexecdir ${CMAKE_INSTALL_FULL_LIBEXECDIR})
set(pkgdatadir ${CMAKE_INSTALL_FULL_DATADIR})
@@ -31,6 +32,12 @@ configure_file(ceph-post-file.in
configure_file(ceph-crash.in
${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ceph-crash @ONLY)
+if(WITH_LIBCEPHFS)
+ # define the version here so configure_file() below expands it; the
+ # same values are applied to the library target further down
+ set(libcephfs_version 2.0.0)
+ set(libcephfs_soversion 2)
+ configure_file(
+ ${CMAKE_SOURCE_DIR}/src/cephfs.pc.in
+ ${CMAKE_BINARY_DIR}/src/cephfs.pc @ONLY)
+endif(WITH_LIBCEPHFS)
+
# the src/.git_version file may be written out by make-dist; otherwise
# we pull the git version from .git
option(ENABLE_GIT_VERSION "build Ceph with git version string" ON)
@@ -832,10 +839,12 @@ if(WITH_LIBCEPHFS)
target_link_libraries(cephfs PRIVATE client ceph-common
${CRYPTO_LIBS} ${EXTRALIBS})
if(ENABLE_SHARED)
+ set(libcephfs_version 2.0.0)
+ set(libcephfs_soversion 2)
set_target_properties(cephfs PROPERTIES
OUTPUT_NAME cephfs
- VERSION 2.0.0
- SOVERSION 2)
+ VERSION ${libcephfs_version}
+ SOVERSION ${libcephfs_soversion})
if(NOT APPLE AND NOT
(WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL Clang))
foreach(name ceph-common client osdc)
@@ -848,6 +857,9 @@ if(WITH_LIBCEPHFS)
install(DIRECTORY
"include/cephfs"
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+ install(FILES
+ ${CMAKE_BINARY_DIR}/src/cephfs.pc
+ DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
set(ceph_syn_srcs
ceph_syn.cc
client/SyntheticClient.cc)
diff --git a/src/cephfs.pc.in b/src/cephfs.pc.in
new file mode 100644
index 00000000000..3c5761495bb
--- /dev/null
+++ b/src/cephfs.pc.in
@@ -0,0 +1,10 @@
+prefix=@prefix@
+exec_prefix=${prefix}
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libcephfs
+Description: Ceph distributed file system client library
+Version: @libcephfs_version@
+Cflags: -I${includedir}/cephfs -D_FILE_OFFSET_BITS=64
+Libs: -L${libdir} -lcephfs
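
The new cephfs.pc lets downstream builds discover libcephfs through pkg-config. A minimal consumer sketch follows (the file name and compile command are illustrative, and assume the installed cephfs.pc is visible on PKG_CONFIG_PATH):

    // hello_cephfs.cc -- minimal libcephfs consumer (illustrative sketch).
    // Build with: c++ hello_cephfs.cc $(pkg-config --cflags --libs cephfs)
    #include <cephfs/libcephfs.h>
    #include <cstdio>

    int main() {
      int major = 0, minor = 0, patch = 0;
      ceph_version(&major, &minor, &patch);    // version of the linked library
      std::printf("libcephfs %d.%d.%d\n", major, minor, patch);

      struct ceph_mount_info *cmount = nullptr;
      if (ceph_create(&cmount, nullptr) != 0)  // default client id; no mount
        return 1;
      ceph_release(cmount);
      return 0;
    }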
diff --git a/src/client/Client.cc b/src/client/Client.cc
index c404057b929..baf8fb4299d 100644
--- a/src/client/Client.cc
+++ b/src/client/Client.cc
@@ -8907,7 +8907,6 @@ int Client::chownat(int dirfd, const char *relpath, uid_t new_uid, gid_t new_gid
tout(cct) << new_gid << std::endl;
tout(cct) << flags << std::endl;
- filepath path(relpath);
InodeRef in;
InodeRef dirinode;
@@ -8917,10 +8916,24 @@ int Client::chownat(int dirfd, const char *relpath, uid_t new_uid, gid_t new_gid
return r;
}
- r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), 0, dirinode);
- if (r < 0) {
- return r;
+ if (!strcmp(relpath, "")) {
+#if defined(__linux__) && defined(AT_EMPTY_PATH)
+ if (flags & AT_EMPTY_PATH) {
+ in = dirinode;
+ goto out;
+ }
+#endif
+ return -CEPHFS_ENOENT;
+ } else {
+ filepath path(relpath);
+ r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), 0, dirinode);
+ if (r < 0) {
+ return r;
+ }
}
+
+out:
+
struct stat attr;
attr.st_uid = new_uid;
attr.st_gid = new_gid;
@@ -12230,6 +12243,7 @@ int Client::statxat(int dirfd, const char *relpath,
unsigned mask = statx_to_mask(flags, want);
+ InodeRef in;
InodeRef dirinode;
std::scoped_lock lock(client_lock);
int r = get_fd_inode(dirfd, &dirinode);
@@ -12237,12 +12251,24 @@ int Client::statxat(int dirfd, const char *relpath,
return r;
}
- InodeRef in;
- filepath path(relpath);
- r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), mask, dirinode);
- if (r < 0) {
- return r;
+ if (!strcmp(relpath, "")) {
+#if defined(__linux__) && defined(AT_EMPTY_PATH)
+ if (flags & AT_EMPTY_PATH) {
+ in = dirinode;
+ goto out;
+ }
+#endif
+ return -CEPHFS_ENOENT;
+ } else {
+ filepath path(relpath);
+ r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), mask, dirinode);
+ if (r < 0) {
+ return r;
+ }
}
+
+out:
+
r = _getattr(in, mask, perms);
if (r < 0) {
ldout(cct, 3) << __func__ << " exit on error!" << dendl;
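
The AT_EMPTY_PATH branches added to chownat() and statxat() mirror the Linux *at() convention: an empty relpath combined with AT_EMPTY_PATH makes the call operate on dirfd itself, while an empty relpath without the flag fails with ENOENT. A short userspace sketch of the same convention (Linux-only, libc rather than Ceph):

    // at_empty_path.cc -- the libc analogue of the Client.cc change above.
    #include <fcntl.h>     // open flags, AT_EMPTY_PATH
    #include <sys/stat.h>  // fstatat
    #include <unistd.h>
    #include <cstdio>

    int main() {
      int fd = open("/tmp", O_RDONLY | O_DIRECTORY);
      if (fd < 0)
        return 1;
      struct stat st;
      // Empty path + AT_EMPTY_PATH: stat fd itself (same as fstat(fd, &st)).
      // Dropping the flag turns the empty path into an ENOENT error, exactly
      // the two cases the hunks above distinguish.
      if (fstatat(fd, "", &st, AT_EMPTY_PATH) == 0)
        std::printf("uid=%u gid=%u\n", (unsigned)st.st_uid, (unsigned)st.st_gid);
      close(fd);
      return 0;
    }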
diff --git a/src/common/Formatter.cc b/src/common/Formatter.cc
index fd3b2be0221..cd12e4f9885 100644
--- a/src/common/Formatter.cc
+++ b/src/common/Formatter.cc
@@ -23,6 +23,8 @@
#include <algorithm>
#include <set>
#include <limits>
+#include <utility>
+#include <boost/container/small_vector.hpp>
// -----------------------
namespace ceph {
@@ -365,10 +367,21 @@ std::ostream& JSONFormatter::dump_stream(std::string_view name)
void JSONFormatter::dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap)
{
- char buf[LARGE_SIZE];
- vsnprintf(buf, LARGE_SIZE, fmt, ap);
+ auto buf = boost::container::small_vector<char, LARGE_SIZE>{
+ LARGE_SIZE, boost::container::default_init};
- add_value(name, buf, quoted);
+ va_list ap_copy;
+ va_copy(ap_copy, ap);
+ int len = vsnprintf(buf.data(), buf.size(), fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (std::cmp_greater_equal(len, buf.size())) {
+ // output was truncated, allocate a buffer large enough
+ buf.resize(len + 1, boost::container::default_init);
+ vsnprintf(buf.data(), buf.size(), fmt, ap);
+ }
+
+ add_value(name, buf.data(), quoted);
}
int JSONFormatter::get_len() const
@@ -550,15 +563,27 @@ std::ostream& XMLFormatter::dump_stream(std::string_view name)
void XMLFormatter::dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap)
{
- char buf[LARGE_SIZE];
- size_t len = vsnprintf(buf, LARGE_SIZE, fmt, ap);
+ auto buf = boost::container::small_vector<char, LARGE_SIZE>{
+ LARGE_SIZE, boost::container::default_init};
+
+ va_list ap_copy;
+ va_copy(ap_copy, ap);
+ int len = vsnprintf(buf.data(), buf.size(), fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (std::cmp_greater_equal(len, buf.size())) {
+ // output was truncated, allocate a buffer large enough
+ buf.resize(len + 1, boost::container::default_init);
+ vsnprintf(buf.data(), buf.size(), fmt, ap);
+ }
+
auto e = get_xml_name(name);
print_spaces();
if (ns) {
- m_ss << "<" << e << " xmlns=\"" << ns << "\">" << xml_stream_escaper(std::string_view(buf, len)) << "</" << e << ">";
+ m_ss << "<" << e << " xmlns=\"" << ns << "\">" << xml_stream_escaper(std::string_view(buf.data(), len)) << "</" << e << ">";
} else {
- m_ss << "<" << e << ">" << xml_stream_escaper(std::string_view(buf, len)) << "</" << e << ">";
+ m_ss << "<" << e << ">" << xml_stream_escaper(std::string_view(buf.data(), len)) << "</" << e << ">";
}
if (m_pretty)
@@ -927,14 +952,26 @@ void TableFormatter::dump_format_va(std::string_view name,
const char *fmt, va_list ap)
{
finish_pending_string();
- char buf[LARGE_SIZE];
- vsnprintf(buf, LARGE_SIZE, fmt, ap);
+ auto buf = boost::container::small_vector<char, LARGE_SIZE>{
+ LARGE_SIZE, boost::container::default_init};
+
+ va_list ap_copy;
+ va_copy(ap_copy, ap);
+ int len = vsnprintf(buf.data(), buf.size(), fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (std::cmp_greater_equal(len, buf.size())) {
+ // output was truncated, allocate a buffer large enough
+ buf.resize(len + 1, boost::container::default_init);
+ vsnprintf(buf.data(), buf.size(), fmt, ap);
+ }
size_t i = m_vec_index(name);
if (ns) {
- m_ss << ns << "." << buf;
- } else
- m_ss << buf;
+ m_ss << ns << "." << buf.data();
+ } else {
+ m_ss << buf.data();
+ }
m_vec[i].push_back(std::make_pair(get_section_name(name), m_ss.str()));
m_ss.clear();
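
Each formatter above now uses the same two-pass vsnprintf idiom: format into a stack-backed buffer, detect truncation from the return value, and retry once with an exact-size allocation. A condensed standalone sketch of the pattern (std::string/std::vector standing in for boost::container::small_vector):

    #include <cstdarg>
    #include <cstdio>
    #include <string>
    #include <vector>

    std::string vformat(const char *fmt, va_list ap) {
      char stack_buf[256];                  // fast path: no heap allocation
      va_list ap_copy;
      va_copy(ap_copy, ap);                 // keep ap usable for the retry
      int len = vsnprintf(stack_buf, sizeof(stack_buf), fmt, ap_copy);
      va_end(ap_copy);
      if (len < 0)
        return {};                          // encoding error
      if (static_cast<size_t>(len) < sizeof(stack_buf))
        return std::string(stack_buf, len);
      std::vector<char> heap_buf(len + 1);  // slow path: exact size, one retry
      vsnprintf(heap_buf.data(), heap_buf.size(), fmt, ap);
      return std::string(heap_buf.data(), len);
    }

Since a va_list can be traversed only once, the first pass must format from the copy and the retry from the original.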
diff --git a/src/common/HTMLFormatter.cc b/src/common/HTMLFormatter.cc
index e7e985531d8..1bc8d864cb6 100644
--- a/src/common/HTMLFormatter.cc
+++ b/src/common/HTMLFormatter.cc
@@ -23,6 +23,8 @@
#include <stdlib.h>
#include <string>
#include <string.h> // for strdup
+#include <boost/container/small_vector.hpp>
+#include <utility> // for std::cmp_greater_equal
#include "common/escape.h"
@@ -138,17 +140,27 @@ std::ostream& HTMLFormatter::dump_stream(std::string_view name)
void HTMLFormatter::dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap)
{
- char buf[LARGE_SIZE];
- size_t len = vsnprintf(buf, LARGE_SIZE, fmt, ap);
+ auto buf = boost::container::small_vector<char, LARGE_SIZE>{
+ LARGE_SIZE, boost::container::default_init};
+
+ va_list ap_copy;
+ va_copy(ap_copy, ap);
+ int len = vsnprintf(buf.data(), buf.size(), fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (std::cmp_greater_equal(len, buf.size())) {
+ // output was truncated, allocate a buffer large enough
+ buf.resize(len + 1, boost::container::default_init);
+ vsnprintf(buf.data(), buf.size(), fmt, ap);
+ }
std::string e(name);
print_spaces();
if (ns) {
m_ss << "<li xmlns=\"" << ns << "\">" << e << ": "
- << xml_stream_escaper(std::string_view(buf, len)) << "</li>";
+ << xml_stream_escaper(std::string_view(buf.data(), len)) << "</li>";
} else {
m_ss << "<li>" << e << ": "
- << xml_stream_escaper(std::string_view(buf, len)) << "</li>";
+ << xml_stream_escaper(std::string_view(buf.data(), len)) << "</li>";
}
if (m_pretty)
diff --git a/src/common/intrusive_lru.h b/src/common/intrusive_lru.h
index 3ed3625d8a0..f9132773e4d 100644
--- a/src/common/intrusive_lru.h
+++ b/src/common/intrusive_lru.h
@@ -94,6 +94,8 @@ public:
friend void intrusive_ptr_add_ref<>(intrusive_lru_base<Config> *);
friend void intrusive_ptr_release<>(intrusive_lru_base<Config> *);
+ unsigned get_use_count() const { return use_count; }
+
virtual ~intrusive_lru_base() {}
};
@@ -176,6 +178,25 @@ class intrusive_lru {
evict(lru_target_size);
}
+ /// clear [from, to), invoking f on and invalidating any elements with live references
+ template <typename F>
+ void clear_range(
+ typename lru_set_t::iterator from, typename lru_set_t::iterator to,
+ F &&f) {
+ while (from != to) {
+ if (!(*from).lru) {
+ unreferenced_list.erase(lru_list_t::s_iterator_to(*from));
+ from = lru_set.erase_and_dispose(from, [](auto *p) { delete p; } );
+ } else {
+ std::invoke(f, static_cast<T&>(*from));
+ from->lru = nullptr;
+ assert(from->is_invalidated());
+ from = lru_set.erase_and_dispose(
+ from, [](auto *p) { assert(p->use_count > 0); });
+ }
+ }
+ }
+
public:
/**
* Returns the TRef corresponding to k if it exists or
@@ -198,38 +219,28 @@ public:
}
}
- /*
- * Clears unreferenced elements from the lru set [from, to]
+ /**
+ * clear_range
+ *
+ * Clears elements from the lru set in [from, to], invoking F on
+ * and invalidating any with outstanding references
*/
- void clear_range(
- const K& from,
- const K& to) {
- auto from_iter = lru_set.lower_bound(from);
- auto to_iter = lru_set.upper_bound(to);
- for (auto i = from_iter; i != to_iter; ) {
- if (!(*i).lru) {
- unreferenced_list.erase(lru_list_t::s_iterator_to(*i));
- i = lru_set.erase_and_dispose(i, [](auto *p)
- { delete p; } );
- } else {
- i++;
- }
- }
+ template <typename F>
+ void clear_range(const K& from, const K& to, F &&f) {
+ auto from_iter = lru_set.lower_bound(from);
+ auto to_iter = lru_set.upper_bound(to);
+ clear_range(from_iter, to_iter, std::forward<F>(f));
}
- /// drop all elements from lru, invoke f on any with outstanding references
+ /**
+ * clear
+ *
+ * Clears all elements from the lru set, invoking F on
+ * and invalidating any with outstanding references
+ */
template <typename F>
void clear(F &&f) {
- evict(0);
- assert(unreferenced_list.empty());
- for (auto &i: lru_set) {
- std::invoke(f, static_cast<T&>(i));
- i.lru = nullptr;
- assert(i.is_invalidated());
- }
- lru_set.clear_and_dispose([](auto *i){
- assert(i->use_count > 0); /* don't delete, still has a ref count */
- });
+ clear_range(lru_set.begin(), lru_set.end(), std::forward<F>(f));
}
template <class F>
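
The clear()/clear_range() contract above reduces to: entries nobody holds are freed on the spot, while entries with outstanding intrusive_ptr references are detached from the cache and flagged, so holders observe is_valid() == false and the final reference drop performs the delete. A much-simplified, non-intrusive sketch of that contract (not the Ceph template):

    #include <cassert>
    #include <vector>

    struct Entry {
      unsigned use_count = 0;   // stands in for the intrusive refcount
      bool invalidated = false; // stands in for the cleared lru back-pointer
    };

    // Clear the cache: free what nobody holds, invalidate what somebody does.
    void clear(std::vector<Entry*> &cache) {
      for (Entry *e : cache) {
        if (e->use_count == 0) {
          delete e;                  // unreferenced: free immediately
        } else {
          e->invalidated = true;     // readers must reload, not trust this
          assert(e->use_count > 0);  // the last reference drop deletes it
        }
      }
      cache.clear();                 // detach everything from the cache
    }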
diff --git a/src/crimson/osd/object_context.h b/src/crimson/osd/object_context.h
index 6f51045931d..4195e5dc597 100644
--- a/src/crimson/osd/object_context.h
+++ b/src/crimson/osd/object_context.h
@@ -9,6 +9,7 @@
#include <seastar/core/shared_future.hh>
#include <seastar/core/shared_ptr.hh>
+#include "common/fmt_common.h"
#include "common/intrusive_lru.h"
#include "osd/object_state.h"
#include "crimson/common/exception.h"
@@ -73,6 +74,8 @@ public:
using watch_key_t = std::pair<uint64_t, entity_name_t>;
std::map<watch_key_t, seastar::shared_ptr<crimson::osd::Watch>> watchers;
+ CommonOBCPipeline obc_pipeline;
+
ObjectContext(hobject_t hoid) : lock(hoid),
obs(std::move(hoid)) {}
@@ -128,14 +131,49 @@ public:
}
bool is_valid() const {
- return !invalidated_by_interval_change;
+ return !invalidated;
}
private:
boost::intrusive::list_member_hook<> obc_accessing_hook;
uint64_t list_link_cnt = 0;
+
+ /**
+ * loading_started
+ *
+ * ObjectContext instances may be used for pipeline stages
+ * prior to actually being loaded.
+ *
+ * ObjectContextLoader::load_and_lock* use loading_started
+ * to determine whether to initiate loading or simply take
+ * the desired lock directly.
+ *
+ * If loading_started is not set, the task must set it and
+ * (synchronously) take an exclusive lock. That exclusive lock
+ * must be held until the loading completes, at which point the
+ * lock may be relaxed or released.
+ *
+ * If loading_started is set, it is safe to take the desired
+ * lock directly; once the lock is obtained, loading may be
+ * assumed to be complete.
+ *
+ * loading_started, once set, remains set for the lifetime
+ * of the object.
+ */
+ bool loading_started = false;
+
+ /// true once set_*_state has been called, used for debugging
bool fully_loaded = false;
- bool invalidated_by_interval_change = false;
+
+ /**
+ * invalidated
+ *
+ * Set to true upon eviction from cache. This happens to all
+ * cached obc's upon interval change and to the target of
+ * a repop received on a replica to ensure that the cached
+ * state is refreshed upon subsequent replica read.
+ */
+ bool invalidated = false;
friend class ObjectContextRegistry;
friend class ObjectContextLoader;
@@ -156,6 +194,15 @@ public:
}
}
+ template <typename FormatContext>
+ auto fmt_print_ctx(FormatContext & ctx) const {
+ return fmt::format_to(
+ ctx.out(), "ObjectContext({}, oid={}, refcount={})",
+ (void*)this,
+ get_oid(),
+ get_use_count());
+ }
+
using obc_accessing_option_t = boost::intrusive::member_hook<
ObjectContext,
boost::intrusive::list_member_hook<>,
@@ -186,12 +233,14 @@ public:
void clear_range(const hobject_t &from,
const hobject_t &to) {
- obc_lru.clear_range(from, to);
+ obc_lru.clear_range(from, to, [](auto &obc) {
+ obc.invalidated = true;
+ });
}
void invalidate_on_interval_change() {
obc_lru.clear([](auto &obc) {
- obc.invalidated_by_interval_change = true;
+ obc.invalidated = true;
});
}
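
The loading_started protocol documented above can be approximated with ordinary locking primitives (crimson runs on a single reactor, so the flag check needs no atomicity; std::shared_mutex cannot demote an exclusive lock to shared, so this sketch releases and reacquires where the real code demotes without a gap):

    #include <shared_mutex>

    struct Obc {
      std::shared_mutex lock;
      bool loading_started = false;  // monotonic: set once, never cleared
      void load_from_store() {}      // placeholder for the actual load
    };

    void lock_for_read(Obc &obc) {
      if (obc.loading_started) {
        // a load was already initiated; any lock grant implies it finished,
        // because the loader holds the exclusive lock until the load is done
        obc.lock.lock_shared();
        return;
      }
      obc.lock.lock();               // first user: load under exclusive lock
      obc.loading_started = true;
      obc.load_from_store();
      obc.lock.unlock();             // real code: demote_excl_to(lock_type)
      obc.lock.lock_shared();
    }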
diff --git a/src/crimson/osd/object_context_loader.cc b/src/crimson/osd/object_context_loader.cc
index 869ca91504c..483251a23b5 100644
--- a/src/crimson/osd/object_context_loader.cc
+++ b/src/crimson/osd/object_context_loader.cc
@@ -16,19 +16,20 @@ ObjectContextLoader::load_and_lock_head(Manager &manager, RWState::State lock_ty
LOG_PREFIX(ObjectContextLoader::load_and_lock_head);
DEBUGDPP("{} {}", dpp, manager.target, lock_type);
auto releaser = manager.get_releaser();
- // no users pre-populate head_state on this path, so we don't bother to
- // handle it
ceph_assert(manager.target.is_head());
- ceph_assert(manager.head_state.is_empty());
+
+ if (manager.head_state.is_empty()) {
+ auto [obc, _] = obc_registry.get_cached_obc(manager.target);
+ manager.set_state_obc(manager.head_state, obc);
+ }
ceph_assert(manager.target_state.is_empty());
- auto [obc, existed] = obc_registry.get_cached_obc(manager.target);
- manager.set_state_obc(manager.target_state, obc);
- manager.set_state_obc(manager.head_state, obc);
+ manager.set_state_obc(manager.target_state, manager.head_state.obc);
- if (existed) {
+ if (manager.target_state.obc->loading_started) {
co_await manager.target_state.lock_to(lock_type);
} else {
manager.target_state.lock_excl_sync();
+ manager.target_state.obc->loading_started = true;
co_await load_obc(manager.target_state.obc);
manager.target_state.demote_excl_to(lock_type);
}
@@ -36,7 +37,8 @@ ObjectContextLoader::load_and_lock_head(Manager &manager, RWState::State lock_ty
}
ObjectContextLoader::load_and_lock_fut
-ObjectContextLoader::load_and_lock_clone(Manager &manager, RWState::State lock_type)
+ObjectContextLoader::load_and_lock_clone(
+ Manager &manager, RWState::State lock_type, bool lock_head)
{
LOG_PREFIX(ObjectContextLoader::load_and_lock_clone);
DEBUGDPP("{} {}", dpp, manager.target, lock_type);
@@ -46,16 +48,20 @@ ObjectContextLoader::load_and_lock_clone(Manager &manager, RWState::State lock_t
ceph_assert(manager.target_state.is_empty());
if (manager.head_state.is_empty()) {
- auto [obc, existed] = obc_registry.get_cached_obc(manager.target.get_head());
+ auto [obc, _] = obc_registry.get_cached_obc(manager.target.get_head());
manager.set_state_obc(manager.head_state, obc);
+ }
- if (existed) {
- co_await manager.head_state.lock_to(RWState::RWREAD);
- } else {
- manager.head_state.lock_excl_sync();
- co_await load_obc(manager.head_state.obc);
- manager.head_state.demote_excl_to(RWState::RWREAD);
- }
+ if (!manager.head_state.obc->loading_started) {
+ // caller is responsible for pre-populating a loaded obc if lock_head is
+ // false
+ ceph_assert(lock_head);
+ manager.head_state.lock_excl_sync();
+ manager.head_state.obc->loading_started = true;
+ co_await load_obc(manager.head_state.obc);
+ manager.head_state.demote_excl_to(RWState::RWREAD);
+ } else if (lock_head) {
+ co_await manager.head_state.lock_to(RWState::RWREAD);
}
if (manager.options.resolve_clone) {
@@ -95,13 +101,14 @@ ObjectContextLoader::load_and_lock_clone(Manager &manager, RWState::State lock_t
manager.head_state.state = RWState::RWNONE;
}
} else {
- auto [obc, existed] = obc_registry.get_cached_obc(manager.target);
+ auto [obc, _] = obc_registry.get_cached_obc(manager.target);
manager.set_state_obc(manager.target_state, obc);
- if (existed) {
+ if (manager.target_state.obc->loading_started) {
co_await manager.target_state.lock_to(RWState::RWREAD);
} else {
manager.target_state.lock_excl_sync();
+ manager.target_state.obc->loading_started = true;
co_await load_obc(manager.target_state.obc);
manager.target_state.obc->set_clone_ssc(manager.head_state.obc->ssc);
manager.target_state.demote_excl_to(RWState::RWREAD);
diff --git a/src/crimson/osd/object_context_loader.h b/src/crimson/osd/object_context_loader.h
index 6d007d65176..49f8f1572bf 100644
--- a/src/crimson/osd/object_context_loader.h
+++ b/src/crimson/osd/object_context_loader.h
@@ -2,9 +2,11 @@
#include <seastar/core/future.hh>
#include <seastar/util/defer.hh>
+#include "crimson/common/coroutine.h"
#include "crimson/common/errorator.h"
#include "crimson/common/log.h"
#include "crimson/osd/object_context.h"
+#include "crimson/osd/osd_operation.h"
#include "crimson/osd/pg_backend.h"
#include "osd/object_state_fmt.h"
@@ -143,9 +145,7 @@ public:
if (s.is_empty()) return;
s.release_lock();
- SUBDEBUGDPP(
- osd, "released object {}, {}",
- loader.dpp, s.obc->get_oid(), s.obc->obs);
+ SUBDEBUGDPP(osd, "releasing obc {}, {}", loader.dpp, *(s.obc), s.obc->obs);
s.obc->remove_from(loader.obc_set_accessing);
s = state_t();
}
@@ -165,11 +165,13 @@ public:
ObjectContextRef &get_obc() {
ceph_assert(!target_state.is_empty());
+ ceph_assert(target_state.obc->is_loaded());
return target_state.obc;
}
ObjectContextRef &get_head_obc() {
ceph_assert(!head_state.is_empty());
+ ceph_assert(head_state.obc->is_loaded());
return head_state.obc;
}
@@ -188,19 +190,49 @@ public:
release();
}
};
- Manager get_obc_manager(hobject_t oid, bool resolve_clone = true) {
+
+ class Orderer {
+ friend ObjectContextLoader;
+ ObjectContextRef orderer_obc;
+ public:
+ CommonOBCPipeline &obc_pp() {
+ ceph_assert(orderer_obc);
+ return orderer_obc->obc_pipeline;
+ }
+
+ ~Orderer() {
+ LOG_PREFIX(ObjectContextLoader::~Orderer);
+ // orderer_obc may be null if this Orderer was moved from
+ if (orderer_obc) {
+ SUBDEBUG(osd, "releasing obc {}", *(orderer_obc));
+ }
+ }
+ };
+
+ Orderer get_obc_orderer(const hobject_t &oid) {
+ Orderer ret;
+ std::tie(ret.orderer_obc, std::ignore) =
+ obc_registry.get_cached_obc(oid.get_head());
+ return ret;
+ }
+
+ Manager get_obc_manager(const hobject_t &oid, bool resolve_clone = true) {
Manager ret(*this, oid);
ret.options.resolve_clone = resolve_clone;
return ret;
}
+ Manager get_obc_manager(
+ Orderer &orderer, const hobject_t &oid, bool resolve_clone = true) {
+ Manager ret = get_obc_manager(oid, resolve_clone);
+ ret.set_state_obc(ret.head_state, orderer.orderer_obc);
+ return ret;
+ }
+
using load_and_lock_ertr = load_obc_ertr;
using load_and_lock_iertr = interruptible::interruptible_errorator<
IOInterruptCondition, load_and_lock_ertr>;
using load_and_lock_fut = load_and_lock_iertr::future<>;
private:
load_and_lock_fut load_and_lock_head(Manager &, RWState::State);
- load_and_lock_fut load_and_lock_clone(Manager &, RWState::State);
+ load_and_lock_fut load_and_lock_clone(Manager &, RWState::State, bool lock_head=true);
public:
load_and_lock_fut load_and_lock(Manager &, RWState::State);
@@ -244,7 +276,7 @@ public:
// locks on head as part of this call.
manager.head_state.obc = head;
manager.head_state.obc->append_to(obc_set_accessing);
- co_await load_and_lock(manager, State);
+ co_await load_and_lock_clone(manager, State, false);
co_await std::invoke(func, head, manager.get_obc());
}
diff --git a/src/crimson/osd/ops_executer.cc b/src/crimson/osd/ops_executer.cc
index 97b241fdce4..cbc35c21a04 100644
--- a/src/crimson/osd/ops_executer.cc
+++ b/src/crimson/osd/ops_executer.cc
@@ -15,12 +15,15 @@
#include <seastar/core/thread.hh>
+#include "crimson/common/log.h"
#include "crimson/osd/exceptions.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/watch.h"
#include "osd/ClassHandler.h"
#include "osd/SnapMapper.h"
+SET_SUBSYS(osd);
+
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_osd);
@@ -464,10 +467,7 @@ auto OpsExecuter::do_const_op(Func&& f) {
template <class Func>
auto OpsExecuter::do_write_op(Func&& f, OpsExecuter::modified_by m) {
++num_write;
- if (!osd_op_params) {
- osd_op_params.emplace();
- fill_op_params(m);
- }
+ check_init_op_params(m);
return std::forward<Func>(f)(pg->get_backend(), obc->obs, txn);
}
OpsExecuter::call_errorator::future<> OpsExecuter::do_assert_ver(
@@ -822,25 +822,100 @@ OpsExecuter::do_execute_op(OSDOp& osd_op)
}
}
-void OpsExecuter::fill_op_params(OpsExecuter::modified_by m)
+OpsExecuter::rep_op_fut_t
+OpsExecuter::flush_changes_and_submit(
+ const std::vector<OSDOp>& ops,
+ SnapMapper& snap_mapper,
+ OSDriver& osdriver)
{
- osd_op_params->req_id = msg->get_reqid();
- osd_op_params->mtime = msg->get_mtime();
- osd_op_params->at_version = pg->get_next_version();
- osd_op_params->pg_trim_to = pg->get_pg_trim_to();
- osd_op_params->pg_committed_to = pg->get_pg_committed_to();
- osd_op_params->last_complete = pg->get_info().last_complete;
- osd_op_params->user_modify = (m == modified_by::user);
+ const bool want_mutate = !txn.empty();
+ // osd_op_params are instantiated by every wr-like operation.
+ assert(osd_op_params || !want_mutate);
+ assert(obc);
+
+ auto submitted = interruptor::now();
+ auto all_completed = interruptor::now();
+
+ if (cloning_ctx) {
+ ceph_assert(want_mutate);
+ }
+
+ apply_stats();
+ if (want_mutate) {
+ osd_op_params->at_version = pg->get_next_version();
+ osd_op_params->pg_trim_to = pg->get_pg_trim_to();
+ osd_op_params->pg_committed_to = pg->get_pg_committed_to();
+ osd_op_params->last_complete = pg->get_info().last_complete;
+
+ std::vector<pg_log_entry_t> log_entries;
+
+ if (cloning_ctx) {
+ log_entries.emplace_back(complete_cloning_ctx());
+ }
+
+ log_entries.emplace_back(prepare_head_update(ops, txn));
+
+ if (auto log_rit = log_entries.rbegin(); log_rit != log_entries.rend()) {
+ ceph_assert(log_rit->version == osd_op_params->at_version);
+ }
+
+ /*
+ * This works around the gcc bug causing the generated code to incorrectly
+ * execute unconditionally before the predicate.
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101244
+ */
+ auto clone_obc = cloning_ctx
+ ? std::move(cloning_ctx->clone_obc)
+ : nullptr;
+ auto [_submitted, _all_completed] = co_await pg->submit_transaction(
+ std::move(obc),
+ std::move(clone_obc),
+ std::move(txn),
+ std::move(*osd_op_params),
+ std::move(log_entries)
+ );
+
+ submitted = std::move(_submitted);
+ all_completed = std::move(_all_completed);
+ }
+
+ if (op_effects.size()) [[unlikely]] {
+ // need an extra ref to pg due to apply_stats(), which can be executed
+ // after informing the snap mapper
+ all_completed =
+ std::move(all_completed).then_interruptible([this, pg=this->pg] {
+ // let's do the cleaning of `op_effects` in destructor
+ return interruptor::do_for_each(op_effects,
+ [pg=std::move(pg)](auto& op_effect) {
+ return op_effect->execute(pg);
+ });
+ });
+ }
+
+ co_return std::make_tuple(
+ std::move(submitted),
+ std::move(all_completed));
}
-std::vector<pg_log_entry_t> OpsExecuter::prepare_transaction(
- const std::vector<OSDOp>& ops)
+pg_log_entry_t OpsExecuter::prepare_head_update(
+ const std::vector<OSDOp>& ops,
+ ceph::os::Transaction &txn)
{
- // let's ensure we don't need to inform SnapMapper about this particular
- // entry.
+ LOG_PREFIX(OpsExecuter::prepare_head_update);
assert(obc->obs.oi.soid.snap >= CEPH_MAXSNAP);
- std::vector<pg_log_entry_t> log_entries;
- log_entries.emplace_back(
+
+ update_clone_overlap();
+ if (cloning_ctx) {
+ obc->ssc->snapset = std::move(cloning_ctx->new_snapset);
+ }
+ if (snapc.seq > obc->ssc->snapset.seq) {
+ // update snapset with latest snap context
+ obc->ssc->snapset.seq = snapc.seq;
+ obc->ssc->snapset.snaps.clear();
+ }
+
+ pg_log_entry_t ret{
obc->obs.exists ?
pg_log_entry_t::MODIFY : pg_log_entry_t::DELETE,
obc->obs.oi.soid,
@@ -849,15 +924,38 @@ std::vector<pg_log_entry_t> OpsExecuter::prepare_transaction(
osd_op_params->user_modify ? osd_op_params->at_version.version : 0,
osd_op_params->req_id,
osd_op_params->mtime,
- op_info.allows_returnvec() && !ops.empty() ? ops.back().rval.code : 0);
+ op_info.allows_returnvec() && !ops.empty() ? ops.back().rval.code : 0};
+
if (op_info.allows_returnvec()) {
// also the per-op values are recorded in the pg log
- log_entries.back().set_op_returns(ops);
- logger().debug("{} op_returns: {}",
- __func__, log_entries.back().op_returns);
+ ret.set_op_returns(ops);
+ DEBUGDPP("op returns: {}", *pg, ret.op_returns);
+ }
+ ret.clean_regions = std::move(osd_op_params->clean_regions);
+
+ if (obc->obs.exists) {
+ obc->obs.oi.prior_version = obc->obs.oi.version;
+ obc->obs.oi.version = osd_op_params->at_version;
+ if (osd_op_params->user_modify)
+ obc->obs.oi.user_version = osd_op_params->at_version.version;
+ obc->obs.oi.last_reqid = osd_op_params->req_id;
+ obc->obs.oi.mtime = osd_op_params->mtime;
+ obc->obs.oi.local_mtime = ceph_clock_now();
+
+ obc->ssc->exists = true;
+ pg->get_backend().set_metadata(
+ obc->obs.oi.soid,
+ obc->obs.oi,
+ obc->obs.oi.soid.is_head() ? &(obc->ssc->snapset) : nullptr,
+ txn);
+ } else {
+ // reset cached ObjectState without enforcing eviction
+ obc->obs.oi = object_info_t(obc->obs.oi.soid);
}
- log_entries.back().clean_regions = std::move(osd_op_params->clean_regions);
- return log_entries;
+
+ DEBUGDPP("entry: {}", *pg, ret);
+ return ret;
}
// Defined here because there is a circular dependency between OpsExecuter and PG
@@ -871,25 +969,26 @@ version_t OpsExecuter::get_last_user_version() const
return pg->get_last_user_version();
}
-std::unique_ptr<OpsExecuter::CloningContext> OpsExecuter::execute_clone(
+void OpsExecuter::prepare_cloning_ctx(
const SnapContext& snapc,
const ObjectState& initial_obs,
const SnapSet& initial_snapset,
PGBackend& backend,
ceph::os::Transaction& txn)
{
+ LOG_PREFIX(OpsExecuter::prepare_cloning_ctx);
const hobject_t& soid = initial_obs.oi.soid;
logger().debug("{} {} snapset={} snapc={}",
__func__, soid,
initial_snapset, snapc);
- auto cloning_ctx = std::make_unique<CloningContext>();
+ cloning_ctx = std::make_unique<CloningContext>();
cloning_ctx->new_snapset = initial_snapset;
// clone object, the snap field is set to the seq of the SnapContext
// at its creation.
- hobject_t coid = soid;
- coid.snap = snapc.seq;
+ cloning_ctx->coid = soid;
+ cloning_ctx->coid.snap = snapc.seq;
// existing snaps are stored in descending order in snapc,
// cloned_snaps vector will hold all the snaps stored until snapset.seq
@@ -900,49 +999,63 @@ std::unique_ptr<OpsExecuter::CloningContext> OpsExecuter::execute_clone(
return std::vector<snapid_t>{std::begin(snapc.snaps), last};
}();
- auto clone_obc = prepare_clone(coid, osd_op_params->at_version);
- osd_op_params->at_version.version++;
+ // make the clone here, but populate its metadata in complete_cloning_ctx
+ backend.clone_for_write(soid, cloning_ctx->coid, txn);
- // make clone
- backend.clone(clone_obc->obs.oi, initial_obs, clone_obc->obs, txn);
+ cloning_ctx->clone_obc = prepare_clone(cloning_ctx->coid, initial_obs);
delta_stats.num_objects++;
- if (clone_obc->obs.oi.is_omap()) {
+ if (cloning_ctx->clone_obc->obs.oi.is_omap()) {
delta_stats.num_objects_omap++;
}
delta_stats.num_object_clones++;
// newsnapset is obc's ssc
- cloning_ctx->new_snapset.clones.push_back(coid.snap);
- cloning_ctx->new_snapset.clone_size[coid.snap] = initial_obs.oi.size;
- cloning_ctx->new_snapset.clone_snaps[coid.snap] = cloned_snaps;
+ cloning_ctx->new_snapset.clones.push_back(cloning_ctx->coid.snap);
+ cloning_ctx->new_snapset.clone_size[cloning_ctx->coid.snap] = initial_obs.oi.size;
+ cloning_ctx->new_snapset.clone_snaps[cloning_ctx->coid.snap] = cloned_snaps;
// clone_overlap should contain an entry for each clone
// (an empty interval_set if there is no overlap)
- auto &overlap = cloning_ctx->new_snapset.clone_overlap[coid.snap];
+ auto &overlap = cloning_ctx->new_snapset.clone_overlap[cloning_ctx->coid.snap];
if (initial_obs.oi.size) {
overlap.insert(0, initial_obs.oi.size);
}
// log clone
- logger().debug("cloning v {} to {} v {} snaps={} snapset={}",
- initial_obs.oi.version, coid,
- osd_op_params->at_version, cloned_snaps, cloning_ctx->new_snapset);
+ DEBUGDPP("cloning v {} to {} v {} snaps={} snapset={}", *pg,
+ initial_obs.oi.version, cloning_ctx->coid,
+ osd_op_params->at_version, cloned_snaps, cloning_ctx->new_snapset);
+}
- cloning_ctx->log_entry = {
+pg_log_entry_t OpsExecuter::complete_cloning_ctx()
+{
+ ceph_assert(cloning_ctx);
+ const auto &coid = cloning_ctx->coid;
+ cloning_ctx->clone_obc->obs.oi.version = osd_op_params->at_version;
+
+ osd_op_params->at_version.version++;
+
+ pg->get_backend().set_metadata(
+ cloning_ctx->coid,
+ cloning_ctx->clone_obc->obs.oi,
+ nullptr /* snapset */,
+ txn);
+
+ pg_log_entry_t ret{
pg_log_entry_t::CLONE,
coid,
- clone_obc->obs.oi.version,
- clone_obc->obs.oi.prior_version,
- clone_obc->obs.oi.user_version,
+ cloning_ctx->clone_obc->obs.oi.version,
+ cloning_ctx->clone_obc->obs.oi.prior_version,
+ cloning_ctx->clone_obc->obs.oi.user_version,
osd_reqid_t(),
- clone_obc->obs.oi.mtime, // will be replaced in `apply_to()`
+ cloning_ctx->clone_obc->obs.oi.mtime, // placeholder; reassigned below
0
};
- encode(cloned_snaps, cloning_ctx->log_entry.snaps);
- cloning_ctx->log_entry.clean_regions.mark_data_region_dirty(0, initial_obs.oi.size);
- cloning_ctx->clone_obc = clone_obc;
-
- return cloning_ctx;
+ ceph_assert(cloning_ctx->new_snapset.clone_snaps.count(coid.snap));
+ encode(cloning_ctx->new_snapset.clone_snaps[coid.snap], ret.snaps);
+ ret.clean_regions.mark_data_region_dirty(0, cloning_ctx->clone_obc->obs.oi.size);
+ ret.mtime = cloning_ctx->clone_obc->obs.oi.mtime;
+ return ret;
}
void OpsExecuter::update_clone_overlap() {
@@ -965,47 +1078,16 @@ void OpsExecuter::update_clone_overlap() {
delta_stats.num_bytes += osd_op_params->modified_ranges.size();
}
-void OpsExecuter::CloningContext::apply_to(
- std::vector<pg_log_entry_t>& log_entries,
- ObjectContext& processed_obc)
-{
- log_entry.mtime = processed_obc.obs.oi.mtime;
- log_entries.insert(log_entries.begin(), std::move(log_entry));
- processed_obc.ssc->snapset = std::move(new_snapset);
-}
-
-std::vector<pg_log_entry_t>
-OpsExecuter::flush_clone_metadata(
- std::vector<pg_log_entry_t>&& log_entries,
- SnapMapper& snap_mapper,
- OSDriver& osdriver,
- ceph::os::Transaction& txn)
-{
- assert(!txn.empty());
- update_clone_overlap();
- if (cloning_ctx) {
- cloning_ctx->apply_to(log_entries, *obc);
- }
- if (snapc.seq > obc->ssc->snapset.seq) {
- // update snapset with latest snap context
- obc->ssc->snapset.seq = snapc.seq;
- obc->ssc->snapset.snaps.clear();
- }
- logger().debug("{} done, initial snapset={}, new snapset={}",
- __func__, obc->obs.oi.soid, obc->ssc->snapset);
- return std::move(log_entries);
-}
-
ObjectContextRef OpsExecuter::prepare_clone(
const hobject_t& coid,
- eversion_t version)
+ const ObjectState& initial_obs)
{
ceph_assert(pg->is_primary());
ObjectState clone_obs{coid};
clone_obs.exists = true;
- clone_obs.oi.version = version;
- clone_obs.oi.prior_version = obc->obs.oi.version;
- clone_obs.oi.copy_user_bits(obc->obs.oi);
+ // clone_obs.oi.version will be populated in complete_cloning_ctx
+ clone_obs.oi.prior_version = initial_obs.oi.version;
+ clone_obs.oi.copy_user_bits(initial_obs.oi);
clone_obs.oi.clear_flag(object_info_t::FLAG_WHITEOUT);
auto [clone_obc, existed] = pg->obc_registry.get_cached_obc(std::move(coid));
@@ -1036,11 +1118,12 @@ OpsExecuter::OpsExecuter(Ref<PG> pg,
{
if (op_info.may_write() && should_clone(*obc, snapc)) {
do_write_op([this](auto& backend, auto& os, auto& txn) {
- cloning_ctx = execute_clone(std::as_const(snapc),
- std::as_const(obc->obs),
- std::as_const(obc->ssc->snapset),
- backend,
- txn);
+ prepare_cloning_ctx(
+ std::as_const(snapc),
+ std::as_const(obc->obs),
+ std::as_const(obc->ssc->snapset),
+ backend,
+ txn);
});
}
}
diff --git a/src/crimson/osd/ops_executer.h b/src/crimson/osd/ops_executer.h
index 51436f19da7..f5554bd6919 100644
--- a/src/crimson/osd/ops_executer.h
+++ b/src/crimson/osd/ops_executer.h
@@ -195,26 +195,26 @@ private:
SnapContext snapc; // writer snap context
struct CloningContext {
+ /// id of new clone, populated in prepare_cloning_ctx
+ hobject_t coid;
+ /// new snapset, populated in prepare_cloning_ctx
SnapSet new_snapset;
- pg_log_entry_t log_entry;
+ /// clone obc, created in prepare_cloning_ctx; version finalized in complete_cloning_ctx
ObjectContextRef clone_obc;
-
- void apply_to(
- std::vector<pg_log_entry_t>& log_entries,
- ObjectContext& processed_obc);
};
std::unique_ptr<CloningContext> cloning_ctx;
-
/**
- * execute_clone
+ * prepare_cloning_ctx
*
* If snapc contains a snap which occurred logically after the last write
* seen by this object (see OpsExecuter::should_clone()), we first need
- * make a clone of the object at its current state. execute_clone primes
- * txn with that clone operation and returns an
- * OpsExecuter::CloningContext which will allow us to fill in the corresponding
- * metadata and log_entries once the operations have been processed.
+ * to make a clone of the object at its current state. prepare_cloning_ctx
+ * primes txn with that clone operation and populates cloning_ctx with
+ * an obc for the clone and a new snapset reflecting the clone.
+ *
+ * complete_cloning_ctx later uses the information from cloning_ctx to
+ * generate a log entry and object_info versions for the clone.
*
* Note that this strategy differs from classic, which instead performs this
* work at the end and reorders the transaction. See
@@ -227,13 +227,15 @@ private:
* @param backend [in,out] interface for generating mutations
* @param txn [out] transaction for the operation
*/
- std::unique_ptr<CloningContext> execute_clone(
+ void prepare_cloning_ctx(
const SnapContext& snapc,
const ObjectState& initial_obs,
const SnapSet& initial_snapset,
PGBackend& backend,
ceph::os::Transaction& txn);
+ /// complete clone, populate clone_obc, return log entry
+ pg_log_entry_t complete_cloning_ctx();
/**
* should_clone
@@ -264,12 +266,6 @@ private:
*/
void update_clone_overlap();
- std::vector<pg_log_entry_t> flush_clone_metadata(
- std::vector<pg_log_entry_t>&& log_entries,
- SnapMapper& snap_mapper,
- OSDriver& osdriver,
- ceph::os::Transaction& txn);
-
private:
// this gizmo could be wrapped in std::optional for the sake of lazy
// initialization. we don't need it for ops that don't have effect
@@ -400,15 +396,22 @@ public:
std::tuple<interruptible_future<>, interruptible_future<>>;
using rep_op_fut_t =
interruptible_future<rep_op_fut_tuple>;
- template <typename MutFunc>
- rep_op_fut_t flush_changes_n_do_ops_effects(
+ rep_op_fut_t flush_changes_and_submit(
const std::vector<OSDOp>& ops,
SnapMapper& snap_mapper,
- OSDriver& osdriver,
- MutFunc mut_func) &&;
- std::vector<pg_log_entry_t> prepare_transaction(
- const std::vector<OSDOp>& ops);
- void fill_op_params(modified_by m);
+ OSDriver& osdriver);
+ pg_log_entry_t prepare_head_update(
+ const std::vector<OSDOp>& ops,
+ ceph::os::Transaction &txn);
+
+ void check_init_op_params(OpsExecuter::modified_by m) {
+ if (!osd_op_params) {
+ osd_op_params.emplace();
+ osd_op_params->req_id = msg->get_reqid();
+ osd_op_params->mtime = msg->get_mtime();
+ osd_op_params->user_modify = (m == modified_by::user);
+ }
+ }
ObjectContextRef get_obc() const {
return obc;
@@ -443,7 +446,7 @@ public:
ObjectContextRef prepare_clone(
const hobject_t& coid,
- eversion_t version);
+ const ObjectState& initial_obs);
void apply_stats();
};
@@ -485,76 +488,6 @@ auto OpsExecuter::with_effect_on_obc(
return std::forward<MainFunc>(main_func)(ctx_ref);
}
-template <typename MutFunc>
-OpsExecuter::rep_op_fut_t
-OpsExecuter::flush_changes_n_do_ops_effects(
- const std::vector<OSDOp>& ops,
- SnapMapper& snap_mapper,
- OSDriver& osdriver,
- MutFunc mut_func) &&
-{
- const bool want_mutate = !txn.empty();
- // osd_op_params are instantiated by every wr-like operation.
- assert(osd_op_params || !want_mutate);
- assert(obc);
-
- auto submitted = interruptor::now();
- auto all_completed = interruptor::now();
-
- if (cloning_ctx) {
- ceph_assert(want_mutate);
- }
-
- apply_stats();
- if (want_mutate) {
- auto log_entries = flush_clone_metadata(
- prepare_transaction(ops),
- snap_mapper,
- osdriver,
- txn);
-
- if (auto log_rit = log_entries.rbegin(); log_rit != log_entries.rend()) {
- ceph_assert(log_rit->version == osd_op_params->at_version);
- }
-
- /*
- * This works around the gcc bug causing the generated code to incorrectly
- * execute unconditionally before the predicate.
- *
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101244
- */
- auto clone_obc = cloning_ctx
- ? std::move(cloning_ctx->clone_obc)
- : nullptr;
- auto [_submitted, _all_completed] = co_await mut_func(
- std::move(txn),
- std::move(obc),
- std::move(*osd_op_params),
- std::move(log_entries),
- std::move(clone_obc));
-
- submitted = std::move(_submitted);
- all_completed = std::move(_all_completed);
- }
-
- if (op_effects.size()) [[unlikely]] {
- // need extra ref pg due to apply_stats() which can be executed after
- // informing snap mapper
- all_completed =
- std::move(all_completed).then_interruptible([this, pg=this->pg] {
- // let's do the cleaning of `op_effects` in destructor
- return interruptor::do_for_each(op_effects,
- [pg=std::move(pg)](auto& op_effect) {
- return op_effect->execute(pg);
- });
- });
- }
-
- co_return std::make_tuple(
- std::move(submitted),
- std::move(all_completed));
-}
-
template <class Func>
struct OpsExecuter::RollbackHelper {
void rollback_obc_if_modified();
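
The prepare/complete split documented above defers everything that depends on the final log position: prepare_cloning_ctx fixes the clone's identity and primes the transaction with the raw object clone, while complete_cloning_ctx runs after all ops have executed, assigns versions in log order, and writes the clone's metadata. A rough standalone illustration of that shape (simplified types, not the Ceph ones):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Txn { std::vector<std::string> ops; };

    struct CloneDraft {
      std::string coid;      // clone id, fixed at prepare time
      uint64_t version = 0;  // deliberately unset until complete
    };

    CloneDraft prepare(Txn &txn, const std::string &head, uint64_t snap_seq) {
      CloneDraft d{head + ":clone@" + std::to_string(snap_seq)};
      txn.ops.push_back("clone " + head + " -> " + d.coid);  // raw clone only
      return d;                                              // metadata waits
    }

    std::string complete(CloneDraft &d, Txn &txn, uint64_t &at_version) {
      d.version = at_version++;                    // version known only now
      txn.ops.push_back("set_metadata " + d.coid); // finalize object_info
      return "log: CLONE " + d.coid + " v" + std::to_string(d.version);
    }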
diff --git a/src/crimson/osd/osd_operation.h b/src/crimson/osd/osd_operation.h
index fd8b049c0bf..2897a7e1623 100644
--- a/src/crimson/osd/osd_operation.h
+++ b/src/crimson/osd/osd_operation.h
@@ -50,24 +50,30 @@ struct PGPeeringPipeline {
};
struct CommonPGPipeline {
- struct WaitForActive : OrderedExclusivePhaseT<WaitForActive> {
- static constexpr auto type_name = "CommonPGPipeline:::wait_for_active";
- } wait_for_active;
- struct RecoverMissing : OrderedConcurrentPhaseT<RecoverMissing> {
- static constexpr auto type_name = "CommonPGPipeline::recover_missing";
- } recover_missing;
- struct CheckAlreadyCompleteGetObc : OrderedExclusivePhaseT<CheckAlreadyCompleteGetObc> {
- static constexpr auto type_name = "CommonPGPipeline::check_already_complete_get_obc";
- } check_already_complete_get_obc;
- struct LockOBC : OrderedConcurrentPhaseT<LockOBC> {
- static constexpr auto type_name = "CommonPGPipeline::lock_obc";
- } lock_obc;
+ struct WaitPGReady : OrderedConcurrentPhaseT<WaitPGReady> {
+ static constexpr auto type_name = "CommonPGPipeline:::wait_pg_ready";
+ } wait_pg_ready;
+ struct GetOBC : OrderedExclusivePhaseT<GetOBC> {
+ static constexpr auto type_name = "CommonPGPipeline:::get_obc";
+ } get_obc;
+};
+
+struct PGRepopPipeline {
+ struct Process : OrderedExclusivePhaseT<Process> {
+ static constexpr auto type_name = "PGRepopPipeline::process";
+ } process;
+};
+
+struct CommonOBCPipeline {
struct Process : OrderedExclusivePhaseT<Process> {
- static constexpr auto type_name = "CommonPGPipeline::process";
+ static constexpr auto type_name = "CommonOBCPipeline::process";
} process;
struct WaitRepop : OrderedConcurrentPhaseT<WaitRepop> {
- static constexpr auto type_name = "ClientRequest::PGPipeline::wait_repop";
+ static constexpr auto type_name = "CommonOBCPipeline::wait_repop";
} wait_repop;
+ struct SendReply : OrderedExclusivePhaseT<SendReply> {
+ static constexpr auto type_name = "CommonOBCPipeline::send_reply";
+ } send_reply;
};
diff --git a/src/crimson/osd/osd_operation_external_tracking.h b/src/crimson/osd/osd_operation_external_tracking.h
index d2786a95e4d..8dd17bb036d 100644
--- a/src/crimson/osd/osd_operation_external_tracking.h
+++ b/src/crimson/osd/osd_operation_external_tracking.h
@@ -25,24 +25,20 @@ struct LttngBackend
ConnectionPipeline::AwaitMap::BlockingEvent::Backend,
ConnectionPipeline::GetPGMapping::BlockingEvent::Backend,
PerShardPipeline::CreateOrWaitPG::BlockingEvent::Backend,
+ CommonPGPipeline::WaitPGReady::BlockingEvent::Backend,
+ CommonPGPipeline::WaitPGReady::BlockingEvent::ExitBarrierEvent::Backend,
+ CommonPGPipeline::GetOBC::BlockingEvent::Backend,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
PGMap::PGCreationBlockingEvent::Backend,
- ClientRequest::PGPipeline::AwaitMap::BlockingEvent::Backend,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
- ClientRequest::PGPipeline::WaitForActive::BlockingEvent::Backend,
PGActivationBlocker::BlockingEvent::Backend,
scrub::PGScrubber::BlockingEvent::Backend,
- ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::Backend,
- ClientRequest::PGPipeline::RecoverMissing::
- BlockingEvent::ExitBarrierEvent::Backend,
- ClientRequest::PGPipeline::CheckAlreadyCompleteGetObc::BlockingEvent::Backend,
- ClientRequest::PGPipeline::LockOBC::BlockingEvent::Backend,
- ClientRequest::PGPipeline::LockOBC::BlockingEvent::ExitBarrierEvent::Backend,
- ClientRequest::PGPipeline::Process::BlockingEvent::Backend,
- ClientRequest::PGPipeline::WaitRepop::BlockingEvent::Backend,
- ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend,
- ClientRequest::PGPipeline::SendReply::BlockingEvent::Backend,
- ClientRequest::CompletionEvent::Backend
+ ClientRequest::CompletionEvent::Backend,
+ CommonOBCPipeline::Process::BlockingEvent::Backend,
+ CommonOBCPipeline::WaitRepop::BlockingEvent::Backend,
+ CommonOBCPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend,
+ CommonOBCPipeline::SendReply::BlockingEvent::Backend,
+ PGRepopPipeline::Process::BlockingEvent::Backend
{
void handle(ClientRequest::StartEvent&,
const Operation&) override {}
@@ -72,24 +68,28 @@ struct LttngBackend
const PerShardPipeline::CreateOrWaitPG& blocker) override {
}
- void handle(PGMap::PGCreationBlockingEvent&,
- const Operation&,
- const PGMap::PGCreationBlocker&) override {
+ void handle(CommonPGPipeline::WaitPGReady::BlockingEvent& ev,
+ const Operation& op,
+ const CommonPGPipeline::WaitPGReady& blocker) override {
}
- void handle(ClientRequest::PGPipeline::AwaitMap::BlockingEvent& ev,
+ void handle(CommonPGPipeline::WaitPGReady::BlockingEvent::ExitBarrierEvent& ev,
+ const Operation& op) override {
+ }
+
+ void handle(CommonPGPipeline::GetOBC::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::AwaitMap& blocker) override {
+ const CommonPGPipeline::GetOBC& blocker) override {
}
- void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&,
+ void handle(PGMap::PGCreationBlockingEvent&,
const Operation&,
- const PG_OSDMapGate::OSDMapBlocker&) override {
+ const PGMap::PGCreationBlocker&) override {
}
- void handle(ClientRequest::PGPipeline::WaitForActive::BlockingEvent& ev,
- const Operation& op,
- const ClientRequest::PGPipeline::WaitForActive& blocker) override {
+ void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&,
+ const Operation&,
+ const PG_OSDMapGate::OSDMapBlocker&) override {
}
void handle(PGActivationBlocker::BlockingEvent& ev,
@@ -102,51 +102,33 @@ struct LttngBackend
const scrub::PGScrubber& blocker) override {
}
- void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent& ev,
- const Operation& op,
- const ClientRequest::PGPipeline::RecoverMissing& blocker) override {
- }
-
- void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::ExitBarrierEvent& ev,
- const Operation& op) override {
- }
-
- void handle(ClientRequest::PGPipeline::CheckAlreadyCompleteGetObc::BlockingEvent& ev,
+ void handle(CommonOBCPipeline::Process::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::CheckAlreadyCompleteGetObc& blocker) override {
+ const CommonOBCPipeline::Process& blocker) override {
}
-
- void handle(ClientRequest::PGPipeline::LockOBC::BlockingEvent& ev,
+ void handle(CommonOBCPipeline::WaitRepop::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::LockOBC& blocker) override {
+ const CommonOBCPipeline::WaitRepop& blocker) override {
}
- void handle(ClientRequest::PGPipeline::LockOBC::BlockingEvent::ExitBarrierEvent& ev,
+ void handle(CommonOBCPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev,
const Operation& op) override {
}
- void handle(ClientRequest::PGPipeline::Process::BlockingEvent& ev,
+ void handle(CommonOBCPipeline::SendReply::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::Process& blocker) override {
+ const CommonOBCPipeline::SendReply& blocker) override {
}
- void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent& ev,
+ void handle(PGRepopPipeline::Process::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::WaitRepop& blocker) override {
- }
-
- void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev,
- const Operation& op) override {
- }
-
- void handle(ClientRequest::PGPipeline::SendReply::BlockingEvent& ev,
- const Operation& op,
- const ClientRequest::PGPipeline::SendReply& blocker) override {
+ const PGRepopPipeline::Process& blocker) override {
}
void handle(ClientRequest::CompletionEvent&,
const Operation&) override {}
};
struct HistoricBackend
@@ -155,24 +137,20 @@ struct HistoricBackend
ConnectionPipeline::AwaitMap::BlockingEvent::Backend,
ConnectionPipeline::GetPGMapping::BlockingEvent::Backend,
PerShardPipeline::CreateOrWaitPG::BlockingEvent::Backend,
+ CommonPGPipeline::WaitPGReady::BlockingEvent::Backend,
+ CommonPGPipeline::WaitPGReady::BlockingEvent::ExitBarrierEvent::Backend,
+ CommonPGPipeline::GetOBC::BlockingEvent::Backend,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
PGMap::PGCreationBlockingEvent::Backend,
- ClientRequest::PGPipeline::AwaitMap::BlockingEvent::Backend,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent::Backend,
- ClientRequest::PGPipeline::WaitForActive::BlockingEvent::Backend,
PGActivationBlocker::BlockingEvent::Backend,
scrub::PGScrubber::BlockingEvent::Backend,
- ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::Backend,
- ClientRequest::PGPipeline::RecoverMissing::
- BlockingEvent::ExitBarrierEvent::Backend,
- ClientRequest::PGPipeline::CheckAlreadyCompleteGetObc::BlockingEvent::Backend,
- ClientRequest::PGPipeline::LockOBC::BlockingEvent::Backend,
- ClientRequest::PGPipeline::LockOBC::BlockingEvent::ExitBarrierEvent::Backend,
- ClientRequest::PGPipeline::Process::BlockingEvent::Backend,
- ClientRequest::PGPipeline::WaitRepop::BlockingEvent::Backend,
- ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend,
- ClientRequest::PGPipeline::SendReply::BlockingEvent::Backend,
- ClientRequest::CompletionEvent::Backend
+ ClientRequest::CompletionEvent::Backend,
+ CommonOBCPipeline::Process::BlockingEvent::Backend,
+ CommonOBCPipeline::WaitRepop::BlockingEvent::Backend,
+ CommonOBCPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent::Backend,
+ CommonOBCPipeline::SendReply::BlockingEvent::Backend,
+ PGRepopPipeline::Process::BlockingEvent::Backend
{
void handle(ClientRequest::StartEvent&,
const Operation&) override {}
@@ -202,24 +180,28 @@ struct HistoricBackend
const PerShardPipeline::CreateOrWaitPG& blocker) override {
}
- void handle(PGMap::PGCreationBlockingEvent&,
- const Operation&,
- const PGMap::PGCreationBlocker&) override {
+ void handle(CommonPGPipeline::WaitPGReady::BlockingEvent& ev,
+ const Operation& op,
+ const CommonPGPipeline::WaitPGReady& blocker) override {
}
- void handle(ClientRequest::PGPipeline::AwaitMap::BlockingEvent& ev,
+ void handle(CommonPGPipeline::WaitPGReady::BlockingEvent::ExitBarrierEvent& ev,
+ const Operation& op) override {
+ }
+
+ void handle(CommonPGPipeline::GetOBC::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::AwaitMap& blocker) override {
+ const CommonPGPipeline::GetOBC& blocker) override {
}
- void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&,
+ void handle(PGMap::PGCreationBlockingEvent&,
const Operation&,
- const PG_OSDMapGate::OSDMapBlocker&) override {
+ const PGMap::PGCreationBlocker&) override {
}
- void handle(ClientRequest::PGPipeline::WaitForActive::BlockingEvent& ev,
- const Operation& op,
- const ClientRequest::PGPipeline::WaitForActive& blocker) override {
+ void handle(PG_OSDMapGate::OSDMapBlocker::BlockingEvent&,
+ const Operation&,
+ const PG_OSDMapGate::OSDMapBlocker&) override {
}
void handle(PGActivationBlocker::BlockingEvent& ev,
@@ -232,54 +214,36 @@ struct HistoricBackend
const scrub::PGScrubber& blocker) override {
}
- void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent& ev,
- const Operation& op,
- const ClientRequest::PGPipeline::RecoverMissing& blocker) override {
- }
-
- void handle(ClientRequest::PGPipeline::RecoverMissing::BlockingEvent::ExitBarrierEvent& ev,
- const Operation& op) override {
+ static const ClientRequest& to_client_request(const Operation& op) {
+#ifdef NDEBUG
+ return static_cast<const ClientRequest&>(op);
+#else
+ return dynamic_cast<const ClientRequest&>(op);
+#endif
}
- void handle(ClientRequest::PGPipeline::CheckAlreadyCompleteGetObc::BlockingEvent& ev,
+ void handle(CommonOBCPipeline::Process::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::CheckAlreadyCompleteGetObc& blocker) override {
+ const CommonOBCPipeline::Process& blocker) override {
}
- void handle(ClientRequest::PGPipeline::LockOBC::BlockingEvent& ev,
+ void handle(CommonOBCPipeline::WaitRepop::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::LockOBC& blocker) override {
+ const CommonOBCPipeline::WaitRepop& blocker) override {
}
- void handle(ClientRequest::PGPipeline::LockOBC::BlockingEvent::ExitBarrierEvent& ev,
+ void handle(CommonOBCPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev,
const Operation& op) override {
}
- void handle(ClientRequest::PGPipeline::Process::BlockingEvent& ev,
- const Operation& op,
- const ClientRequest::PGPipeline::Process& blocker) override {
- }
-
- void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent& ev,
+ void handle(CommonOBCPipeline::SendReply::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::WaitRepop& blocker) override {
- }
-
- void handle(ClientRequest::PGPipeline::WaitRepop::BlockingEvent::ExitBarrierEvent& ev,
- const Operation& op) override {
+ const CommonOBCPipeline::SendReply& blocker) override {
}
- void handle(ClientRequest::PGPipeline::SendReply::BlockingEvent& ev,
+ void handle(PGRepopPipeline::Process::BlockingEvent& ev,
const Operation& op,
- const ClientRequest::PGPipeline::SendReply& blocker) override {
- }
-
- static const ClientRequest& to_client_request(const Operation& op) {
-#ifdef NDEBUG
- return static_cast<const ClientRequest&>(op);
-#else
- return dynamic_cast<const ClientRequest&>(op);
-#endif
+ const PGRepopPipeline::Process& blocker) override {
}
void handle(ClientRequest::CompletionEvent&, const Operation& op) override {
diff --git a/src/crimson/osd/osd_operations/client_request.cc b/src/crimson/osd/osd_operations/client_request.cc
index 61a56600a57..fcd0f318db2 100644
--- a/src/crimson/osd/osd_operations/client_request.cc
+++ b/src/crimson/osd/osd_operations/client_request.cc
@@ -101,7 +101,7 @@ PerShardPipeline &ClientRequest::get_pershard_pipeline(
return shard_services.get_client_request_pipeline();
}
-ClientRequest::PGPipeline &ClientRequest::client_pp(PG &pg)
+CommonPGPipeline &ClientRequest::client_pp(PG &pg)
{
return pg.request_pg_pipeline;
}
@@ -140,6 +140,15 @@ ClientRequest::interruptible_future<> ClientRequest::with_pg_process_interruptib
DEBUGDPP("{} start", *pgref, *this);
PG &pg = *pgref;
+
+ DEBUGDPP("{}.{}: entering wait_pg_ready stage",
+ *pgref, *this, this_instance_id);
+ // The prior stage is OrderedExclusive (PerShardPipeline::create_or_wait_pg)
+ // and wait_pg_ready is OrderedConcurrent. This transition, therefore, cannot
+ // block and using enter_stage_sync is legal and more efficient than
+ // enter_stage.
+ ihref.enter_stage_sync(client_pp(pg).wait_pg_ready, *this);
+
if (!m->get_hobj().get_key().empty()) {
// There are no users of locator. It was used to ensure that multipart-upload
// parts would end up in the same PG so that they could be clone_range'd into
@@ -156,28 +165,22 @@ ClientRequest::interruptible_future<> ClientRequest::with_pg_process_interruptib
DEBUGDPP("{}: discarding {}", *pgref, *this, this_instance_id);
co_return;
}
- DEBUGDPP("{}.{}: entering await_map stage",
- *pgref, *this, this_instance_id);
- co_await ihref.enter_stage<interruptor>(client_pp(pg).await_map, *this);
- DEBUGDPP("{}.{}: entered await_map stage, waiting for map",
- pg, *this, this_instance_id);
+
auto map_epoch = co_await interruptor::make_interruptible(
ihref.enter_blocker(
*this, pg.osdmap_gate, &decltype(pg.osdmap_gate)::wait_for_map,
m->get_min_epoch(), nullptr));
- DEBUGDPP("{}.{}: map epoch got {}, entering wait_for_active",
+ DEBUGDPP("{}.{}: waited for epoch {}, waiting for active",
pg, *this, this_instance_id, map_epoch);
- co_await ihref.enter_stage<interruptor>(client_pp(pg).wait_for_active, *this);
-
- DEBUGDPP("{}.{}: entered wait_for_active stage, waiting for active",
- pg, *this, this_instance_id);
co_await interruptor::make_interruptible(
ihref.enter_blocker(
*this,
pg.wait_for_active_blocker,
&decltype(pg.wait_for_active_blocker)::wait));
+ co_await ihref.enter_stage<interruptor>(client_pp(pg).get_obc, *this);
+
if (int res = op_info.set_from_op(&*m, *pg.get_osdmap());
res != 0) {
co_await reply_op_error(pgref, res);
@@ -238,10 +241,6 @@ ClientRequest::interruptible_future<> ClientRequest::with_pg_process_interruptib
DEBUGDPP("{}.{}: process[_pg]_op complete, completing handle",
*pgref, *this, this_instance_id);
co_await interruptor::make_interruptible(ihref.handle.complete());
-
- DEBUGDPP("{}.{}: process[_pg]_op complete,"
- "removing request from orderer",
- *pgref, *this, this_instance_id);
}
seastar::future<> ClientRequest::with_pg_process(
@@ -257,10 +256,13 @@ seastar::future<> ClientRequest::with_pg_process(
auto instance_handle = get_instance_handle();
auto &ihref = *instance_handle;
return interruptor::with_interruption(
- [this, pgref, this_instance_id, &ihref]() mutable {
+ [FNAME, this, pgref, this_instance_id, &ihref]() mutable {
return with_pg_process_interruptible(
pgref, this_instance_id, ihref
- ).then_interruptible([this, pgref] {
+ ).then_interruptible([FNAME, this, this_instance_id, pgref] {
+ DEBUGDPP("{}.{}: with_pg_process_interruptible complete,"
+ " completing request",
+ *pgref, *this, this_instance_id);
complete_request(*pgref);
});
}, [FNAME, this, this_instance_id, pgref](std::exception_ptr eptr) {
@@ -268,9 +270,10 @@ seastar::future<> ClientRequest::with_pg_process(
*pgref, *this, this_instance_id, eptr);
}, pgref, pgref->get_osdmap_epoch()).finally(
[this, FNAME, opref=std::move(opref), pgref,
- this_instance_id, instance_handle=std::move(instance_handle), &ihref] {
+ this_instance_id, instance_handle=std::move(instance_handle), &ihref]() mutable {
DEBUGDPP("{}.{}: exit", *pgref, *this, this_instance_id);
- ihref.handle.exit();
+ return ihref.handle.complete(
+ ).finally([instance_handle=std::move(instance_handle)] {});
});
}
@@ -345,7 +348,13 @@ ClientRequest::process_op(
instance_handle_t &ihref, Ref<PG> pg, unsigned this_instance_id)
{
LOG_PREFIX(ClientRequest::process_op);
- ihref.enter_stage_sync(client_pp(*pg).recover_missing, *this);
+ ihref.obc_orderer = pg->obc_loader.get_obc_orderer(m->get_hobj());
+ auto obc_manager = pg->obc_loader.get_obc_manager(
+ *(ihref.obc_orderer),
+ m->get_hobj());
+ co_await ihref.enter_stage<interruptor>(
+ ihref.obc_orderer->obc_pp().process, *this);
+
if (!pg->is_primary()) {
DEBUGDPP(
"Skipping recover_missings on non primary pg for soid {}",
@@ -365,16 +374,6 @@ ClientRequest::process_op(
}
}
- /**
- * The previous stage of recover_missing is a concurrent phase.
- * Checking for already_complete requests must done exclusively.
- * Since get_obc is also an exclusive stage, we can merge both stages into
- * a single stage and avoid stage switching overhead.
- */
- DEBUGDPP("{}.{}: entering check_already_complete_get_obc",
- *pg, *this, this_instance_id);
- co_await ihref.enter_stage<interruptor>(
- client_pp(*pg).check_already_complete_get_obc, *this);
DEBUGDPP("{}.{}: checking already_complete",
*pg, *this, this_instance_id);
auto completed = co_await pg->already_complete(m->get_reqid());
@@ -402,12 +401,6 @@ ClientRequest::process_op(
DEBUGDPP("{}.{}: past scrub blocker, getting obc",
*pg, *this, this_instance_id);
- auto obc_manager = pg->obc_loader.get_obc_manager(m->get_hobj());
-
- // initiate load_and_lock in order, but wait concurrently
- ihref.enter_stage_sync(
- client_pp(*pg).lock_obc, *this);
-
int load_err = co_await pg->obc_loader.load_and_lock(
obc_manager, pg->get_lock_type(op_info)
).si_then([]() -> int {
@@ -425,13 +418,8 @@ ClientRequest::process_op(
co_return;
}
- DEBUGDPP("{}.{}: got obc {}, entering process stage",
+ DEBUGDPP("{}.{}: obc {} loaded and locked, calling do_process",
*pg, *this, this_instance_id, obc_manager.get_obc()->obs);
- co_await ihref.enter_stage<interruptor>(
- client_pp(*pg).process, *this);
-
- DEBUGDPP("{}.{}: in process stage, calling do_process",
- *pg, *this, this_instance_id);
co_await do_process(
ihref, pg, obc_manager.get_obc(), this_instance_id
);
@@ -553,12 +541,14 @@ ClientRequest::do_process(
std::move(ox), m->ops);
co_await std::move(submitted);
}
- co_await ihref.enter_stage<interruptor>(client_pp(*pg).wait_repop, *this);
+ co_await ihref.enter_stage<interruptor>(
+ ihref.obc_orderer->obc_pp().wait_repop, *this);
co_await std::move(all_completed);
}
- co_await ihref.enter_stage<interruptor>(client_pp(*pg).send_reply, *this);
+ co_await ihref.enter_stage<interruptor>(
+ ihref.obc_orderer->obc_pp().send_reply, *this);
if (ret) {
int err = -ret->value();
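
With wait_repop and send_reply entered via ihref.obc_orderer->obc_pp() instead of the per-PG pipeline, ordering is now scoped to the target object: requests against the same object stay FIFO while requests against different objects proceed independently. A toy model of that contract (not the ObjectContextLoader::Orderer API):

#include <deque>
#include <functional>
#include <iostream>
#include <map>
#include <string>

class PerObjectOrderer {
  std::map<std::string, std::deque<std::function<void()>>> queues;
public:
  void submit(const std::string& oid, std::function<void()> op) {
    queues[oid].push_back(std::move(op));  // FIFO within one object
  }
  // Within an object, submission order is preserved; across objects
  // this toy drains queues in key order, but no ordering is promised.
  void run() {
    for (auto& [oid, q] : queues)
      while (!q.empty()) { q.front()(); q.pop_front(); }
  }
};

int main() {
  PerObjectOrderer orderer;
  orderer.submit("obj_a", [] { std::cout << "a: write 1\n"; });
  orderer.submit("obj_b", [] { std::cout << "b: write 1\n"; });
  orderer.submit("obj_a", [] { std::cout << "a: write 2\n"; });  // after a: write 1
  orderer.run();
}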
diff --git a/src/crimson/osd/osd_operations/client_request.h b/src/crimson/osd/osd_operations/client_request.h
index 6d1043e2783..98443bdfc0f 100644
--- a/src/crimson/osd/osd_operations/client_request.h
+++ b/src/crimson/osd/osd_operations/client_request.h
@@ -11,6 +11,7 @@
#include "osd/osd_op_util.h"
#include "crimson/net/Connection.h"
#include "crimson/osd/object_context.h"
+#include "crimson/osd/object_context_loader.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request_common.h"
@@ -41,22 +42,6 @@ class ClientRequest final : public PhasedOperationT<ClientRequest>,
unsigned instance_id = 0;
public:
- class PGPipeline : public CommonPGPipeline {
- public:
- struct AwaitMap : OrderedExclusivePhaseT<AwaitMap> {
- static constexpr auto type_name = "ClientRequest::PGPipeline::await_map";
- } await_map;
- struct SendReply : OrderedExclusivePhaseT<SendReply> {
- static constexpr auto type_name = "ClientRequest::PGPipeline::send_reply";
- } send_reply;
- friend class ClientRequest;
- friend class LttngBackend;
- friend class HistoricBackend;
- friend class ReqRequest;
- friend class LogMissingRequest;
- friend class LogMissingRequestReply;
- };
-
/**
* instance_handle_t
*
@@ -93,20 +78,18 @@ public:
// don't leave any references on the source core, so we just bypass it by using
// intrusive_ptr instead.
using ref_t = boost::intrusive_ptr<instance_handle_t>;
+ std::optional<ObjectContextLoader::Orderer> obc_orderer;
PipelineHandle handle;
std::tuple<
- PGPipeline::AwaitMap::BlockingEvent,
+ CommonPGPipeline::WaitPGReady::BlockingEvent,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent,
- PGPipeline::WaitForActive::BlockingEvent,
PGActivationBlocker::BlockingEvent,
- PGPipeline::RecoverMissing::BlockingEvent,
+ CommonPGPipeline::GetOBC::BlockingEvent,
+ CommonOBCPipeline::Process::BlockingEvent,
scrub::PGScrubber::BlockingEvent,
- PGPipeline::CheckAlreadyCompleteGetObc::BlockingEvent,
- PGPipeline::LockOBC::BlockingEvent,
- PGPipeline::Process::BlockingEvent,
- PGPipeline::WaitRepop::BlockingEvent,
- PGPipeline::SendReply::BlockingEvent,
+ CommonOBCPipeline::WaitRepop::BlockingEvent,
+ CommonOBCPipeline::SendReply::BlockingEvent,
CompletionEvent
> pg_tracking_events;
@@ -293,7 +276,7 @@ private:
unsigned this_instance_id);
bool is_pg_op() const;
- PGPipeline &client_pp(PG &pg);
+ CommonPGPipeline &client_pp(PG &pg);
template <typename Errorator>
using interruptible_errorator =
diff --git a/src/crimson/osd/osd_operations/internal_client_request.cc b/src/crimson/osd/osd_operations/internal_client_request.cc
index 4790025065a..b8f7646bc74 100644
--- a/src/crimson/osd/osd_operations/internal_client_request.cc
+++ b/src/crimson/osd/osd_operations/internal_client_request.cc
@@ -52,39 +52,17 @@ CommonPGPipeline& InternalClientRequest::client_pp()
}
InternalClientRequest::interruptible_future<>
-InternalClientRequest::do_process(
- crimson::osd::ObjectContextRef obc,
- std::vector<OSDOp> &osd_ops)
-{
- LOG_PREFIX(InternalClientRequest::do_process);
- auto params = get_do_osd_ops_params();
- OpsExecuter ox(
- pg, obc, op_info, params, params.get_connection(), SnapContext{});
- co_await pg->run_executer(
- ox, obc, op_info, osd_ops
- ).handle_error_interruptible(
- crimson::ct_error::all_same_way(
- [this, FNAME](auto e) {
- ERRORDPPI("{}: got unexpected error {}", *pg, *this, e);
- ceph_assert(0 == "should not return an error");
- return interruptor::now();
- })
- );
-
- auto [submitted, completed] = co_await pg->submit_executer(
- std::move(ox), osd_ops);
-
- co_await std::move(submitted);
- co_await std::move(completed);
-}
-
-InternalClientRequest::interruptible_future<>
InternalClientRequest::with_interruption()
{
LOG_PREFIX(InternalClientRequest::with_interruption);
assert(pg->is_active());
- co_await enter_stage<interruptor>(client_pp().recover_missing);
+ obc_orderer = pg->obc_loader.get_obc_orderer(get_target_oid());
+ auto obc_manager = pg->obc_loader.get_obc_manager(
+ *obc_orderer,
+ get_target_oid());
+
+ co_await enter_stage<interruptor>(obc_orderer->obc_pp().process);
bool unfound = co_await do_recover_missing(
pg, get_target_oid(), osd_reqid_t());
@@ -94,10 +72,8 @@ InternalClientRequest::with_interruption()
std::make_error_code(std::errc::operation_canceled),
fmt::format("{} is unfound, drop it!", get_target_oid()));
}
- co_await enter_stage<interruptor>(
- client_pp().check_already_complete_get_obc);
- DEBUGI("{}: getting obc lock", *this);
+ DEBUGI("{}: generating ops", *this);
auto osd_ops = create_osd_ops();
@@ -107,26 +83,37 @@ InternalClientRequest::with_interruption()
std::as_const(osd_ops), pg->get_pgid().pgid, *pg->get_osdmap());
assert(ret == 0);
- auto obc_manager = pg->obc_loader.get_obc_manager(get_target_oid());
-
- // initiate load_and_lock in order, but wait concurrently
- enter_stage_sync(client_pp().lock_obc);
-
co_await pg->obc_loader.load_and_lock(
obc_manager, pg->get_lock_type(op_info)
).handle_error_interruptible(
crimson::ct_error::assert_all("unexpected error")
);
- DEBUGDPP("{}: got obc {}, entering process stage",
- *pg, *this, obc_manager.get_obc()->obs);
- co_await enter_stage<interruptor>(client_pp().process);
+ auto params = get_do_osd_ops_params();
+ OpsExecuter ox(
+ pg, obc_manager.get_obc(), op_info, params, params.get_connection(),
+ SnapContext{});
+ co_await pg->run_executer(
+ ox, obc_manager.get_obc(), op_info, osd_ops
+ ).handle_error_interruptible(
+ crimson::ct_error::all_same_way(
+ [this, FNAME](auto e) {
+ ERRORDPPI("{}: got unexpected error {}", *pg, *this, e);
+ ceph_assert(0 == "should not return an error");
+ return interruptor::now();
+ })
+ );
+
+ auto [submitted, completed] = co_await pg->submit_executer(
+ std::move(ox), osd_ops);
+
+ co_await std::move(submitted);
- DEBUGDPP("{}: in process stage, calling do_process",
- *pg, *this);
- co_await do_process(obc_manager.get_obc(), osd_ops);
+ co_await enter_stage<interruptor>(obc_orderer->obc_pp().wait_repop);
+
+ co_await std::move(completed);
- logger().debug("{}: complete", *this);
+ DEBUGDPP("{}: complete", *pg, *this);
co_await interruptor::make_interruptible(handle.complete());
co_return;
}
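
The inlined executer path keeps the two-future protocol visible above: submitted resolves once the operation has been ordered, completed once it is durable, and the coroutine enters wait_repop between the two. A self-contained sketch of that contract with std::promise (the fulfilment points are illustrative):

#include <future>
#include <iostream>
#include <utility>

std::pair<std::future<void>, std::future<void>> submit_op() {
  std::promise<void> submitted, completed;
  auto futures = std::make_pair(submitted.get_future(), completed.get_future());
  // In the real pipeline the backend fulfils these; here we fulfil
  // them inline just to show the ordering contract.
  submitted.set_value();   // ordering point reached
  completed.set_value();   // commit acknowledged
  return futures;
}

int main() {
  auto [submitted, completed] = submit_op();
  submitted.get();   // safe to enter wait_repop
  std::cout << "submitted; waiting for commit\n";
  completed.get();   // safe to reply to the client
  std::cout << "completed\n";
}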
@@ -148,7 +135,7 @@ seastar::future<> InternalClientRequest::start()
return seastar::now();
}).finally([this] {
logger().debug("{}: exit", *this);
- handle.exit();
+ return handle.complete();
});
}
diff --git a/src/crimson/osd/osd_operations/internal_client_request.h b/src/crimson/osd/osd_operations/internal_client_request.h
index 6023db0a8db..1cfde4ab080 100644
--- a/src/crimson/osd/osd_operations/internal_client_request.h
+++ b/src/crimson/osd/osd_operations/internal_client_request.h
@@ -4,6 +4,7 @@
#pragma once
#include "crimson/common/type_helpers.h"
+#include "crimson/osd/object_context_loader.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request_common.h"
#include "crimson/osd/pg.h"
@@ -45,11 +46,10 @@ private:
crimson::osd::ObjectContextRef obc,
std::vector<OSDOp> &osd_ops);
- seastar::future<> do_process();
-
Ref<PG> pg;
epoch_t start_epoch;
OpInfo op_info;
+ std::optional<ObjectContextLoader::Orderer> obc_orderer;
PipelineHandle handle;
public:
@@ -57,12 +57,8 @@ public:
std::tuple<
StartEvent,
- CommonPGPipeline::WaitForActive::BlockingEvent,
- PGActivationBlocker::BlockingEvent,
- CommonPGPipeline::RecoverMissing::BlockingEvent,
- CommonPGPipeline::CheckAlreadyCompleteGetObc::BlockingEvent,
- CommonPGPipeline::LockOBC::BlockingEvent,
- CommonPGPipeline::Process::BlockingEvent,
+ CommonOBCPipeline::Process::BlockingEvent,
+ CommonOBCPipeline::WaitRepop::BlockingEvent,
CompletionEvent
> tracking_events;
};
diff --git a/src/crimson/osd/osd_operations/logmissing_request.cc b/src/crimson/osd/osd_operations/logmissing_request.cc
index 8147c969260..274744cdd92 100644
--- a/src/crimson/osd/osd_operations/logmissing_request.cc
+++ b/src/crimson/osd/osd_operations/logmissing_request.cc
@@ -58,9 +58,9 @@ PerShardPipeline &LogMissingRequest::get_pershard_pipeline(
return shard_services.get_replicated_request_pipeline();
}
-ClientRequest::PGPipeline &LogMissingRequest::client_pp(PG &pg)
+PGRepopPipeline &LogMissingRequest::repop_pipeline(PG &pg)
{
- return pg.request_pg_pipeline;
+ return pg.repop_pipeline;
}
seastar::future<> LogMissingRequest::with_pg(
@@ -73,7 +73,7 @@ seastar::future<> LogMissingRequest::with_pg(
return interruptor::with_interruption([this, pg] {
LOG_PREFIX(LogMissingRequest::with_pg);
DEBUGI("{}: pg present", *this);
- return this->template enter_stage<interruptor>(client_pp(*pg).await_map
+ return this->template enter_stage<interruptor>(repop_pipeline(*pg).process
).then_interruptible([this, pg] {
return this->template with_blocking_event<
PG_OSDMapGate::OSDMapBlocker::BlockingEvent
diff --git a/src/crimson/osd/osd_operations/logmissing_request.h b/src/crimson/osd/osd_operations/logmissing_request.h
index 51c9d540cb5..e12243ce430 100644
--- a/src/crimson/osd/osd_operations/logmissing_request.h
+++ b/src/crimson/osd/osd_operations/logmissing_request.h
@@ -77,14 +77,14 @@ public:
ConnectionPipeline::AwaitMap::BlockingEvent,
ConnectionPipeline::GetPGMapping::BlockingEvent,
PerShardPipeline::CreateOrWaitPG::BlockingEvent,
- ClientRequest::PGPipeline::AwaitMap::BlockingEvent,
+ PGRepopPipeline::Process::BlockingEvent,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent,
PGMap::PGCreationBlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent
> tracking_events;
private:
- ClientRequest::PGPipeline &client_pp(PG &pg);
+ PGRepopPipeline &repop_pipeline(PG &pg);
crimson::net::ConnectionRef l_conn;
crimson::net::ConnectionXcoreRef r_conn;
diff --git a/src/crimson/osd/osd_operations/logmissing_request_reply.cc b/src/crimson/osd/osd_operations/logmissing_request_reply.cc
index fb122a95cd1..5640610bd01 100644
--- a/src/crimson/osd/osd_operations/logmissing_request_reply.cc
+++ b/src/crimson/osd/osd_operations/logmissing_request_reply.cc
@@ -56,11 +56,6 @@ PerShardPipeline &LogMissingRequestReply::get_pershard_pipeline(
return shard_services.get_replicated_request_pipeline();
}
-ClientRequest::PGPipeline &LogMissingRequestReply::client_pp(PG &pg)
-{
- return pg.request_pg_pipeline;
-}
-
seastar::future<> LogMissingRequestReply::with_pg(
ShardServices &shard_services, Ref<PG> pg)
{
diff --git a/src/crimson/osd/osd_operations/logmissing_request_reply.h b/src/crimson/osd/osd_operations/logmissing_request_reply.h
index c741b41bd0f..71651d16789 100644
--- a/src/crimson/osd/osd_operations/logmissing_request_reply.h
+++ b/src/crimson/osd/osd_operations/logmissing_request_reply.h
@@ -82,8 +82,6 @@ public:
> tracking_events;
private:
- ClientRequest::PGPipeline &client_pp(PG &pg);
-
crimson::net::ConnectionRef l_conn;
crimson::net::ConnectionXcoreRef r_conn;
diff --git a/src/crimson/osd/osd_operations/replicated_request.cc b/src/crimson/osd/osd_operations/replicated_request.cc
index 5ca11e5dd15..332ec4dfeb7 100644
--- a/src/crimson/osd/osd_operations/replicated_request.cc
+++ b/src/crimson/osd/osd_operations/replicated_request.cc
@@ -58,9 +58,9 @@ PerShardPipeline &RepRequest::get_pershard_pipeline(
return shard_services.get_replicated_request_pipeline();
}
-ClientRequest::PGPipeline &RepRequest::client_pp(PG &pg)
+PGRepopPipeline &RepRequest::repop_pipeline(PG &pg)
{
- return pg.request_pg_pipeline;
+ return pg.repop_pipeline;
}
seastar::future<> RepRequest::with_pg(
@@ -72,7 +72,7 @@ seastar::future<> RepRequest::with_pg(
return interruptor::with_interruption([this, pg] {
LOG_PREFIX(RepRequest::with_pg);
DEBUGI("{}: pg present", *this);
- return this->template enter_stage<interruptor>(client_pp(*pg).await_map
+ return this->template enter_stage<interruptor>(repop_pipeline(*pg).process
).then_interruptible([this, pg] {
return this->template with_blocking_event<
PG_OSDMapGate::OSDMapBlocker::BlockingEvent
diff --git a/src/crimson/osd/osd_operations/replicated_request.h b/src/crimson/osd/osd_operations/replicated_request.h
index ff5dea6d6db..1e84fd108e2 100644
--- a/src/crimson/osd/osd_operations/replicated_request.h
+++ b/src/crimson/osd/osd_operations/replicated_request.h
@@ -77,14 +77,14 @@ public:
ConnectionPipeline::AwaitMap::BlockingEvent,
ConnectionPipeline::GetPGMapping::BlockingEvent,
PerShardPipeline::CreateOrWaitPG::BlockingEvent,
- ClientRequest::PGPipeline::AwaitMap::BlockingEvent,
+ PGRepopPipeline::Process::BlockingEvent,
PG_OSDMapGate::OSDMapBlocker::BlockingEvent,
PGMap::PGCreationBlockingEvent,
OSD_OSDMapGate::OSDMapBlocker::BlockingEvent
> tracking_events;
private:
- ClientRequest::PGPipeline &client_pp(PG &pg);
+ PGRepopPipeline &repop_pipeline(PG &pg);
crimson::net::ConnectionRef l_conn;
crimson::net::ConnectionXcoreRef r_conn;
diff --git a/src/crimson/osd/osd_operations/snaptrim_event.cc b/src/crimson/osd/osd_operations/snaptrim_event.cc
index 8cab6125682..f8fb7aef6f2 100644
--- a/src/crimson/osd/osd_operations/snaptrim_event.cc
+++ b/src/crimson/osd/osd_operations/snaptrim_event.cc
@@ -388,20 +388,24 @@ SnapTrimObjSubEvent::remove_or_update(
SnapTrimObjSubEvent::snap_trim_obj_subevent_ret_t
SnapTrimObjSubEvent::start()
{
+  obc_orderer = pg->obc_loader.get_obc_orderer(coid);
+
ceph_assert(pg->is_active_clean());
- auto exit_handle = seastar::defer([this] {
- logger().debug("{}: exit", *this);
- handle.exit();
+ auto exit_handle = seastar::defer([this, opref = IRef(this)] {
+ logger().debug("{}: exit", *opref);
+ std::ignore = handle.complete().then([opref = std::move(opref)] {});
});
co_await enter_stage<interruptor>(
- client_pp().check_already_complete_get_obc);
+ obc_orderer->obc_pp().process);
logger().debug("{}: getting obc for {}", *this, coid);
auto obc_manager = pg->obc_loader.get_obc_manager(
+ *obc_orderer,
coid, false /* resolve_oid */);
co_await pg->obc_loader.load_and_lock(
@@ -411,43 +415,39 @@ SnapTrimObjSubEvent::start()
crimson::ct_error::assert_all{"unexpected error in SnapTrimObjSubEvent"}
);
- co_await process_and_submit(
- obc_manager.get_head_obc(), obc_manager.get_obc()
- ).handle_error_interruptible(
- remove_or_update_iertr::pass_further{},
- crimson::ct_error::assert_all{"unexpected error in SnapTrimObjSubEvent"}
- );
-
- logger().debug("{}: completed", *this);
- co_await interruptor::make_interruptible(handle.complete());
-}
-
-ObjectContextLoader::load_obc_iertr::future<>
-SnapTrimObjSubEvent::process_and_submit(ObjectContextRef head_obc,
- ObjectContextRef clone_obc) {
- logger().debug("{}: got clone_obc={}", *this, clone_obc->get_oid());
+ logger().debug("{}: got obc={}", *this, obc_manager.get_obc()->get_oid());
- co_await enter_stage<interruptor>(client_pp().process);
-
- logger().debug("{}: processing clone_obc={}", *this, clone_obc->get_oid());
-
- auto txn = co_await remove_or_update(clone_obc, head_obc);
-
- auto [submitted, all_completed] = co_await pg->submit_transaction(
- std::move(clone_obc),
- nullptr,
- std::move(txn),
- std::move(osd_op_p),
- std::move(log_entries)
- );
+ auto all_completed = interruptor::now();
+ {
+ // as with PG::submit_executer, we need to build the pg log entries
+ // and submit the transaction atomically
+ co_await interruptor::make_interruptible(pg->submit_lock.lock());
+ auto unlocker = seastar::defer([this] {
+ pg->submit_lock.unlock();
+ });
- co_await std::move(submitted);
+ logger().debug("{}: calling remove_or_update obc={}",
+ *this, obc_manager.get_obc()->get_oid());
+
+ auto txn = co_await remove_or_update(
+ obc_manager.get_obc(), obc_manager.get_head_obc());
+
+ auto submitted = interruptor::now();
+ std::tie(submitted, all_completed) = co_await pg->submit_transaction(
+ ObjectContextRef(obc_manager.get_obc()),
+ nullptr,
+ std::move(txn),
+ std::move(osd_op_p),
+ std::move(log_entries)
+ );
+ co_await std::move(submitted);
+ }
- co_await enter_stage<interruptor>(client_pp().wait_repop);
+ co_await enter_stage<interruptor>(obc_orderer->obc_pp().wait_repop);
co_await std::move(all_completed);
- co_return;
+ logger().debug("{}: completed", *this);
}
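
The exit path above leans on seastar::defer to run cleanup whenever the scope unwinds, so handle completion happens on every return or exception path. A minimal stand-in for that guard in standard C++, assuming nothing beyond RAII:

#include <iostream>
#include <mutex>
#include <utility>

template <typename F>
class Defer {
  F f;
public:
  explicit Defer(F fn) : f(std::move(fn)) {}
  ~Defer() { f(); }                    // runs on any exit path
  Defer(const Defer&) = delete;
  Defer& operator=(const Defer&) = delete;
};

std::mutex submit_lock;

void submit_under_lock() {
  submit_lock.lock();
  Defer unlocker{[] { submit_lock.unlock(); }};
  std::cout << "building log entries and submitting atomically\n";
}  // unlocker fires here, even if the body had thrown

int main() { submit_under_lock(); }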
void SnapTrimObjSubEvent::print(std::ostream &lhs) const
diff --git a/src/crimson/osd/osd_operations/snaptrim_event.h b/src/crimson/osd/osd_operations/snaptrim_event.h
index 1164b3169d2..a2b4d357568 100644
--- a/src/crimson/osd/osd_operations/snaptrim_event.h
+++ b/src/crimson/osd/osd_operations/snaptrim_event.h
@@ -6,6 +6,7 @@
#include <iostream>
#include <seastar/core/future.hh>
+#include "crimson/osd/object_context_loader.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/common/subop_blocker.h"
@@ -112,10 +113,6 @@ public:
private:
object_stat_sum_t delta_stats;
- ObjectContextLoader::load_obc_iertr::future<> process_and_submit(
- ObjectContextRef head_obc,
- ObjectContextRef clone_obc);
-
snap_trim_obj_subevent_ret_t remove_clone(
ObjectContextRef obc,
ObjectContextRef head_obc,
@@ -158,6 +155,7 @@ private:
}
Ref<PG> pg;
+ std::optional<ObjectContextLoader::Orderer> obc_orderer;
PipelineHandle handle;
osd_op_params_t osd_op_p;
const hobject_t coid;
@@ -169,9 +167,8 @@ public:
std::tuple<
StartEvent,
- CommonPGPipeline::CheckAlreadyCompleteGetObc::BlockingEvent,
- CommonPGPipeline::Process::BlockingEvent,
- CommonPGPipeline::WaitRepop::BlockingEvent,
+ CommonOBCPipeline::Process::BlockingEvent,
+ CommonOBCPipeline::WaitRepop::BlockingEvent,
CompletionEvent
> tracking_events;
};
diff --git a/src/crimson/osd/pg.cc b/src/crimson/osd/pg.cc
index 1e2988efbbe..d812d822550 100644
--- a/src/crimson/osd/pg.cc
+++ b/src/crimson/osd/pg.cc
@@ -868,45 +868,6 @@ std::ostream& operator<<(std::ostream& os, const PG& pg)
return os;
}
-void PG::mutate_object(
- ObjectContextRef& obc,
- ceph::os::Transaction& txn,
- osd_op_params_t& osd_op_p)
-{
- if (obc->obs.exists) {
- obc->obs.oi.prior_version = obc->obs.oi.version;
- obc->obs.oi.version = osd_op_p.at_version;
- if (osd_op_p.user_modify)
- obc->obs.oi.user_version = osd_op_p.at_version.version;
- obc->obs.oi.last_reqid = osd_op_p.req_id;
- obc->obs.oi.mtime = osd_op_p.mtime;
- obc->obs.oi.local_mtime = ceph_clock_now();
-
- // object_info_t
- {
- ceph::bufferlist osv;
- obc->obs.oi.encode_no_oid(osv, CEPH_FEATURES_ALL);
- // TODO: get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
- txn.setattr(coll_ref->get_cid(), ghobject_t{obc->obs.oi.soid}, OI_ATTR, osv);
- }
-
- // snapset
- if (obc->obs.oi.soid.snap == CEPH_NOSNAP) {
- logger().debug("final snapset {} in {}",
- obc->ssc->snapset, obc->obs.oi.soid);
- ceph::bufferlist bss;
- encode(obc->ssc->snapset, bss);
- txn.setattr(coll_ref->get_cid(), ghobject_t{obc->obs.oi.soid}, SS_ATTR, bss);
- obc->ssc->exists = true;
- } else {
- logger().debug("no snapset (this is a clone)");
- }
- } else {
- // reset cached ObjectState without enforcing eviction
- obc->obs.oi = object_info_t(obc->obs.oi.soid);
- }
-}
-
void PG::enqueue_push_for_backfill(
const hobject_t &obj,
const eversion_t &v,
@@ -1039,8 +1000,15 @@ PG::interruptible_future<eversion_t> PG::submit_error_log(
const std::error_code e,
ceph_tid_t rep_tid)
{
- logger().debug("{}: {} rep_tid: {} error: {}",
- __func__, *m, rep_tid, e);
+ // as with submit_executer, need to ensure that log numbering and submission
+ // are atomic
+ co_await interruptor::make_interruptible(submit_lock.lock());
+ auto unlocker = seastar::defer([this] {
+ submit_lock.unlock();
+ });
+ LOG_PREFIX(PG::submit_error_log);
+ DEBUGDPP("{} rep_tid: {} error: {}",
+ *this, *m, rep_tid, e);
const osd_reqid_t &reqid = m->get_reqid();
mempool::osd_pglog::list<pg_log_entry_t> log_entries;
log_entries.push_back(pg_log_entry_t(pg_log_entry_t::ERROR,
@@ -1061,47 +1029,45 @@ PG::interruptible_future<eversion_t> PG::submit_error_log(
log_entries, t, peering_state.get_pg_trim_to(),
peering_state.get_pg_committed_to());
- return seastar::do_with(log_entries, set<pg_shard_t>{},
- [this, t=std::move(t), rep_tid](auto& log_entries, auto& waiting_on) mutable {
- return interruptor::do_for_each(get_acting_recovery_backfill(),
- [this, log_entries, waiting_on, rep_tid]
- (auto& i) mutable {
- pg_shard_t peer(i);
- if (peer == pg_whoami) {
- return seastar::now();
- }
- ceph_assert(peering_state.get_peer_missing().count(peer));
- ceph_assert(peering_state.has_peer_info(peer));
- auto log_m = crimson::make_message<MOSDPGUpdateLogMissing>(
- log_entries,
- spg_t(peering_state.get_info().pgid.pgid, i.shard),
- pg_whoami.shard,
- get_osdmap_epoch(),
- get_last_peering_reset(),
- rep_tid,
- peering_state.get_pg_trim_to(),
- peering_state.get_pg_committed_to());
- waiting_on.insert(peer);
- logger().debug("submit_error_log: sending log"
- "missing_request (rep_tid: {} entries: {})"
- " to osd {}", rep_tid, log_entries, peer.osd);
- return shard_services.send_to_osd(peer.osd,
- std::move(log_m),
- get_osdmap_epoch());
- }).then_interruptible([this, waiting_on, t=std::move(t), rep_tid] () mutable {
- waiting_on.insert(pg_whoami);
- logger().debug("submit_error_log: inserting rep_tid {}", rep_tid);
- log_entry_update_waiting_on.insert(
- std::make_pair(rep_tid,
- log_update_t{std::move(waiting_on)}));
- return shard_services.get_store().do_transaction(
- get_collection_ref(), std::move(t)
- ).then([this] {
- peering_state.update_trim_to();
- return seastar::make_ready_future<eversion_t>(projected_last_update);
- });
- });
- });
+
+ set<pg_shard_t> waiting_on;
+ for (const auto &peer: get_acting_recovery_backfill()) {
+ if (peer == pg_whoami) {
+ continue;
+ }
+ ceph_assert(peering_state.get_peer_missing().count(peer));
+ ceph_assert(peering_state.has_peer_info(peer));
+ auto log_m = crimson::make_message<MOSDPGUpdateLogMissing>(
+ log_entries,
+ spg_t(peering_state.get_info().pgid.pgid, peer.shard),
+ pg_whoami.shard,
+ get_osdmap_epoch(),
+ get_last_peering_reset(),
+ rep_tid,
+ peering_state.get_pg_trim_to(),
+ peering_state.get_pg_committed_to());
+ waiting_on.insert(peer);
+
+ DEBUGDPP("sending log missing_request (rep_tid: {} entries: {}) to osd {}",
+ *this, rep_tid, log_entries, peer.osd);
+ co_await interruptor::make_interruptible(
+ shard_services.send_to_osd(
+ peer.osd,
+ std::move(log_m),
+ get_osdmap_epoch()));
+ }
+ waiting_on.insert(pg_whoami);
+ DEBUGDPP("inserting rep_tid {}", *this, rep_tid);
+ log_entry_update_waiting_on.insert(
+ std::make_pair(rep_tid,
+ log_update_t{std::move(waiting_on)}));
+ co_await interruptor::make_interruptible(
+ shard_services.get_store().do_transaction(
+ get_collection_ref(), std::move(t)
+ ));
+
+ peering_state.update_trim_to();
+ co_return projected_last_update;
}
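
submit_error_log is rewritten from a do_with/do_for_each continuation chain into a straight-line coroutine loop over the acting set. The shape of that refactor in plain C++, with a hypothetical send callback standing in for send_to_osd:

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

void send_cb(int peer, std::function<void()> done) {
  std::cout << "send to osd " << peer << "\n";
  done();
}

// Continuation style: control flow threaded through nested callbacks.
void notify_all_cb(const std::vector<int>& peers, std::size_t i = 0) {
  if (i == peers.size()) { std::cout << "all sent\n"; return; }
  send_cb(peers[i], [&peers, i] { notify_all_cb(peers, i + 1); });
}

// Sequential style: reads top to bottom, like the coroutine above.
void notify_all_seq(const std::vector<int>& peers) {
  for (int peer : peers)
    send_cb(peer, [] {});
  std::cout << "all sent\n";
}

int main() {
  const std::vector<int> peers{1, 2, 3};
  notify_all_cb(peers);
  notify_all_seq(peers);
}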
PG::run_executer_fut PG::run_executer(
@@ -1157,27 +1123,25 @@ PG::submit_executer_fut PG::submit_executer(
OpsExecuter &&ox,
const std::vector<OSDOp>& ops) {
LOG_PREFIX(PG::submit_executer);
- // transaction must commit at this point
- return std::move(
+ DEBUGDPP("", *this);
+
+ // we need to build the pg log entries and submit the transaction
+ // atomically to ensure log ordering
+ co_await interruptor::make_interruptible(submit_lock.lock());
+ auto unlocker = seastar::defer([this] {
+ submit_lock.unlock();
+ });
+
+ auto [submitted, completed] = co_await std::move(
ox
- ).flush_changes_n_do_ops_effects(
+ ).flush_changes_and_submit(
ops,
snap_mapper,
- osdriver,
- [FNAME, this](auto&& txn,
- auto&& obc,
- auto&& osd_op_p,
- auto&& log_entries,
- auto&& new_clone) {
- DEBUGDPP("object {} submitting txn", *this, obc->get_oid());
- mutate_object(obc, txn, osd_op_p);
- return submit_transaction(
- std::move(obc),
- std::move(new_clone),
- std::move(txn),
- std::move(osd_op_p),
- std::move(log_entries));
- });
+ osdriver
+ );
+ co_return std::make_tuple(
+ std::move(submitted).then_interruptible([unlocker=std::move(unlocker)] {}),
+ std::move(completed));
}
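
submit_lock is held across both building the pg log entries and submitting the transaction so that log numbering and submission order cannot diverge; the unlocker is then folded into the submitted future. A small sketch of why the two steps must be atomic (a toy journal, not the PG log):

#include <cassert>
#include <mutex>
#include <queue>
#include <thread>

std::mutex submit_lock;
int next_version = 0;
std::queue<int> journal;  // must end up in version order

void submit_one() {
  // Holding the lock across numbering *and* enqueue keeps the journal
  // ordered; releasing it between the two would allow interleaving.
  std::lock_guard l{submit_lock};
  int v = next_version++;
  journal.push(v);
}

int main() {
  std::thread a([] { for (int i = 0; i < 1000; ++i) submit_one(); });
  std::thread b([] { for (int i = 0; i < 1000; ++i) submit_one(); });
  a.join();
  b.join();
  for (int expect = 0; !journal.empty(); journal.pop(), ++expect)
    assert(journal.front() == expect);  // strictly increasing versions
}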
PG::interruptible_future<MURef<MOSDOpReply>> PG::do_pg_ops(Ref<MOSDOp> m)
@@ -1324,20 +1288,21 @@ void PG::log_operation(
bool transaction_applied,
ObjectStore::Transaction &txn,
bool async) {
- logger().debug("{}", __func__);
+ LOG_PREFIX(PG::log_operation);
+ DEBUGDPP("", *this);
if (is_primary()) {
ceph_assert(trim_to <= peering_state.get_pg_committed_to());
}
auto last = logv.rbegin();
if (is_primary() && last != logv.rend()) {
- logger().debug("{} on primary, trimming projected log",
- __func__);
+ DEBUGDPP("on primary, trimming projected log", *this);
projected_log.skip_can_rollback_to_to_head();
projected_log.trim(shard_services.get_cct(), last->version,
nullptr, nullptr, nullptr);
}
if (!is_primary()) { // && !is_ec_pg()
+ DEBUGDPP("on replica, clearing obc", *this);
replica_clear_repop_obc(logv);
}
if (!logv.empty()) {
@@ -1354,13 +1319,13 @@ void PG::log_operation(
void PG::replica_clear_repop_obc(
const std::vector<pg_log_entry_t> &logv) {
- logger().debug("{} clearing {} entries", __func__, logv.size());
- for (auto &&e: logv) {
- logger().debug(" {} get_object_boundary(from): {} "
- " head version(to): {}",
- e.soid,
- e.soid.get_object_boundary(),
- e.soid.get_head());
+ LOG_PREFIX(PG::replica_clear_repop_obc);
+  DEBUGDPP("clearing obc for {} log entries", *this, logv.size());
+  for (auto &&e: logv) {
+    DEBUGDPP("clearing entry for {} from: {} to: {}",
+	     *this,
+	     e.soid,
+	     e.soid.get_object_boundary(),
+	     e.soid.get_head());
/* Have to blast all clones, they share a snapset */
obc_registry.clear_range(
e.soid.get_object_boundary(), e.soid.get_head());
diff --git a/src/crimson/osd/pg.h b/src/crimson/osd/pg.h
index 0c25cf25839..d87d0b2d0e9 100644
--- a/src/crimson/osd/pg.h
+++ b/src/crimson/osd/pg.h
@@ -78,7 +78,8 @@ class PG : public boost::intrusive_ref_counter<
using ec_profile_t = std::map<std::string,std::string>;
using cached_map_t = OSDMapService::cached_map_t;
- ClientRequest::PGPipeline request_pg_pipeline;
+ CommonPGPipeline request_pg_pipeline;
+ PGRepopPipeline repop_pipeline;
PGPeeringPipeline peering_request_pg_pipeline;
ClientRequest::Orderer client_request_orderer;
@@ -664,6 +665,7 @@ private:
const OpInfo &op_info,
std::vector<OSDOp>& ops);
+ seastar::shared_mutex submit_lock;
using submit_executer_ret = std::tuple<
interruptible_future<>,
interruptible_future<>>;
@@ -676,6 +678,8 @@ private:
struct do_osd_ops_params_t;
interruptible_future<MURef<MOSDOpReply>> do_pg_ops(Ref<MOSDOp> m);
+
+public:
interruptible_future<
std::tuple<interruptible_future<>, interruptible_future<>>>
submit_transaction(
@@ -684,6 +688,8 @@ private:
ceph::os::Transaction&& txn,
osd_op_params_t&& oop,
std::vector<pg_log_entry_t>&& log_entries);
+
+private:
interruptible_future<> repair_object(
const hobject_t& oid,
eversion_t& v);
@@ -892,10 +898,6 @@ private:
const hobject_t &obj,
const eversion_t &v,
const std::vector<pg_shard_t> &peers);
- void mutate_object(
- ObjectContextRef& obc,
- ceph::os::Transaction& txn,
- osd_op_params_t& osd_op_p);
bool can_discard_replica_op(const Message& m, epoch_t m_map_epoch) const;
bool can_discard_op(const MOSDOp& m) const;
void context_registry_on_change();
diff --git a/src/crimson/osd/pg_backend.cc b/src/crimson/osd/pg_backend.cc
index 24a381b4cf7..a40b28caa8b 100644
--- a/src/crimson/osd/pg_backend.cc
+++ b/src/crimson/osd/pg_backend.cc
@@ -1283,22 +1283,6 @@ PGBackend::rm_xattr(
return rm_xattr_iertr::now();
}
-void PGBackend::clone(
- /* const */object_info_t& snap_oi,
- const ObjectState& os,
- const ObjectState& d_os,
- ceph::os::Transaction& txn)
-{
- // See OpsExecuter::execute_clone documentation
- txn.clone(coll->get_cid(), ghobject_t{os.oi.soid}, ghobject_t{d_os.oi.soid});
- {
- ceph::bufferlist bv;
- snap_oi.encode_no_oid(bv, CEPH_FEATURES_ALL);
- txn.setattr(coll->get_cid(), ghobject_t{d_os.oi.soid}, OI_ATTR, bv);
- }
- txn.rmattr(coll->get_cid(), ghobject_t{d_os.oi.soid}, SS_ATTR);
-}
-
using get_omap_ertr =
crimson::os::FuturizedStore::Shard::read_errorator::extend<
crimson::ct_error::enodata>;
@@ -1835,3 +1819,32 @@ PGBackend::read_ierrorator::future<> PGBackend::tmapget(
read_errorator::pass_further{});
}
+void PGBackend::set_metadata(
+ const hobject_t &obj,
+ object_info_t &oi,
+ const SnapSet *ss /* non-null iff head */,
+ ceph::os::Transaction& txn)
+{
+ ceph_assert((obj.is_head() && ss) || (!obj.is_head() && !ss));
+ {
+ ceph::bufferlist bv;
+ oi.encode_no_oid(bv, CEPH_FEATURES_ALL);
+ txn.setattr(coll->get_cid(), ghobject_t{obj}, OI_ATTR, bv);
+ }
+ if (ss) {
+ ceph::bufferlist bss;
+ encode(*ss, bss);
+ txn.setattr(coll->get_cid(), ghobject_t{obj}, SS_ATTR, bss);
+ }
+}
+
+void PGBackend::clone_for_write(
+ const hobject_t &from,
+ const hobject_t &to,
+ ceph::os::Transaction &txn)
+{
+ // See OpsExecuter::execute_clone documentation
+ txn.clone(coll->get_cid(), ghobject_t{from}, ghobject_t{to});
+ txn.rmattr(coll->get_cid(), ghobject_t{to}, SS_ATTR);
+}
+
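The removed PGBackend::clone wrote the clone's object_info attr as a side effect of the data clone; the new split leaves data movement to clone_for_write and all attr writes to set_metadata. A toy transaction recorder showing the division of labor (not the ObjectStore::Transaction API):

#include <iostream>
#include <string>
#include <vector>

struct Txn {
  std::vector<std::string> ops;
  void clone(const std::string& from, const std::string& to) {
    ops.push_back("clone " + from + " -> " + to);
  }
  void setattr(const std::string& obj, const std::string& attr) {
    ops.push_back("setattr " + obj + " " + attr);
  }
  void rmattr(const std::string& obj, const std::string& attr) {
    ops.push_back("rmattr " + obj + " " + attr);
  }
};

void clone_for_write(Txn& t, const std::string& from, const std::string& to) {
  t.clone(from, to);
  t.rmattr(to, "SS_ATTR");  // a fresh clone carries no snapset attr
}

void set_metadata(Txn& t, const std::string& obj, bool is_head) {
  t.setattr(obj, "OI_ATTR");               // object_info on every object
  if (is_head) t.setattr(obj, "SS_ATTR");  // snapset only on the head
}

int main() {
  Txn t;
  clone_for_write(t, "head", "clone_5");
  set_metadata(t, "clone_5", /*is_head=*/false);
  set_metadata(t, "head", /*is_head=*/true);
  for (const auto& op : t.ops) std::cout << op << "\n";
}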
diff --git a/src/crimson/osd/pg_backend.h b/src/crimson/osd/pg_backend.h
index 813218983fd..c24176a10e7 100644
--- a/src/crimson/osd/pg_backend.h
+++ b/src/crimson/osd/pg_backend.h
@@ -308,11 +308,6 @@ public:
ObjectState& os,
const OSDOp& osd_op,
ceph::os::Transaction& trans);
- void clone(
- /* const */object_info_t& snap_oi,
- const ObjectState& os,
- const ObjectState& d_os,
- ceph::os::Transaction& trans);
interruptible_future<struct stat> stat(
CollectionRef c,
const ghobject_t& oid) const;
@@ -411,6 +406,20 @@ public:
ceph::os::Transaction& trans,
osd_op_params_t& osd_op_params,
object_stat_sum_t& delta_stats);
+
+ /// sets oi and (for head) ss attrs
+ void set_metadata(
+ const hobject_t &obj,
+ object_info_t &oi,
+ const SnapSet *ss /* non-null iff head */,
+ ceph::os::Transaction& trans);
+
+ /// clone from->to and clear ss attribute on to
+ void clone_for_write(
+ const hobject_t &from,
+ const hobject_t &to,
+ ceph::os::Transaction& trans);
+
virtual rep_op_fut_t
submit_transaction(const std::set<pg_shard_t> &pg_shards,
const hobject_t& hoid,
diff --git a/src/include/cephfs/ceph_ll_client.h b/src/include/cephfs/ceph_ll_client.h
index ac5b7c22471..458edce590b 100644
--- a/src/include/cephfs/ceph_ll_client.h
+++ b/src/include/cephfs/ceph_ll_client.h
@@ -110,6 +110,9 @@ struct ceph_statx {
* others in the future, we disallow setting any that aren't recognized.
*/
#define CEPH_REQ_FLAG_MASK (AT_SYMLINK_NOFOLLOW|AT_STATX_DONT_SYNC)
+#if defined(__linux__) && defined(AT_EMPTY_PATH)
+#define CEPH_AT_EMPTY_PATH (CEPH_REQ_FLAG_MASK|AT_EMPTY_PATH)
+#endif
/* fallocate mode flags */
#ifndef FALLOC_FL_KEEP_SIZE
diff --git a/src/include/cephfs/libcephfs.h b/src/include/cephfs/libcephfs.h
index ba0b76e072b..f26eabfbd5c 100644
--- a/src/include/cephfs/libcephfs.h
+++ b/src/include/cephfs/libcephfs.h
@@ -937,7 +937,7 @@ int ceph_fstatx(struct ceph_mount_info *cmount, int fd, struct ceph_statx *stx,
* @param relpath to the file/directory to get statistics of
* @param stx the ceph_statx struct that will be filled in with the file's statistics.
* @param want bitfield of CEPH_STATX_* flags showing designed attributes
- * @param flags bitfield that can be used to set AT_* modifier flags (AT_STATX_SYNC_AS_STAT, AT_STATX_FORCE_SYNC, AT_STATX_DONT_SYNC and AT_SYMLINK_NOFOLLOW)
+ * @param flags bitfield that can be used to set AT_* modifier flags (AT_STATX_DONT_SYNC, AT_SYMLINK_NOFOLLOW and AT_EMPTY_PATH)
* @returns 0 on success or negative error code on failure.
*/
int ceph_statxat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
@@ -1104,7 +1104,7 @@ int ceph_lchown(struct ceph_mount_info *cmount, const char *path, int uid, int g
* @param relpath the relpath of the file/directory to change the ownership of.
* @param uid the user id to set on the file/directory.
* @param gid the group id to set on the file/directory.
- * @param flags bitfield that can be used to set AT_* modifier flags (AT_SYMLINK_NOFOLLOW)
+ * @param flags bitfield that can be used to set AT_* modifier flags (AT_SYMLINK_NOFOLLOW and AT_EMPTY_PATH)
* @returns 0 on success or negative error code on failure.
*/
int ceph_chownat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
diff --git a/src/libcephfs.cc b/src/libcephfs.cc
index 7eea6665f61..60da6145787 100644
--- a/src/libcephfs.cc
+++ b/src/libcephfs.cc
@@ -982,7 +982,11 @@ extern "C" int ceph_statxat(struct ceph_mount_info *cmount, int dirfd, const cha
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
+#ifdef CEPH_AT_EMPTY_PATH
+ if (flags & ~CEPH_AT_EMPTY_PATH)
+#else
if (flags & ~CEPH_REQ_FLAG_MASK)
+#endif
return -CEPHFS_EINVAL;
return cmount->get_client()->statxat(dirfd, relpath, stx, cmount->default_perms,
want, flags);
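
ceph_statxat now widens the accepted flag set only where AT_EMPTY_PATH exists, while still rejecting any unrecognized bit. The mask-validation pattern in isolation (the flag values below are made up for the sketch):

#include <cassert>

constexpr unsigned FLAG_NOFOLLOW   = 0x1;
constexpr unsigned FLAG_DONT_SYNC  = 0x2;
constexpr unsigned FLAG_EMPTY_PATH = 0x4;  // only valid where supported

#ifdef SUPPORT_EMPTY_PATH
constexpr unsigned FLAG_MASK = FLAG_NOFOLLOW | FLAG_DONT_SYNC | FLAG_EMPTY_PATH;
#else
constexpr unsigned FLAG_MASK = FLAG_NOFOLLOW | FLAG_DONT_SYNC;
#endif

constexpr bool flags_valid(unsigned flags) {
  return (flags & ~FLAG_MASK) == 0;  // any bit outside the mask -> EINVAL
}

int main() {
  static_assert(flags_valid(FLAG_NOFOLLOW | FLAG_DONT_SYNC));
  assert(!flags_valid(0x8000));  // unknown flag is rejected
}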
diff --git a/src/librbd/ObjectMap.cc b/src/librbd/ObjectMap.cc
index 65e3fc4a4c2..160bb4dcf9e 100644
--- a/src/librbd/ObjectMap.cc
+++ b/src/librbd/ObjectMap.cc
@@ -107,32 +107,6 @@ bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
}
template <typename I>
-bool ObjectMap<I>::object_may_not_exist(uint64_t object_no) const
-{
- ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
-
- // Fall back to default logic if object map is disabled or invalid
- if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
- m_image_ctx.image_lock)) {
- return true;
- }
-
- bool flags_set;
- int r = m_image_ctx.test_flags(m_image_ctx.snap_id,
- RBD_FLAG_OBJECT_MAP_INVALID,
- m_image_ctx.image_lock, &flags_set);
- if (r < 0 || flags_set) {
- return true;
- }
-
- uint8_t state = (*this)[object_no];
- bool nonexistent = (state != OBJECT_EXISTS && state != OBJECT_EXISTS_CLEAN);
- ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r="
- << nonexistent << dendl;
- return nonexistent;
-}
-
-template <typename I>
bool ObjectMap<I>::update_required(const ceph::BitVector<2>::Iterator& it,
uint8_t new_state) {
ceph_assert(ceph_mutex_is_locked(m_lock));
diff --git a/src/librbd/ObjectMap.h b/src/librbd/ObjectMap.h
index 35ea4cb88f9..5e7fcbbe9dd 100644
--- a/src/librbd/ObjectMap.h
+++ b/src/librbd/ObjectMap.h
@@ -65,7 +65,6 @@ public:
void close(Context *on_finish);
bool set_object_map(ceph::BitVector<2> &target_object_map);
bool object_may_exist(uint64_t object_no) const;
- bool object_may_not_exist(uint64_t object_no) const;
void aio_save(Context *on_finish);
void aio_resize(uint64_t new_size, uint8_t default_object_state,
diff --git a/src/librbd/operation/FlattenRequest.cc b/src/librbd/operation/FlattenRequest.cc
index 7bc34681924..8034637e8e6 100644
--- a/src/librbd/operation/FlattenRequest.cc
+++ b/src/librbd/operation/FlattenRequest.cc
@@ -49,15 +49,6 @@ public:
return -ERESTART;
}
- {
- std::shared_lock image_lock{image_ctx.image_lock};
- if (image_ctx.object_map != nullptr &&
- !image_ctx.object_map->object_may_not_exist(m_object_no)) {
- // can skip because the object already exists
- return 1;
- }
- }
-
if (!io::util::trigger_copyup(
&image_ctx, m_object_no, m_io_context, this)) {
// stop early if the parent went away - it just means
diff --git a/src/mon/NVMeofGwMon.cc b/src/mon/NVMeofGwMon.cc
index 0fe5c3e655f..c9a6b789b89 100644
--- a/src/mon/NVMeofGwMon.cc
+++ b/src/mon/NVMeofGwMon.cc
@@ -66,11 +66,6 @@ void NVMeofGwMon::on_shutdown()
void NVMeofGwMon::tick()
{
- if (++tick_ratio == 10) {
- global_rebalance_index++;
- dout(20) << "rebalance index " << global_rebalance_index << dendl;
- tick_ratio = 0;
- }
if (!is_active() || !mon.is_leader()) {
dout(10) << "NVMeofGwMon leader : " << mon.is_leader()
<< "active : " << is_active() << dendl;
@@ -329,8 +324,9 @@ bool NVMeofGwMon::preprocess_command(MonOpRequestRef op)
if (HAVE_FEATURE(mon.get_quorum_con_features(), NVMEOFHA)) {
f->dump_string("features", "LB");
if (map.created_gws[group_key].size()) {
- uint32_t index = (global_rebalance_index %
- map.created_gws[group_key].size()) + 1;
+ time_t seconds_since_1970 = time(NULL);
+ uint32_t index = ((seconds_since_1970/60) %
+ map.created_gws[group_key].size()) + 1;
f->dump_unsigned("rebalance_ana_group", index);
}
}
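
Deriving the index from wall-clock minutes removes the monitor-local tick counter, so every monitor computes the same rebalance group without sharing state. The arithmetic in isolation:

#include <cstddef>
#include <cstdint>
#include <ctime>
#include <iostream>

uint32_t rebalance_index(std::size_t group_size) {
  const std::time_t now = std::time(nullptr);
  // Advances once per minute; 1-based, matching the code above.
  return static_cast<uint32_t>((now / 60) % group_size) + 1;
}

int main() {
  std::cout << "index for a 4-gateway group: " << rebalance_index(4) << "\n";
}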
@@ -625,15 +621,15 @@ bool NVMeofGwMon::prepare_beacon(MonOpRequestRef op)
avail = gw_availability_t::GW_CREATED;
dout(20) << "No-subsystems condition detected for GW " << gw_id <<dendl;
} else {
- bool listener_found = true;
+ bool listener_found = false;
for (auto &subs: sub) {
- if (subs.listeners.size() == 0) {
- listener_found = false;
- dout(10) << "No-listeners condition detected for GW " << gw_id << " for nqn " << subs.nqn << dendl;
+ if (subs.listeners.size()) {
+ listener_found = true;
break;
}
}
if (!listener_found) {
+ dout(10) << "No-listeners condition detected for GW " << gw_id << dendl;
avail = gw_availability_t::GW_CREATED;
}
    }// for HA, no-subsystems and no-listeners are the same use case
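
The corrected loop flips the predicate: a gateway now counts as having listeners if any subsystem has one, whereas the old code effectively required all subsystems to. The fixed predicate expressed with std::any_of (Subsystem here is a stand-in struct):

#include <algorithm>
#include <cassert>
#include <vector>

struct Subsystem {
  std::vector<int> listeners;  // listener ports, empty if none
};

bool listener_found(const std::vector<Subsystem>& subs) {
  return std::any_of(subs.begin(), subs.end(),
                     [](const Subsystem& s) { return !s.listeners.empty(); });
}

int main() {
  std::vector<Subsystem> no_listeners(2);                       // all empty
  std::vector<Subsystem> some{Subsystem{}, Subsystem{{4420}}};  // one has a port
  assert(!listener_found(no_listeners));
  assert(listener_found(some));
}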
diff --git a/src/mon/NVMeofGwMon.h b/src/mon/NVMeofGwMon.h
index 2d13e153bd2..7fae8b766a5 100644
--- a/src/mon/NVMeofGwMon.h
+++ b/src/mon/NVMeofGwMon.h
@@ -83,9 +83,6 @@ public:
void check_sub(Subscription *sub);
private:
- // used for calculate pool & group GW responsible for rebalance
- uint32_t global_rebalance_index = 1;
- uint8_t tick_ratio = 0;
void synchronize_last_beacon();
void process_gw_down(const NvmeGwId &gw_id,
const NvmeGroupKey& group_key, bool &propose_pending,
diff --git a/src/msg/async/Event.cc b/src/msg/async/Event.cc
index 08e117ea54a..16abb1368b0 100644
--- a/src/msg/async/Event.cc
+++ b/src/msg/async/Event.cc
@@ -347,7 +347,7 @@ void EventCenter::wakeup()
return ;
ldout(cct, 20) << __func__ << dendl;
- char buf = 'c';
+ static constexpr char buf = 'c';
// wake up "event_wait"
#ifdef _WIN32
int n = send(notify_send_fd, &buf, sizeof(buf), 0);
diff --git a/src/nvmeof/NVMeofGwMonitorClient.cc b/src/nvmeof/NVMeofGwMonitorClient.cc
index ce3328aec51..1b128055e08 100644
--- a/src/nvmeof/NVMeofGwMonitorClient.cc
+++ b/src/nvmeof/NVMeofGwMonitorClient.cc
@@ -42,7 +42,6 @@ NVMeofGwMonitorClient::NVMeofGwMonitorClient(int argc, const char **argv) :
monc{g_ceph_context, poolctx},
client_messenger(Messenger::create(g_ceph_context, "async", entity_name_t::CLIENT(-1), "client", getpid())),
objecter{g_ceph_context, client_messenger.get(), &monc, poolctx},
- client{client_messenger.get(), &monc, &objecter},
timer(g_ceph_context, beacon_lock),
orig_argc(argc),
orig_argv(argv)
@@ -134,7 +133,6 @@ int NVMeofGwMonitorClient::init()
// Initialize Messenger
client_messenger->add_dispatcher_tail(this);
client_messenger->add_dispatcher_head(&objecter);
- client_messenger->add_dispatcher_tail(&client);
client_messenger->start();
poolctx.start(2);
@@ -190,7 +188,6 @@ int NVMeofGwMonitorClient::init()
objecter.init();
objecter.enable_blocklist_events();
objecter.start();
- client.init();
timer.init();
{
@@ -302,8 +299,7 @@ void NVMeofGwMonitorClient::shutdown()
std::lock_guard bl(beacon_lock);
timer.shutdown();
}
- // client uses monc and objecter
- client.shutdown();
+
// Stop asio threads, so leftover events won't call into shut down
// monclient/objecter.
poolctx.finish();
diff --git a/src/nvmeof/NVMeofGwMonitorClient.h b/src/nvmeof/NVMeofGwMonitorClient.h
index 6dd167e4e58..e01c823afb5 100644
--- a/src/nvmeof/NVMeofGwMonitorClient.h
+++ b/src/nvmeof/NVMeofGwMonitorClient.h
@@ -21,7 +21,6 @@
#include "common/Timer.h"
#include "common/LogClient.h"
-#include "client/Client.h"
#include "mon/MonClient.h"
#include "osdc/Objecter.h"
#include "messages/MNVMeofGwMap.h"
@@ -58,7 +57,6 @@ protected:
MonClient monc;
std::unique_ptr<Messenger> client_messenger;
Objecter objecter;
- Client client;
std::map<NvmeGroupKey, NvmeGwMonClientStates> map;
ceph::mutex lock = ceph::make_mutex("NVMeofGw::lock");
// allow beacons to be sent independently of handle_nvmeof_gw_map
diff --git a/src/os/bluestore/bluestore_tool.cc b/src/os/bluestore/bluestore_tool.cc
index d62721b4366..16f1e6434e0 100644
--- a/src/os/bluestore/bluestore_tool.cc
+++ b/src/os/bluestore/bluestore_tool.cc
@@ -1136,7 +1136,7 @@ int main(int argc, char **argv)
}
return r;
}
- } else if (action == "free-dump" || action == "free-score" || action == "fragmentation") {
+ } else if (action == "free-dump" || action == "free-score" || action == "free-fragmentation") {
AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
ceph_assert(admin_socket);
std::string action_name = action == "free-dump" ? "dump" :
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index 307651fd627..cd7cad777bc 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -1278,7 +1278,6 @@ Scrub::schedule_result_t PG::start_scrubbing(
pg_cond.allow_deep =
!(get_osdmap()->test_flag(CEPH_OSDMAP_NODEEP_SCRUB) ||
pool.info.has_flag(pg_pool_t::FLAG_NODEEP_SCRUB));
- pg_cond.has_deep_errors = (info.stats.stats.sum.num_deep_scrub_errors > 0);
pg_cond.can_autorepair =
(cct->_conf->osd_scrub_auto_repair &&
get_pgbackend()->auto_repair_supported());
diff --git a/src/osd/scrubber/pg_scrubber.cc b/src/osd/scrubber/pg_scrubber.cc
index aa53df5ae8a..fea8c757040 100644
--- a/src/osd/scrubber/pg_scrubber.cc
+++ b/src/osd/scrubber/pg_scrubber.cc
@@ -2299,26 +2299,6 @@ Scrub::schedule_result_t PgScrubber::start_scrub_session(
}
}
- // restricting shallow scrubs of PGs that have deep errors:
- if (pg_cond.has_deep_errors && trgt.is_shallow()) {
- if (trgt.urgency() < urgency_t::operator_requested) {
- // if there are deep errors, we should have scheduled a deep scrub first.
- // If we are here trying to perform a shallow scrub, it means that for some
- // reason that deep scrub failed to be initiated. We will not try a shallow
- // scrub until this is solved.
- dout(10) << __func__ << ": Regular scrub skipped due to deep-scrub errors"
- << dendl;
- requeue_penalized(
- s_or_d, delay_both_targets_t::no, delay_cause_t::pg_state, clock_now);
- return schedule_result_t::target_specific_failure;
- } else {
- // we will honor the request anyway, but will report the issue
- m_osds->clog->error() << fmt::format(
- "osd.{} pg {} Regular scrub request, deep-scrub details will be lost",
- m_osds->whoami, m_pg_id);
- }
- }
-
// if only explicitly requested repairing is allowed - skip other types
// of scrubbing
if (osd_restrictions.allow_requested_repair_only &&
diff --git a/src/osd/scrubber_common.h b/src/osd/scrubber_common.h
index 809107e593b..2ab5570a715 100644
--- a/src/osd/scrubber_common.h
+++ b/src/osd/scrubber_common.h
@@ -109,7 +109,6 @@ static_assert(sizeof(Scrub::OSDRestrictions) <= sizeof(uint32_t));
struct ScrubPGPreconds {
bool allow_shallow{true};
bool allow_deep{true};
- bool has_deep_errors{false};
bool can_autorepair{false};
};
static_assert(sizeof(Scrub::ScrubPGPreconds) <= sizeof(uint32_t));
@@ -181,9 +180,8 @@ struct formatter<Scrub::ScrubPGPreconds> {
auto format(const Scrub::ScrubPGPreconds& conds, FormatContext& ctx) const
{
return fmt::format_to(
- ctx.out(), "allowed(shallow/deep):{:1}/{:1},deep-err:{:1},can-autorepair:{:1}",
- conds.allow_shallow, conds.allow_deep, conds.has_deep_errors,
- conds.can_autorepair);
+ ctx.out(), "allowed(shallow/deep):{:1}/{:1},can-autorepair:{:1}",
+ conds.allow_shallow, conds.allow_deep, conds.can_autorepair);
}
};
diff --git a/src/osdc/CMakeLists.txt b/src/osdc/CMakeLists.txt
index 205ad3d4f42..637ce327555 100644
--- a/src/osdc/CMakeLists.txt
+++ b/src/osdc/CMakeLists.txt
@@ -1,9 +1,8 @@
set(osdc_files
Filer.cc
ObjectCacher.cc
- Objecter.cc
- error_code.cc
- Striper.cc)
+ error_code.cc)
+# Objecter.cc and Striper.cc are part of libcommon
add_library(osdc STATIC ${osdc_files})
target_link_libraries(osdc ceph-common)
if(WITH_EVENTTRACE)
diff --git a/src/pybind/mgr/cephadm/services/nvmeof.py b/src/pybind/mgr/cephadm/services/nvmeof.py
index 0c9dc244c57..b3fd526815e 100644
--- a/src/pybind/mgr/cephadm/services/nvmeof.py
+++ b/src/pybind/mgr/cephadm/services/nvmeof.py
@@ -38,6 +38,8 @@ class NvmeofService(CephService):
spec = cast(NvmeofServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
nvmeof_gw_id = daemon_spec.daemon_id
host_ip = self.mgr.inventory.get_addr(daemon_spec.host)
+ map_addr = spec.addr_map.get(daemon_spec.host) if spec.addr_map else None
+ map_discovery_addr = spec.discovery_addr_map.get(daemon_spec.host) if spec.discovery_addr_map else None
keyring = self.get_keyring_with_caps(self.get_auth_entity(nvmeof_gw_id),
['mon', 'profile rbd',
@@ -47,8 +49,14 @@ class NvmeofService(CephService):
transport_tcp_options = json.dumps(spec.transport_tcp_options) if spec.transport_tcp_options else None
name = '{}.{}'.format(utils.name_to_config_section('nvmeof'), nvmeof_gw_id)
rados_id = name[len('client.'):] if name.startswith('client.') else name
- addr = spec.addr or host_ip
- discovery_addr = spec.discovery_addr or host_ip
+
+ # The address is first searched in the per node address map,
+ # then in the spec address configuration.
+ # If neither is defined, the host IP is used as a fallback.
+ addr = map_addr or spec.addr or host_ip
+ self.mgr.log.info(f"gateway address: {addr} from {map_addr=} {spec.addr=} {host_ip=}")
+ discovery_addr = map_discovery_addr or spec.discovery_addr or host_ip
+ self.mgr.log.info(f"discovery address: {discovery_addr} from {map_discovery_addr=} {spec.discovery_addr=} {host_ip=}")
context = {
'spec': spec,
'name': name,
diff --git a/src/pybind/mgr/dashboard/controllers/cluster_configuration.py b/src/pybind/mgr/dashboard/controllers/cluster_configuration.py
index da5be2cc81d..292f381d79f 100644
--- a/src/pybind/mgr/dashboard/controllers/cluster_configuration.py
+++ b/src/pybind/mgr/dashboard/controllers/cluster_configuration.py
@@ -1,12 +1,14 @@
# -*- coding: utf-8 -*-
+from typing import Optional
+
import cherrypy
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
-from . import APIDoc, APIRouter, EndpointDoc, RESTController
+from . import APIDoc, APIRouter, EndpointDoc, Param, RESTController
FILTER_SCHEMA = [{
"name": (str, 'Name of the config option'),
@@ -80,22 +82,33 @@ class ClusterConfiguration(RESTController):
return config_options
- def create(self, name, value):
+ @EndpointDoc("Create/Update Cluster Configuration",
+ parameters={
+ 'name': Param(str, 'Config option name'),
+ 'value': (
+ [
+ {
+ 'section': Param(
+ str, 'Section/Client where config needs to be updated'
+ ),
+ 'value': Param(str, 'Value of the config option')
+ }
+ ], 'Section and Value of the config option'
+ ),
+ 'force_update': Param(bool, 'Force update the config option', False, None)
+ }
+ )
+ def create(self, name, value, force_update: Optional[bool] = None):
# Check if config option is updateable at runtime
- self._updateable_at_runtime([name])
+ self._updateable_at_runtime([name], force_update)
- # Update config option
- avail_sections = ['global', 'mon', 'mgr', 'osd', 'mds', 'client']
+ for entry in value:
+ section = entry['section']
+ entry_value = entry['value']
- for section in avail_sections:
- for entry in value:
- if entry['value'] is None:
- break
-
- if entry['section'] == section:
- CephService.send_command('mon', 'config set', who=section, name=name,
- value=str(entry['value']))
- break
+ if entry_value not in (None, ''):
+ CephService.send_command('mon', 'config set', who=section, name=name,
+ value=str(entry_value))
else:
CephService.send_command('mon', 'config rm', who=section, name=name)
@@ -116,11 +129,24 @@ class ClusterConfiguration(RESTController):
raise cherrypy.HTTPError(404)
- def _updateable_at_runtime(self, config_option_names):
+ def _updateable_at_runtime(self, config_option_names, force_update=False):
not_updateable = []
for name in config_option_names:
config_option = self._get_config_option(name)
+
+            # allow rgw configuration to be edited by bypassing
+            # 'can_update_at_runtime', since the same can be done via the CLI.
+ if force_update and 'rgw' in name and not config_option['can_update_at_runtime']:
+ break
+
+ if force_update and 'rgw' not in name and not config_option['can_update_at_runtime']:
+ raise DashboardException(
+                    msg=f'Only configuration options containing "rgw" can be edited'
+                        f' at runtime with the force_update flag; unable to update "{name}"',
+ code='config_option_not_updatable_at_runtime',
+ component='cluster_configuration'
+ )
if not config_option['can_update_at_runtime']:
not_updateable.append(name)
diff --git a/src/pybind/mgr/dashboard/controllers/rgw_iam.py b/src/pybind/mgr/dashboard/controllers/rgw_iam.py
new file mode 100644
index 00000000000..458bbbb7321
--- /dev/null
+++ b/src/pybind/mgr/dashboard/controllers/rgw_iam.py
@@ -0,0 +1,52 @@
+from typing import Optional
+
+from ..security import Scope
+from ..services.rgw_iam import RgwAccounts
+from ..tools import str_to_bool
+from . import APIDoc, APIRouter, EndpointDoc, RESTController, allow_empty_body
+
+
+@APIRouter('rgw/accounts', Scope.RGW)
+@APIDoc("RGW User Accounts API", "RgwUserAccounts")
+class RgwUserAccountsController(RESTController):
+
+ @allow_empty_body
+ def create(self, account_name: Optional[str] = None,
+ account_id: Optional[str] = None, email: Optional[str] = None):
+ return RgwAccounts.create_account(account_name, account_id, email)
+
+ def list(self, detailed: bool = False):
+ detailed = str_to_bool(detailed)
+ return RgwAccounts.get_accounts(detailed)
+
+ @EndpointDoc("Get RGW Account by id",
+ parameters={'account_id': (str, 'Account id')})
+ def get(self, account_id: str):
+ return RgwAccounts.get_account(account_id)
+
+ @EndpointDoc("Delete RGW Account",
+ parameters={'account_id': (str, 'Account id')})
+ def delete(self, account_id):
+ return RgwAccounts.delete_account(account_id)
+
+ @EndpointDoc("Update RGW account info",
+ parameters={'account_id': (str, 'Account id')})
+ @allow_empty_body
+ def set(self, account_id: str, account_name: Optional[str] = None,
+ email: Optional[str] = None):
+ return RgwAccounts.modify_account(account_id, account_name, email)
+
+ @EndpointDoc("Set RGW Account/Bucket quota",
+                 parameters={'quota_type': (str, 'Quota type'),
+                             'account_id': (str, 'Account id'),
+                             'max_size': (str, 'Max size'),
+                             'max_objects': (str, 'Max objects')})
+ @RESTController.Resource(method='PUT', path='/quota')
+ @allow_empty_body
+ def set_quota(self, quota_type: str, account_id: str, max_size: str, max_objects: str):
+ return RgwAccounts.set_quota(quota_type, account_id, max_size, max_objects)
+
+ @EndpointDoc("Enable/Disable RGW Account/Bucket quota",
+                 parameters={'quota_type': (str, 'Quota type'),
+                             'account_id': (str, 'Account id'),
+                             'quota_status': (str, 'Quota status')})
+ @RESTController.Resource(method='PUT', path='/quota/status')
+ @allow_empty_body
+ def set_quota_status(self, quota_type: str, account_id: str, quota_status: str):
+ return RgwAccounts.set_quota_status(quota_type, account_id, quota_status)
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.e2e-spec.ts
index 983140a44c4..b71719c4396 100644
--- a/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.e2e-spec.ts
+++ b/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.e2e-spec.ts
@@ -30,7 +30,6 @@ describe('Configuration page', () => {
beforeEach(() => {
configuration.clearTableSearchInput();
- configuration.getTableCount('found').as('configFound');
});
after(() => {
@@ -50,6 +49,8 @@ describe('Configuration page', () => {
});
it('should verify modified filter is applied properly', () => {
+ configuration.clearFilter();
+ configuration.getTableCount('found').as('configFound');
configuration.filterTable('Modified', 'no');
configuration.getTableCount('found').as('unmodifiedConfigs');
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.po.ts b/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.po.ts
index 82e79a676ec..4132387d0f1 100644
--- a/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.po.ts
+++ b/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.po.ts
@@ -12,7 +12,6 @@ export class ConfigurationPageHelper extends PageHelper {
configClear(name: string) {
this.navigateTo();
const valList = ['global', 'mon', 'mgr', 'osd', 'mds', 'client']; // Editable values
-
this.getFirstTableCell(name).click();
cy.contains('button', 'Edit').click();
// Waits for the data to load
@@ -26,6 +25,8 @@ export class ConfigurationPageHelper extends PageHelper {
cy.wait(3 * 1000);
+ this.clearFilter();
+
// Enter config setting name into filter box
this.searchTable(name, 100);
@@ -49,6 +50,7 @@ export class ConfigurationPageHelper extends PageHelper {
* Ex: [global, '2'] is the global value with an input of 2
*/
edit(name: string, ...values: [string, string][]) {
+ this.clearFilter();
this.getFirstTableCell(name).click();
cy.contains('button', 'Edit').click();
@@ -78,4 +80,12 @@ export class ConfigurationPageHelper extends PageHelper {
cy.contains('[data-testid=config-details-table]', `${value[0]}\: ${value[1]}`);
});
}
+
+ clearFilter() {
+ cy.get('div.filter-tags') // Find the div with class filter-tags
+ .find('button.cds--btn.cds--btn--ghost') // Find the button with specific classes
+ .contains('Clear filters') // Ensure the button contains the text "Clear filters"
+ .should('be.visible') // Assert that the button is visible
+ .click();
+ }
}
diff --git a/src/pybind/mgr/dashboard/frontend/package-lock.json b/src/pybind/mgr/dashboard/frontend/package-lock.json
index a091d3577ec..7b3e9beeb9f 100644
--- a/src/pybind/mgr/dashboard/frontend/package-lock.json
+++ b/src/pybind/mgr/dashboard/frontend/package-lock.json
@@ -29,7 +29,7 @@
"@types/file-saver": "2.0.1",
"async-mutex": "0.2.4",
"bootstrap": "5.2.3",
- "carbon-components-angular": "5.48.0",
+ "carbon-components-angular": "5.56.2",
"chart.js": "4.4.0",
"chartjs-adapter-moment": "1.0.1",
"detect-browser": "5.2.0",
@@ -11208,9 +11208,9 @@
]
},
"node_modules/carbon-components-angular": {
- "version": "5.48.0",
- "resolved": "https://registry.npmjs.org/carbon-components-angular/-/carbon-components-angular-5.48.0.tgz",
- "integrity": "sha512-NZwpKBKgkgaR51S0Pm16MvashO4g8B+dzGeNE8l/RYRWXpVDLShR6YnBc80t2iMtTolxGiPwHmzpyMieVtIGLg==",
+ "version": "5.56.2",
+ "resolved": "https://registry.npmjs.org/carbon-components-angular/-/carbon-components-angular-5.56.2.tgz",
+ "integrity": "sha512-mU5Nep4HwwRQIrToasLwfR8cB1ph6Hn3jnLjDw0kWN+NJj5HEwPizOhbTPQbQxvBo0stro2fMWW0x3ge519Hgg==",
"hasInstallScript": true,
"dependencies": {
"@carbon/icon-helpers": "10.37.0",
diff --git a/src/pybind/mgr/dashboard/frontend/package.json b/src/pybind/mgr/dashboard/frontend/package.json
index 3348c5ff097..aa0035dcb78 100644
--- a/src/pybind/mgr/dashboard/frontend/package.json
+++ b/src/pybind/mgr/dashboard/frontend/package.json
@@ -63,7 +63,7 @@
"@types/file-saver": "2.0.1",
"async-mutex": "0.2.4",
"bootstrap": "5.2.3",
- "carbon-components-angular": "5.48.0",
+ "carbon-components-angular": "5.56.2",
"chart.js": "4.4.0",
"chartjs-adapter-moment": "1.0.1",
"detect-browser": "5.2.0",
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form-create-request.model.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form-create-request.model.ts
index bca65a887c5..b6d23e35c9d 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form-create-request.model.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form-create-request.model.ts
@@ -1,4 +1,5 @@
export class ConfigFormCreateRequestModel {
name: string;
value: Array<any> = [];
+ force_update: boolean = false;
}
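
Note: a sketch of the JSON body the form now produces for an option that needs a forced update; the option name and value are made up for illustration.

    const request: {
      name: string;
      value: { section: string; value: string }[];
      force_update?: boolean;
    } = {
      name: 'rgw_max_concurrent_requests',
      value: [{ section: 'global', value: '2048' }],
      force_update: true // only set when the form computed forceUpdate
    };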
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.html b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.html
index 741c18d52a6..a6775dcee17 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.html
@@ -150,7 +150,7 @@
</div>
<!-- Footer -->
<div class="card-footer">
- <cd-form-button-panel (submitActionEvent)="submit()"
+ <cd-form-button-panel (submitActionEvent)="forceUpdate ? openCriticalConfirmModal() : submit()"
[form]="configForm"
[submitText]="actionLabels.UPDATE"
wrappingClass="text-right"></cd-form-button-panel>
@@ -158,3 +158,4 @@
</div>
</form>
</div>
+
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.ts
index b6e9e700be4..118cb18430a 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration-form/configuration-form.component.ts
@@ -13,6 +13,10 @@ import { CdForm } from '~/app/shared/forms/cd-form';
import { CdFormGroup } from '~/app/shared/forms/cd-form-group';
import { NotificationService } from '~/app/shared/services/notification.service';
import { ConfigFormCreateRequestModel } from './configuration-form-create-request.model';
+import { CriticalConfirmationModalComponent } from '~/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component';
+import { ModalCdsService } from '~/app/shared/services/modal-cds.service';
+
+const RGW = 'rgw';
@Component({
selector: 'cd-configuration-form',
@@ -29,13 +33,15 @@ export class ConfigurationFormComponent extends CdForm implements OnInit {
maxValue: number;
patternHelpText: string;
availSections = ['global', 'mon', 'mgr', 'osd', 'mds', 'client'];
+ forceUpdate: boolean;
constructor(
public actionLabels: ActionLabelsI18n,
private route: ActivatedRoute,
private router: Router,
private configService: ConfigurationService,
- private notificationService: NotificationService
+ private notificationService: NotificationService,
+ private modalService: ModalCdsService
) {
super();
this.createForm();
@@ -95,7 +101,6 @@ export class ConfigurationFormComponent extends CdForm implements OnInit {
setResponse(response: ConfigFormModel) {
this.response = response;
const validators = this.getValidators(response);
-
this.configForm.get('name').setValue(response.name);
this.configForm.get('desc').setValue(response.desc);
this.configForm.get('long_desc').setValue(response.long_desc);
@@ -118,7 +123,7 @@ export class ConfigurationFormComponent extends CdForm implements OnInit {
this.configForm.get('values').get(value.section).setValue(sectionValue);
});
}
-
+ this.forceUpdate = !this.response.can_update_at_runtime && response.name.includes(RGW);
this.availSections.forEach((section) => {
this.configForm.get('values').get(section).setValidators(validators);
});
@@ -134,7 +139,7 @@ export class ConfigurationFormComponent extends CdForm implements OnInit {
this.availSections.forEach((section) => {
const sectionValue = this.configForm.getValue(section);
- if (sectionValue !== null && sectionValue !== '') {
+ if (sectionValue !== null) {
values.push({ section: section, value: sectionValue });
}
});
@@ -143,12 +148,28 @@ export class ConfigurationFormComponent extends CdForm implements OnInit {
const request = new ConfigFormCreateRequestModel();
request.name = this.configForm.getValue('name');
request.value = values;
+ if (this.forceUpdate) {
+ request.force_update = this.forceUpdate;
+ }
return request;
}
return null;
}
+ openCriticalConfirmModal() {
+ this.modalService.show(CriticalConfirmationModalComponent, {
+ buttonText: $localize`Force Edit`,
+ actionDescription: $localize`force edit`,
+ itemDescription: $localize`configuration`,
+ infoMessage: 'Updating this configuration might require restarting the client',
+ submitAction: () => {
+ this.modalService.dismissAll();
+ this.submit();
+ }
+ });
+ }
+
submit() {
const request = this.createRequest();
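
Note: a simplified rendering of the submit gating wired into the template above; the names mirror the component, but the wiring is reduced to a plain function for clarity.

    function onUpdateClicked(ctx: {
      forceUpdate: boolean;
      openCriticalConfirmModal: () => void;
      submit: () => void;
    }): void {
      if (ctx.forceUpdate) {
        // The modal's submitAction dismisses all modals, then calls submit().
        ctx.openCriticalConfirmModal();
      } else {
        ctx.submit();
      }
    }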
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.ts
index a57603d4c8a..7a446ce808c 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/configuration/configuration.component.ts
@@ -12,6 +12,8 @@ import { CdTableSelection } from '~/app/shared/models/cd-table-selection';
import { Permission } from '~/app/shared/models/permissions';
import { AuthStorageService } from '~/app/shared/services/auth-storage.service';
+const RGW = 'rgw';
+
@Component({
selector: 'cd-configuration',
templateUrl: './configuration.component.html',
@@ -26,10 +28,26 @@ export class ConfigurationComponent extends ListWithDetails implements OnInit {
selection = new CdTableSelection();
filters: CdTableColumn[] = [
{
+ name: $localize`Modified`,
+ prop: 'modified',
+ filterOptions: [$localize`yes`, $localize`no`],
+ filterInitValue: $localize`yes`,
+ filterPredicate: (row, value) => {
+ if (value === 'yes' && row.hasOwnProperty('value')) {
+ return true;
+ }
+
+ if (value === 'no' && !row.hasOwnProperty('value')) {
+ return true;
+ }
+
+ return false;
+ }
+ },
+ {
name: $localize`Level`,
prop: 'level',
filterOptions: ['basic', 'advanced', 'dev'],
- filterInitValue: 'basic',
filterPredicate: (row, value) => {
enum Level {
basic = 0,
@@ -60,22 +78,6 @@ export class ConfigurationComponent extends ListWithDetails implements OnInit {
}
return row.source.includes(value);
}
- },
- {
- name: $localize`Modified`,
- prop: 'modified',
- filterOptions: ['yes', 'no'],
- filterPredicate: (row, value) => {
- if (value === 'yes' && row.hasOwnProperty('value')) {
- return true;
- }
-
- if (value === 'no' && !row.hasOwnProperty('value')) {
- return true;
- }
-
- return false;
- }
}
];
@@ -143,7 +145,9 @@ export class ConfigurationComponent extends ListWithDetails implements OnInit {
if (selection.selected.length !== 1) {
return false;
}
-
- return selection.selected[0].can_update_at_runtime;
+ if ((this.selection.selected[0].name as string).includes(RGW)) {
+ return true;
+ }
+ return this.selection.selected[0].can_update_at_runtime;
}
}
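
Note: the 'Modified' filter predicate restated as a standalone function with sample rows; the row shape is an assumption based on how the predicate probes for a 'value' key.

    type ConfigRow = { name: string; value?: unknown };

    const modifiedPredicate = (row: ConfigRow, filter: 'yes' | 'no'): boolean =>
      filter === 'yes' ? 'value' in row : !('value' in row);

    // A row counts as modified only when it carries an explicit value.
    console.log(modifiedPredicate({ name: 'osd_max_backfills', value: 4 }, 'yes')); // true
    console.log(modifiedPredicate({ name: 'osd_max_backfills' }, 'no'));            // true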
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts
index 40c2e95d1e0..a07dcfcdd35 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts
@@ -29,6 +29,7 @@ import { PlacementPipe } from './placement.pipe';
import { ServiceFormComponent } from './service-form/service-form.component';
import { SettingsService } from '~/app/shared/api/settings.service';
import { ModalCdsService } from '~/app/shared/services/modal-cds.service';
+import { CellTemplate } from '~/app/shared/enum/cell-template.enum';
const BASE_URL = 'services';
@@ -176,6 +177,16 @@ export class ServicesComponent extends ListWithDetails implements OnChanges, OnI
prop: 'status.last_refresh',
pipe: this.relativeDatePipe,
flexGrow: 1
+ },
+ {
+ name: $localize`Ports`,
+ prop: 'status.ports',
+ flexGrow: 1,
+ cellTransformation: CellTemplate.map,
+ customTemplateConfig: {
+ undefined: '-',
+ '': '-'
+ }
}
];
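
Note: a hedged sketch of how the map cell template resolves the new Ports column; customTemplateConfig acts as a lookup table so missing or empty port lists render as '-'. The helper below is illustrative, not the dashboard's actual renderer.

    const portsConfig: Record<string, string> = { undefined: '-', '': '-' };

    function renderPorts(ports?: number[]): string {
      const key = ports === undefined ? 'undefined' : ports.join(', ');
      return portsConfig[key] ?? key; // fall through to the joined list
    }

    console.log(renderPorts());             // '-'
    console.log(renderPorts([]));           // '-'
    console.log(renderPorts([8080, 9000])); // '8080, 9000'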
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.html b/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.html
index 3a9ce12df9d..16963b06920 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.html
@@ -60,6 +60,7 @@
</cd-dashboard-area-chart>
<cd-dashboard-area-chart chartTitle="Latency"
dataUnits="ms"
+ decimals="2"
[labelsArray]="['GET', 'PUT']"
[dataArray]="[queriesResults.AVG_GET_LATENCY, queriesResults.AVG_PUT_LATENCY]">
</cd-dashboard-area-chart>
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/auth.module.ts b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/auth.module.ts
index c0e0517896c..f1f04f7c2f0 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/auth.module.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/auth.module.ts
@@ -17,13 +17,48 @@ import { UserFormComponent } from './user-form/user-form.component';
import { UserListComponent } from './user-list/user-list.component';
import { UserPasswordFormComponent } from './user-password-form/user-password-form.component';
import { UserTabsComponent } from './user-tabs/user-tabs.component';
-import { ButtonModule, GridModule, IconModule, InputModule } from 'carbon-components-angular';
+
+import {
+ ButtonModule,
+ CheckboxModule,
+ DatePickerModule,
+ GridModule,
+ IconModule,
+ IconService,
+ InputModule,
+ ModalModule,
+ NumberModule,
+ RadioModule,
+ SelectModule,
+ UIShellModule,
+ TimePickerModule,
+ ComboBoxModule
+} from 'carbon-components-angular';
+// Icons
+import ChevronDown from '@carbon/icons/es/chevron--down/16';
+import Close from '@carbon/icons/es/close/32';
+import AddFilled from '@carbon/icons/es/add--filled/32';
+import SubtractFilled from '@carbon/icons/es/subtract--filled/32';
+import Reset from '@carbon/icons/es/reset/32';
+import EyeIcon from '@carbon/icons/es/view/16';
@NgModule({
imports: [
CommonModule,
FormsModule,
ReactiveFormsModule,
SharedModule,
+ UIShellModule,
+ InputModule,
+ GridModule,
+ ButtonModule,
+ IconModule,
+ CheckboxModule,
+ RadioModule,
+ SelectModule,
+ NumberModule,
+ ModalModule,
+ DatePickerModule,
+ TimePickerModule,
NgbNavModule,
NgbPopoverModule,
NgxPipeFunctionModule,
@@ -31,8 +66,8 @@ import { ButtonModule, GridModule, IconModule, InputModule } from 'carbon-compon
NgbModule,
IconModule,
GridModule,
- ButtonModule,
- InputModule
+ InputModule,
+ ComboBoxModule
],
declarations: [
LoginComponent,
@@ -46,7 +81,11 @@ import { ButtonModule, GridModule, IconModule, InputModule } from 'carbon-compon
UserPasswordFormComponent
]
})
-export class AuthModule {}
+export class AuthModule {
+ constructor(private iconService: IconService) {
+ this.iconService.registerAll([ChevronDown, Close, AddFilled, SubtractFilled, Reset, EyeIcon]);
+ }
+}
const routes: Routes = [
{ path: '', redirectTo: 'users', pathMatch: 'full' },
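
Note: the Carbon icon registration pattern used above, reduced to a minimal module; icons are registered once at module construction so <svg cdsIcon="..."> can resolve them in any of the module's templates.

    import { NgModule } from '@angular/core';
    import { IconModule, IconService } from 'carbon-components-angular';
    import Close from '@carbon/icons/es/close/32';

    @NgModule({ imports: [IconModule] })
    export class ExampleModule {
      constructor(iconService: IconService) {
        iconService.registerAll([Close]); // one-time registration
      }
    }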
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form-role.model.ts b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form-role.model.ts
index 2d323b04ea5..abf529196f6 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form-role.model.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form-role.model.ts
@@ -4,11 +4,12 @@ export class UserFormRoleModel implements SelectOption {
name: string;
description: string;
selected = false;
- scopes_permissions: object;
- enabled = true;
-
- constructor(name: string, description: string) {
+ scopes_permissions?: object;
+ enabled: boolean;
+ content: string;
+ constructor(name: string, description: string, content: string) {
this.name = name;
this.description = description;
+ this.content = content;
}
}
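
Note: the updated constructor in use; the role name and description are made up, and `content` is the single display string the cds-combo-box renders per item.

    import { UserFormRoleModel } from './user-form-role.model';

    const role = new UserFormRoleModel(
      'rgw-manager',
      'allows full management of the rgw module',
      'rgw-manager, allows full management of the rgw module'
    );
    console.log(role.content); // what the combo-box displays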
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.html b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.html
index 4169d54c39f..d2e52158473 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.html
@@ -1,249 +1,205 @@
-<div class="cd-col-form"
- *cdFormLoading="loading">
- <form name="userForm"
- #formDir="ngForm"
- [formGroup]="userForm"
- novalidate>
- <div class="card">
+<div cdsCol
+ [columnNumbers]="{md: 4}">
+ <ng-container *cdFormLoading="loading">
+ <form #frm="ngForm"
+ #formDir="ngForm"
+ [formGroup]="userForm"
+ novalidate>
<div i18n="form title"
- class="card-header">{{ action | titlecase }} {{ resource | upperFirst }}</div>
- <div class="card-body">
-
- <!-- Username -->
- <div class="form-group row">
- <label class="cd-col-form-label"
- [ngClass]="{'required': mode !== userFormMode.editing}"
- for="username"
- i18n>Username</label>
- <div class="cd-col-form-input">
- <input class="form-control"
- type="text"
- placeholder="Username..."
- id="username"
- name="username"
- formControlName="username"
- autocomplete="off"
- autofocus
- ngbTooltip="White spaces at the beginning and end will be trimmed"
- i18n-ngbTooltip
- cdTrim>
- <span class="invalid-feedback"
- *ngIf="userForm.showError('username', formDir, 'required')"
- i18n>This field is required.</span>
- <span class="invalid-feedback"
- *ngIf="userForm.showError('username', formDir, 'notUnique')"
- i18n>The username already exists.</span>
- </div>
- </div>
-
- <!-- Password -->
- <div class="form-group row"
- *ngIf="!authStorageService.isSSO()">
- <label class="cd-col-form-label"
- for="password">
- <ng-container i18n>Password</ng-container>
- <cd-helper *ngIf="passwordPolicyHelpText.length > 0"
- class="text-pre-wrap"
- html="{{ passwordPolicyHelpText }}">
- </cd-helper>
- </label>
- <div class="cd-col-form-input">
- <div class="input-group">
- <input class="form-control"
- type="password"
- placeholder="Password..."
- id="password"
- name="password"
- autocomplete="new-password"
- formControlName="password">
- <button type="button"
- class="btn btn-light"
- cdPasswordButton="password">
- </button>
- </div>
- <div class="password-strength-level">
- <div class="{{ passwordStrengthLevelClass }}"
- data-toggle="tooltip"
- title="{{ passwordValuation }}">
- </div>
- </div>
- <span class="invalid-feedback"
- *ngIf="userForm.showError('password', formDir, 'required')"
- i18n>This field is required.</span>
- <span class="invalid-feedback"
- *ngIf="userForm.showError('password', formDir, 'passwordPolicy')">
- {{ passwordValuation }}
- </span>
- </div>
- </div>
-
- <!-- Confirm password -->
- <div class="form-group row"
- *ngIf="!authStorageService.isSSO()">
- <label i18n
- class="cd-col-form-label"
- for="confirmpassword">Confirm password</label>
- <div class="cd-col-form-input">
- <div class="input-group">
- <input class="form-control"
- type="password"
- placeholder="Confirm password..."
- id="confirmpassword"
- name="confirmpassword"
- autocomplete="new-password"
- formControlName="confirmpassword">
- <button type="button"
- class="btn btn-light"
- cdPasswordButton="confirmpassword">
- </button>
- <span class="invalid-feedback"
- *ngIf="userForm.showError('confirmpassword', formDir, 'match')"
- i18n>Password confirmation doesn't match the password.</span>
- </div>
- <span class="invalid-feedback"
- *ngIf="userForm.showError('confirmpassword', formDir, 'required')"
- i18n>This field is required.</span>
- </div>
- </div>
+ class="form-header">{{ action | titlecase }} {{ resource | upperFirst }}
+ </div>
+ <!-- UserName -->
+ <div class="form-item">
+ <cds-text-label labelInputID="username"
+ cdRequiredField="Username"
+ [invalid]="!userForm.controls.username.valid && userForm.controls.username.dirty"
+ [invalidText]="usernameError"
+ i18n>Username
+ <input cdsText
+ placeholder="Username..."
+ i18n-placeholder
+ id="username"
+ formControlName="username"
+ [invalid]="!userForm.controls.username.valid && userForm.controls.username.dirty"
+ autofocus
+ ngbTooltip="White spaces at the beginning and end will be trimmed"
+ i18n-ngbTooltip
+ cdTrim>
+ </cds-text-label>
+ <ng-template #usernameError>
+ <span *ngIf="userForm.showError('username', formDir, 'required')">
+ <ng-container i18n>
+ This field is required.
+ </ng-container>
+ </span>
+ <span *ngIf="userForm.showError('username', formDir, 'notUnique')">
+ <ng-container i18n>
+ The username already exists.
+ </ng-container>
+ </span>
+ </ng-template>
+ </div>
+ <!-- Password -->
+ <div class="form-item">
+ <cds-password-label labelInputID="password"
+ label="Password..."
+ [invalid]="!userForm.controls.password.valid && userForm.controls.password.dirty"
+ [invalidText]="passwordError"
+ i18n>Password
+ <cd-helper *ngIf="passwordPolicyHelpText.length > 0"
+ class="text-pre-wrap"
+ html="{{ passwordPolicyHelpText }}">
+ </cd-helper>
+ <input cdsPassword
+ type="password"
+ placeholder="Password..."
+ id="password"
+ autocomplete="new-password"
+ formControlName="password"
+ >
+ </cds-password-label>
+ <ng-template #passwordError>
+ <span class="invalid-feedback"
+ *ngIf="userForm.showError('password', formDir, 'match')"
+ i18n>Password confirmation doesn't match the password.
+ </span>
+ <span class="invalid-feedback"
+ *ngIf="userForm.showError('password', formDir, 'required')"
+ i18n>This field is required.</span>
+ <span class="invalid-feedback"
+ *ngIf="userForm.showError('password', formDir, 'passwordPolicy')">
+ {{ passwordValuation }}
+ </span>
+ </ng-template>
+ </div>
- <!-- Password expiration date -->
- <div class="form-group row"
- *ngIf="!authStorageService.isSSO()">
- <label class="cd-col-form-label"
- [ngClass]="{'required': pwdExpirationSettings.pwdExpirationSpan > 0}"
- for="pwdExpirationDate">
- <ng-container i18n>Password expiration date</ng-container>
- <cd-helper class="text-pre-wrap"
- *ngIf="pwdExpirationSettings.pwdExpirationSpan == 0">
- <p>
+ <!-- Confirm password -->
+ <div class="form-item">
+ <cds-password-label labelInputID="confirmpassword"
+ label="Confirm password..."
+ [invalid]="!userForm.controls.confirmpassword.valid && userForm.controls.confirmpassword.dirty"
+ [invalidText]="confirmpasswordError"
+ i18n> Confirm password
+ <input cdsPassword
+ type="password"
+ placeholder="Confirm password..."
+ id="confirmpassword"
+ formControlName="confirmpassword">
+ </cds-password-label>
+ <ng-template #confirmpasswordError>
+ <span class="invalid-feedback"
+ *ngIf="userForm.showError('confirmpassword', formDir, 'match')"
+ i18n>Password confirmation doesn't match the password.</span>
+ <span class="invalid-feedback"
+ *ngIf="userForm.showError('confirmpassword', formDir, 'required')"
+ i18n>This field is required.</span>
+ </ng-template>
+ </div>
+ <!-- Password expiration date -->
+ <div class="form-item"
+ *ngIf="!authStorageService.isSSO()">
+ <cds-text-label [ngClass]="{'required': pwdExpirationSettings.pwdExpirationSpan > 0}">{{'Password Expiration Date'}}
+ <cd-helper class="text-pre-wrap"
+ *ngIf="pwdExpirationSettings.pwdExpirationSpan == 0">
+ <span>
The Dashboard setting defining the expiration interval of
passwords is currently set to <strong>0</strong>. This means
if a date is set, the user password will only expire once.
- </p>
- <p>
- Consider configuring the Dashboard setting
- <a routerLink="/mgr-modules/edit/dashboard"
- class="alert-link">USER_PWD_EXPIRATION_SPAN</a>
- in order to let passwords expire periodically.
- </p>
- </cd-helper>
- </label>
- <div class="cd-col-form-input">
- <div class="input-group">
- <input class="form-control"
- i18n-placeholder
- placeholder="Password expiration date..."
- id="pwdExpirationDate"
- name="pwdExpirationDate"
- formControlName="pwdExpirationDate"
- [ngbPopover]="popContent"
- triggers="manual"
- #p="ngbPopover"
- (click)="p.open()"
- (keypress)="p.close()">
- <button type="button"
- class="btn btn-light"
- (click)="clearExpirationDate()">
- <i class="icon-prepend {{ icons.destroy }}"></i>
- </button>
- <span class="invalid-feedback"
- *ngIf="userForm.showError('pwdExpirationDate', formDir, 'required')"
- i18n>This field is required.</span>
- </div>
- </div>
- </div>
-
- <!-- Name -->
- <div class="form-group row">
- <label i18n
- class="cd-col-form-label"
- for="name">Full name</label>
- <div class="cd-col-form-input">
- <input class="form-control"
- type="text"
- placeholder="Full name..."
- id="name"
- name="name"
- formControlName="name">
- </div>
- </div>
-
- <!-- Email -->
- <div class="form-group row">
- <label i18n
- class="cd-col-form-label"
- for="email">Email</label>
- <div class="cd-col-form-input">
- <input class="form-control"
- type="email"
- placeholder="Email..."
- id="email"
- name="email"
- formControlName="email">
-
- <span class="invalid-feedback"
- *ngIf="userForm.showError('email', formDir, 'email')"
- i18n>Invalid email.</span>
- </div>
- </div>
-
- <!-- Roles -->
- <div class="form-group row">
- <label class="cd-col-form-label"
- i18n>Roles</label>
- <div class="cd-col-form-input">
- <span class="no-border full-height"
- *ngIf="allRoles">
- <cd-select-badges [data]="userForm.controls.roles.value"
- [options]="allRoles"
- [messages]="messages"></cd-select-badges>
</span>
- </div>
- </div>
-
- <!-- Enabled -->
- <div class="form-group row"
- *ngIf="!isCurrentUser()">
- <div class="cd-col-form-offset">
- <div class="custom-control custom-checkbox">
- <input type="checkbox"
- class="custom-control-input"
- id="enabled"
- name="enabled"
- formControlName="enabled">
- <label class="custom-control-label"
- for="enabled"
- i18n>Enabled</label>
- </div>
- </div>
- </div>
-
- <!-- Force change password -->
- <div class="form-group row"
- *ngIf="!isCurrentUser() && !authStorageService.isSSO()">
- <div class="cd-col-form-offset">
- <div class="custom-control custom-checkbox">
- <input type="checkbox"
- class="custom-control-input"
- id="pwdUpdateRequired"
- name="pwdUpdateRequired"
- formControlName="pwdUpdateRequired">
- <label class="custom-control-label"
- for="pwdUpdateRequired"
- i18n>User must change password at next logon</label>
- </div>
- </div>
- </div>
-
+ <span>Consider configuring the Dashboard setting
+ <a routerLink="/mgr-modules/edit/dashboard"
+ class="alert-link">USER_PWD_EXPIRATION_SPAN</a>
+ in order to let passwords expire periodically.
+ </span>
+ </cd-helper>
+ <cd-date-time-picker [control]="userForm.get('pwdExpirationDate')"
+ placeHolder="Password expiration date"
+ [hasTime]="false"
+ [defaultDate]="passwordexp"
+ i18n-name
+ >
+ </cd-date-time-picker>
+ </cds-text-label>
+ <span class="invalid-feedback"
+ *ngIf="userForm.showError('pwdExpirationDate', formDir, 'required')"
+ i18n>This field is required.
+ </span>
+ </div>
+ <!--Full Name-->
+ <div class="form-item">
+ <cds-text-label for="name"
+ i18n> Full Name
+ <input cdsText
+ type="text"
+ placeholder="Full Name..."
+ id="name"
+ formControlName="name">
+ </cds-text-label>
+ </div>
+ <!-- Email -->
+ <div class="form-item">
+ <cds-text-label for="email"
+ [invalid]="!userForm.controls.email.valid && userForm.controls.email.dirty"
+ [invalidText]="emailError"
+ i18n>
+ Email
+ <input cdsText
+ type="email"
+ placeholder="Email..."
+ id="email"
+ formControlName="email">
+ </cds-text-label>
+ <ng-template #emailError>
+ <span class="invalid-feedback"
+ *ngIf="userForm.showError('email', formDir, 'email')"
+ i18n>Invalid email.
+ </span>
+ </ng-template>
+ </div>
+ <!-- Roles -->
+ <div class="form-item"
+ *ngIf="allRoles">
+ <cds-combo-box label="Roles"
+ type="multi"
+ selectionFeedback="top-after-reopen"
+ for="roles"
+ formControlName="roles"
+ id="roles"
+ placeholder="Select Roles..."
+ i18n-placeholder
+ [appendInline]="true"
+ [items]="allRoles"
+ itemValueKey="name"
+ i18n>
+ <cds-dropdown-list></cds-dropdown-list>
+ </cds-combo-box>
+ </div>
+ <!-- Enabled -->
+ <div class="form-item"
+ *ngIf="!isCurrentUser()">
+ <cds-checkbox id="enabled"
+ formControlName="enabled"
+ name="enabled"
+ i18n>Enabled
+ </cds-checkbox>
</div>
- <div class="card-footer">
- <cd-form-button-panel (submitActionEvent)="submit()"
- [form]="userForm"
- [submitText]="(action | titlecase) + ' ' + (resource | upperFirst)"
- wrappingClass="text-right"></cd-form-button-panel>
+ <!-- Force change password -->
+ <div class="form-item"
+ *ngIf="!isCurrentUser() && !authStorageService.isSSO()">
+ <cds-checkbox id="pwdUpdateRequired"
+ formControlName="pwdUpdateRequired"
+ name="pwdUpdateRequired"
+ i18n>User must change password at next logon
+ </cds-checkbox>
</div>
- </div>
- </form>
+ <!--Submit Button-->
+ <cd-form-button-panel (submitActionEvent)="submit()"
+ [form]="userForm"
+ [submitText]="(action | titlecase) + ' ' + (resource | upperFirst)"
+ wrappingClass="text-right">
+ </cd-form-button-panel>
+ </form>
+ </ng-container>
</div>
<ng-template #removeSelfUserReadUpdatePermissionTpl>
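
Note: the Carbon validation pattern the rewritten form follows, condensed into one hypothetical field; [invalid] toggles error styling and [invalidText] points at an ng-template rendered only while the control is dirty and invalid. ReactiveFormsModule and the Carbon input module are assumed to be imported by the host module.

    import { Component } from '@angular/core';
    import { FormControl, Validators } from '@angular/forms';

    @Component({
      selector: 'cd-example-field',
      template: `
        <cds-text-label labelInputID="example"
                        [invalid]="ctrl.invalid && ctrl.dirty"
                        [invalidText]="err">Example
          <input cdsText id="example" [formControl]="ctrl">
        </cds-text-label>
        <ng-template #err><span i18n>This field is required.</span></ng-template>
      `
    })
    export class ExampleFieldComponent {
      ctrl = new FormControl('', Validators.required);
    }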
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.ts
index 7c02b86eae0..009d4c193e4 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/core/auth/user-form/user-form.component.ts
@@ -55,7 +55,8 @@ export class UserFormComponent extends CdForm implements OnInit {
icons = Icons;
pwdExpirationSettings: CdPwdExpirationSettings;
pwdExpirationFormat = 'YYYY-MM-DD';
-
+ selectedRole: string[];
+ passwordexp: boolean = false;
constructor(
private authService: AuthService,
private authStorageService: AuthStorageService,
@@ -91,6 +92,7 @@ export class UserFormComponent extends CdForm implements OnInit {
password: [
'',
[],
+
[
CdValidators.passwordPolicy(
this.userService,
@@ -105,7 +107,7 @@ export class UserFormComponent extends CdForm implements OnInit {
]
],
confirmpassword: [''],
- pwdExpirationDate: [undefined],
+ pwdExpirationDate: [''],
email: ['', [CdValidators.email]],
roles: [[]],
enabled: [true, [Validators.required]],
@@ -121,8 +123,10 @@ export class UserFormComponent extends CdForm implements OnInit {
if (this.router.url.startsWith('/user-management/users/edit')) {
this.mode = this.userFormMode.editing;
this.action = this.actionLabels.EDIT;
+ this.passwordexp = false;
} else {
this.action = this.actionLabels.CREATE;
+ this.passwordexp = true;
}
const observables = [this.roleService.list(), this.settingsService.getStandardSettings()];
@@ -130,6 +134,7 @@ export class UserFormComponent extends CdForm implements OnInit {
(result: [UserFormRoleModel[], CdPwdExpirationSettings]) => {
this.allRoles = _.map(result[0], (role) => {
role.enabled = true;
+ role.content = `${role.name}, ${role.description}`;
return role;
});
this.pwdExpirationSettings = new CdPwdExpirationSettings(result[1]);
@@ -158,7 +163,6 @@ export class UserFormComponent extends CdForm implements OnInit {
this.userService.get(username).subscribe((userFormModel: UserFormModel) => {
this.response = _.cloneDeep(userFormModel);
this.setResponse(userFormModel);
-
this.loadingReady();
});
});
@@ -173,20 +177,28 @@ export class UserFormComponent extends CdForm implements OnInit {
this.userForm.get(key).setValue(response[key])
);
const expirationDate = response['pwdExpirationDate'];
+
if (expirationDate) {
+ this.passwordexp = false;
this.userForm
.get('pwdExpirationDate')
.setValue(moment(expirationDate * 1000).format(this.pwdExpirationFormat));
+ } else {
+ this.passwordexp = true;
}
}
getRequest(): UserFormModel {
const userFormModel = new UserFormModel();
+
['username', 'password', 'name', 'email', 'roles', 'enabled', 'pwdUpdateRequired'].forEach(
- (key) => (userFormModel[key] = this.userForm.get(key).value)
+ (key) => {
+ userFormModel[key] = this.userForm.get(key).value;
+ }
);
const expirationDate = this.userForm.get('pwdExpirationDate').value;
if (expirationDate) {
+ this.passwordexp = false;
const mom = moment(expirationDate, this.pwdExpirationFormat);
if (
this.mode !== this.userFormMode.editing ||
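
Note: a sketch of the epoch/display conversion around pwdExpirationDate; the API stores seconds since the epoch while the form edits 'YYYY-MM-DD', matching pwdExpirationFormat above. Output is timezone-dependent.

    import moment from 'moment';

    const pwdExpirationFormat = 'YYYY-MM-DD';

    const fromApi = (epochSeconds: number): string =>
      moment(epochSeconds * 1000).format(pwdExpirationFormat);

    const toApi = (formValue: string): number =>
      moment(formValue, pwdExpirationFormat).unix();

    console.log(fromApi(1735689600)); // '2025-01-01' in UTC locales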
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/prometheus.service.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/prometheus.service.ts
index 317293be07c..fedc7b8de0f 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/prometheus.service.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/prometheus.service.ts
@@ -163,11 +163,9 @@ export class PrometheusService {
checkNan
) {
queriesResults[queryName].forEach((valueArray: any[]) => {
- valueArray.forEach((val, index) => {
- if (isNaN(parseFloat(val[1]))) {
- valueArray[index][1] = '0';
- }
- });
+ if (isNaN(parseFloat(valueArray[1]))) {
+ valueArray[1] = '0';
+ }
});
}
});
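
Note: the shape change behind the fix above: queriesResults[queryName] holds [timestamp, value] pairs, so the NaN guard now addresses index 1 of each pair instead of iterating a nested array. A standalone restatement:

    type Sample = [number, string]; // Prometheus returns values as strings

    function sanitize(samples: Sample[]): Sample[] {
      samples.forEach((sample) => {
        if (isNaN(parseFloat(sample[1]))) {
          sample[1] = '0'; // charting code expects a numeric string
        }
      });
      return samples;
    }

    console.log(sanitize([[1700000000, 'NaN'], [1700000015, '3.5']]));
    // [[1700000000, '0'], [1700000015, '3.5']]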
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.model.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.model.ts
index d3ebc5f37c6..0e1c0906f4a 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.model.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/config-option/config-option.model.ts
@@ -9,4 +9,5 @@ export class ConfigFormModel {
min: any;
max: any;
services: Array<string>;
+ can_update_at_runtime: boolean;
}
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.html b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.html
index 4b973187dbb..90e3a1d2be8 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/critical-confirmation-modal/critical-confirmation-modal.component.html
@@ -49,7 +49,7 @@
[form]="deletionForm"
[submitText]="(actionDescription | titlecase) + ' ' + itemDescription"
[modalForm]="true"
- [submitBtnType]="actionDescription === 'delete' || 'remove' ? 'danger' : 'primary'"></cd-form-button-panel>
+ [submitBtnType]="(actionDescription === 'delete' || actionDescription === 'remove') ? 'danger' : 'primary'"></cd-form-button-panel>
</cds-modal>
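
Note: why the old expression always yielded 'danger': `x === 'delete' || 'remove'` parses as `(x === 'delete') || 'remove'`, and the bare string 'remove' is truthy. A standalone demonstration:

    const actionDescription: string = 'force edit';

    const buggy = actionDescription === 'delete' || 'remove';                       // 'remove' (truthy)
    const fixed = actionDescription === 'delete' || actionDescription === 'remove'; // false

    console.log(buggy ? 'danger' : 'primary'); // 'danger' - wrong for non-destructive actions
    console.log(fixed ? 'danger' : 'primary'); // 'primary' - intended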
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.html b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.html
index 328e72cc595..ccdb70e39e4 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.html
@@ -1,22 +1,23 @@
-<div cdsCol
- class="form-item">
- <div cdsRow>
-<cds-date-picker [label]="name"
- i18n-label
- placeholder="NOT PROTECTED"
- formControlname="expiresAt"
- dateFormat="Y/m/d"
- [value]="date"
- (valueChange)="onModelChange($event)"
- [helperText]="helperText"
- [disabled]="disabled"
- cdsTheme="theme"></cds-date-picker>
-<cds-timepicker (valueChange)="onModelChange($event)"
- [(ngModel)]="time"
- label="Select a time"
- [disabled]="disabled"
- pattern="(1[012]|[0-9]):[0-5][0-9]"
- *ngIf="hasTime">
+<div cdsCol>
+ <div cdsRow
+ class="form-item-append">
+ <cds-text-label>{{name}}
+ <cds-date-picker i18n-label
+ [placeholder]="placeHolder"
+ formControlname="expiresAt"
+ dateFormat="Y/m/d"
+ [value]="date"
+ (valueChange)="onModelChange($event)"
+ [helperText]="helperText"
+ [disabled]="disabled"
+ cdsTheme="theme"></cds-date-picker>
+ </cds-text-label>
+ <cds-text-label *ngIf="hasTime">Select a time
+ <cds-timepicker (valueChange)="onModelChange($event)"
+ [(ngModel)]="time"
+ [disabled]="disabled"
+ pattern="(1[012]|[0-9]):[0-5][0-9]"
+ *ngIf="hasTime">
<cds-timepicker-select [(ngModel)]="ampm"
[disabled]="disabled"
(valueChange)="onModelChange($event)">
@@ -24,5 +25,7 @@
value="AM">AM</option>
<option value="PM">PM</option>
</cds-timepicker-select>
-</cds-timepicker></div>
+</cds-timepicker>
+</cds-text-label>
+</div>
</div>
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.scss b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.scss
index e69de29bb2d..39f2a7115a1 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.scss
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.scss
@@ -0,0 +1,3 @@
+.form-item-append {
+ margin-top: 1rem;
+}
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.ts
index 4841d2ed92d..3458d9171a7 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/date-time-picker/date-time-picker.component.ts
@@ -25,7 +25,8 @@ export class DateTimePickerComponent implements OnInit {
@Input()
helperText = '';
-
+ @Input()
+ placeHolder = '';
@Input()
disabled = false;
@@ -39,9 +40,8 @@ export class DateTimePickerComponent implements OnInit {
date: { [key: number]: string }[] = [];
time: string;
ampm: string;
-
sub: Subscription;
-
+ @Input() defaultDate: boolean = false;
constructor(private calendar: NgbCalendar) {}
ngOnInit() {
@@ -59,8 +59,12 @@ export class DateTimePickerComponent implements OnInit {
if (!mom.isValid() || mom.isBefore(moment())) {
mom = moment();
}
+ if (this.defaultDate) {
+ this.date.push([]);
+ } else {
+ this.date.push(mom.format('YYYY-MM-DD'));
+ }
- this.date.push(mom.format('YYYY-MM-DD'));
const time = mom.format('HH:mm:ss');
this.time = mom.format('hh:mm');
this.ampm = mom.hour() >= 12 ? 'PM' : 'AM';
@@ -76,7 +80,9 @@ export class DateTimePickerComponent implements OnInit {
onModelChange(event?: any) {
if (event) {
- if (Array.isArray(event)) {
+ if (event.length === 0) {
+ this.datetime.date = { date: null, time: null, ampm: null };
+ } else if (Array.isArray(event)) {
this.datetime.date = moment(event[0]).format('YYYY-MM-DD');
} else if (event && ['AM', 'PM'].includes(event)) {
const initialMoment = moment(this.datetime.time, 'hh:mm:ss A');
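
Note: a sketch of the event shapes onModelChange now distinguishes: an empty array (date cleared), a non-empty date array, an 'AM'/'PM' toggle, or a time string. The classifier is illustrative only.

    function classifyEvent(event: unknown): string {
      if (Array.isArray(event)) {
        return event.length === 0 ? 'cleared' : 'date-picked';
      }
      if (event === 'AM' || event === 'PM') {
        return 'meridiem';
      }
      return 'time';
    }

    console.log(classifyEvent([]));           // 'cleared'
    console.log(classifyEvent([new Date()])); // 'date-picked'
    console.log(classifyEvent('PM'));         // 'meridiem'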
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.html b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.html
index da1a4800f7f..81ad90914b6 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/helper/helper.component.html
@@ -5,7 +5,8 @@
<ng-content></ng-content>
</ng-template>
-<cds-tooltip [description]="popoverTpl">
+<cds-tooltip [description]="popoverTpl"
+ [autoAlign]="true">
<svg cdsIcon="information"
size="16"
title="info"></svg>
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/dashboard-promqls.enum.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/dashboard-promqls.enum.ts
index 361a404a11b..f1bbebed51d 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/dashboard-promqls.enum.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/dashboard-promqls.enum.ts
@@ -11,8 +11,8 @@ export enum Promqls {
export enum RgwPromqls {
RGW_REQUEST_PER_SECOND = 'sum(rate(ceph_rgw_req[1m]))',
- AVG_GET_LATENCY = 'sum(rate(ceph_rgw_op_get_obj_lat_sum[1m])) / sum(rate(ceph_rgw_op_get_obj_lat_count[1m]))',
- AVG_PUT_LATENCY = 'sum(rate(ceph_rgw_op_put_obj_lat_sum[1m])) / sum(rate(ceph_rgw_op_put_obj_lat_count[1m]))',
+ AVG_GET_LATENCY = '(sum(rate(ceph_rgw_op_get_obj_lat_sum[1m])) / sum(rate(ceph_rgw_op_get_obj_lat_count[1m]))) * 1000',
+ AVG_PUT_LATENCY = '(sum(rate(ceph_rgw_op_put_obj_lat_sum[1m])) / sum(rate(ceph_rgw_op_put_obj_lat_count[1m]))) * 1000',
GET_BANDWIDTH = 'sum(rate(ceph_rgw_op_get_obj_bytes[1m]))',
PUT_BANDWIDTH = 'sum(rate(ceph_rgw_op_put_obj_bytes[1m]))'
}
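
Note: the latency queries now scale seconds to milliseconds so the chart agrees with dataUnits="ms" and decimals="2" in the RGW overview template; the sample figure below is invented.

    const avgLatencySeconds = 0.0123; // sum(rate(..._sum)) / sum(rate(..._count))
    const avgLatencyMs = avgLatencySeconds * 1000;
    console.log(avgLatencyMs.toFixed(2)); // '12.30'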
diff --git a/src/pybind/mgr/dashboard/frontend/src/styles/themes/_content.scss b/src/pybind/mgr/dashboard/frontend/src/styles/themes/_content.scss
index 37f89aba17f..0725b63dbfd 100644
--- a/src/pybind/mgr/dashboard/frontend/src/styles/themes/_content.scss
+++ b/src/pybind/mgr/dashboard/frontend/src/styles/themes/_content.scss
@@ -33,6 +33,7 @@ $content-theme: map-merge(
text-primary: vv.$dark,
text-secondary: vv.$dark,
text-disabled: vv.$gray-500,
+ icon-secondary: vv.$gray-800,
field-01: colors.$gray-10,
interactive: vv.$primary
)
diff --git a/src/pybind/mgr/dashboard/openapi.yaml b/src/pybind/mgr/dashboard/openapi.yaml
index de836298064..de1b3e8b60e 100644
--- a/src/pybind/mgr/dashboard/openapi.yaml
+++ b/src/pybind/mgr/dashboard/openapi.yaml
@@ -3977,10 +3977,27 @@ paths:
application/json:
schema:
properties:
+ force_update:
+ description: Force update the config option
+ type: boolean
name:
+ description: Config option name
type: string
value:
- type: string
+ description: Section and Value of the config option
+ items:
+ properties:
+ section:
+ description: Section/Client where config needs to be updated
+ type: string
+ value:
+ description: Value of the config option
+ type: string
+ required:
+ - section
+ - value
+ type: object
+ type: array
required:
- name
- value
@@ -4007,6 +4024,7 @@ paths:
trace.
security:
- jwt: []
+ summary: Create/Update Cluster Configuration
tags:
- ClusterConfiguration
put:
@@ -10790,6 +10808,274 @@ paths:
- jwt: []
tags:
- Prometheus
+ /api/rgw/accounts:
+ get:
+ parameters:
+ - default: false
+ in: query
+ name: detailed
+ schema:
+ type: boolean
+ responses:
+ '200':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: OK
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ tags:
+ - RgwUserAccounts
+ post:
+ parameters: []
+ requestBody:
+ content:
+ application/json:
+ schema:
+ properties:
+ account_id:
+ type: string
+ account_name:
+ type: string
+ email:
+ type: string
+ type: object
+ responses:
+ '201':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Resource created.
+ '202':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Operation is still executing. Please check the task queue.
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ tags:
+ - RgwUserAccounts
+ /api/rgw/accounts/{account_id}:
+ delete:
+ parameters:
+ - description: Account id
+ in: path
+ name: account_id
+ required: true
+ schema:
+ type: string
+ responses:
+ '202':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Operation is still executing. Please check the task queue.
+ '204':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Resource deleted.
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ summary: Delete RGW Account
+ tags:
+ - RgwUserAccounts
+ get:
+ parameters:
+ - description: Account id
+ in: path
+ name: account_id
+ required: true
+ schema:
+ type: string
+ responses:
+ '200':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: OK
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ summary: Get RGW Account by id
+ tags:
+ - RgwUserAccounts
+ put:
+ parameters:
+ - description: Account id
+ in: path
+ name: account_id
+ required: true
+ schema:
+ type: string
+ requestBody:
+ content:
+ application/json:
+ schema:
+ properties:
+ account_name:
+ type: string
+ email:
+ type: string
+ type: object
+ responses:
+ '200':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Resource updated.
+ '202':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Operation is still executing. Please check the task queue.
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ summary: Update RGW account info
+ tags:
+ - RgwUserAccounts
+ /api/rgw/accounts/{account_id}/quota:
+ put:
+ parameters:
+ - description: Account id
+ in: path
+ name: account_id
+ required: true
+ schema:
+ type: string
+ requestBody:
+ content:
+ application/json:
+ schema:
+ properties:
+ max_objects:
+ type: string
+ max_size:
+ description: Max size
+ type: string
+ quota_type:
+ type: string
+ required:
+ - quota_type
+ - max_size
+ - max_objects
+ type: object
+ responses:
+ '200':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Resource updated.
+ '202':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Operation is still executing. Please check the task queue.
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ summary: Set RGW Account/Bucket quota
+ tags:
+ - RgwUserAccounts
+ /api/rgw/accounts/{account_id}/quota/status:
+ put:
+ parameters:
+ - description: Account id
+ in: path
+ name: account_id
+ required: true
+ schema:
+ type: string
+ requestBody:
+ content:
+ application/json:
+ schema:
+ properties:
+ quota_status:
+ type: string
+ quota_type:
+ type: string
+ required:
+ - quota_type
+ - quota_status
+ type: object
+ responses:
+ '200':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Resource updated.
+ '202':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: Operation is still executing. Please check the task queue.
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ summary: Enable/Disable RGW Account/Bucket quota
+ tags:
+ - RgwUserAccounts
/api/rgw/bucket:
get:
parameters:
@@ -15966,6 +16252,8 @@ tags:
name: RgwSite
- description: RGW User Management API
name: RgwUser
+- description: RGW User Accounts API
+ name: RgwUserAccounts
- description: '*No description available*'
name: RgwZone
- description: '*No description available*'
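
Note: hypothetical client calls against the new RgwUserAccounts endpoints; paths, bodies, and the versioned media type follow the spec above, while the JWT placeholder and function name are assumptions.

    async function demoAccountCalls(): Promise<void> {
      const headers = {
        Accept: 'application/vnd.ceph.api.v1.0+json',
        'Content-Type': 'application/json',
        Authorization: 'Bearer <jwt>'
      };

      // GET /api/rgw/accounts - list account IDs (detailed=true for full records)
      await fetch('/api/rgw/accounts?detailed=false', { headers });

      // PUT /api/rgw/accounts/{account_id}/quota - set an account-scope quota
      await fetch('/api/rgw/accounts/RGW11111111111111111/quota', {
        method: 'PUT',
        headers,
        body: JSON.stringify({ quota_type: 'account', max_size: '10GB', max_objects: '1000' })
      });
    }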
diff --git a/src/pybind/mgr/dashboard/services/rgw_iam.py b/src/pybind/mgr/dashboard/services/rgw_iam.py
index dbf00df25e0..5f490323441 100644
--- a/src/pybind/mgr/dashboard/services/rgw_iam.py
+++ b/src/pybind/mgr/dashboard/services/rgw_iam.py
@@ -1,12 +1,13 @@
from subprocess import SubprocessError
-from typing import List
+from typing import List, Optional
from .. import mgr
from ..exceptions import DashboardException
class RgwAccounts:
- def send_rgw_cmd(self, command: List[str]):
+ @classmethod
+ def send_rgw_cmd(cls, command: List[str]):
try:
exit_code, out, err = mgr.send_rgwadmin_command(command)
@@ -19,6 +20,78 @@ class RgwAccounts:
except SubprocessError as e:
raise DashboardException(e, component='rgw')
- def get_accounts(self):
+ @classmethod
+ def get_accounts(cls, detailed: bool = False):
+ """
+ Query account IDs, optionally returning full details.
+
+ :param detailed: Boolean to indicate if full account details are required.
+ """
get_accounts_cmd = ['account', 'list']
- return self.send_rgw_cmd(get_accounts_cmd)
+ account_list = cls.send_rgw_cmd(get_accounts_cmd)
+ detailed_account_list = []
+ if detailed:
+ for account in account_list:
+ detailed_account_list.append(cls.get_account(account))
+ return detailed_account_list
+ return account_list
+
+ @classmethod
+ def get_account(cls, account_id: str):
+ get_account_cmd = ['account', 'get', '--account-id', account_id]
+ return cls.send_rgw_cmd(get_account_cmd)
+
+ @classmethod
+ def create_account(cls, account_name: Optional[str] = None,
+ account_id: Optional[str] = None, email: Optional[str] = None):
+ create_accounts_cmd = ['account', 'create']
+
+ if account_name:
+ create_accounts_cmd += ['--account-name', account_name]
+
+ if account_id:
+ create_accounts_cmd += ['--account-id', account_id]
+
+ if email:
+ create_accounts_cmd += ['--email', email]
+
+ return cls.send_rgw_cmd(create_accounts_cmd)
+
+ @classmethod
+ def modify_account(cls, account_id: str, account_name: Optional[str] = None,
+ email: Optional[str] = None):
+ modify_accounts_cmd = ['account', 'modify', '--account-id', account_id]
+
+ if account_name:
+ modify_accounts_cmd += ['--account-name', account_name]
+
+ if email:
+ modify_accounts_cmd += ['--email', email]
+
+ return cls.send_rgw_cmd(modify_accounts_cmd)
+
+ @classmethod
+ def delete_account(cls, account_id: str):
+ modify_accounts_cmd = ['account', 'rm', '--account-id', account_id]
+
+ return cls.send_rgw_cmd(modify_accounts_cmd)
+
+ @classmethod
+ def get_account_stats(cls, account_id: str):
+ account_stats_cmd = ['account', 'stats', '--account-id', account_id]
+
+ return cls.send_rgw_cmd(account_stats_cmd)
+
+ @classmethod
+ def set_quota(cls, quota_type: str, account_id: str, max_size: str, max_objects: str):
+ set_quota_cmd = ['quota', 'set', '--quota-scope', quota_type, '--account-id', account_id,
+ '--max-size', max_size, '--max-objects', max_objects]
+
+ return cls.send_rgw_cmd(set_quota_cmd)
+
+ @classmethod
+ def set_quota_status(cls, quota_type: str, account_id: str, quota_status: str):
+ set_quota_status_cmd = ['quota', quota_status, '--quota-scope', quota_type,
+ '--account-id', account_id]
+
+ return cls.send_rgw_cmd(set_quota_status_cmd)
diff --git a/src/pybind/mgr/dashboard/tests/test_rgw_iam.py b/src/pybind/mgr/dashboard/tests/test_rgw_iam.py
new file mode 100644
index 00000000000..133b5a0d390
--- /dev/null
+++ b/src/pybind/mgr/dashboard/tests/test_rgw_iam.py
@@ -0,0 +1,292 @@
+from unittest import TestCase
+from unittest.mock import patch
+
+from ..controllers.rgw_iam import RgwUserAccountsController
+from ..services.rgw_iam import RgwAccounts
+
+
+class TestRgwUserAccountsController(TestCase):
+
+ @patch.object(RgwAccounts, 'create_account')
+ def test_create_account(self, mock_create_account):
+ mockReturnVal = {
+ "id": "RGW18661471562806836",
+ "tenant": "",
+ "name": "",
+ "email": "",
+ "quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "bucket_quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "max_users": 1000,
+ "max_roles": 1000,
+ "max_groups": 1000,
+ "max_buckets": 1000,
+ "max_access_keys": 4
+ }
+
+ # Mock the return value of the create_account method
+ mock_create_account.return_value = mockReturnVal
+
+ controller = RgwUserAccountsController()
+ result = controller.create(account_name='test_account', account_id='RGW18661471562806836',
+ email='test@example.com')
+
+ # Check if the account creation method was called with the correct parameters
+ mock_create_account.assert_called_with('test_account', 'RGW18661471562806836',
+ 'test@example.com')
+ # Check the returned result
+ self.assertEqual(result, mockReturnVal)
+
+ @patch.object(RgwAccounts, 'get_accounts')
+ def test_list_accounts(self, mock_get_accounts):
+ mock_return_value = [
+ "RGW22222222222222222",
+ "RGW59378973811515857",
+ "RGW11111111111111111"
+ ]
+
+ mock_get_accounts.return_value = mock_return_value
+
+ controller = RgwUserAccountsController()
+ result = controller.list(detailed=False)
+
+ mock_get_accounts.assert_called_with(False)
+
+ self.assertEqual(result, mock_return_value)
+
+ @patch.object(RgwAccounts, 'get_accounts')
+ def test_list_accounts_with_details(self, mock_get_accounts):
+ mock_return_value = [
+ {
+ "id": "RGW22222222222222222",
+ "tenant": "",
+ "name": "Account2",
+ "email": "account2@ceph.com",
+ "quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "bucket_quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "max_users": 1000,
+ "max_roles": 1000,
+ "max_groups": 1000,
+ "max_buckets": 1000,
+ "max_access_keys": 4
+ },
+ {
+ "id": "RGW11111111111111111",
+ "tenant": "",
+ "name": "Account1",
+ "email": "account1@ceph.com",
+ "quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "bucket_quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "max_users": 1000,
+ "max_roles": 1000,
+ "max_groups": 1000,
+ "max_buckets": 1000,
+ "max_access_keys": 4
+ }
+ ]
+
+ mock_get_accounts.return_value = mock_return_value
+
+ controller = RgwUserAccountsController()
+ result = controller.list(detailed=True)
+
+ mock_get_accounts.assert_called_with(True)
+
+ self.assertEqual(result, mock_return_value)
+
+ @patch.object(RgwAccounts, 'get_account')
+ def test_get_account(self, mock_get_account):
+ mock_return_value = {
+ "id": "RGW22222222222222222",
+ "tenant": "",
+ "name": "Account2",
+ "email": "account2@ceph.com",
+ "quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "bucket_quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "max_users": 1000,
+ "max_roles": 1000,
+ "max_groups": 1000,
+ "max_buckets": 1000,
+ "max_access_keys": 4
+ }
+ mock_get_account.return_value = mock_return_value
+
+ controller = RgwUserAccountsController()
+ result = controller.get(account_id='RGW22222222222222222')
+
+ mock_get_account.assert_called_with('RGW22222222222222222')
+
+ self.assertEqual(result, mock_return_value)
+
+ @patch.object(RgwAccounts, 'delete_account')
+ def test_delete_account(self, mock_delete_account):
+ mock_delete_account.return_value = None
+
+ controller = RgwUserAccountsController()
+ result = controller.delete(account_id='RGW59378973811515857')
+
+ mock_delete_account.assert_called_with('RGW59378973811515857')
+
+ self.assertEqual(result, None)
+
+ @patch.object(RgwAccounts, 'modify_account')
+ def test_set_account_name(self, mock_modify_account):
+ mock_return_value = {
+ "id": "RGW59378973811515857",
+ "tenant": "",
+ "name": "new_account_name",
+ "email": "new_email@example.com",
+ "quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "bucket_quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": -1
+ },
+ "max_users": 1000,
+ "max_roles": 1000,
+ "max_groups": 1000,
+ "max_buckets": 1000,
+ "max_access_keys": 4
+ }
+ mock_modify_account.return_value = mock_return_value
+
+ controller = RgwUserAccountsController()
+ result = controller.set(account_id='RGW59378973811515857', account_name='new_account_name',
+ email='new_email@example.com')
+
+ mock_modify_account.assert_called_with('RGW59378973811515857', 'new_account_name',
+ 'new_email@example.com')
+
+ self.assertEqual(result, mock_return_value)
+
+ @patch.object(RgwAccounts, 'set_quota')
+ def test_set_quota(self, mock_set_quota):
+ mock_return_value = {
+ "id": "RGW11111111111111111",
+ "tenant": "",
+ "name": "Account1",
+ "email": "account1@ceph.com",
+ "quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": 10737418240,
+ "max_size_kb": 10485760,
+ "max_objects": 1000000
+ },
+ "bucket_quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": 1000000
+ },
+ "max_users": 1000,
+ "max_roles": 1000,
+ "max_groups": 1000,
+ "max_buckets": 1000,
+ "max_access_keys": 4
+ }
+
+ mock_set_quota.return_value = mock_return_value
+
+ controller = RgwUserAccountsController()
+ result = controller.set_quota(quota_type='account', account_id='RGW11111111111111111',
+ max_size='10GB', max_objects='1000')
+
+ mock_set_quota.assert_called_with('account', 'RGW11111111111111111', '10GB', '1000')
+
+ self.assertEqual(result, mock_return_value)
+
+ @patch.object(RgwAccounts, 'set_quota_status')
+ def test_set_quota_status(self, mock_set_quota_status):
+ mock_return_value = {
+ "id": "RGW11111111111111111",
+ "tenant": "",
+ "name": "Account1",
+ "email": "account1@ceph.com",
+ "quota": {
+ "enabled": True,
+ "check_on_raw": False,
+ "max_size": 10737418240,
+ "max_size_kb": 10485760,
+ "max_objects": 1000000
+ },
+ "bucket_quota": {
+ "enabled": False,
+ "check_on_raw": False,
+ "max_size": -1,
+ "max_size_kb": 0,
+ "max_objects": 1000000
+ },
+ "max_users": 1000,
+ "max_roles": 1000,
+ "max_groups": 1000,
+ "max_buckets": 1000,
+ "max_access_keys": 4
+ }
+
+ mock_set_quota_status.return_value = mock_return_value
+
+ controller = RgwUserAccountsController()
+ result = controller.set_quota_status(quota_type='account',
+ account_id='RGW11111111111111111',
+ quota_status='enabled')
+
+ mock_set_quota_status.assert_called_with('account', 'RGW11111111111111111', 'enabled')
+
+ self.assertEqual(result, mock_return_value)
diff --git a/src/python-common/ceph/deployment/service_spec.py b/src/python-common/ceph/deployment/service_spec.py
index e779356ae0c..9fdecb30346 100644
--- a/src/python-common/ceph/deployment/service_spec.py
+++ b/src/python-common/ceph/deployment/service_spec.py
@@ -1330,6 +1330,7 @@ class NvmeofServiceSpec(ServiceSpec):
name: Optional[str] = None,
group: Optional[str] = None,
addr: Optional[str] = None,
+ addr_map: Optional[Dict[str, str]] = None,
port: Optional[int] = None,
pool: Optional[str] = None,
enable_auth: bool = False,
@@ -1380,6 +1381,7 @@ class NvmeofServiceSpec(ServiceSpec):
{"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7},
tgt_cmd_extra_args: Optional[str] = None,
discovery_addr: Optional[str] = None,
+ discovery_addr_map: Optional[Dict[str, str]] = None,
discovery_port: Optional[int] = None,
log_level: Optional[str] = 'INFO',
log_files_enabled: Optional[bool] = True,
@@ -1414,6 +1416,8 @@ class NvmeofServiceSpec(ServiceSpec):
self.pool = pool
#: ``addr`` address of the nvmeof gateway
self.addr = addr
+ #: ``addr_map`` per node address map of the nvmeof gateways
+ self.addr_map = addr_map
#: ``port`` port of the nvmeof gateway
self.port = port or 5500
#: ``name`` name of the nvmeof gateway
@@ -1512,6 +1516,8 @@ class NvmeofServiceSpec(ServiceSpec):
self.tgt_cmd_extra_args = tgt_cmd_extra_args
#: ``discovery_addr`` address of the discovery service
self.discovery_addr = discovery_addr
+ #: ``discovery_addr_map`` per node address map of the discovery service
+ self.discovery_addr_map = discovery_addr_map
#: ``discovery_port`` port of the discovery service
self.discovery_port = discovery_port or 8009
#: ``log_level`` the nvmeof gateway log level
@@ -1538,7 +1544,7 @@ class NvmeofServiceSpec(ServiceSpec):
self.monitor_client_log_file_dir = monitor_client_log_file_dir
def get_port_start(self) -> List[int]:
- return [5500, 4420, 8009]
+ return [self.port, 4420, self.discovery_port]
def validate(self) -> None:
# TODO: what other parameters should be validated as part of this function?
diff --git a/src/rgw/CMakeLists.txt b/src/rgw/CMakeLists.txt
index 96f3237b896..3727c525ce7 100644
--- a/src/rgw/CMakeLists.txt
+++ b/src/rgw/CMakeLists.txt
@@ -90,6 +90,7 @@ set(librgw_common_srcs
rgw_notify_event_type.cc
rgw_period_history.cc
rgw_period_puller.cc
+ rgw_s3_filter.cc
rgw_pubsub.cc
rgw_coroutine.cc
rgw_cr_rest.cc
diff --git a/src/rgw/driver/posix/notify.h b/src/rgw/driver/posix/notify.h
index 9f6088a893a..4463abc57c2 100644
--- a/src/rgw/driver/posix/notify.h
+++ b/src/rgw/driver/posix/notify.h
@@ -212,7 +212,7 @@ namespace file::listing {
void signal_shutdown() {
uint64_t msg{sig_shutdown};
- (void) write(efd, &msg, sizeof(uint64_t));
+ std::ignore = write(efd, &msg, sizeof(uint64_t));
}
friend class Notify;
diff --git a/src/rgw/driver/rados/rgw_sal_rados.cc b/src/rgw/driver/rados/rgw_sal_rados.cc
index 1eb4eecceea..88da446c3de 100644
--- a/src/rgw/driver/rados/rgw_sal_rados.cc
+++ b/src/rgw/driver/rados/rgw_sal_rados.cc
@@ -723,7 +723,7 @@ int RadosBucket::merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new
attrs[it.first] = it.second;
}
return store->ctl()->bucket->set_bucket_instance_attrs(get_info(),
- new_attrs, &get_info().objv_tracker, y, dpp);
+ attrs, &get_info().objv_tracker, y, dpp);
}
int RadosBucket::try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime, optional_yield y)
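The one-word fix above is easy to miss: the loop merges the caller's new attributes into the bucket's existing map, but the old code then persisted only new_attrs, discarding every attribute that was not part of the update. The same logic in runnable Python (attribute names hypothetical):

    existing = {'acl': 'private', 'tags': 'a=1'}   # attrs already on the bucket instance
    new_attrs = {'tags': 'a=2'}                    # attrs passed by the caller
    existing.update(new_attrs)                     # the merge loop in the C++ code
    # after the fix, the merged map is what gets stored ('acl' survives);
    # before, only new_attrs was stored, silently dropping 'acl'
    print(existing)   # {'acl': 'private', 'tags': 'a=2'}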
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index a73db542014..95a7af6a0fa 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -2553,35 +2553,104 @@ std::ostream& operator<<(std::ostream& out, const indented& h) {
return out << std::setw(h.w) << h.header << std::setw(1) << ' ';
}
-static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* driver, const RGWZone& zone,
+struct bucket_source_sync_info {
+ const RGWZone& _source;
+ std::string_view error;
+ std::map<int,std::string> shards_behind;
+ int total_shards;
+ std::string_view status;
+ rgw_bucket bucket_source;
+
+ bucket_source_sync_info(const RGWZone& source): _source(source) {}
+
+ void _print_plaintext(std::ostream& out, int width) const {
+ out << indented{width, "source zone"} << _source.id << " (" << _source.name << ")" << std::endl;
+ if (!error.empty()) {
+ out << indented{width} << error << std::endl;
+ return;
+ }
+ out << indented{width, "source bucket"} << bucket_source << std::endl;
+ if (!status.empty()) {
+ out << indented{width} << status << std::endl;
+ return;
+ }
+ out << indented{width} << "incremental sync on " << total_shards << " shards\n";
+ if (!shards_behind.empty()) {
+ out << indented{width} << "bucket is behind on " << shards_behind.size() << " shards\n";
+ set<int> shard_ids;
+ for (auto const& [shard_id, _] : shards_behind) {
+ shard_ids.insert(shard_id);
+ }
+ out << indented{width} << "behind shards: [" << shard_ids << "]\n";
+ } else {
+ out << indented{width} << "bucket is caught up with source\n";
+ }
+ }
+
+ void _print_formatter(std::ostream& out, Formatter* formatter) const {
+ formatter->open_object_section("source");
+ formatter->dump_string("source_zone", _source.id);
+ formatter->dump_string("source_name", _source.name);
+
+ if (!error.empty()) {
+ formatter->dump_string("error", error);
+ formatter->close_section();
+ formatter->flush(out);
+ return;
+ }
+
+ formatter->dump_string("source_bucket", bucket_source.name);
+ formatter->dump_string("source_bucket_id", bucket_source.bucket_id);
+
+ if (!status.empty()) {
+ formatter->dump_string("status", status);
+ formatter->close_section();
+ formatter->flush(out);
+ return;
+ }
+
+ formatter->dump_int("total_shards", total_shards);
+ formatter->open_array_section("behind_shards");
+ for (auto const& [id, marker] : shards_behind) {
+ formatter->open_object_section("shard");
+ formatter->dump_int("shard_id", id);
+ formatter->dump_string("shard_marker", marker);
+ formatter->close_section();
+ }
+ formatter->close_section();
+ formatter->close_section();
+ formatter->flush(out);
+ }
+};
+
+static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* driver,
+ const RGWZone& zone,
const RGWZone& source, RGWRESTConn *conn,
const RGWBucketInfo& bucket_info,
rgw_sync_bucket_pipe pipe,
- int width, std::ostream& out)
+ bucket_source_sync_info& source_sync_info)
{
- out << indented{width, "source zone"} << source.id << " (" << source.name << ")" << std::endl;
-
// syncing from this zone?
if (!driver->svc()->zone->zone_syncs_from(zone, source)) {
- out << indented{width} << "does not sync from zone\n";
+ source_sync_info.error = "does not sync from zone";
return 0;
}
if (!pipe.source.bucket) {
- ldpp_dout(dpp, -1) << __func__ << "(): missing source bucket" << dendl;
+ source_sync_info.error = fmt::format("{}(): missing source bucket", __func__);
return -EINVAL;
}
std::unique_ptr<rgw::sal::Bucket> source_bucket;
int r = init_bucket(*pipe.source.bucket, &source_bucket);
if (r < 0) {
- ldpp_dout(dpp, -1) << "failed to read source bucket info: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read source bucket info: {}", cpp_strerror(r));
return r;
}
- out << indented{width, "source bucket"} << source_bucket->get_key() << std::endl;
- pipe.source.bucket = source_bucket->get_key();
+ source_sync_info.bucket_source = source_bucket->get_key();
+ pipe.source.bucket = source_bucket->get_key();
pipe.dest.bucket = bucket_info.bucket;
uint64_t gen = 0;
@@ -2592,15 +2661,15 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
r = rgw_read_bucket_full_sync_status(dpp, driver, pipe, &full_status, null_yield);
if (r >= 0) {
if (full_status.state == BucketSyncState::Init) {
- out << indented{width} << "init: bucket sync has not started\n";
+ source_sync_info.status = "init: bucket sync has not started";
return 0;
}
if (full_status.state == BucketSyncState::Stopped) {
- out << indented{width} << "stopped: bucket sync is disabled\n";
+ source_sync_info.status = "stopped: bucket sync is disabled";
return 0;
}
if (full_status.state == BucketSyncState::Full) {
- out << indented{width} << "full sync: " << full_status.full.count << " objects completed\n";
+ source_sync_info.status = fmt::format("full sync: {} objects completed", full_status.full.count);
return 0;
}
gen = full_status.incremental_gen;
@@ -2609,46 +2678,45 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
// no full status, but there may be per-shard status from before upgrade
const auto& logs = source_bucket->get_info().layout.logs;
if (logs.empty()) {
- out << indented{width} << "init: bucket sync has not started\n";
+ source_sync_info.status = "init: bucket sync has not started";
return 0;
}
const auto& log = logs.front();
if (log.gen > 0) {
// this isn't the backward-compatible case, so we just haven't started yet
- out << indented{width} << "init: bucket sync has not started\n";
+ source_sync_info.status = "init: bucket sync has not started";
return 0;
}
if (log.layout.type != rgw::BucketLogType::InIndex) {
- ldpp_dout(dpp, -1) << "unrecognized log layout type " << log.layout.type << dendl;
+ source_sync_info.error = fmt::format("unrecognized log layout type {}", to_string(log.layout.type));
return -EINVAL;
}
// use shard count from our log gen=0
shard_status.resize(rgw::num_shards(log.layout.in_index));
} else {
- lderr(driver->ctx()) << "failed to read bucket full sync status: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read bucket full sync status: {}", cpp_strerror(r));
return r;
}
r = rgw_read_bucket_inc_sync_status(dpp, driver, pipe, gen, &shard_status);
if (r < 0) {
- lderr(driver->ctx()) << "failed to read bucket incremental sync status: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read bucket incremental sync status: {}", cpp_strerror(r));
return r;
}
const int total_shards = shard_status.size();
-
- out << indented{width} << "incremental sync on " << total_shards << " shards\n";
+ source_sync_info.total_shards = total_shards;
rgw_bucket_index_marker_info remote_info;
BucketIndexShardsManager remote_markers;
r = rgw_read_remote_bilog_info(dpp, conn, source_bucket->get_key(),
remote_info, remote_markers, null_yield);
if (r < 0) {
- ldpp_dout(dpp, -1) << "failed to read remote log: " << cpp_strerror(r) << dendl;
+ source_sync_info.error = fmt::format("failed to read remote log: {}", cpp_strerror(r));
return r;
}
- std::set<int> shards_behind;
+ std::map<int, std::string> shards_behind;
for (const auto& r : remote_markers.get()) {
auto shard_id = r.first;
if (r.second.empty()) {
@@ -2656,21 +2724,17 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra
}
if (shard_id >= total_shards) {
// unexpected shard id. we don't have status for it, so we're behind
- shards_behind.insert(shard_id);
+ shards_behind[shard_id] = r.second;
continue;
}
auto& m = shard_status[shard_id];
const auto pos = BucketIndexShardsManager::get_shard_marker(m.inc_marker.position);
if (pos < r.second) {
- shards_behind.insert(shard_id);
+ shards_behind[shard_id] = r.second;
}
}
- if (!shards_behind.empty()) {
- out << indented{width} << "bucket is behind on " << shards_behind.size() << " shards\n";
- out << indented{width} << "behind shards: [" << shards_behind << "]\n";
- } else {
- out << indented{width} << "bucket is caught up with source\n";
- }
+
+ source_sync_info.shards_behind = std::move(shards_behind);
return 0;
}
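Turning shards_behind from a set into a map keyed by shard id preserves the remote marker for each lagging shard, which the formatter output below needs. The selection logic, sketched in Python with hypothetical marker data (markers compare lexicographically):

    remote_markers = {0: '0000000010.10.5', 1: '', 2: '0000000003.3.2'}
    shard_status   = {0: '0000000010.10.5', 2: '0000000001.1.1'}
    total_shards   = 3

    shards_behind = {}
    for shard_id, remote_marker in remote_markers.items():
        if not remote_marker:
            continue                                  # remote log is empty for this shard
        if shard_id >= total_shards or shard_status.get(shard_id, '') < remote_marker:
            shards_behind[shard_id] = remote_marker   # lagging: remember the remote marker

    print(shards_behind)   # {2: '0000000003.3.2'}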
@@ -2881,25 +2945,82 @@ static int bucket_sync_info(rgw::sal::Driver* driver, const RGWBucketInfo& info,
return 0;
}
+struct bucket_sync_status_info {
+ std::vector<bucket_source_sync_info> source_status_info;
+ rgw::sal::Zone* _zone;
+ const rgw::sal::ZoneGroup* _zonegroup;
+ const RGWBucketInfo& _bucket_info;
+ const int width = 15;
+ std::string error;
+
+ bucket_sync_status_info(const RGWBucketInfo& bucket_info): _bucket_info(bucket_info) {}
+
+ void print(std::ostream& out, bool use_formatter, Formatter* formatter) {
+ if (use_formatter) {
+ _print_formatter(out, formatter);
+ } else {
+ _print_plaintext(out);
+ }
+ }
+
+ void _print_plaintext(std::ostream& out) {
+ out << indented{width, "realm"} << _zone->get_realm_id() << " (" << _zone->get_realm_name() << ")" << std::endl;
+ out << indented{width, "zonegroup"} << _zonegroup->get_id() << " (" << _zonegroup->get_name() << ")" << std::endl;
+ out << indented{width, "zone"} << _zone->get_id() << " (" << _zone->get_name() << ")" << std::endl;
+ out << indented{width, "bucket"} << _bucket_info.bucket << std::endl;
+ out << indented{width, "current time"}
+ << to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms) << "\n\n";
+
+ if (!error.empty()) {
+ out << error << std::endl;
+ }
+
+ for (const auto &info : source_status_info) {
+ info._print_plaintext(out, width);
+ }
+ }
+
+ void _print_formatter(std::ostream& out, Formatter* formatter) {
+ formatter->open_object_section("test");
+ formatter->dump_string("realm", _zone->get_realm_id());
+ formatter->dump_string("realm_name", _zone->get_realm_name());
+ formatter->dump_string("zonegroup", _zonegroup->get_id());
+ formatter->dump_string("zonegroup_name", _zonegroup->get_name());
+ formatter->dump_string("zone", _zone->get_id());
+ formatter->dump_string("zone_name", _zone->get_name());
+ formatter->dump_string("bucket", _bucket_info.bucket.name);
+ formatter->dump_string("bucket_instance_id", _bucket_info.bucket.bucket_id);
+ formatter->dump_string("current_time", to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms));
+
+ if (!error.empty()) {
+ formatter->dump_string("error", error);
+ }
+
+ formatter->open_array_section("sources");
+ for (const auto &info : source_status_info) {
+ info._print_formatter(out, formatter);
+ }
+ formatter->close_section();
+
+ formatter->close_section();
+ formatter->flush(out);
+ }
+
+};
+
static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& info,
const rgw_zone_id& source_zone_id,
std::optional<rgw_bucket>& opt_source_bucket,
- std::ostream& out)
+ bucket_sync_status_info& bucket_sync_info)
{
const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup();
rgw::sal::Zone* zone = driver->get_zone();
- constexpr int width = 15;
-
- out << indented{width, "realm"} << zone->get_realm_id() << " (" << zone->get_realm_name() << ")\n";
- out << indented{width, "zonegroup"} << zonegroup.get_id() << " (" << zonegroup.get_name() << ")\n";
- out << indented{width, "zone"} << zone->get_id() << " (" << zone->get_name() << ")\n";
- out << indented{width, "bucket"} << info.bucket << "\n";
- out << indented{width, "current time"}
- << to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms) << "\n\n";
+ bucket_sync_info._zone = zone;
+ bucket_sync_info._zonegroup = &zonegroup;
if (!static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) {
- out << "Sync is disabled for bucket " << info.bucket.name << " or bucket has no sync sources" << std::endl;
+ bucket_sync_info.error = fmt::format("Sync is disabled for bucket {} or bucket has no sync sources", info.bucket.name);
return 0;
}
@@ -2907,7 +3028,7 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf
int r = driver->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
if (r < 0) {
- ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
+ bucket_sync_info.error = fmt::format("ERROR: failed to get policy handler for bucket ({}): r={}: {}", info.bucket.name, r, cpp_strerror(-r));
return r;
}
@@ -2920,13 +3041,12 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf
std::unique_ptr<rgw::sal::Zone> zone;
int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(source_zone_id.id, &zone);
if (ret < 0) {
- ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup "
- << zonegroup.get_name() << dendl;
+ bucket_sync_info.error = fmt::format("Source zone not found in zonegroup {}", zonegroup.get_name());
return -EINVAL;
}
auto c = zone_conn_map.find(source_zone_id);
if (c == zone_conn_map.end()) {
- ldpp_dout(dpp(), -1) << "No connection to zone " << zone->get_name() << dendl;
+ bucket_sync_info.error = fmt::format("No connection to zone {}", zone->get_name());
return -EINVAL;
}
zone_ids.insert(source_zone_id);
@@ -2957,10 +3077,15 @@ static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& inf
continue;
}
if (pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) {
- bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), z->second,
+ bucket_source_sync_info source_sync_info(z->second);
+ auto ret = bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), z->second,
c->second,
info, pipe,
- width, out);
+ source_sync_info);
+
+ if (ret == 0) {
+ bucket_sync_info.source_status_info.emplace_back(std::move(source_sync_info));
+ }
}
}
}
@@ -3488,6 +3613,7 @@ int main(int argc, const char **argv)
list<string> tags_rm;
int placement_inline_data = true;
bool placement_inline_data_specified = false;
+ bool format_arg_passed = false;
int64_t max_objects = -1;
int64_t max_size = -1;
@@ -3867,6 +3993,7 @@ int main(int argc, const char **argv)
new_bucket_name = val;
} else if (ceph_argparse_witharg(args, i, &val, "--format", (char*)NULL)) {
format = val;
+ format_arg_passed = true;
} else if (ceph_argparse_witharg(args, i, &val, "--categories", (char*)NULL)) {
string cat_str = val;
list<string> cat_list;
@@ -7599,7 +7726,7 @@ int main(int argc, const char **argv)
<< "' to target bucket '" << configuration.target_bucket << "'" << std::endl;
return -ret;
}
- cerr << "flushed pending logging object '" << obj_name
+ cout << "flushed pending logging object '" << obj_name
<< "' to target bucket '" << configuration.target_bucket << "'" << std::endl;
return 0;
}
@@ -9901,7 +10028,18 @@ next:
if (ret < 0) {
return -ret;
}
- bucket_sync_status(driver, bucket->get_info(), source_zone, opt_source_bucket, std::cout);
+
+ auto bucket_info = bucket->get_info();
+ bucket_sync_status_info bucket_sync_info(bucket_info);
+
+ ret = bucket_sync_status(driver, bucket_info, source_zone,
+ opt_source_bucket, bucket_sync_info);
+
+ if (ret == 0) {
+ bucket_sync_info.print(std::cout, format_arg_passed, formatter.get());
+ } else {
+ cerr << "failed to get bucket sync status. see logs for more info" << std::endl;
+ }
}
if (opt_cmd == OPT::BUCKET_SYNC_MARKERS) {
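The net effect of the refactor is that `bucket sync status` can finally honor --format. An illustrative invocation with trimmed, hypothetical output; the field names come from _print_formatter above:

    $ radosgw-admin bucket sync status --bucket=mybucket --format=json
    {
        "realm": "...",
        "realm_name": "...",
        "zonegroup": "...",
        "zonegroup_name": "...",
        "zone": "...",
        "zone_name": "...",
        "bucket": "mybucket",
        "bucket_instance_id": "...",
        "current_time": "...",
        "sources": [
            {
                "source_zone": "...",
                "source_name": "...",
                "source_bucket": "mybucket",
                "source_bucket_id": "...",
                "total_shards": 11,
                "behind_shards": [
                    {"shard_id": 3, "shard_marker": "..."}
                ]
            }
        ]
    }

Without --format (format_arg_passed false), the plaintext path keeps the old indented layout.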
diff --git a/src/rgw/rgw_asio_frontend.cc b/src/rgw/rgw_asio_frontend.cc
index 30e1e77fd15..ebe42d96de9 100644
--- a/src/rgw/rgw_asio_frontend.cc
+++ b/src/rgw/rgw_asio_frontend.cc
@@ -1194,8 +1194,11 @@ void AsioFrontend::pause()
l.signal.emit(boost::asio::cancellation_type::terminal);
}
- // close all connections so outstanding requests fail quickly
- connections.close(ec);
+ const bool graceful_stop{ g_ceph_context->_conf->rgw_graceful_stop };
+ if (!graceful_stop) {
+ // close all connections so outstanding requests fail quickly
+ connections.close(ec);
+ }
// pause and wait until outstanding requests complete
pause_mutex.lock(ec);
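With rgw_graceful_stop set, pause() keeps existing connections open and relies on pause_mutex to let in-flight requests drain instead of failing them fast. A one-line way to opt in, using standard `ceph config` syntax (the option name is taken from the code above):

    ceph config set client.rgw rgw_graceful_stop true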
diff --git a/src/rgw/rgw_bucket_layout.cc b/src/rgw/rgw_bucket_layout.cc
index f8c485d89c3..1f8db396a0d 100644
--- a/src/rgw/rgw_bucket_layout.cc
+++ b/src/rgw/rgw_bucket_layout.cc
@@ -376,9 +376,9 @@ void encode_json_impl(const char *name, const BucketLayout& l, ceph::Formatter *
for (const auto& log : l.logs) {
encode_json("log", log, f);
}
+ f->close_section(); // logs[]
utime_t jt(l.judge_reshard_lock_time);
encode_json("judge_reshard_lock_time", jt, f);
- f->close_section(); // logs[]
f->close_section();
}
void decode_json_obj(BucketLayout& l, JSONObj *obj)
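The move closes the "logs" array before judge_reshard_lock_time is dumped, so the key lands as a sibling of the array rather than being emitted while the array section is still open. Schematically, the dump now looks like:

    { ..., "logs": [ ... ], "judge_reshard_lock_time": "..." }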
diff --git a/src/rgw/rgw_bucket_logging.cc b/src/rgw/rgw_bucket_logging.cc
index 87a242d9952..d24a53024f1 100644
--- a/src/rgw/rgw_bucket_logging.cc
+++ b/src/rgw/rgw_bucket_logging.cc
@@ -31,6 +31,9 @@ bool configuration::decode_xml(XMLObj* obj) {
logging_type = LoggingType::Standard;
} else if (type == "Journal") {
logging_type = LoggingType::Journal;
+ if (iter = o->find("Filter"); XMLObj* const filter_o = iter.get_next()) {
+ RGWXMLDecoder::decode_xml("S3Key", key_filter, filter_o);
+ }
} else {
// we don't allow for type "Any" in the configuration
throw RGWXMLDecoder::err("invalid bucket logging record type: '" + type + "'");
@@ -73,6 +76,11 @@ void configuration::dump_xml(Formatter *f) const {
break;
case LoggingType::Journal:
::encode_xml("LoggingType", "Journal", f);
+ if (key_filter.has_content()) {
+ f->open_object_section("Filter");
+ ::encode_xml("S3Key", key_filter, f);
+ f->close_section(); // Filter
+ }
break;
case LoggingType::Any:
::encode_xml("LoggingType", "", f);
@@ -118,6 +126,10 @@ void configuration::dump(Formatter *f) const {
break;
case LoggingType::Journal:
encode_json("loggingType", "Journal", f);
+ if (key_filter.has_content()) {
+ Formatter::ObjectSection s(*f, "Filter");
+ encode_json("S3Key", key_filter, f);
+ }
break;
case LoggingType::Any:
encode_json("loggingType", "", f);
@@ -526,6 +538,11 @@ int log_record(rgw::sal::Driver* driver,
if (type != LoggingType::Any && configuration.logging_type != type) {
return 0;
}
+ if (configuration.key_filter.has_content()) {
+ if (!match(configuration.key_filter, obj->get_name())) {
+ return 0;
+ }
+ }
ldpp_dout(dpp, 20) << "INFO: found matching logging configuration of bucket '" << s->bucket->get_name() <<
"' configuration: " << configuration.to_json_str() << dendl;
if (auto ret = log_record(driver, obj, s, op_name, etag, size, configuration, dpp, y, async_completion, log_source_bucket); ret < 0) {
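Journal-mode configurations can now carry an S3Key filter, and log_record() returns early, before anything is written, when the object name does not match. An illustrative PutBucketLogging body using the new element (bucket names and rule values hypothetical; the layout follows the schema comment in rgw_bucket_logging.h below):

    <BucketLoggingStatus>
      <LoggingEnabled>
        <TargetBucket>log-bucket</TargetBucket>
        <TargetPrefix>journal/</TargetPrefix>
        <LoggingType>Journal</LoggingType>
        <Filter>
          <S3Key>
            <FilterRule><Name>prefix</Name><Value>photos/</Value></FilterRule>
            <FilterRule><Name>suffix</Name><Value>.jpg</Value></FilterRule>
          </S3Key>
        </Filter>
      </LoggingEnabled>
    </BucketLoggingStatus>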
diff --git a/src/rgw/rgw_bucket_logging.h b/src/rgw/rgw_bucket_logging.h
index d380cf0ef09..d4877bafb0f 100644
--- a/src/rgw/rgw_bucket_logging.h
+++ b/src/rgw/rgw_bucket_logging.h
@@ -10,6 +10,7 @@
#include "include/buffer.h"
#include "include/encoding.h"
#include "common/async/yield_context.h"
+#include "rgw_s3_filter.h"
class XMLObj;
namespace ceph { class Formatter; }
@@ -48,6 +49,14 @@ namespace rgw::bucketlogging {
<LoggingType>Standard|Journal</LoggingType> <!-- Ceph extension -->
<ObjectRollTime>integer</ObjectRollTime> <!-- Ceph extension -->
<RecordsBatchSize>integer</RecordsBatchSize> <!-- Ceph extension -->
+ <Filter>
+ <S3Key>
+ <FilterRule>
+ <Name>suffix/prefix/regex</Name>
+ <Value></Value>
+ </FilterRule>
+ </S3Key>
+ </Filter>
</LoggingEnabled>
</BucketLoggingStatus>
*/
@@ -78,6 +87,7 @@ struct configuration {
PartitionDateSource date_source = PartitionDateSource::DeliveryTime;
// EventTime: use only year, month, and day. The hour, minutes and seconds are set to 00 in the key
// DeliveryTime: the time the log object was created
+ rgw_s3_key_filter key_filter;
bool decode_xml(XMLObj *obj);
void dump_xml(Formatter *f) const;
void dump(Formatter *f) const; // json
@@ -92,6 +102,9 @@ struct configuration {
encode(static_cast<int>(logging_type), bl);
encode(records_batch_size, bl);
encode(static_cast<int>(date_source), bl);
+ if (logging_type == LoggingType::Journal) {
+ encode(key_filter, bl);
+ }
ENCODE_FINISH(bl);
}
@@ -108,6 +121,9 @@ struct configuration {
decode(records_batch_size, bl);
decode(type, bl);
date_source = static_cast<PartitionDateSource>(type);
+ if (logging_type == LoggingType::Journal) {
+ decode(key_filter, bl);
+ }
DECODE_FINISH(bl);
}
};
diff --git a/src/rgw/rgw_file_int.h b/src/rgw/rgw_file_int.h
index 0a1db645207..84eff1e252e 100644
--- a/src/rgw/rgw_file_int.h
+++ b/src/rgw/rgw_file_int.h
@@ -2298,6 +2298,8 @@ public:
std::string uri;
std::map<std::string, buffer::list> attrs;
RGWLibFS::BucketStats& bs;
+ real_time ctime;
+ bool name_matched = false;
RGWStatBucketRequest(CephContext* _cct, std::unique_ptr<rgw::sal::User> _user,
const std::string& _path,
@@ -2312,9 +2314,7 @@ public:
return (iter != attrs.end()) ? &(iter->second) : nullptr;
}
- real_time get_ctime() const {
- return bucket->get_creation_time();
- }
+ real_time get_ctime() { return ctime; }
bool only_bucket() override { return false; }
@@ -2342,22 +2342,26 @@ public:
return 0;
}
- virtual int get_params() {
- return 0;
+ int get_params(optional_yield) override { return 0; }
+
+ void complete() override {
+ // get_state() will no longer be there after execute_req()
+ // so save what we need from get_state()->bucket
+ ctime = get_state()->bucket->get_creation_time();
+ name_matched = get_state()->bucket->get_name().length() > 0;
+
+ RGWOp::complete();
}
void send_response() override {
- bucket->get_creation_time() = get_state()->bucket->get_info().creation_time;
bs.size = stats.size;
bs.size_rounded = stats.size_rounded;
- bs.creation_time = bucket->get_creation_time();
+ bs.creation_time = get_state()->bucket->get_info().creation_time;
bs.num_entries = stats.num_objects;
std::swap(attrs, get_state()->bucket_attrs);
}
- bool matched() {
- return (bucket->get_name().length() > 0);
- }
+ bool matched() { return name_matched; }
}; /* RGWStatBucketRequest */
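The reworked request follows the lifetime rule spelled out in the comment above: complete() is the last point at which get_state() is valid, so everything matched() and later callers need is copied out there. Reduced to a Python sketch:

    class StatBucketRequest:
        # mirror of the C++ pattern: snapshot request state before teardown
        def complete(self, state):
            self.ctime = state.bucket.creation_time   # copy while 'state' lives
            self.name_matched = bool(state.bucket.name)

        def matched(self):
            return self.name_matched                  # no live 'state' needed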
diff --git a/src/rgw/rgw_iam_policy.cc b/src/rgw/rgw_iam_policy.cc
index ce76ed4c3c3..2a5c9cd313e 100644
--- a/src/rgw/rgw_iam_policy.cc
+++ b/src/rgw/rgw_iam_policy.cc
@@ -113,6 +113,7 @@ static const actpair actpairs[] =
{ "s3:PutBucketCORS", s3PutBucketCORS },
{ "s3:PutBucketEncryption", s3PutBucketEncryption },
{ "s3:PutBucketLogging", s3PutBucketLogging },
+ { "s3:PostBucketLogging", s3PostBucketLogging },
{ "s3:PutBucketNotification", s3PutBucketNotification },
{ "s3:PutBucketOwnershipControls", s3PutBucketOwnershipControls },
{ "s3:PutBucketPolicy", s3PutBucketPolicy },
@@ -1406,6 +1407,9 @@ const char* action_bit_string(uint64_t action) {
case s3PutBucketLogging:
return "s3:PutBucketLogging";
+ case s3PostBucketLogging:
+ return "s3:PostBucketLogging";
+
case s3GetBucketTagging:
return "s3:GetBucketTagging";
diff --git a/src/rgw/rgw_iam_policy.h b/src/rgw/rgw_iam_policy.h
index 1494cbf0b81..0476926143f 100644
--- a/src/rgw/rgw_iam_policy.h
+++ b/src/rgw/rgw_iam_policy.h
@@ -81,6 +81,7 @@ enum {
s3PutBucketNotification,
s3GetBucketLogging,
s3PutBucketLogging,
+ s3PostBucketLogging,
s3GetBucketTagging,
s3PutBucketTagging,
s3GetBucketWebsite,
@@ -298,6 +299,7 @@ inline int op_to_perm(std::uint64_t op) {
case s3PutBucketCORS:
case s3PutBucketEncryption:
case s3PutBucketLogging:
+ case s3PostBucketLogging:
case s3PutBucketNotification:
case s3PutBucketPolicy:
case s3PutBucketRequestPayment:
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 8378d9ef22e..ee42ab647a1 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -1350,9 +1350,9 @@ void RGWDeleteBucketTags::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
- rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_TAGS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ op_ret = s->bucket->put_info(this, false, real_time(), y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
<< s->bucket->get_name()
@@ -3135,17 +3135,19 @@ static int load_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y,
void RGWStatBucket::execute(optional_yield y)
{
- if (!s->bucket_exists) {
- op_ret = -ERR_NO_SUCH_BUCKET;
+ op_ret = get_params(y);
+ if (op_ret < 0) {
return;
}
- op_ret = driver->load_bucket(this, s->bucket->get_key(), &bucket, y);
- if (op_ret) {
+ if (!s->bucket_exists) {
+ op_ret = -ERR_NO_SUCH_BUCKET;
return;
}
- op_ret = load_bucket_stats(this, y, *s->bucket, stats);
+ if (report_stats) {
+ op_ret = load_bucket_stats(this, y, *s->bucket, stats);
+ }
}
int RGWListBucket::verify_permission(optional_yield y)
@@ -6384,9 +6386,9 @@ void RGWDeleteCORS::execute(optional_yield y)
return op_ret;
}
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_CORS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
if (op_ret < 0) {
ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket->get_name()
<< " returned err=" << op_ret << dendl;
@@ -7040,17 +7042,30 @@ void RGWAbortMultipart::execute(optional_yield y)
return;
upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id);
+ meta_obj = upload->get_meta_obj();
+ meta_obj->set_in_extra_data(true);
+ meta_obj->get_obj_attrs(s->yield, this);
+
jspan_context trace_ctx(false, false);
if (tracing::rgw::tracer.is_enabled()) {
// read meta object attributes for trace info
- meta_obj = upload->get_meta_obj();
- meta_obj->set_in_extra_data(true);
- meta_obj->get_obj_attrs(s->yield, this);
extract_span_context(meta_obj->get_attrs(), trace_ctx);
}
multipart_trace = tracing::rgw::tracer.add_span(name(), trace_ctx);
+ int max_lock_secs_mp =
+ s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
+ utime_t dur(max_lock_secs_mp, 0);
+ auto serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart");
+ op_ret = serializer->try_lock(this, dur, y);
+ if (op_ret < 0) {
+ if (op_ret == -ENOENT) {
+ op_ret = -ERR_NO_SUCH_UPLOAD;
+ }
+ return;
+ }
op_ret = upload->abort(this, s->cct, y);
+ serializer->unlock();
}
int RGWListMultipart::verify_permission(optional_yield y)
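AbortMultipart now takes the multipart meta object's lock under the same lock name CompleteMultipart uses, so abort and complete on one upload exclude each other; -ENOENT from try_lock is mapped to ERR_NO_SUCH_UPLOAD. The ordering this enforces, as a toy Python equivalent:

    import threading

    mp_lock = threading.Lock()   # stands in for the meta object's serializer

    def abort_multipart(upload, timeout=600):   # rgw_mp_lock_max_time analogue
        if not mp_lock.acquire(timeout=timeout):
            raise RuntimeError('upload is locked by a concurrent operation')
        try:
            upload.abort()       # runs only while holding the shared lock
        finally:
            mp_lock.release()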
@@ -7354,6 +7369,12 @@ void RGWDeleteMultiObj::execute(optional_yield y)
return;
}
+ if (multi_delete->objects.empty()) {
+ s->err.message = "Missing required element Object";
+ op_ret = -ERR_MALFORMED_XML;
+ return;
+ }
+
constexpr int DEFAULT_MAX_NUM = 1000;
int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num;
if (max_num < 0) {
@@ -8509,9 +8530,9 @@ void RGWDeleteBucketPolicy::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_IAM_POLICY);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
return op_ret;
}, y);
}
@@ -9029,9 +9050,9 @@ void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] {
- rgw::sal::Attrs attrs(s->bucket_attrs);
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_PUBLIC_ACCESS);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
+ op_ret = s->bucket->put_info(this, false, real_time(), s->yield);
return op_ret;
}, y);
}
@@ -9140,10 +9161,10 @@ void RGWDeleteBucketEncryption::execute(optional_yield y)
}
op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
- rgw::sal::Attrs attrs = s->bucket->get_attrs();
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_POLICY);
attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_KEY_ID);
- op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
+ op_ret = s->bucket->put_info(this, false, real_time(), y);
return op_ret;
}, y);
}
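Every Delete* hunk above makes the same substitution, and for the same reason: merge_and_store_attrs merges the passed map over the existing attributes, so an attribute erased from a copy was never actually removed. The ops now erase on the bucket's live attrs reference and persist through put_info, which writes the attribute set as-is. Schematically, in Python:

    def delete_bucket_attr(bucket, name):
        # erasing from a copy and re-merging can never remove an attribute,
        # since a merge only adds or overwrites keys; so: mutate the live
        # map, then write bucket info and attrs back verbatim
        bucket.attrs.pop(name, None)
        bucket.put_info()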
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index 0098b67e032..9f747501729 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -1087,14 +1087,15 @@ public:
class RGWStatBucket : public RGWOp {
protected:
- std::unique_ptr<rgw::sal::Bucket> bucket;
RGWStorageStats stats;
+ bool report_stats{true};
public:
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
+ virtual int get_params(optional_yield y) = 0;
void send_response() override = 0;
const char* name() const override { return "stat_bucket"; }
RGWOpType get_type() override { return RGW_OP_STAT_BUCKET; }
diff --git a/src/rgw/rgw_op_type.h b/src/rgw/rgw_op_type.h
index a36274add40..49faea6403d 100644
--- a/src/rgw/rgw_op_type.h
+++ b/src/rgw/rgw_op_type.h
@@ -117,6 +117,7 @@ enum RGWOpType {
RGW_OP_DETACH_GROUP_POLICY,
RGW_OP_LIST_ATTACHED_GROUP_POLICIES,
RGW_OP_PUT_BUCKET_LOGGING,
+ RGW_OP_POST_BUCKET_LOGGING,
/* rgw specific */
RGW_OP_ADMIN_SET_METADATA,
RGW_OP_GET_OBJ_LAYOUT,
diff --git a/src/rgw/rgw_pubsub.cc b/src/rgw/rgw_pubsub.cc
index cb68d72d7da..87a46bd61a6 100644
--- a/src/rgw/rgw_pubsub.cc
+++ b/src/rgw/rgw_pubsub.cc
@@ -62,214 +62,6 @@ void set_event_id(std::string& id, const std::string& hash, const utime_t& ts) {
}
}
-void rgw_s3_key_filter::dump(Formatter *f) const {
- if (!has_content()) {
- return;
- }
- f->open_array_section("FilterRules");
- if (!prefix_rule.empty()) {
- f->open_object_section("");
- ::encode_json("Name", "prefix", f);
- ::encode_json("Value", prefix_rule, f);
- f->close_section();
- }
- if (!suffix_rule.empty()) {
- f->open_object_section("");
- ::encode_json("Name", "suffix", f);
- ::encode_json("Value", suffix_rule, f);
- f->close_section();
- }
- if (!regex_rule.empty()) {
- f->open_object_section("");
- ::encode_json("Name", "regex", f);
- ::encode_json("Value", regex_rule, f);
- f->close_section();
- }
- f->close_section();
-}
-
-bool rgw_s3_key_filter::decode_xml(XMLObj* obj) {
- XMLObjIter iter = obj->find("FilterRule");
- XMLObj *o;
-
- const auto throw_if_missing = true;
- auto prefix_not_set = true;
- auto suffix_not_set = true;
- auto regex_not_set = true;
- std::string name;
-
- while ((o = iter.get_next())) {
- RGWXMLDecoder::decode_xml("Name", name, o, throw_if_missing);
- if (name == "prefix" && prefix_not_set) {
- prefix_not_set = false;
- RGWXMLDecoder::decode_xml("Value", prefix_rule, o, throw_if_missing);
- } else if (name == "suffix" && suffix_not_set) {
- suffix_not_set = false;
- RGWXMLDecoder::decode_xml("Value", suffix_rule, o, throw_if_missing);
- } else if (name == "regex" && regex_not_set) {
- regex_not_set = false;
- RGWXMLDecoder::decode_xml("Value", regex_rule, o, throw_if_missing);
- } else {
- throw RGWXMLDecoder::err("invalid/duplicate S3Key filter rule name: '" + name + "'");
- }
- }
- return true;
-}
-
-void rgw_s3_key_filter::dump_xml(Formatter *f) const {
- if (!prefix_rule.empty()) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", "prefix", f);
- ::encode_xml("Value", prefix_rule, f);
- f->close_section();
- }
- if (!suffix_rule.empty()) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", "suffix", f);
- ::encode_xml("Value", suffix_rule, f);
- f->close_section();
- }
- if (!regex_rule.empty()) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", "regex", f);
- ::encode_xml("Value", regex_rule, f);
- f->close_section();
- }
-}
-
-bool rgw_s3_key_filter::has_content() const {
- return !(prefix_rule.empty() && suffix_rule.empty() && regex_rule.empty());
-}
-
-void rgw_s3_key_value_filter::dump(Formatter *f) const {
- if (!has_content()) {
- return;
- }
- f->open_array_section("FilterRules");
- for (const auto& key_value : kv) {
- f->open_object_section("");
- ::encode_json("Name", key_value.first, f);
- ::encode_json("Value", key_value.second, f);
- f->close_section();
- }
- f->close_section();
-}
-
-bool rgw_s3_key_value_filter::decode_xml(XMLObj* obj) {
- kv.clear();
- XMLObjIter iter = obj->find("FilterRule");
- XMLObj *o;
-
- const auto throw_if_missing = true;
-
- std::string key;
- std::string value;
-
- while ((o = iter.get_next())) {
- RGWXMLDecoder::decode_xml("Name", key, o, throw_if_missing);
- RGWXMLDecoder::decode_xml("Value", value, o, throw_if_missing);
- kv.emplace(key, value);
- }
- return true;
-}
-
-void rgw_s3_key_value_filter::dump_xml(Formatter *f) const {
- for (const auto& key_value : kv) {
- f->open_object_section("FilterRule");
- ::encode_xml("Name", key_value.first, f);
- ::encode_xml("Value", key_value.second, f);
- f->close_section();
- }
-}
-
-bool rgw_s3_key_value_filter::has_content() const {
- return !kv.empty();
-}
-
-void rgw_s3_filter::dump(Formatter *f) const {
- encode_json("S3Key", key_filter, f);
- encode_json("S3Metadata", metadata_filter, f);
- encode_json("S3Tags", tag_filter, f);
-}
-
-bool rgw_s3_filter::decode_xml(XMLObj* obj) {
- RGWXMLDecoder::decode_xml("S3Key", key_filter, obj);
- RGWXMLDecoder::decode_xml("S3Metadata", metadata_filter, obj);
- RGWXMLDecoder::decode_xml("S3Tags", tag_filter, obj);
- return true;
-}
-
-void rgw_s3_filter::dump_xml(Formatter *f) const {
- if (key_filter.has_content()) {
- ::encode_xml("S3Key", key_filter, f);
- }
- if (metadata_filter.has_content()) {
- ::encode_xml("S3Metadata", metadata_filter, f);
- }
- if (tag_filter.has_content()) {
- ::encode_xml("S3Tags", tag_filter, f);
- }
-}
-
-bool rgw_s3_filter::has_content() const {
- return key_filter.has_content() ||
- metadata_filter.has_content() ||
- tag_filter.has_content();
-}
-
-bool match(const rgw_s3_key_filter& filter, const std::string& key) {
- const auto key_size = key.size();
- const auto prefix_size = filter.prefix_rule.size();
- if (prefix_size != 0) {
- // prefix rule exists
- if (prefix_size > key_size) {
- // if prefix is longer than key, we fail
- return false;
- }
- if (!std::equal(filter.prefix_rule.begin(), filter.prefix_rule.end(), key.begin())) {
- return false;
- }
- }
- const auto suffix_size = filter.suffix_rule.size();
- if (suffix_size != 0) {
- // suffix rule exists
- if (suffix_size > key_size) {
- // if suffix is longer than key, we fail
- return false;
- }
- if (!std::equal(filter.suffix_rule.begin(), filter.suffix_rule.end(), (key.end() - suffix_size))) {
- return false;
- }
- }
- if (!filter.regex_rule.empty()) {
- // TODO add regex chaching in the filter
- const std::regex base_regex(filter.regex_rule);
- if (!std::regex_match(key, base_regex)) {
- return false;
- }
- }
- return true;
-}
-
-bool match(const rgw_s3_key_value_filter& filter, const KeyValueMap& kv) {
- // all filter pairs must exist with the same value in the object's metadata/tags
- // object metadata/tags may include items not in the filter
- return std::includes(kv.begin(), kv.end(), filter.kv.begin(), filter.kv.end());
-}
-
-bool match(const rgw_s3_key_value_filter& filter, const KeyMultiValueMap& kv) {
- // all filter pairs must exist with the same value in the object's metadata/tags
- // object metadata/tags may include items not in the filter
- for (auto& filter : filter.kv) {
- auto result = kv.equal_range(filter.first);
- if (std::any_of(result.first, result.second, [&filter](const std::pair<std::string, std::string>& p) { return p.second == filter.second;}))
- continue;
- else
- return false;
- }
- return true;
-}
-
bool match(const rgw::notify::EventTypeList& events, rgw::notify::EventType event) {
// if event list exists, and none of the events in the list matches the event type, filter the message
if (!events.empty() && std::find(events.begin(), events.end(), event) == events.end()) {
diff --git a/src/rgw/rgw_pubsub.h b/src/rgw/rgw_pubsub.h
index 8a6b290cb85..176ada95204 100644
--- a/src/rgw/rgw_pubsub.h
+++ b/src/rgw/rgw_pubsub.h
@@ -9,94 +9,10 @@
#include "rgw_zone.h"
#include "rgw_notify_event_type.h"
#include <boost/container/flat_map.hpp>
+#include "rgw_s3_filter.h"
class XMLObj;
-struct rgw_s3_key_filter {
- std::string prefix_rule;
- std::string suffix_rule;
- std::string regex_rule;
-
- bool has_content() const;
-
- void dump(Formatter *f) const;
- bool decode_xml(XMLObj *obj);
- void dump_xml(Formatter *f) const;
-
- void encode(bufferlist& bl) const {
- ENCODE_START(1, 1, bl);
- encode(prefix_rule, bl);
- encode(suffix_rule, bl);
- encode(regex_rule, bl);
- ENCODE_FINISH(bl);
- }
-
- void decode(bufferlist::const_iterator& bl) {
- DECODE_START(1, bl);
- decode(prefix_rule, bl);
- decode(suffix_rule, bl);
- decode(regex_rule, bl);
- DECODE_FINISH(bl);
- }
-};
-WRITE_CLASS_ENCODER(rgw_s3_key_filter)
-
-using KeyValueMap = boost::container::flat_map<std::string, std::string>;
-using KeyMultiValueMap = std::multimap<std::string, std::string>;
-
-struct rgw_s3_key_value_filter {
- KeyValueMap kv;
-
- bool has_content() const;
-
- void dump(Formatter *f) const;
- bool decode_xml(XMLObj *obj);
- void dump_xml(Formatter *f) const;
-
- void encode(bufferlist& bl) const {
- ENCODE_START(1, 1, bl);
- encode(kv, bl);
- ENCODE_FINISH(bl);
- }
- void decode(bufferlist::const_iterator& bl) {
- DECODE_START(1, bl);
- decode(kv, bl);
- DECODE_FINISH(bl);
- }
-};
-WRITE_CLASS_ENCODER(rgw_s3_key_value_filter)
-
-struct rgw_s3_filter {
- rgw_s3_key_filter key_filter;
- rgw_s3_key_value_filter metadata_filter;
- rgw_s3_key_value_filter tag_filter;
-
- bool has_content() const;
-
- void dump(Formatter *f) const;
- bool decode_xml(XMLObj *obj);
- void dump_xml(Formatter *f) const;
-
- void encode(bufferlist& bl) const {
- ENCODE_START(2, 1, bl);
- encode(key_filter, bl);
- encode(metadata_filter, bl);
- encode(tag_filter, bl);
- ENCODE_FINISH(bl);
- }
-
- void decode(bufferlist::const_iterator& bl) {
- DECODE_START(2, bl);
- decode(key_filter, bl);
- decode(metadata_filter, bl);
- if (struct_v >= 2) {
- decode(tag_filter, bl);
- }
- DECODE_FINISH(bl);
- }
-};
-WRITE_CLASS_ENCODER(rgw_s3_filter)
-
using OptionalFilter = std::optional<rgw_s3_filter>;
struct rgw_pubsub_topic_filter;
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index 03241b7369a..ac5e65c0dd6 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -666,8 +666,10 @@ static void build_redirect_url(req_state *s, const string& redirect_base, string
dest_uri = dest_uri.substr(0, dest_uri.size() - 1);
}
dest_uri += s->info.request_uri;
- dest_uri += "?";
- dest_uri += s->info.request_params;
+ if (!s->info.request_params.empty()) {
+ dest_uri += "?";
+ dest_uri += s->info.request_params;
+ }
}
void abort_early(req_state *s, RGWOp* op, int err_no,
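The redirect fix is small but client-visible: a bare '?' is no longer appended when the request carries no query string. Equivalent Python:

    def build_redirect_url(redirect_base, request_uri, request_params):
        dest = redirect_base.rstrip('/') + request_uri
        if request_params:               # only add '?' when there is a query
            dest += '?' + request_params
        return dest

    print(build_redirect_url('https://example.com', '/obj', ''))   # no trailing '?'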
@@ -1668,7 +1670,6 @@ int RGWDeleteMultiObj_ObjStore::get_params(optional_yield y)
return op_ret;
}
-
void RGWRESTOp::send_response()
{
if (!flusher.did_start()) {
diff --git a/src/rgw/rgw_rest_bucket_logging.cc b/src/rgw/rgw_rest_bucket_logging.cc
index 8d17114d804..ed12ce855a9 100644
--- a/src/rgw/rgw_rest_bucket_logging.cc
+++ b/src/rgw/rgw_rest_bucket_logging.cc
@@ -209,6 +209,84 @@ class RGWPutBucketLoggingOp : public RGWDefaultResponseOp {
}
};
+// POST /<bucket name>/?logging
+// actual configuration is XML encoded in the body of the message
+class RGWPostBucketLoggingOp : public RGWDefaultResponseOp {
+ int verify_permission(optional_yield y) override {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PostBucketLogging)) {
+ return -EACCES;
+ }
+
+ return 0;
+ }
+
+ const char* name() const override { return "post_bucket_logging"; }
+ RGWOpType get_type() override { return RGW_OP_POST_BUCKET_LOGGING; }
+ uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; }
+
+ void execute(optional_yield y) override {
+ op_ret = verify_bucket_logging_params(this, s);
+ if (op_ret < 0) {
+ return;
+ }
+
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name),
+ &bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << s->bucket_name << "', ret = " << op_ret << dendl;
+ return;
+ }
+ const auto& bucket_attrs = bucket->get_attrs();
+ auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING);
+ if (iter == bucket_attrs.end()) {
+ ldpp_dout(this, 1) << "WARNING: no logging configured on bucket" << dendl;
+ return;
+ }
+ rgw::bucketlogging::configuration configuration;
+ try {
+ configuration.enabled = true;
+ decode(configuration, iter->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 1) << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "'. error: " << err.what() << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, configuration.target_bucket),
+ &target_bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl;
+ return;
+ }
+ std::string obj_name;
+ RGWObjVersionTracker objv_tracker;
+ op_ret = target_bucket->get_logging_object_name(obj_name, configuration.target_prefix, null_yield, this, &objv_tracker);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get pending logging object name from target bucket '" << configuration.target_bucket << "'" << dendl;
+ return;
+ }
+ op_ret = rgw::bucketlogging::rollover_logging_object(configuration, target_bucket, obj_name, this, null_yield, true, &objv_tracker);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to flush pending logging object '" << obj_name
+ << "' to target bucket '" << configuration.target_bucket << "'" << dendl;
+ return;
+ }
+ ldpp_dout(this, 20) << "flushed pending logging object '" << obj_name
+ << "' to target bucket '" << configuration.target_bucket << "'" << dendl;
+ }
+};
+
+RGWOp* RGWHandler_REST_BucketLogging_S3::create_post_op() {
+ return new RGWPostBucketLoggingOp();
+}
+
RGWOp* RGWHandler_REST_BucketLogging_S3::create_put_op() {
return new RGWPutBucketLoggingOp();
}
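The new POST handler turns the admin-side flush into a REST operation: POSTing to the bucket's logging subresource rolls the pending log object over into the target bucket, gated by the new s3:PostBucketLogging action. An illustrative raw request (host and signature are placeholders):

    POST /mybucket?logging HTTP/1.1
    Host: rgw.example.com
    Authorization: AWS4-HMAC-SHA256 ...

Since the op derives from RGWDefaultResponseOp, a success is signalled by the default (empty-body) response.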
diff --git a/src/rgw/rgw_rest_bucket_logging.h b/src/rgw/rgw_rest_bucket_logging.h
index ca2220084bb..0b31d88dad8 100644
--- a/src/rgw/rgw_rest_bucket_logging.h
+++ b/src/rgw/rgw_rest_bucket_logging.h
@@ -14,5 +14,6 @@ public:
virtual ~RGWHandler_REST_BucketLogging_S3() = default;
static RGWOp* create_get_op();
static RGWOp* create_put_op();
+ static RGWOp* create_post_op();
};
diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc
index b7046118ef3..30ebe8e8965 100644
--- a/src/rgw/rgw_rest_s3.cc
+++ b/src/rgw/rgw_rest_s3.cc
@@ -2401,34 +2401,41 @@ void RGWGetBucketWebsite_ObjStore_S3::send_response()
rgw_flush_formatter_and_reset(s, s->formatter);
}
-static void dump_bucket_metadata(req_state *s, rgw::sal::Bucket* bucket,
+static void dump_bucket_metadata(req_state *s,
RGWStorageStats& stats)
{
dump_header(s, "X-RGW-Object-Count", static_cast<long long>(stats.num_objects));
dump_header(s, "X-RGW-Bytes-Used", static_cast<long long>(stats.size));
+}
- // only bucket's owner is allowed to get the quota settings of the account
- if (s->auth.identity->is_owner_of(bucket->get_owner())) {
- const auto& user_info = s->user->get_info();
- const auto& bucket_quota = s->bucket->get_info().quota; // bucket quota
- dump_header(s, "X-RGW-Quota-Max-Buckets", static_cast<long long>(user_info.max_buckets));
-
- if (user_info.quota.user_quota.enabled){
- dump_header(s, "X-RGW-Quota-User-Size", static_cast<long long>(user_info.quota.user_quota.max_size));
- dump_header(s, "X-RGW-Quota-User-Objects", static_cast<long long>(user_info.quota.user_quota.max_objects));
- }
+int RGWStatBucket_ObjStore_S3::get_params(optional_yield y)
+{
+ report_stats = s->info.args.exists("read-stats");
- if (bucket_quota.enabled){
- dump_header(s, "X-RGW-Quota-Bucket-Size", static_cast<long long>(bucket_quota.max_size));
- dump_header(s, "X-RGW-Quota-Bucket-Objects", static_cast<long long>(bucket_quota.max_objects));
- }
- }
+ return 0;
}
void RGWStatBucket_ObjStore_S3::send_response()
{
if (op_ret >= 0) {
- dump_bucket_metadata(s, bucket.get(), stats);
+ if (report_stats) {
+ dump_bucket_metadata(s, stats);
+ }
+ // only bucket's owner is allowed to get the quota settings of the account
+ if (s->auth.identity->is_owner_of(s->bucket->get_owner())) {
+ const auto& user_info = s->user->get_info();
+ const auto& bucket_quota = s->bucket->get_info().quota; // bucket quota
+
+ dump_header(s, "X-RGW-Quota-Max-Buckets", static_cast<long long>(user_info.max_buckets));
+ if (user_info.quota.user_quota.enabled) {
+ dump_header(s, "X-RGW-Quota-User-Size", static_cast<long long>(user_info.quota.user_quota.max_size));
+ dump_header(s, "X-RGW-Quota-User-Objects", static_cast<long long>(user_info.quota.user_quota.max_objects));
+ }
+ if (bucket_quota.enabled) {
+ dump_header(s, "X-RGW-Quota-Bucket-Size", static_cast<long long>(bucket_quota.max_size));
+ dump_header(s, "X-RGW-Quota-Bucket-Objects", static_cast<long long>(bucket_quota.max_objects));
+ }
+ }
}
set_req_state_err(s, op_ret);
@@ -4947,6 +4954,10 @@ RGWOp *RGWHandler_REST_Bucket_S3::op_post()
return new RGWDeleteMultiObj_ObjStore_S3;
}
+ if (s->info.args.exists("logging")) {
+ return RGWHandler_REST_BucketLogging_S3::create_post_op();
+ }
+
if (s->info.args.exists("mdsearch")) {
if (!s->cct->_conf->rgw_enable_mdsearch) {
return NULL;
@@ -6104,7 +6115,8 @@ AWSGeneralAbstractor::get_auth_data_v4(const req_state* const s,
case RGW_OP_GET_BUCKET_PUBLIC_ACCESS_BLOCK:
case RGW_OP_DELETE_BUCKET_PUBLIC_ACCESS_BLOCK:
case RGW_OP_GET_OBJ://s3select its post-method(payload contain the query) , the request is get-object
- case RGW_OP_PUT_BUCKET_LOGGING:
+ case RGW_OP_PUT_BUCKET_LOGGING:
+ case RGW_OP_POST_BUCKET_LOGGING:
case RGW_OP_GET_BUCKET_LOGGING:
break;
default:
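On the S3 side, bucket stat (HEAD on the bucket) now computes object-count and byte-usage stats only when the client opts in with the new read-stats query parameter; the quota headers are still returned to the bucket owner either way. Illustrative exchange with hypothetical values:

    HEAD /mybucket?read-stats HTTP/1.1

    HTTP/1.1 200 OK
    X-RGW-Object-Count: 1234
    X-RGW-Bytes-Used: 10485760
    X-RGW-Quota-Max-Buckets: 1000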
diff --git a/src/rgw/rgw_rest_s3.h b/src/rgw/rgw_rest_s3.h
index c47edb38855..50160d79a42 100644
--- a/src/rgw/rgw_rest_s3.h
+++ b/src/rgw/rgw_rest_s3.h
@@ -242,6 +242,7 @@ public:
~RGWStatBucket_ObjStore_S3() override {}
void send_response() override;
+ int get_params(optional_yield y) override;
};
class RGWCreateBucket_ObjStore_S3 : public RGWCreateBucket_ObjStore {
diff --git a/src/rgw/rgw_rest_sts.cc b/src/rgw/rgw_rest_sts.cc
index f2bd9429a55..1101da0af3c 100644
--- a/src/rgw/rgw_rest_sts.cc
+++ b/src/rgw/rgw_rest_sts.cc
@@ -436,6 +436,9 @@ WebTokenEngine::validate_signature(const DoutPrefixProvider* dpp, const jwt::dec
.allow_algorithm(jwt::algorithm::ps512{cert});
verifier.verify(decoded);
+ } else {
+ ldpp_dout(dpp, 0) << "Unsupported algorithm: " << algorithm << dendl;
+ throw -EINVAL;
}
} catch (std::runtime_error& e) {
ldpp_dout(dpp, 0) << "Signature validation failed: " << e.what() << dendl;
diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc
index 35c36d1ae1a..b8ff3ca2fe8 100644
--- a/src/rgw/rgw_rest_swift.cc
+++ b/src/rgw/rgw_rest_swift.cc
@@ -447,7 +447,6 @@ int RGWListBucket_ObjStore_SWIFT::get_params(optional_yield y)
}
static void dump_container_metadata(req_state *,
- const rgw::sal::Bucket*,
const std::optional<RGWStorageStats>& stats,
const RGWQuotaInfo&,
const RGWBucketWebsiteConf&);
@@ -458,7 +457,7 @@ void RGWListBucket_ObjStore_SWIFT::send_response()
map<string, bool>::iterator pref_iter = common_prefixes.begin();
dump_start(s);
- dump_container_metadata(s, s->bucket.get(), stats, quota.bucket_quota,
+ dump_container_metadata(s, stats, quota.bucket_quota,
s->bucket->get_info().website_conf);
s->formatter->open_array_section_with_attrs("container",
@@ -558,7 +557,6 @@ next:
} // RGWListBucket_ObjStore_SWIFT::send_response
static void dump_container_metadata(req_state *s,
- const rgw::sal::Bucket* bucket,
const std::optional<RGWStorageStats>& stats,
const RGWQuotaInfo& quota,
const RGWBucketWebsiteConf& ws_conf)
@@ -683,7 +681,7 @@ void RGWStatBucket_ObjStore_SWIFT::send_response()
{
if (op_ret >= 0) {
op_ret = STATUS_NO_CONTENT;
- dump_container_metadata(s, bucket.get(), stats, quota.bucket_quota,
+ dump_container_metadata(s, stats, quota.bucket_quota,
s->bucket->get_info().website_conf);
}
@@ -2640,7 +2638,7 @@ RGWOp* RGWSwiftWebsiteHandler::get_ws_listing_op()
/* Generate the header now. */
set_req_state_err(s, op_ret);
dump_errno(s);
- dump_container_metadata(s, s->bucket.get(), stats, quota.bucket_quota,
+ dump_container_metadata(s, stats, quota.bucket_quota,
s->bucket->get_info().website_conf);
end_header(s, this, "text/html");
if (op_ret < 0) {
diff --git a/src/rgw/rgw_rest_swift.h b/src/rgw/rgw_rest_swift.h
index eb1c4422e34..ec206a5160f 100644
--- a/src/rgw/rgw_rest_swift.h
+++ b/src/rgw/rgw_rest_swift.h
@@ -86,6 +86,7 @@ public:
RGWStatBucket_ObjStore_SWIFT() {}
~RGWStatBucket_ObjStore_SWIFT() override {}
+ int get_params(optional_yield y) override { return 0; }
void send_response() override;
};
diff --git a/src/rgw/rgw_s3_filter.cc b/src/rgw/rgw_s3_filter.cc
new file mode 100644
index 00000000000..05a7c4a7293
--- /dev/null
+++ b/src/rgw/rgw_s3_filter.cc
@@ -0,0 +1,269 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+#include "rgw_pubsub.h"
+#include "rgw_tools.h"
+#include "rgw_xml.h"
+#include "rgw_s3_filter.h"
+#include "common/errno.h"
+#include "rgw_sal.h"
+#include <regex>
+#include <algorithm>
+
+void rgw_s3_key_filter::dump(Formatter *f) const {
+ if (!has_content()) {
+ return;
+ }
+ f->open_array_section("FilterRules");
+ if (!prefix_rule.empty()) {
+ f->open_object_section("");
+ ::encode_json("Name", "prefix", f);
+ ::encode_json("Value", prefix_rule, f);
+ f->close_section();
+ }
+ if (!suffix_rule.empty()) {
+ f->open_object_section("");
+ ::encode_json("Name", "suffix", f);
+ ::encode_json("Value", suffix_rule, f);
+ f->close_section();
+ }
+ if (!regex_rule.empty()) {
+ f->open_object_section("");
+ ::encode_json("Name", "regex", f);
+ ::encode_json("Value", regex_rule, f);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+bool rgw_s3_key_filter::decode_xml(XMLObj* obj) {
+ XMLObjIter iter = obj->find("FilterRule");
+ XMLObj *o;
+
+ const auto throw_if_missing = true;
+ auto prefix_not_set = true;
+ auto suffix_not_set = true;
+ auto regex_not_set = true;
+ std::string name;
+
+ while ((o = iter.get_next())) {
+ RGWXMLDecoder::decode_xml("Name", name, o, throw_if_missing);
+ if (name == "prefix" && prefix_not_set) {
+ prefix_not_set = false;
+ RGWXMLDecoder::decode_xml("Value", prefix_rule, o, throw_if_missing);
+ } else if (name == "suffix" && suffix_not_set) {
+ suffix_not_set = false;
+ RGWXMLDecoder::decode_xml("Value", suffix_rule, o, throw_if_missing);
+ } else if (name == "regex" && regex_not_set) {
+ regex_not_set = false;
+ RGWXMLDecoder::decode_xml("Value", regex_rule, o, throw_if_missing);
+ } else {
+ throw RGWXMLDecoder::err("invalid/duplicate S3Key filter rule name: '" + name + "'");
+ }
+ }
+ return true;
+}
+
+void rgw_s3_key_filter::dump_xml(Formatter *f) const {
+ if (!prefix_rule.empty()) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", "prefix", f);
+ ::encode_xml("Value", prefix_rule, f);
+ f->close_section();
+ }
+ if (!suffix_rule.empty()) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", "suffix", f);
+ ::encode_xml("Value", suffix_rule, f);
+ f->close_section();
+ }
+ if (!regex_rule.empty()) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", "regex", f);
+ ::encode_xml("Value", regex_rule, f);
+ f->close_section();
+ }
+}
+
+bool rgw_s3_key_filter::has_content() const {
+ return !(prefix_rule.empty() && suffix_rule.empty() && regex_rule.empty());
+}
+
+void rgw_s3_key_value_filter::dump(Formatter *f) const {
+ if (!has_content()) {
+ return;
+ }
+ f->open_array_section("FilterRules");
+ for (const auto& key_value : kv) {
+ f->open_object_section("");
+ ::encode_json("Name", key_value.first, f);
+ ::encode_json("Value", key_value.second, f);
+ f->close_section();
+ }
+ f->close_section();
+}
+
+bool rgw_s3_key_value_filter::decode_xml(XMLObj* obj) {
+ kv.clear();
+ XMLObjIter iter = obj->find("FilterRule");
+ XMLObj *o;
+
+ const auto throw_if_missing = true;
+
+ std::string key;
+ std::string value;
+
+ while ((o = iter.get_next())) {
+ RGWXMLDecoder::decode_xml("Name", key, o, throw_if_missing);
+ RGWXMLDecoder::decode_xml("Value", value, o, throw_if_missing);
+ kv.emplace(key, value);
+ }
+ return true;
+}
+
+void rgw_s3_key_value_filter::dump_xml(Formatter *f) const {
+ for (const auto& key_value : kv) {
+ f->open_object_section("FilterRule");
+ ::encode_xml("Name", key_value.first, f);
+ ::encode_xml("Value", key_value.second, f);
+ f->close_section();
+ }
+}
+
+bool rgw_s3_key_value_filter::has_content() const {
+ return !kv.empty();
+}
+
+void rgw_s3_filter::dump(Formatter *f) const {
+ encode_json("S3Key", key_filter, f);
+ encode_json("S3Metadata", metadata_filter, f);
+ encode_json("S3Tags", tag_filter, f);
+}
+
+bool rgw_s3_filter::decode_xml(XMLObj* obj) {
+ RGWXMLDecoder::decode_xml("S3Key", key_filter, obj);
+ RGWXMLDecoder::decode_xml("S3Metadata", metadata_filter, obj);
+ RGWXMLDecoder::decode_xml("S3Tags", tag_filter, obj);
+ return true;
+}
+
+void rgw_s3_filter::dump_xml(Formatter *f) const {
+ if (key_filter.has_content()) {
+ ::encode_xml("S3Key", key_filter, f);
+ }
+ if (metadata_filter.has_content()) {
+ ::encode_xml("S3Metadata", metadata_filter, f);
+ }
+ if (tag_filter.has_content()) {
+ ::encode_xml("S3Tags", tag_filter, f);
+ }
+}
+
+bool rgw_s3_filter::has_content() const {
+ return key_filter.has_content() ||
+ metadata_filter.has_content() ||
+ tag_filter.has_content();
+}
+
+bool match(const rgw_s3_key_filter& filter, const std::string& key) {
+ const auto key_size = key.size();
+ const auto prefix_size = filter.prefix_rule.size();
+ if (prefix_size != 0) {
+ // prefix rule exists
+ if (prefix_size > key_size) {
+ // if prefix is longer than key, we fail
+ return false;
+ }
+ if (!std::equal(filter.prefix_rule.begin(), filter.prefix_rule.end(), key.begin())) {
+ return false;
+ }
+ }
+ const auto suffix_size = filter.suffix_rule.size();
+ if (suffix_size != 0) {
+ // suffix rule exists
+ if (suffix_size > key_size) {
+ // if suffix is longer than key, we fail
+ return false;
+ }
+ if (!std::equal(filter.suffix_rule.begin(), filter.suffix_rule.end(), (key.end() - suffix_size))) {
+ return false;
+ }
+ }
+ if (!filter.regex_rule.empty()) {
+ // TODO add regex caching in the filter
+ const std::regex base_regex(filter.regex_rule);
+ if (!std::regex_match(key, base_regex)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyValueMap& kv) {
+ // all filter pairs must exist with the same value in the object's metadata/tags
+ // object metadata/tags may include items not in the filter
+ return std::includes(kv.begin(), kv.end(), filter.kv.begin(), filter.kv.end());
+}
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyMultiValueMap& kv) {
+ // all filter pairs must exist with the same value in the object's metadata/tags
+ // object metadata/tags may include items not in the filter
+ for (auto& filter : filter.kv) {
+ auto result = kv.equal_range(filter.first);
+ if (std::any_of(result.first, result.second, [&filter](const std::pair<std::string, std::string>& p) { return p.second == filter.second;}))
+ continue;
+ else
+ return false;
+ }
+ return true;
+}
+
+bool match(const rgw_s3_filter& s3_filter, const rgw::sal::Object* obj) {
+ if (obj == nullptr) {
+ return false;
+ }
+
+ if (match(s3_filter.key_filter, obj->get_name())) {
+ return true;
+ }
+
+ const auto &attrs = obj->get_attrs();
+ if (!s3_filter.metadata_filter.kv.empty()) {
+ KeyValueMap attrs_map;
+ for (auto& attr : attrs) {
+ if (boost::algorithm::starts_with(attr.first, RGW_ATTR_META_PREFIX)) {
+ std::string_view key(attr.first);
+ key.remove_prefix(sizeof(RGW_ATTR_PREFIX)-1);
+ // we want to pass a null terminated version
+ // of the bufferlist, hence "to_str().c_str()"
+ attrs_map.emplace(key, attr.second.to_str().c_str());
+ }
+ }
+ if (match(s3_filter.metadata_filter, attrs_map)) {
+ return true;
+ }
+ }
+
+ if (!s3_filter.tag_filter.kv.empty()) {
+ // tag filter exists
+ // try to fetch tags from the attributes
+ KeyMultiValueMap tags;
+ const auto attr_iter = attrs.find(RGW_ATTR_TAGS);
+ if (attr_iter != attrs.end()) {
+ auto bliter = attr_iter->second.cbegin();
+ RGWObjTags obj_tags;
+ try {
+ ::decode(obj_tags, bliter);
+ } catch (buffer::error &) {
+ // not able to decode tags
+ return false;
+ }
+ tags = std::move(obj_tags.get_tags());
+ }
+ if (match(s3_filter.tag_filter, tags)) {
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/src/rgw/rgw_s3_filter.h b/src/rgw/rgw_s3_filter.h
new file mode 100644
index 00000000000..9bbc4ef0088
--- /dev/null
+++ b/src/rgw/rgw_s3_filter.h
@@ -0,0 +1,102 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+#pragma once
+
+#include "rgw_tools.h"
+#include <boost/container/flat_map.hpp>
+
+class XMLObj;
+
+struct rgw_s3_key_filter {
+ std::string prefix_rule;
+ std::string suffix_rule;
+ std::string regex_rule;
+
+ bool has_content() const;
+
+ void dump(Formatter *f) const;
+ bool decode_xml(XMLObj *obj);
+ void dump_xml(Formatter *f) const;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(prefix_rule, bl);
+ encode(suffix_rule, bl);
+ encode(regex_rule, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(prefix_rule, bl);
+ decode(suffix_rule, bl);
+ decode(regex_rule, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_s3_key_filter)
+
+using KeyValueMap = boost::container::flat_map<std::string, std::string>;
+using KeyMultiValueMap = std::multimap<std::string, std::string>;
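+// KeyValueMap backs the filter rules and the decoded object metadata, where
+// keys are unique; KeyMultiValueMap backs object tags, where a key may repeat.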
+
+struct rgw_s3_key_value_filter {
+ KeyValueMap kv;
+
+ bool has_content() const;
+
+ void dump(Formatter *f) const;
+ bool decode_xml(XMLObj *obj);
+ void dump_xml(Formatter *f) const;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(kv, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(kv, bl);
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_s3_key_value_filter)
+
+struct rgw_s3_filter {
+ rgw_s3_key_filter key_filter;
+ rgw_s3_key_value_filter metadata_filter;
+ rgw_s3_key_value_filter tag_filter;
+
+ bool has_content() const;
+
+ void dump(Formatter *f) const;
+ bool decode_xml(XMLObj *obj);
+ void dump_xml(Formatter *f) const;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(2, 1, bl);
+ encode(key_filter, bl);
+ encode(metadata_filter, bl);
+ encode(tag_filter, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(2, bl);
+ decode(key_filter, bl);
+ decode(metadata_filter, bl);
+ if (struct_v >= 2) {
+ decode(tag_filter, bl);
+ }
+ DECODE_FINISH(bl);
+ }
+};
+WRITE_CLASS_ENCODER(rgw_s3_filter)
+
+bool match(const rgw_s3_key_filter& filter, const std::string& key);
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyValueMap& kv);
+
+bool match(const rgw_s3_key_value_filter& filter, const KeyMultiValueMap& kv);
+
+bool match(const rgw_s3_filter& filter, const rgw::sal::Object* obj);
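For orientation, a minimal usage sketch of how these matchers compose (the values are hypothetical, not part of the patch):

  rgw_s3_filter filter;
  filter.key_filter.prefix_rule = "images/";
  filter.tag_filter.kv.emplace("type", "thumbnail");

  // accepted via the key filter: the prefix matches and no suffix/regex rules are set
  bool by_name = match(filter.key_filter, "images/cat.png");       // true

  // accepted via the tag filter: every rule pair appears in the object's tags
  KeyMultiValueMap tags{{"type", "thumbnail"}, {"owner", "alice"}};
  bool by_tags = match(filter.tag_filter, tags);                   // true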
diff --git a/src/rgw/rgw_sal_dbstore.cc b/src/rgw/rgw_sal_dbstore.cc
index d3af42cf2ec..0e4f95846d1 100644
--- a/src/rgw/rgw_sal_dbstore.cc
+++ b/src/rgw/rgw_sal_dbstore.cc
@@ -271,7 +271,7 @@ namespace rgw::sal {
/* XXX: handle has_instance_obj like in set_bucket_instance_attrs() */
- ret = store->getDB()->update_bucket(dpp, "attrs", info, false, nullptr, &new_attrs, nullptr, &get_info().objv_tracker);
+ ret = store->getDB()->update_bucket(dpp, "attrs", info, false, nullptr, &attrs, nullptr, &get_info().objv_tracker);
return ret;
}
diff --git a/src/rgw/rgw_sal_dbstore.h b/src/rgw/rgw_sal_dbstore.h
index 107ba735a63..b54249df031 100644
--- a/src/rgw/rgw_sal_dbstore.h
+++ b/src/rgw/rgw_sal_dbstore.h
@@ -268,22 +268,22 @@ protected:
class DBZone : public StoreZone {
protected:
DBStore* store;
- RGWRealm *realm{nullptr};
- DBZoneGroup *zonegroup{nullptr};
- RGWZone *zone_public_config{nullptr}; /* external zone params, e.g., entrypoints, log flags, etc. */
- RGWZoneParams *zone_params{nullptr}; /* internal zone params, e.g., rados pools */
- RGWPeriod *current_period{nullptr};
+ std::unique_ptr<RGWRealm> realm;
+ std::unique_ptr<DBZoneGroup> zonegroup;
+ std::unique_ptr<RGWZone> zone_public_config; /* external zone params, e.g., entrypoints, log flags, etc. */
+ std::unique_ptr<RGWZoneParams> zone_params; /* internal zone params, e.g., rados pools */
+ std::unique_ptr<RGWPeriod> current_period;
public:
DBZone(DBStore* _store) : store(_store) {
- realm = new RGWRealm();
+ realm = std::make_unique<RGWRealm>();
std::unique_ptr<RGWZoneGroup> rzg = std::make_unique<RGWZoneGroup>("default", "default");
rzg->api_name = "default";
rzg->is_master = true;
- zonegroup = new DBZoneGroup(store, std::move(rzg));
- zone_public_config = new RGWZone();
- zone_params = new RGWZoneParams();
- current_period = new RGWPeriod();
+ zonegroup = std::make_unique<DBZoneGroup>(store, std::move(rzg));
+ zone_public_config = std::make_unique<RGWZone>();
+ zone_params = std::make_unique<RGWZoneParams>();
+ current_period = std::make_unique<RGWPeriod>();
// XXX: only default and STANDARD supported for now
RGWZonePlacementInfo info;
@@ -292,13 +292,7 @@ protected:
info.storage_classes = sc;
zone_params->placement_pools["default"] = info;
}
- ~DBZone() {
- delete realm;
- delete zonegroup;
- delete zone_public_config;
- delete zone_params;
- delete current_period;
- }
+ ~DBZone() = default;
virtual std::unique_ptr<Zone> clone() override {
return std::make_unique<DBZone>(store);
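The DBZone change above trades hand-written delete calls for std::unique_ptr ownership, which is what lets the destructor become defaulted. The pattern in miniature, with a hypothetical Widget type:

  #include <memory>

  struct Widget {};

  struct Owner {
    std::unique_ptr<Widget> w = std::make_unique<Widget>();
    ~Owner() = default;  // members release themselves; no delete needed
  };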
diff --git a/src/script/ceph-backport.sh b/src/script/ceph-backport.sh
index a56509e3d3a..c216ed32d9b 100755
--- a/src/script/ceph-backport.sh
+++ b/src/script/ceph-backport.sh
@@ -779,7 +779,7 @@ function maybe_deduce_remote {
else
assert_fail "bad remote_type ->$remote_type<- in maybe_deduce_remote"
fi
- remote=$(git remote -v | grep --extended-regexp --ignore-case '(://|@)github.com(/|:|:/)'${url_component}'/ceph(\s|\.|\/)' | head -n1 | cut -f 1)
+ remote=$(git remote -v | grep --extended-regexp --ignore-case '(://|@)github.com(/|:|:/)'${url_component}'/ceph(\s|\.|\/|-)' | head -n1 | cut -f 1)
echo "$remote"
}
diff --git a/src/test/cli/rbd/help.t b/src/test/cli/rbd/help.t
index 984175a97b9..5f304258358 100644
--- a/src/test/cli/rbd/help.t
+++ b/src/test/cli/rbd/help.t
@@ -916,7 +916,7 @@
[--group-namespace <group-namespace>]
[--group <group>] [--image-pool <image-pool>]
[--image-namespace <image-namespace>]
- [--image <image>] [--pool <pool>]
+ [--image <image>]
<group-spec> <image-spec>
Add an image to a group.
@@ -934,7 +934,6 @@
--image-pool arg image pool name
--image-namespace arg image namespace name
--image arg image name
- -p [ --pool ] arg pool name unless overridden
rbd help group image list
usage: rbd group image list [--format <format>] [--pretty-format]
@@ -960,8 +959,7 @@
[--group-namespace <group-namespace>]
[--group <group>] [--image-pool <image-pool>]
[--image-namespace <image-namespace>]
- [--image <image>] [--pool <pool>]
- [--image-id <image-id>]
+ [--image <image>] [--image-id <image-id>]
<group-spec> <image-spec>
Remove an image from a group.
@@ -979,7 +977,6 @@
--image-pool arg image pool name
--image-namespace arg image namespace name
--image arg image name
- -p [ --pool ] arg pool name unless overridden
--image-id arg image id
rbd help group info
diff --git a/src/test/cls_log/test_cls_log.cc b/src/test/cls_log/test_cls_log.cc
index f8c1a32494a..91e38844dec 100644
--- a/src/test/cls_log/test_cls_log.cc
+++ b/src/test/cls_log/test_cls_log.cc
@@ -332,7 +332,6 @@ TEST_F(cls_log, trim_by_marker)
utime_t start_time = ceph_clock_now();
generate_log(ioctx, oid, 10, start_time, true);
- utime_t zero_time;
std::vector<cls_log_entry> log1;
{
list<cls_log_entry> entries;
diff --git a/src/test/common/CMakeLists.txt b/src/test/common/CMakeLists.txt
index 33ff38b932d..3c9cf0003aa 100644
--- a/src/test/common/CMakeLists.txt
+++ b/src/test/common/CMakeLists.txt
@@ -229,6 +229,12 @@ add_executable(unittest_xmlformatter
add_ceph_unittest(unittest_xmlformatter)
target_link_libraries(unittest_xmlformatter ceph-common)
+add_executable(unittest_htmlformatter
+ test_htmlformatter.cc
+ )
+add_ceph_unittest(unittest_htmlformatter)
+target_link_libraries(unittest_htmlformatter ceph-common)
+
# unittest_bit_vector
add_executable(unittest_bit_vector
test_bit_vector.cc
diff --git a/src/test/common/test_htmlformatter.cc b/src/test/common/test_htmlformatter.cc
new file mode 100644
index 00000000000..0a8d827b53a
--- /dev/null
+++ b/src/test/common/test_htmlformatter.cc
@@ -0,0 +1,26 @@
+#include "gtest/gtest.h"
+
+#include "common/HTMLFormatter.h"
+#include <sstream>
+#include <string>
+
+using namespace ceph;
+
+TEST(htmlformatter, dump_format_large_item)
+{
+ std::stringstream sout;
+ HTMLFormatter formatter(false);
+
+ std::string base_url("http://example.com");
+ std::string bucket_name("bucket");
+ std::string object_key(1024, 'a');
+
+ formatter.dump_format("Location", "%s/%s/%s", base_url.c_str(), bucket_name.c_str(), object_key.c_str());
+
+ formatter.flush(sout);
+
+ std::string uri = base_url + "/" + bucket_name + "/" + object_key;
+ std::string expected_output = "<li>Location: " + uri + "</li>";
+
+ EXPECT_EQ(expected_output, sout.str());
+}
\ No newline at end of file
diff --git a/src/test/common/test_intrusive_lru.cc b/src/test/common/test_intrusive_lru.cc
index af8edb8e2bf..1410b73fcaa 100644
--- a/src/test/common/test_intrusive_lru.cc
+++ b/src/test/common/test_intrusive_lru.cc
@@ -177,9 +177,9 @@ TEST(LRU, clear_range) {
auto [live_ref2, existed2] = cache.add(5, 4);
ASSERT_FALSE(existed2);
- cache.clear_range(0,4);
+ cache.clear_range(0, 4, [](auto&){});
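+ // clear_range now takes a disposer callback and also drops entries that are
+ // still referenced; surviving refs observe this via is_invalidated()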
- // Should not exists (Unreferenced):
+ // Should not exist
{
auto [ref, existed] = cache.add(1, 4);
ASSERT_FALSE(existed);
@@ -192,21 +192,27 @@ TEST(LRU, clear_range) {
auto [ref, existed] = cache.add(3, 4);
ASSERT_FALSE(existed);
}
- // Should exist (Still being referenced):
{
auto [ref, existed] = cache.add(4, 4);
- ASSERT_TRUE(existed);
+ ASSERT_FALSE(existed);
}
- // Should exists (Still being referenced and wasn't removed)
+ ASSERT_TRUE(live_ref1->is_invalidated());
+ // Should exist (wasn't removed):
{
auto [ref, existed] = cache.add(5, 4);
ASSERT_TRUE(existed);
}
- // Test out of bound deletion:
+ ASSERT_FALSE(live_ref2->is_invalidated());
+ // Test clear_range with right bound past last entry
+ cache.clear_range(3, 8, [](auto&){});
+ ASSERT_TRUE(live_ref2->is_invalidated());
{
- cache.clear_range(3,8);
auto [ref, existed] = cache.add(4, 4);
- ASSERT_TRUE(existed);
+ ASSERT_FALSE(existed);
+ }
+ {
+ auto [ref, existed] = cache.add(5, 4);
+ ASSERT_FALSE(existed);
}
{
auto [ref, existed] = cache.add(3, 4);
diff --git a/src/test/common/test_json_formatter.cc b/src/test/common/test_json_formatter.cc
index 9cc19b24ad1..d0ddd262c0a 100644
--- a/src/test/common/test_json_formatter.cc
+++ b/src/test/common/test_json_formatter.cc
@@ -102,3 +102,27 @@ TEST(formatter, dump_inf_or_nan)
EXPECT_EQ(parser.find_obj("nan_val")->get_data(), "null");
EXPECT_EQ(parser.find_obj("nan_val_alt")->get_data(), "null");
}
+
+TEST(formatter, dump_large_item) {
+ JSONFormatter formatter;
+ formatter.open_object_section("large_item");
+
+ std::string base_url("http://example.com");
+ std::string bucket_name("bucket");
+ std::string object_key(1024, 'a');
+
+ std::string full_url = base_url + "/" + bucket_name + "/" + object_key;
+ formatter.dump_format("Location", "%s/%s/%s", base_url.c_str(), bucket_name.c_str(), object_key.c_str());
+
+ formatter.close_section();
+ bufferlist bl;
+ formatter.flush(bl);
+
+ JSONParser parser;
+ EXPECT_TRUE(parser.parse(bl.c_str(), bl.length()));
+ EXPECT_EQ(parser.find_obj("Location")->get_data(), full_url);
+}
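The dump_large_item tests above (and their HTML/XML/table siblings) pin down formatting of values larger than any fixed-size stack buffer. A two-pass vsnprintf is the usual way printf-style dumpers avoid truncation; a generic sketch, not the actual Formatter internals:

  #include <cstdarg>
  #include <cstdio>
  #include <string>

  static std::string vformat(const char* fmt, ...) {
    va_list ap, ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    const int n = vsnprintf(nullptr, 0, fmt, ap);  // first pass: measure
    std::string out(n, '\0');
    vsnprintf(out.data(), n + 1, fmt, ap2);        // second pass: fill
    va_end(ap2);
    va_end(ap);
    return out;
  }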
diff --git a/src/test/common/test_tableformatter.cc b/src/test/common/test_tableformatter.cc
index b152014a2b5..90de133d315 100644
--- a/src/test/common/test_tableformatter.cc
+++ b/src/test/common/test_tableformatter.cc
@@ -250,6 +250,23 @@ TEST(tableformatter, multiline_keyval)
EXPECT_EQ(cmp, sout.str());
}
+TEST(tableformatter, dump_large_item) {
+ std::stringstream sout;
+ Formatter* formatter = Formatter::create("table-kv");
+
+ std::string base_url("http://example.com");
+ std::string bucket_name("bucket");
+ std::string object_key(1024, 'a');
+
+ std::string full_url = base_url + "/" + bucket_name + "/" + object_key;
+ formatter->dump_format("Location", "%s/%s/%s", base_url.c_str(), bucket_name.c_str(), object_key.c_str());
+ formatter->flush(sout);
+ delete formatter;
+
+ std::string cmp = "key::Location=\"" + full_url + "\" \n";
+ EXPECT_EQ(cmp, sout.str());
+}
+
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 &&
diff --git a/src/test/common/test_xmlformatter.cc b/src/test/common/test_xmlformatter.cc
index 9ac6dde456e..abbe9e4e25e 100644
--- a/src/test/common/test_xmlformatter.cc
+++ b/src/test/common/test_xmlformatter.cc
@@ -163,3 +163,25 @@ TEST(xmlformatter, pretty_lowercased_underscored)
"<string_item>String</string_item>\n\n";
EXPECT_EQ(cmp, sout.str());
}
+
+TEST(xmlformatter, dump_format_large_item)
+{
+ std::stringstream sout;
+ XMLFormatter formatter(
+ true, // pretty
+ false, // lowercased
+ false); // underscored
+
+ std::string base_url("http://example.com");
+ std::string bucket_name("bucket");
+ std::string object_key(1024, 'a');
+
+ formatter.dump_format("Location", "%s/%s/%s", base_url.c_str(), bucket_name.c_str(), object_key.c_str());
+
+ formatter.flush(sout);
+
+ std::string uri = base_url + "/" + bucket_name + "/" + object_key;
+ std::string expected_output = "<Location>" + uri + "</Location>\n\n";
+
+ EXPECT_EQ(expected_output, sout.str());
+}
\ No newline at end of file
diff --git a/src/test/fio/fio_librgw.cc b/src/test/fio/fio_librgw.cc
index bac4ff2daac..89c09647b61 100644
--- a/src/test/fio/fio_librgw.cc
+++ b/src/test/fio/fio_librgw.cc
@@ -300,8 +300,6 @@ namespace {
*/
static void fio_librgw_cleanup(struct thread_data *td)
{
- int r = 0;
-
dprint(FD_IO, "fio_librgw_cleanup\n");
/* cleanup specific data */
@@ -312,9 +310,9 @@ namespace {
data->release_handles();
if (data->bucket_fh) {
- r = rgw_fh_rele(data->fs, data->bucket_fh, 0 /* flags */);
+ rgw_fh_rele(data->fs, data->bucket_fh, 0 /* flags */);
}
- r = rgw_umount(data->fs, RGW_UMOUNT_FLAG_NONE);
+ rgw_umount(data->fs, RGW_UMOUNT_FLAG_NONE);
librgw_shutdown(data->rgw_h);
td->io_ops_data = nullptr;
delete data;
diff --git a/src/test/libcephfs/test.cc b/src/test/libcephfs/test.cc
index 6f10d2bbd4e..8760f594ae6 100644
--- a/src/test/libcephfs/test.cc
+++ b/src/test/libcephfs/test.cc
@@ -2706,6 +2706,19 @@ TEST(LibCephFS, Statxat) {
ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFDIR);
ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_2, &stx, 0, 0), 0);
ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
+ // test relative to root with empty relpath
+#if defined(__linux__) && defined(AT_EMPTY_PATH)
+ int dir_fd = ceph_openat(cmount, fd, dir_name, O_DIRECTORY | O_RDONLY, 0);
+ ASSERT_LE(0, dir_fd);
+ ASSERT_EQ(ceph_statxat(cmount, dir_fd, "", &stx, 0, AT_EMPTY_PATH), 0);
+ ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFDIR);
+ ASSERT_EQ(0, ceph_close(cmount, dir_fd));
+ int file_fd = ceph_openat(cmount, fd, rel_file_name_2, O_RDONLY, 0);
+ ASSERT_LE(0, file_fd);
+ ASSERT_EQ(ceph_statxat(cmount, file_fd, "", &stx, 0, AT_EMPTY_PATH), 0);
+ ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
+ ASSERT_EQ(0, ceph_close(cmount, file_fd));
+#endif
ASSERT_EQ(0, ceph_close(cmount, fd));
// test relative to dir
@@ -2713,6 +2726,14 @@ TEST(LibCephFS, Statxat) {
ASSERT_LE(0, fd);
ASSERT_EQ(ceph_statxat(cmount, fd, rel_file_name_1, &stx, 0, 0), 0);
ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
+ // test relative to dir with empty relpath
+#if defined(__linux__) && defined(AT_EMPTY_PATH)
+ int rel_file_fd = ceph_openat(cmount, fd, rel_file_name_1, O_RDONLY, 0);
+ ASSERT_LE(0, rel_file_fd);
+ ASSERT_EQ(ceph_statxat(cmount, rel_file_fd, "", &stx, 0, AT_EMPTY_PATH), 0);
+ ASSERT_EQ(stx.stx_mode & S_IFMT, S_IFREG);
+ ASSERT_EQ(0, ceph_close(cmount, rel_file_fd));
+#endif
// delete the dirtree, recreate and verify
ASSERT_EQ(0, ceph_unlink(cmount, file_path));
@@ -3265,6 +3286,13 @@ TEST(LibCephFS, Chownat) {
// change ownership to nobody -- we assume nobody exists and id is always 65534
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "0"), 0);
ASSERT_EQ(ceph_chownat(cmount, fd, rel_file_path, 65534, 65534, 0), 0);
+ // change relative fd ownership with AT_EMPTY_PATH
+#if defined(__linux__) && defined(AT_EMPTY_PATH)
+ int file_fd = ceph_openat(cmount, fd, rel_file_path, O_RDONLY, 0);
+ ASSERT_LE(0, file_fd);
+ ASSERT_EQ(ceph_chownat(cmount, file_fd, "", 65534, 65534, AT_EMPTY_PATH), 0);
+ ceph_close(cmount, file_fd);
+#endif
ASSERT_EQ(ceph_conf_set(cmount, "client_permissions", "1"), 0);
ceph_close(cmount, fd);
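The AT_EMPTY_PATH additions follow the Linux convention used by statx(2) and fchownat(2): with an empty relpath the call acts on the file the descriptor itself refers to. The same idiom against plain Linux, for comparison:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <sys/stat.h>

  int stat_fd_itself(int fd, struct statx *stx) {
    // an empty path plus AT_EMPTY_PATH stats the object behind fd directly
    return statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, stx);
  }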
diff --git a/src/test/librbd/test_internal.cc b/src/test/librbd/test_internal.cc
index 008fdcfa7be..8f6cbb9e807 100644
--- a/src/test/librbd/test_internal.cc
+++ b/src/test/librbd/test_internal.cc
@@ -1571,6 +1571,83 @@ TEST_F(TestInternal, FlattenNoEmptyObjects)
rados_ioctx_destroy(d_ioctx);
}
+TEST_F(TestInternal, FlattenInconsistentObjectMap)
+{
+ REQUIRE_FEATURE(RBD_FEATURE_LAYERING | RBD_FEATURE_OBJECT_MAP);
+ REQUIRE(!is_feature_enabled(RBD_FEATURE_STRIPINGV2));
+
+ librbd::ImageCtx* ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+ librbd::NoOpProgressContext no_op;
+ ASSERT_EQ(0, ictx->operations->resize((1 << ictx->order) * 5, true, no_op));
+
+ bufferlist bl;
+ bl.append(std::string(256, '1'));
+ for (int i = 1; i < 5; i++) {
+ ASSERT_EQ(256, api::Io<>::write(*ictx, (1 << ictx->order) * i, 256,
+ bufferlist{bl}, 0));
+ }
+
+ ASSERT_EQ(0, snap_create(*ictx, "snap"));
+ ASSERT_EQ(0, snap_protect(*ictx, "snap"));
+
+ uint64_t features;
+ ASSERT_EQ(0, librbd::get_features(ictx, &features));
+
+ std::string clone_name = get_temp_image_name();
+ int order = ictx->order;
+ ASSERT_EQ(0, librbd::clone(m_ioctx, m_image_name.c_str(), "snap", m_ioctx,
+ clone_name.c_str(), features, &order, 0, 0));
+
+ close_image(ictx);
+ ASSERT_EQ(0, open_image(clone_name, &ictx));
+
+ C_SaferCond lock_ctx;
+ {
+ std::shared_lock owner_locker{ictx->owner_lock};
+ ictx->exclusive_lock->try_acquire_lock(&lock_ctx);
+ }
+ ASSERT_EQ(0, lock_ctx.wait());
+ ASSERT_TRUE(ictx->exclusive_lock->is_lock_owner());
+
+ ceph::BitVector<2> inconsistent_object_map;
+ inconsistent_object_map.resize(5);
+ inconsistent_object_map[0] = OBJECT_NONEXISTENT;
+ inconsistent_object_map[1] = OBJECT_NONEXISTENT;
+ inconsistent_object_map[2] = OBJECT_EXISTS;
+ inconsistent_object_map[3] = OBJECT_EXISTS_CLEAN;
+ // OBJECT_PENDING shouldn't happen within parent overlap, but test
+ // anyway
+ inconsistent_object_map[4] = OBJECT_PENDING;
+
+ auto object_map = new librbd::ObjectMap<>(*ictx, CEPH_NOSNAP);
+ C_SaferCond save_ctx;
+ {
+ std::shared_lock owner_locker{ictx->owner_lock};
+ std::unique_lock image_locker{ictx->image_lock};
+ object_map->set_object_map(inconsistent_object_map);
+ object_map->aio_save(&save_ctx);
+ }
+ ASSERT_EQ(0, save_ctx.wait());
+ object_map->put();
+
+ close_image(ictx);
+ ASSERT_EQ(0, open_image(clone_name, &ictx));
+ ASSERT_EQ(0, ictx->operations->flatten(no_op));
+
+ bufferptr read_ptr(256);
+ bufferlist read_bl;
+ read_bl.push_back(read_ptr);
+
+ librbd::io::ReadResult read_result{&read_bl};
+ for (int i = 1; i < 5; i++) {
+ ASSERT_EQ(256, api::Io<>::read(*ictx, (1 << ictx->order) * i, 256,
+ librbd::io::ReadResult{read_result}, 0));
+ EXPECT_TRUE(bl.contents_equal(read_bl));
+ }
+}
+
TEST_F(TestInternal, PoolMetadataConfApply) {
REQUIRE_FORMAT_V2();
diff --git a/src/test/rgw/bucket_notification/api.py b/src/test/rgw/bucket_notification/api.py
index e7ec31f1711..e84aa16edc7 100644
--- a/src/test/rgw/bucket_notification/api.py
+++ b/src/test/rgw/bucket_notification/api.py
@@ -247,12 +247,16 @@ def delete_all_topics(conn, tenant, cluster):
if tenant == '':
topics_result = admin(['topic', 'list'], cluster)
topics_json = json.loads(topics_result[0])
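+ # fall back to listings wrapped in a 'result' object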
+ if 'topics' not in topics_json:
+ topics_json = topics_json.get('result', {})
for topic in topics_json['topics']:
rm_result = admin(['topic', 'rm', '--topic', topic['name']], cluster)
print(rm_result)
else:
topics_result = admin(['topic', 'list', '--tenant', tenant], cluster)
topics_json = json.loads(topics_result[0])
+ if 'topics' not in topics_json:
+ topics_json = topics_json.get('result', {})
for topic in topics_json['topics']:
rm_result = admin(['topic', 'rm', '--tenant', tenant, '--topic', topic['name']], cluster)
print(rm_result)
diff --git a/src/test/rgw/bucket_notification/test_bn.py b/src/test/rgw/bucket_notification/test_bn.py
index 359990b3531..90ee33617fe 100644
--- a/src/test/rgw/bucket_notification/test_bn.py
+++ b/src/test/rgw/bucket_notification/test_bn.py
@@ -4359,6 +4359,242 @@ def test_ps_s3_multiple_topics_notification():
http_server.close()
+@attr('data_path_v2_test')
+def test_ps_s3_list_topics_migration():
+ """ test list topics on migration"""
+ if get_config_cluster() == 'noname':
+ return SkipTest('realm is needed for migration test')
+
+ # Initialize connections and configurations
+ conn1 = connection()
+ tenant = 'kaboom1'
+ conn2 = connect_random_user(tenant)
+ bucket_name = gen_bucket_name()
+ topics = [f"{bucket_name}{TOPIC_SUFFIX}{i}" for i in range(1, 7)]
+ tenant_topics = [f"{tenant}_{topic}" for topic in topics]
+
+ # Define topic names with version
+ topic_versions = {
+ "topic1_v2": f"{topics[0]}_v2",
+ "topic2_v2": f"{topics[1]}_v2",
+ "topic3_v1": f"{topics[2]}_v1",
+ "topic4_v1": f"{topics[3]}_v1",
+ "topic5_v1": f"{topics[4]}_v1",
+ "topic6_v1": f"{topics[5]}_v1",
+ "tenant_topic1_v2": f"{tenant_topics[0]}_v2",
+ "tenant_topic2_v1": f"{tenant_topics[1]}_v1",
+ "tenant_topic3_v1": f"{tenant_topics[2]}_v1"
+ }
+
+ # Get necessary configurations
+ host = get_ip()
+ http_port = random.randint(10000, 20000)
+ endpoint_address = 'http://' + host + ':' + str(http_port)
+ endpoint_args = 'push-endpoint=' + endpoint_address + '&persistent=true'
+ zonegroup = get_config_zonegroup()
+ conf_cluster = get_config_cluster()
+
+ # Make sure there are no leftover topics on v2
+ zonegroup_modify_feature(enable=True, feature_name=zonegroup_feature_notification_v2)
+ delete_all_topics(conn1, '', conf_cluster)
+ delete_all_topics(conn2, tenant, conf_cluster)
+
+ # Start v1 notification
+ # Make sure there are no leftover topics on v1
+ zonegroup_modify_feature(enable=False, feature_name=zonegroup_feature_notification_v2)
+ delete_all_topics(conn1, '', conf_cluster)
+ delete_all_topics(conn2, tenant, conf_cluster)
+
+ # Create s3 - v1 topics
+ topic_conf = PSTopicS3(conn1, topic_versions['topic3_v1'], zonegroup, endpoint_args=endpoint_args)
+ topic_arn3 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_versions['topic4_v1'], zonegroup, endpoint_args=endpoint_args)
+ topic_arn4 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_versions['topic5_v1'], zonegroup, endpoint_args=endpoint_args)
+ topic_arn5 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_versions['topic6_v1'], zonegroup, endpoint_args=endpoint_args)
+ topic_arn6 = topic_conf.set_config()
+ tenant_topic_conf = PSTopicS3(conn2, topic_versions['tenant_topic2_v1'], zonegroup, endpoint_args=endpoint_args)
+ tenant_topic_arn2 = tenant_topic_conf.set_config()
+ tenant_topic_conf = PSTopicS3(conn2, topic_versions['tenant_topic3_v1'], zonegroup, endpoint_args=endpoint_args)
+ tenant_topic_arn3 = tenant_topic_conf.set_config()
+
+ # Start v2 notification
+ zonegroup_modify_feature(enable=True, feature_name=zonegroup_feature_notification_v2)
+
+ # Create s3 - v2 topics
+ topic_conf = PSTopicS3(conn1, topic_versions['topic1_v2'], zonegroup, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_versions['topic2_v2'], zonegroup, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf.set_config()
+ tenant_topic_conf = PSTopicS3(conn2, topic_versions['tenant_topic1_v2'], zonegroup, endpoint_args=endpoint_args)
+ tenant_topic_arn1 = tenant_topic_conf.set_config()
+
+ # Verify topics list
+ try:
+ # Verify topics for the user without a tenant
+ res, status = topic_conf.get_list()
+ assert_equal(status // 100, 2)
+ listTopicsResponse = res.get('ListTopicsResponse', {})
+ listTopicsResult = listTopicsResponse.get('ListTopicsResult', {})
+ topics = listTopicsResult.get('Topics', {})
+ member = topics['member'] if topics else []
+ assert_equal(len(member), 6)
+
+ # Verify tenant topics
+ res, status = tenant_topic_conf.get_list()
+ assert_equal(status // 100, 2)
+ listTopicsResponse = res.get('ListTopicsResponse', {})
+ listTopicsResult = listTopicsResponse.get('ListTopicsResult', {})
+ topics = listTopicsResult.get('Topics', {})
+ member = topics['member'] if topics else []
+ assert_equal(len(member), 3)
+ finally:
+ # Cleanup created topics
+ topic_conf.del_config(topic_arn1)
+ topic_conf.del_config(topic_arn2)
+ topic_conf.del_config(topic_arn3)
+ topic_conf.del_config(topic_arn4)
+ topic_conf.del_config(topic_arn5)
+ topic_conf.del_config(topic_arn6)
+ tenant_topic_conf.del_config(tenant_topic_arn1)
+ tenant_topic_conf.del_config(tenant_topic_arn2)
+ tenant_topic_conf.del_config(tenant_topic_arn3)
+
+
+@attr('basic_test')
+def test_ps_s3_list_topics():
+ """ test list topics"""
+
+ # Initialize connections, topic names and configurations
+ conn1 = connection()
+ tenant = 'kaboom1'
+ conn2 = connect_random_user(tenant)
+ bucket_name = gen_bucket_name()
+ topic_name1 = bucket_name + TOPIC_SUFFIX + '1'
+ topic_name2 = bucket_name + TOPIC_SUFFIX + '2'
+ topic_name3 = bucket_name + TOPIC_SUFFIX + '3'
+ tenant_topic_name1 = tenant + "_" + topic_name1
+ tenant_topic_name2 = tenant + "_" + topic_name2
+ host = get_ip()
+ http_port = random.randint(10000, 20000)
+ endpoint_address = 'http://' + host + ':' + str(http_port)
+ endpoint_args = 'push-endpoint=' + endpoint_address + '&persistent=true'
+ zonegroup = get_config_zonegroup()
+
+ # Make sure there are no leftover topics
+ delete_all_topics(conn1, '', get_config_cluster())
+ delete_all_topics(conn2, tenant, get_config_cluster())
+
+ # Create s3 - v2 topics
+ topic_conf = PSTopicS3(conn1, topic_name1, zonegroup, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_name2, zonegroup, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_name3, zonegroup, endpoint_args=endpoint_args)
+ topic_arn3 = topic_conf.set_config()
+ tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name1, zonegroup, endpoint_args=endpoint_args)
+ tenant_topic_arn1 = tenant_topic_conf.set_config()
+ tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name2, zonegroup, endpoint_args=endpoint_args)
+ tenant_topic_arn2 = tenant_topic_conf.set_config()
+
+ # Verify topics list
+ try:
+ # Verify topics for the user without a tenant
+ res, status = topic_conf.get_list()
+ assert_equal(status // 100, 2)
+ listTopicsResponse = res.get('ListTopicsResponse', {})
+ listTopicsResult = listTopicsResponse.get('ListTopicsResult', {})
+ topics = listTopicsResult.get('Topics', {})
+ member = topics['member'] if topics else [] # version 2
+ assert_equal(len(member), 3)
+
+ # Verify topics for tenant
+ res, status = tenant_topic_conf.get_list()
+ assert_equal(status // 100, 2)
+ listTopicsResponse = res.get('ListTopicsResponse', {})
+ listTopicsResult = listTopicsResponse.get('ListTopicsResult', {})
+ topics = listTopicsResult.get('Topics', {})
+ member = topics['member'] if topics else []
+ assert_equal(len(member), 2)
+ finally:
+ # Cleanup created topics
+ topic_conf.del_config(topic_arn1)
+ topic_conf.del_config(topic_arn2)
+ topic_conf.del_config(topic_arn3)
+ tenant_topic_conf.del_config(tenant_topic_arn1)
+ tenant_topic_conf.del_config(tenant_topic_arn2)
+
+@attr('data_path_v2_test')
+def test_ps_s3_list_topics_v1():
+ """ test list topics on v1"""
+ if get_config_cluster() == 'noname':
+ return SkipTest('realm is needed')
+
+ # Initialize connections and configurations
+ conn1 = connection()
+ tenant = 'kaboom1'
+ conn2 = connect_random_user(tenant)
+ bucket_name = gen_bucket_name()
+ topic_name1 = bucket_name + TOPIC_SUFFIX + '1'
+ topic_name2 = bucket_name + TOPIC_SUFFIX + '2'
+ topic_name3 = bucket_name + TOPIC_SUFFIX + '3'
+ tenant_topic_name1 = tenant + "_" + topic_name1
+ tenant_topic_name2 = tenant + "_" + topic_name2
+ host = get_ip()
+ http_port = random.randint(10000, 20000)
+ endpoint_address = 'http://' + host + ':' + str(http_port)
+ endpoint_args = 'push-endpoint=' + endpoint_address + '&persistent=true'
+ zonegroup = get_config_zonegroup()
+ conf_cluster = get_config_cluster()
+
+ # Make sure there are no leftover topics
+ delete_all_topics(conn1, '', conf_cluster)
+ delete_all_topics(conn2, tenant, conf_cluster)
+
+ # Make sure that we disable v2
+ zonegroup_modify_feature(enable=False, feature_name=zonegroup_feature_notification_v2)
+
+ # Create s3 - v1 topics
+ topic_conf = PSTopicS3(conn1, topic_name1, zonegroup, endpoint_args=endpoint_args)
+ topic_arn1 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_name2, zonegroup, endpoint_args=endpoint_args)
+ topic_arn2 = topic_conf.set_config()
+ topic_conf = PSTopicS3(conn1, topic_name3, zonegroup, endpoint_args=endpoint_args)
+ topic_arn3 = topic_conf.set_config()
+ tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name1, zonegroup, endpoint_args=endpoint_args)
+ tenant_topic_arn1 = tenant_topic_conf.set_config()
+ tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name2, zonegroup, endpoint_args=endpoint_args)
+ tenant_topic_arn2 = tenant_topic_conf.set_config()
+
+ # Verify topics list
+ try:
+ # Verify topics for the user without a tenant
+ res, status = topic_conf.get_list()
+ assert_equal(status // 100, 2)
+ listTopicsResponse = res.get('ListTopicsResponse', {})
+ listTopicsResult = listTopicsResponse.get('ListTopicsResult', {})
+ topics = listTopicsResult.get('Topics', {})
+ member = topics['member'] if topics else []
+ assert_equal(len(member), 3)
+
+ # Verify tenant topics
+ res, status = tenant_topic_conf.get_list()
+ assert_equal(status // 100, 2)
+ listTopicsResponse = res.get('ListTopicsResponse', {})
+ listTopicsResult = listTopicsResponse.get('ListTopicsResult', {})
+ topics = listTopicsResult.get('Topics', {})
+ member = topics['member'] if topics else []
+ assert_equal(len(member), 2)
+ finally:
+ # Cleanup created topics
+ topic_conf.del_config(topic_arn1)
+ topic_conf.del_config(topic_arn2)
+ topic_conf.del_config(topic_arn3)
+ tenant_topic_conf.del_config(tenant_topic_arn1)
+ tenant_topic_conf.del_config(tenant_topic_arn2)
+
+
@attr('basic_test')
def test_ps_s3_topic_permissions():
""" test s3 topic set/get/delete permissions """
diff --git a/src/tools/monmaptool.cc b/src/tools/monmaptool.cc
index f1b86e00362..dc882a006a2 100644
--- a/src/tools/monmaptool.cc
+++ b/src/tools/monmaptool.cc
@@ -375,6 +375,10 @@ int main(int argc, const char **argv)
return r;
}
+ if (handle_features(features, monmap)) {
+ modified = true;
+ }
+
if (min_mon_release != ceph_release_t::unknown) {
monmap.min_mon_release = min_mon_release;
cout << "setting min_mon_release = " << min_mon_release << std::endl;
@@ -459,10 +463,6 @@ int main(int argc, const char **argv)
monmap.remove(p);
}
- if (handle_features(features, monmap)) {
- modified = true;
- }
-
if (!print && !modified && !show_features) {
cerr << "no action specified" << std::endl;
helpful_exit();
diff --git a/src/tools/rbd/Utils.cc b/src/tools/rbd/Utils.cc
index 95c8725aa33..b20dca05bc6 100644
--- a/src/tools/rbd/Utils.cc
+++ b/src/tools/rbd/Utils.cc
@@ -337,11 +337,14 @@ int get_pool_image_snapshot_names(const po::variables_map &vm,
SpecValidation spec_validation) {
std::string pool_key = (mod == at::ARGUMENT_MODIFIER_DEST ?
at::DEST_POOL_NAME : at::POOL_NAME);
+ std::string namespace_key = (mod == at::ARGUMENT_MODIFIER_DEST ?
+ at::DEST_NAMESPACE_NAME : at::NAMESPACE_NAME);
std::string image_key = (mod == at::ARGUMENT_MODIFIER_DEST ?
at::DEST_IMAGE_NAME : at::IMAGE_NAME);
+
return get_pool_generic_snapshot_names(vm, mod, spec_arg_index, pool_key,
- pool_name, namespace_name, image_key,
- "image", image_name, snap_name,
+ pool_name, namespace_key, namespace_name,
+ image_key, "image", image_name, snap_name,
image_name_required, snapshot_presence,
spec_validation);
}
@@ -351,6 +354,7 @@ int get_pool_generic_snapshot_names(const po::variables_map &vm,
size_t *spec_arg_index,
const std::string& pool_key,
std::string *pool_name,
+ const std::string& namespace_key,
std::string *namespace_name,
const std::string& generic_key,
const std::string& generic_key_desc,
@@ -359,8 +363,6 @@ int get_pool_generic_snapshot_names(const po::variables_map &vm,
bool generic_name_required,
SnapshotPresence snapshot_presence,
SpecValidation spec_validation) {
- std::string namespace_key = (mod == at::ARGUMENT_MODIFIER_DEST ?
- at::DEST_NAMESPACE_NAME : at::NAMESPACE_NAME);
std::string snap_key = (mod == at::ARGUMENT_MODIFIER_DEST ?
at::DEST_SNAPSHOT_NAME : at::SNAPSHOT_NAME);
diff --git a/src/tools/rbd/Utils.h b/src/tools/rbd/Utils.h
index 5076fd7fe9c..6aa0f2fdbdf 100644
--- a/src/tools/rbd/Utils.h
+++ b/src/tools/rbd/Utils.h
@@ -163,10 +163,11 @@ int get_pool_generic_snapshot_names(
const boost::program_options::variables_map &vm,
argument_types::ArgumentModifier mod, size_t *spec_arg_index,
const std::string& pool_key, std::string *pool_name,
- std::string *namespace_name, const std::string& generic_key,
- const std::string& generic_key_desc, std::string *generic_name,
- std::string *snap_name, bool generic_name_required,
- SnapshotPresence snapshot_presence, SpecValidation spec_validation);
+ const std::string& namespace_key, std::string *namespace_name,
+ const std::string& generic_key, const std::string& generic_key_desc,
+ std::string *generic_name, std::string *snap_name,
+ bool generic_name_required, SnapshotPresence snapshot_presence,
+ SpecValidation spec_validation);
int get_pool_image_id(const boost::program_options::variables_map &vm,
size_t *spec_arg_index,
diff --git a/src/tools/rbd/action/Group.cc b/src/tools/rbd/action/Group.cc
index d97e120d438..100bdc19496 100644
--- a/src/tools/rbd/action/Group.cc
+++ b/src/tools/rbd/action/Group.cc
@@ -28,6 +28,9 @@ static const std::string DEST_GROUP_NAME("dest-group");
static const std::string GROUP_POOL_NAME("group-" + at::POOL_NAME);
static const std::string IMAGE_POOL_NAME("image-" + at::POOL_NAME);
+static const std::string GROUP_NAMESPACE_NAME("group-" + at::NAMESPACE_NAME);
+static const std::string IMAGE_NAMESPACE_NAME("image-" + at::NAMESPACE_NAME);
+
void add_group_option(po::options_description *opt,
at::ArgumentModifier modifier) {
std::string name = GROUP_NAME;
@@ -107,8 +110,8 @@ int execute_create(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, nullptr, true,
- utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -187,8 +190,8 @@ int execute_remove(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, nullptr, true,
- utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -221,8 +224,8 @@ int execute_rename(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, nullptr, true,
- utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -233,9 +236,9 @@ int execute_rename(const po::variables_map &vm,
r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_DEST, &arg_index, at::DEST_POOL_NAME,
- &dest_pool_name, &dest_namespace_name, DEST_GROUP_NAME, "group",
- &dest_group_name, nullptr, true, utils::SNAPSHOT_PRESENCE_NONE,
- utils::SPEC_VALIDATION_FULL);
+ &dest_pool_name, at::DEST_NAMESPACE_NAME, &dest_namespace_name,
+ DEST_GROUP_NAME, "group", &dest_group_name, nullptr, true,
+ utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -283,8 +286,8 @@ int execute_info(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, nullptr, true,
- utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -335,8 +338,9 @@ int execute_add(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, GROUP_POOL_NAME,
- &group_pool_name, &group_namespace_name, GROUP_NAME, "group", &group_name,
- nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ &group_pool_name, GROUP_NAMESPACE_NAME, &group_namespace_name,
+ GROUP_NAME, "group", &group_name, nullptr, true,
+ utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -347,9 +351,9 @@ int execute_add(const po::variables_map &vm,
r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, IMAGE_POOL_NAME,
- &image_pool_name, &image_namespace_name, at::IMAGE_NAME, "image",
- &image_name, nullptr, true, utils::SNAPSHOT_PRESENCE_NONE,
- utils::SPEC_VALIDATION_FULL);
+ &image_pool_name, IMAGE_NAMESPACE_NAME, &image_namespace_name,
+ at::IMAGE_NAME, "image", &image_name, nullptr, true,
+ utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -393,8 +397,9 @@ int execute_remove_image(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, GROUP_POOL_NAME,
- &group_pool_name, &group_namespace_name, GROUP_NAME, "group", &group_name,
- nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ &group_pool_name, GROUP_NAMESPACE_NAME, &group_namespace_name,
+ GROUP_NAME, "group", &group_name, nullptr, true,
+ utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -410,9 +415,9 @@ int execute_remove_image(const po::variables_map &vm,
r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, IMAGE_POOL_NAME,
- &image_pool_name, &image_namespace_name, at::IMAGE_NAME, "image",
- &image_name, nullptr, image_id.empty(), utils::SNAPSHOT_PRESENCE_NONE,
- utils::SPEC_VALIDATION_FULL);
+ &image_pool_name, IMAGE_NAMESPACE_NAME, &image_namespace_name,
+ at::IMAGE_NAME, "image", &image_name, nullptr, image_id.empty(),
+ utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -464,8 +469,8 @@ int execute_list_images(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, nullptr, true,
- utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -563,8 +568,9 @@ int execute_group_snap_create(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, &snap_name, true,
- utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ &snap_name, true, utils::SNAPSHOT_PRESENCE_REQUIRED,
+ utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -604,8 +610,9 @@ int execute_group_snap_remove(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, &snap_name, true,
- utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ &snap_name, true, utils::SNAPSHOT_PRESENCE_REQUIRED,
+ utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -640,8 +647,9 @@ int execute_group_snap_rename(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, &source_snap_name, true,
- utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ &source_snap_name, true, utils::SNAPSHOT_PRESENCE_REQUIRED,
+ utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -696,8 +704,8 @@ int execute_group_snap_list(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, nullptr, true,
- utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ nullptr, true, utils::SNAPSHOT_PRESENCE_NONE, utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -764,8 +772,9 @@ int execute_group_snap_info(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, &group_snap_name, true,
- utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ &group_snap_name, true, utils::SNAPSHOT_PRESENCE_REQUIRED,
+ utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -872,8 +881,9 @@ int execute_group_snap_rollback(const po::variables_map &vm,
int r = utils::get_pool_generic_snapshot_names(
vm, at::ARGUMENT_MODIFIER_NONE, &arg_index, at::POOL_NAME, &pool_name,
- &namespace_name, GROUP_NAME, "group", &group_name, &snap_name, true,
- utils::SNAPSHOT_PRESENCE_REQUIRED, utils::SPEC_VALIDATION_FULL);
+ at::NAMESPACE_NAME, &namespace_name, GROUP_NAME, "group", &group_name,
+ &snap_name, true, utils::SNAPSHOT_PRESENCE_REQUIRED,
+ utils::SPEC_VALIDATION_FULL);
if (r < 0) {
return r;
}
@@ -954,9 +964,6 @@ void get_add_arguments(po::options_description *positional,
add_prefixed_pool_option(options, "image");
add_prefixed_namespace_option(options, "image");
at::add_image_option(options, at::ARGUMENT_MODIFIER_NONE);
-
- at::add_pool_option(options, at::ARGUMENT_MODIFIER_NONE,
- " unless overridden");
}
void get_remove_image_arguments(po::options_description *positional,
@@ -979,8 +986,6 @@ void get_remove_image_arguments(po::options_description *positional,
add_prefixed_namespace_option(options, "image");
at::add_image_option(options, at::ARGUMENT_MODIFIER_NONE);
- at::add_pool_option(options, at::ARGUMENT_MODIFIER_NONE,
- " unless overridden");
at::add_image_id_option(options);
}
diff --git a/src/tools/rbd/action/MirrorPool.cc b/src/tools/rbd/action/MirrorPool.cc
index 58e2d4dc329..6a546c3f73a 100644
--- a/src/tools/rbd/action/MirrorPool.cc
+++ b/src/tools/rbd/action/MirrorPool.cc
@@ -355,6 +355,10 @@ protected:
virtual ~ImageRequestBase() {
}
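+ // hook for subclasses that can work with a read-only image handle; when it
+ // returns true, the open path uses aio_open_read_only() instead of aio_open()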
+ virtual bool open_read_only() const {
+ return false;
+ }
+
virtual bool skip_get_info() const {
return false;
}
@@ -429,8 +433,13 @@ private:
librbd::RBD rbd;
auto aio_completion = utils::create_aio_completion<
ImageRequestBase, &ImageRequestBase::handle_open_image>(this);
- rbd.aio_open(m_io_ctx, m_image, m_image_name.c_str(), nullptr,
- aio_completion);
+ if (open_read_only()) {
+ rbd.aio_open_read_only(m_io_ctx, m_image, m_image_name.c_str(), nullptr,
+ aio_completion);
+ } else {
+ rbd.aio_open(m_io_ctx, m_image, m_image_name.c_str(), nullptr,
+ aio_completion);
+ }
}
void handle_open_image(int r) {
@@ -604,6 +613,10 @@ public:
}
protected:
+ bool open_read_only() const override {
+ return true;
+ }
+
bool skip_get_info() const override {
return true;
}